From 7cafa7c598988b721015e776fab00160e7b449f7 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Wed, 25 Sep 2024 08:52:53 +0200
Subject: [PATCH 001/836] chore: move PodStatus to API package (#5594)

The `PodStatus` is now part of the API package, as it is referred to
by the API itself.

See: #3762

Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
 api/v1/base_funcs.go                          |  74 ++++++++
 api/v1/base_funcs_test.go                     | 163 ++++++++++++++++++
 api/v1/base_types.go                          |  59 ++++---
 api/v1/base_types_test.go                     |  58 -------
 api/v1/cluster_types.go                       |  18 +-
 api/v1/zz_generated.deepcopy.go               |   3 +-
 internal/controller/replicas.go               |   2 +-
 pkg/postgres/replication/explicit_test.go     |  17 +-
 pkg/postgres/replication/legacy.go            |   5 +-
 pkg/postgres/replication/legacy_test.go       |  15 +-
 pkg/postgres/replication/suite_test.go        |   7 +-
 .../persistentvolumeclaim/status.go           |   2 +-
 pkg/utils/pod_conditions.go                   |  35 ----
 pkg/utils/pod_conditions_test.go              | 102 ---------
 14 files changed, 290 insertions(+), 270 deletions(-)
 create mode 100644 api/v1/base_funcs.go
 create mode 100644 api/v1/base_funcs_test.go
 delete mode 100644 api/v1/base_types_test.go

diff --git a/api/v1/base_funcs.go b/api/v1/base_funcs.go
new file mode 100644
index 0000000000..e0fdf25203
--- /dev/null
+++ b/api/v1/base_funcs.go
@@ -0,0 +1,74 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// SecretKeySelectorToCore transforms a SecretKeySelector structure to the +// analogue one in the corev1 namespace +func SecretKeySelectorToCore(selector *SecretKeySelector) *corev1.SecretKeySelector { + if selector == nil { + return nil + } + + return &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: selector.LocalObjectReference.Name, + }, + Key: selector.Key, + } +} + +// ConfigMapKeySelectorToCore transforms a ConfigMapKeySelector structure to the analogue +// one in the corev1 namespace +func ConfigMapKeySelectorToCore(selector *ConfigMapKeySelector) *corev1.ConfigMapKeySelector { + if selector == nil { + return nil + } + + return &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: selector.Name, + }, + Key: selector.Key, + } +} + +// ListStatusPods return a list of active Pods +func ListStatusPods(podList []corev1.Pod) map[PodStatus][]string { + podsNames := make(map[PodStatus][]string) + + for _, pod := range podList { + if !pod.DeletionTimestamp.IsZero() { + continue + } + switch { + case utils.IsPodReady(pod): + podsNames[PodHealthy] = append(podsNames[PodHealthy], pod.Name) + case utils.IsPodActive(pod): + podsNames[PodReplicating] = append(podsNames[PodReplicating], pod.Name) + default: + podsNames[PodFailed] = append(podsNames[PodFailed], pod.Name) + } + } + + return podsNames +} diff --git a/api/v1/base_funcs_test.go b/api/v1/base_funcs_test.go new file mode 100644 index 0000000000..a6fa574401 --- /dev/null +++ b/api/v1/base_funcs_test.go @@ -0,0 +1,163 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Base type mappings for secrets", func() { + It("correctly map nil values", func() { + Expect(SecretKeySelectorToCore(nil)).To(BeNil()) + }) + + It("correctly map non-nil values", func() { + selector := SecretKeySelector{ + LocalObjectReference: LocalObjectReference{ + Name: "thisName", + }, + Key: "thisKey", + } + + Expect(selector.Name).To(Equal("thisName")) + Expect(selector.Key).To(Equal("thisKey")) + }) +}) + +var _ = Describe("Base type mappings for configmaps", func() { + It("correctly map nil values", func() { + Expect(ConfigMapKeySelectorToCore(nil)).To(BeNil()) + }) + + It("correctly map non-nil values", func() { + selector := ConfigMapKeySelector{ + LocalObjectReference: LocalObjectReference{ + Name: "thisName", + }, + Key: "thisKey", + } + + Expect(selector.Name).To(Equal("thisName")) + Expect(selector.Key).To(Equal("thisKey")) + }) +}) + +var _ = Describe("Properly builds ListStatusPods", func() { + healthyPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "healthyPod", + }, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + activePod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "activePod", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionFalse, + }, + }, + }, + } + failedPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failedPod", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionFalse, + }, + }, + }, + } + + now := metav1.Now() + terminatingPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "terminatingPod", + DeletionTimestamp: &now, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + }, + }, + } + + It("Detects healthy pods", func() { + podList := []corev1.Pod{healthyPod, healthyPod} + expectedStatus := map[PodStatus][]string{ + PodHealthy: {"healthyPod", "healthyPod"}, + } + podStatus := ListStatusPods(podList) + Expect(podStatus).To(BeEquivalentTo(expectedStatus)) + }) + + It("Detects active pods", func() { + podList := []corev1.Pod{healthyPod, activePod} + expectedStatus := map[PodStatus][]string{ + PodHealthy: {"healthyPod"}, + PodReplicating: {"activePod"}, + } + podStatus := ListStatusPods(podList) + Expect(podStatus).To(BeEquivalentTo(expectedStatus)) + }) + + It("Detects failed pods", func() { + podList := []corev1.Pod{healthyPod, activePod, failedPod} + expectedStatus := map[PodStatus][]string{ + PodHealthy: {"healthyPod"}, + PodReplicating: {"activePod"}, + PodFailed: {"failedPod"}, + } + podStatus := ListStatusPods(podList) + Expect(podStatus).To(BeEquivalentTo(expectedStatus)) + }) + + It("Excludes terminating pods", func() { + podList := []corev1.Pod{healthyPod, activePod, failedPod, terminatingPod} + expectedStatus := map[PodStatus][]string{ + PodHealthy: {"healthyPod"}, + PodReplicating: {"activePod"}, + PodFailed: {"failedPod"}, + } + podStatus := ListStatusPods(podList) + Expect(podStatus).To(BeEquivalentTo(expectedStatus)) + }) +}) diff --git a/api/v1/base_types.go b/api/v1/base_types.go index 325845146b..6ae61a7ec2 100644 --- a/api/v1/base_types.go +++ b/api/v1/base_types.go @@ -17,35 +17,34 @@ limitations under the License. 
package v1 import ( - corev1 "k8s.io/api/core/v1" + machineryapi "github.com/cloudnative-pg/machinery/pkg/api" ) -// SecretKeySelectorToCore transforms a SecretKeySelector structure to the -// analogue one in the corev1 namespace -func SecretKeySelectorToCore(selector *SecretKeySelector) *corev1.SecretKeySelector { - if selector == nil { - return nil - } - - return &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: selector.LocalObjectReference.Name, - }, - Key: selector.Key, - } -} - -// ConfigMapKeySelectorToCore transforms a ConfigMapKeySelector structure to the analogue -// one in the corev1 namespace -func ConfigMapKeySelectorToCore(selector *ConfigMapKeySelector) *corev1.ConfigMapKeySelector { - if selector == nil { - return nil - } - - return &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: selector.Name, - }, - Key: selector.Key, - } -} +// PodStatus represent the possible status of pods +type PodStatus string + +const ( + // PodHealthy means that a Pod is active and ready + PodHealthy = "healthy" + + // PodReplicating means that a Pod is still not ready but still active + PodReplicating = "replicating" + + // PodFailed means that a Pod will not be scheduled again (deleted or evicted) + PodFailed = "failed" +) + +// LocalObjectReference contains enough information to let you locate a +// local object with a known type inside the same namespace +// +kubebuilder:object:generate:=false +type LocalObjectReference = machineryapi.LocalObjectReference + +// SecretKeySelector contains enough information to let you locate +// the key of a Secret +// +kubebuilder:object:generate:=false +type SecretKeySelector = machineryapi.SecretKeySelector + +// ConfigMapKeySelector contains enough information to let you locate +// the key of a ConfigMap +// +kubebuilder:object:generate:=false +type ConfigMapKeySelector = machineryapi.ConfigMapKeySelector diff --git a/api/v1/base_types_test.go b/api/v1/base_types_test.go deleted file mode 100644 index 706f6c7c1f..0000000000 --- a/api/v1/base_types_test.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Base type mappings for secrets", func() { - It("correctly map nil values", func() { - Expect(SecretKeySelectorToCore(nil)).To(BeNil()) - }) - - It("correctly map non-nil values", func() { - selector := SecretKeySelector{ - LocalObjectReference: LocalObjectReference{ - Name: "thisName", - }, - Key: "thisKey", - } - - Expect(selector.Name).To(Equal("thisName")) - Expect(selector.Key).To(Equal("thisKey")) - }) -}) - -var _ = Describe("Base type mappings for configmaps", func() { - It("correctly map nil values", func() { - Expect(ConfigMapKeySelectorToCore(nil)).To(BeNil()) - }) - - It("correctly map non-nil values", func() { - selector := ConfigMapKeySelector{ - LocalObjectReference: LocalObjectReference{ - Name: "thisName", - }, - Key: "thisKey", - } - - Expect(selector.Name).To(Equal("thisName")) - Expect(selector.Key).To(Equal("thisKey")) - }) -}) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 6f1d702621..9c6027ad57 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -25,7 +25,6 @@ import ( "strings" "time" - machineryapi "github.com/cloudnative-pg/machinery/pkg/api" "github.com/cloudnative-pg/machinery/pkg/log" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" @@ -41,21 +40,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) -// LocalObjectReference contains enough information to let you locate a -// local object with a known type inside the same namespace -// +kubebuilder:object:generate:=false -type LocalObjectReference = machineryapi.LocalObjectReference - -// SecretKeySelector contains enough information to let you locate -// the key of a Secret -// +kubebuilder:object:generate:=false -type SecretKeySelector = machineryapi.SecretKeySelector - -// ConfigMapKeySelector contains enough information to let you locate -// the key of a ConfigMap -// +kubebuilder:object:generate:=false -type ConfigMapKeySelector = machineryapi.ConfigMapKeySelector - const ( // PrimaryPodDisruptionBudgetSuffix is the suffix appended to the cluster name // to get the name of the PDB used for the cluster primary @@ -798,7 +782,7 @@ type ClusterStatus struct { // InstancesStatus indicates in which status the instances are // +optional - InstancesStatus map[utils.PodStatus][]string `json:"instancesStatus,omitempty"` + InstancesStatus map[PodStatus][]string `json:"instancesStatus,omitempty"` // The reported state of the instances during the last reconciliation loop // +optional diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 8b0a114f63..14c71da945 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -22,7 +22,6 @@ package v1 import ( pkgapi "github.com/cloudnative-pg/barman-cloud/pkg/api" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/machinery/pkg/api" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" @@ -854,7 +853,7 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = *in if in.InstancesStatus != nil { in, out := &in.InstancesStatus, &out.InstancesStatus - *out = make(map[utils.PodStatus][]string, len(*in)) + *out = make(map[PodStatus][]string, len(*in)) for key, val := range *in { var outVal []string if val == nil { diff --git a/internal/controller/replicas.go b/internal/controller/replicas.go index 399452d797..7e5a28bac8 100644 --- 
a/internal/controller/replicas.go +++ b/internal/controller/replicas.go @@ -186,7 +186,7 @@ func (r *ClusterReconciler) setPrimaryOnSchedulableNode( contextLogger := log.FromContext(ctx) // Checking failed pods, e.g. pending pods due to missing PVCs - _, hasFailedPods := cluster.Status.InstancesStatus[utils.PodFailed] + _, hasFailedPods := cluster.Status.InstancesStatus[apiv1.PodFailed] // Checking whether there are pods on other nodes podsOnOtherNodes := GetPodsNotOnPrimaryNode(status, primaryPod) diff --git a/pkg/postgres/replication/explicit_test.go b/pkg/postgres/replication/explicit_test.go index 2a3e13f9d5..80de4739d3 100644 --- a/pkg/postgres/replication/explicit_test.go +++ b/pkg/postgres/replication/explicit_test.go @@ -20,7 +20,6 @@ import ( "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -38,8 +37,8 @@ var _ = Describe("synchronous replica configuration with the new API", func() { } cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "one", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, }, } @@ -57,8 +56,8 @@ var _ = Describe("synchronous replica configuration with the new API", func() { } cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "one", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, }, } @@ -76,8 +75,8 @@ var _ = Describe("synchronous replica configuration with the new API", func() { } cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "one", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, }, } @@ -95,8 +94,8 @@ var _ = Describe("synchronous replica configuration with the new API", func() { } cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "one", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, }, } diff --git a/pkg/postgres/replication/legacy.go b/pkg/postgres/replication/legacy.go index e422fa96cc..81c61d47e9 100644 --- a/pkg/postgres/replication/legacy.go +++ b/pkg/postgres/replication/legacy.go @@ -24,7 +24,6 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // legacySynchronousStandbyNames sets the standby node list with the @@ -53,7 +52,7 @@ func getSyncReplicasData(cluster *apiv1.Cluster) (syncReplicas int, electableSyn // We start with the number of healthy replicas (healthy pods minus one) // and verify it is greater than 0 and between minSyncReplicas and maxSyncReplicas. 
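 	// For example (illustrative numbers, not part of the original source):
 	// with four healthy pods, readyReplicas below evaluates to 3, so
 	// maxSyncReplicas can be at most 2 and minSyncReplicas at least 1.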
// Formula: 1 <= minSyncReplicas <= SyncReplicas <= maxSyncReplicas < readyReplicas - readyReplicas := len(cluster.Status.InstancesStatus[utils.PodHealthy]) - 1 + readyReplicas := len(cluster.Status.InstancesStatus[apiv1.PodHealthy]) - 1 // If the number of ready replicas is negative, // there are no healthy Pods so no sync replica can be configured @@ -148,7 +147,7 @@ func getElectableSyncReplicas(cluster *apiv1.Cluster) []string { func getSortedNonPrimaryInstanceNames(cluster *apiv1.Cluster) []string { var nonPrimaryInstances []string - for _, instance := range cluster.Status.InstancesStatus[utils.PodHealthy] { + for _, instance := range cluster.Status.InstancesStatus[apiv1.PodHealthy] { if cluster.Status.CurrentPrimary != instance { nonPrimaryInstances = append(nonPrimaryInstances, instance) } diff --git a/pkg/postgres/replication/legacy_test.go b/pkg/postgres/replication/legacy_test.go index d83e280680..96467ab3e0 100644 --- a/pkg/postgres/replication/legacy_test.go +++ b/pkg/postgres/replication/legacy_test.go @@ -18,7 +18,6 @@ package replication import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -69,9 +68,9 @@ var _ = Describe("ensuring the correctness of synchronous replica data calculati cluster := createFakeCluster("exampleOnePod") cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "exampleOnePod-1", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"exampleOnePod-1"}, - utils.PodFailed: {"exampleOnePod-2", "exampleOnePod-3"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"exampleOnePod-1"}, + apiv1.PodFailed: {"exampleOnePod-2", "exampleOnePod-3"}, }, } number, names := getSyncReplicasData(cluster) @@ -85,8 +84,8 @@ var _ = Describe("ensuring the correctness of synchronous replica data calculati cluster := createFakeCluster("exampleNoPods") cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "example-1", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodFailed: {"exampleNoPods-1", "exampleNoPods-2", "exampleNoPods-3"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodFailed: {"exampleNoPods-1", "exampleNoPods-2", "exampleNoPods-3"}, }, } number, names := getSyncReplicasData(cluster) @@ -103,8 +102,8 @@ var _ = Describe("legacy synchronous_standby_names configuration", func() { cluster.Spec.MaxSyncReplicas = 2 cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "example-1", - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {"one", "two", "three"}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, }, } synchronousStandbyNames := legacySynchronousStandbyNames(cluster) diff --git a/pkg/postgres/replication/suite_test.go b/pkg/postgres/replication/suite_test.go index d4750906f2..baad4001eb 100644 --- a/pkg/postgres/replication/suite_test.go +++ b/pkg/postgres/replication/suite_test.go @@ -21,7 +21,6 @@ import ( "testing" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -41,9 +40,9 @@ func createFakeCluster(name string) *apiv1.Cluster { cluster.Spec.MinSyncReplicas = 1 cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: primaryPod, - InstancesStatus: map[utils.PodStatus][]string{ - utils.PodHealthy: {primaryPod, fmt.Sprintf("%s-2", name), fmt.Sprintf("%s-3", name)}, - utils.PodFailed: {}, + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {primaryPod, fmt.Sprintf("%s-2", name), fmt.Sprintf("%s-3", name)}, + apiv1.PodFailed: {}, }, } return cluster diff --git a/pkg/reconciler/persistentvolumeclaim/status.go b/pkg/reconciler/persistentvolumeclaim/status.go index aa51e4c317..fbef1258a1 100644 --- a/pkg/reconciler/persistentvolumeclaim/status.go +++ b/pkg/reconciler/persistentvolumeclaim/status.go @@ -139,7 +139,7 @@ func EnrichStatus( filteredPods := utils.FilterActivePods(runningInstances) cluster.Status.ReadyInstances = utils.CountReadyPods(filteredPods) - cluster.Status.InstancesStatus = utils.ListStatusPods(runningInstances) + cluster.Status.InstancesStatus = apiv1.ListStatusPods(runningInstances) cluster.Status.PVCCount = int32(len(managedPVCs)) //nolint:gosec cluster.Status.InitializingPVC = result.getSorted(initializing) diff --git a/pkg/utils/pod_conditions.go b/pkg/utils/pod_conditions.go index caea69e2a8..f206761361 100644 --- a/pkg/utils/pod_conditions.go +++ b/pkg/utils/pod_conditions.go @@ -23,20 +23,6 @@ import ( var utilsLog = log.WithName("utils") -// PodStatus represent the possible status of pods -type PodStatus string - -const ( - // PodHealthy means that a Pod is active and ready - PodHealthy = "healthy" - - // PodReplicating means that a Pod is still not ready but still active - PodReplicating = "replicating" - - // PodFailed means that a Pod will not be scheduled again (deleted or evicted) - PodFailed = "failed" -) - // IsPodReady check if a Pod is ready or not func IsPodReady(pod corev1.Pod) bool { for _, c := range pod.Status.Conditions { @@ -117,24 +103,3 @@ func CountReadyPods(podList []corev1.Pod) int { } return readyPods } - -// ListStatusPods return a list of active Pods -func ListStatusPods(podList []corev1.Pod) map[PodStatus][]string { - podsNames := make(map[PodStatus][]string) - - for _, pod := range podList { - if !pod.DeletionTimestamp.IsZero() { - continue - } - switch { - case IsPodReady(pod): - podsNames[PodHealthy] = append(podsNames[PodHealthy], pod.Name) - case IsPodActive(pod): - podsNames[PodReplicating] = append(podsNames[PodReplicating], pod.Name) - default: - podsNames[PodFailed] = append(podsNames[PodFailed], pod.Name) - } - } - - return podsNames -} diff --git a/pkg/utils/pod_conditions_test.go b/pkg/utils/pod_conditions_test.go index c63c362242..987b6e77cb 100644 --- a/pkg/utils/pod_conditions_test.go +++ b/pkg/utils/pod_conditions_test.go @@ -162,106 +162,4 @@ var _ = Describe("Pod conditions test suite", func() { } Expect(IsPodUnschedulable(pod)).To(BeFalse()) }) - - Describe("Properly builds ListStatusPods", func() { - healthyPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "healthyPod", - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionTrue, - }, - }, - }, - } - activePod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "activePod", - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionFalse, - }, - }, - }, - } - failedPod := corev1.Pod{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "failedPod", - }, - Status: corev1.PodStatus{ - Phase: corev1.PodFailed, - Conditions: []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionFalse, - }, - }, - }, - } - - now := metav1.Now() - terminatingPod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "terminatingPod", - DeletionTimestamp: &now, - }, - Status: corev1.PodStatus{ - Phase: corev1.PodRunning, - Conditions: []corev1.PodCondition{ - { - Type: corev1.ContainersReady, - Status: corev1.ConditionTrue, - }, - }, - }, - } - - It("Detects healthy pods", func() { - podList := []corev1.Pod{healthyPod, healthyPod} - expectedStatus := map[PodStatus][]string{ - PodHealthy: {"healthyPod", "healthyPod"}, - } - podStatus := ListStatusPods(podList) - Expect(podStatus).To(BeEquivalentTo(expectedStatus)) - }) - - It("Detects active pods", func() { - podList := []corev1.Pod{healthyPod, activePod} - expectedStatus := map[PodStatus][]string{ - PodHealthy: {"healthyPod"}, - PodReplicating: {"activePod"}, - } - podStatus := ListStatusPods(podList) - Expect(podStatus).To(BeEquivalentTo(expectedStatus)) - }) - - It("Detects failed pods", func() { - podList := []corev1.Pod{healthyPod, activePod, failedPod} - expectedStatus := map[PodStatus][]string{ - PodHealthy: {"healthyPod"}, - PodReplicating: {"activePod"}, - PodFailed: {"failedPod"}, - } - podStatus := ListStatusPods(podList) - Expect(podStatus).To(BeEquivalentTo(expectedStatus)) - }) - - It("Excludes terminating pods", func() { - podList := []corev1.Pod{healthyPod, activePod, failedPod, terminatingPod} - expectedStatus := map[PodStatus][]string{ - PodHealthy: {"healthyPod"}, - PodReplicating: {"activePod"}, - PodFailed: {"failedPod"}, - } - podStatus := ListStatusPods(podList) - Expect(podStatus).To(BeEquivalentTo(expectedStatus)) - }) - }) }) From c9dcf88b475d2168f8829ff01217c9dc399583d9 Mon Sep 17 00:00:00 2001 From: svenakela Date: Wed, 25 Sep 2024 10:18:40 +0200 Subject: [PATCH 002/836] docs: add Alpcot to `ADOPTERS.md` (#5622) Alpcot uses CloudNativePG for both public-facing and internal applications deployed in the cloud and in-house Kubernetes. Signed-off-by: svenakela --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index a732916833..b874c42e83 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -46,4 +46,4 @@ This list is sorted in chronological order, based on the submission date. | [Microsoft Azure](https://azure.microsoft.com/en-us/) | @KenKilty | 2024-08-22 | Learn how to [deploy](https://learn.microsoft.com/azure/aks/postgresql-ha-overview) PostgreSQL on [Azure Kubernetes Services (AKS)](https://learn.microsoft.com/azure/aks/what-is-aks) with [EDB commercial support](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/enterprisedb-corp.edb-enterprise) and [EDB Postgres-as-a-Service](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/enterprisedb-corp.biganimal-prod-v1) offerings available in the [Azure Marketplace](https://azuremarketplace.microsoft.com/).| | [PZU Group](https://www.pzu.pl) | @MichaluxPL | 2024-08-26 | PZU is one of the largest financial institutions in Poland and also the largest insurance company in Central and Eastern Europe. 
CloudNativePG is used as on-premise cloud solution/DBaaS to provide highly available PostgreSQL clusters.| | [Telnyx](https://www.telnyx.com) | @aryklein | 2024-09-24 | Telnyx leverages PostgreSQL as its relational database for internal services, managing databases with high availability using CloudNativePG across multiple Kubernetes clusters in different sites, with distributed replica clusters to ensure data redundancy and resilience. | - +| [Alpcot](https://alpcot.se) | @svenakela | 2024-09-24 | Alpcot uses CloudNativePG for both public-facing and internal applications deployed in the cloud and in-house Kubernetes. | From 4395ebf078ce49cfbea4649443da5d6719022e92 Mon Sep 17 00:00:00 2001 From: Miguel Moncada Isla <48254102+Mmoncadaisla@users.noreply.github.com> Date: Wed, 25 Sep 2024 10:22:42 +0200 Subject: [PATCH 003/836] docs: Add Cambium to ADOPTERS.md (#5624) Signed-off-by: Miguel Moncada Isla <48254102+Mmoncadaisla@users.noreply.github.com> Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index b874c42e83..69ab475777 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -47,3 +47,4 @@ This list is sorted in chronological order, based on the submission date. | [PZU Group](https://www.pzu.pl) | @MichaluxPL | 2024-08-26 | PZU is one of the largest financial institutions in Poland and also the largest insurance company in Central and Eastern Europe. CloudNativePG is used as on-premise cloud solution/DBaaS to provide highly available PostgreSQL clusters.| | [Telnyx](https://www.telnyx.com) | @aryklein | 2024-09-24 | Telnyx leverages PostgreSQL as its relational database for internal services, managing databases with high availability using CloudNativePG across multiple Kubernetes clusters in different sites, with distributed replica clusters to ensure data redundancy and resilience. | | [Alpcot](https://alpcot.se) | @svenakela | 2024-09-24 | Alpcot uses CloudNativePG for both public-facing and internal applications deployed in the cloud and in-house Kubernetes. | +| [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. | \ No newline at end of file From b2187b8e3eeb00424dd2d6ee9a153fec0371a0d2 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Wed, 25 Sep 2024 10:35:33 +0200 Subject: [PATCH 004/836] docs: improve instructions for `ADOPTERS.md` (#5628) Signed-off-by: Gabriele Bartolini --- ADOPTERS.md | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 69ab475777..a5c6b48ee0 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -1,24 +1,29 @@ # Adopters -Below you can find a list of organizations and users who have agreed to -tell the world that they are using PostgreSQL in Kubernetes with our -CloudNativePG operator in a production environment. +Below is a list of organizations and users who have publicly shared that +they’re using PostgreSQL in Kubernetes with the CloudNativePG operator in a +production environment. -The goal of this list is to inspire others to do the same and to grow -this open source community and project. +The purpose of this list is to inspire others to join the movement and help +grow our open-source community and project. -Please add your organization to this list. 
It takes 5 minutes of your time, -but it means a lot to us. +Adding your organization takes just 5 minutes of your time, but it means a lot +to us! -## Updating this list +## How to Add Your Organization -To add your organization to this list, you can either: +You can add your organization to this list in two ways: -- [open a pull request](https://github.com/cloudnative-pg/cloudnative-pg/pulls) to directly update this file, or -- [edit this file](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/ADOPTERS.md) directly in GitHub +- [Open a pull request](https://github.com/cloudnative-pg/cloudnative-pg/pulls) + to directly update this file. +- [Edit the file](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/ADOPTERS.md) + directly on GitHub. -Feel free to ask in the Slack chat if you any questions and/or require -assistance with updating this list. +Use the commit title: **"docs: add to `ADOPTERS.md`"** and +be sure to [sign off your work](contribute/README.md#sign-your-work). + +If you need any assistance, feel free to ask in our Slack chat—we’re here to +help! ## CloudNativePG Adopters From 7925a3feb5fa88fcbd8538f532633dd80309a0b7 Mon Sep 17 00:00:00 2001 From: simonerocchi <32674385+simonerocchi@users.noreply.github.com> Date: Wed, 25 Sep 2024 11:00:12 +0200 Subject: [PATCH 005/836] docs: add Mind Informatica to `ADOPTERS.md` (#5626) Mind Informatica has adopted CloudNativePG to run PostgreSQL clusters for their web applications. Signed-off-by: simonerocchi <32674385+simonerocchi@users.noreply.github.com> Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini Co-authored-by: Leonardo Cecchi --- ADOPTERS.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index a5c6b48ee0..d1ae6f7385 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -52,4 +52,5 @@ This list is sorted in chronological order, based on the submission date. | [PZU Group](https://www.pzu.pl) | @MichaluxPL | 2024-08-26 | PZU is one of the largest financial institutions in Poland and also the largest insurance company in Central and Eastern Europe. CloudNativePG is used as on-premise cloud solution/DBaaS to provide highly available PostgreSQL clusters.| | [Telnyx](https://www.telnyx.com) | @aryklein | 2024-09-24 | Telnyx leverages PostgreSQL as its relational database for internal services, managing databases with high availability using CloudNativePG across multiple Kubernetes clusters in different sites, with distributed replica clusters to ensure data redundancy and resilience. | | [Alpcot](https://alpcot.se) | @svenakela | 2024-09-24 | Alpcot uses CloudNativePG for both public-facing and internal applications deployed in the cloud and in-house Kubernetes. | -| [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. | \ No newline at end of file +| [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. | +| [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. 
| From d82d091be5a84323cdf46ff5afd0bde2de104525 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 25 Sep 2024 16:31:05 +0200 Subject: [PATCH 006/836] docs: fix latest minor snapshot installation instructions (#5631) Signed-off-by: Marco Nenciarini --- docs/src/installation_upgrade.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index f6a7e1b14b..c21dbb09db 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -72,7 +72,7 @@ specific minor release, you can just run: ```sh curl -sSfL \ - https://raw.githubusercontent.com/cloudnative-pg/artifacts/main/manifests/operator-manifest.yaml | \ + https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.24/manifests/operator-manifest.yaml | \ kubectl apply --server-side -f - ``` From 9b7151982d4fe76fea2378d04be33d6872a9947b Mon Sep 17 00:00:00 2001 From: Jeremy Schneider Date: Wed, 25 Sep 2024 09:32:54 -0700 Subject: [PATCH 007/836] docs: add GEICO Tech to `ADOPTERS.md` (#5623) Add GEICO Tech as an adopter of CloudNativePG. Signed-off-by: Jeremy Schneider --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index d1ae6f7385..38662aa076 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -52,5 +52,6 @@ This list is sorted in chronological order, based on the submission date. | [PZU Group](https://www.pzu.pl) | @MichaluxPL | 2024-08-26 | PZU is one of the largest financial institutions in Poland and also the largest insurance company in Central and Eastern Europe. CloudNativePG is used as on-premise cloud solution/DBaaS to provide highly available PostgreSQL clusters.| | [Telnyx](https://www.telnyx.com) | @aryklein | 2024-09-24 | Telnyx leverages PostgreSQL as its relational database for internal services, managing databases with high availability using CloudNativePG across multiple Kubernetes clusters in different sites, with distributed replica clusters to ensure data redundancy and resilience. | | [Alpcot](https://alpcot.se) | @svenakela | 2024-09-24 | Alpcot uses CloudNativePG for both public-facing and internal applications deployed in the cloud and in-house Kubernetes. | +| [GEICO Tech](https://www.geico.com/tech/) | @ardentperf | 2024-09-24 | GEICO Tech is building the most consumer-centric insurance offerings in America. CloudNativePG is used to provide a highly available Kubernetes-based Postgres service, both in the cloud and on-premises. | | [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. | | [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. 
| From fb244cbbf92c9d2a645cb76844c8b9cf0cd9c56f Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Thu, 26 Sep 2024 11:08:27 +0200 Subject: [PATCH 008/836] chore: remove image catalog interface from API (#5634) Signed-off-by: Leonardo Cecchi --- api/v1/clusterimagecatalog_types.go | 5 ----- ...ricimagecatalog_types.go => genericimagecatalog_iface.go} | 2 -- api/v1/imagecatalog_types.go | 5 ----- 3 files changed, 12 deletions(-) rename api/v1/{genericimagecatalog_types.go => genericimagecatalog_iface.go} (79%) diff --git a/api/v1/clusterimagecatalog_types.go b/api/v1/clusterimagecatalog_types.go index 4915a615a1..6562c19890 100644 --- a/api/v1/clusterimagecatalog_types.go +++ b/api/v1/clusterimagecatalog_types.go @@ -45,11 +45,6 @@ type ClusterImageCatalogList struct { Items []ClusterImageCatalog `json:"items"` } -// GetObjectMeta returns the ObjectMeta of the ClusterImageCatalog -func (c *ClusterImageCatalog) GetObjectMeta() *metav1.ObjectMeta { - return &c.ObjectMeta -} - // GetSpec returns the Spec of the ClusterImageCatalog func (c *ClusterImageCatalog) GetSpec() *ImageCatalogSpec { return &c.Spec diff --git a/api/v1/genericimagecatalog_types.go b/api/v1/genericimagecatalog_iface.go similarity index 79% rename from api/v1/genericimagecatalog_types.go rename to api/v1/genericimagecatalog_iface.go index 11929b35bf..635fff02fa 100644 --- a/api/v1/genericimagecatalog_types.go +++ b/api/v1/genericimagecatalog_iface.go @@ -12,8 +12,6 @@ type GenericImageCatalog interface { runtime.Object metav1.Object - // GetObjectMeta returns the ObjectMeta of the GenericImageCatalog - GetObjectMeta() *metav1.ObjectMeta // GetSpec returns the Spec of the GenericImageCatalog GetSpec() *ImageCatalogSpec } diff --git a/api/v1/imagecatalog_types.go b/api/v1/imagecatalog_types.go index c8c386ff22..6f7a9d4d50 100644 --- a/api/v1/imagecatalog_types.go +++ b/api/v1/imagecatalog_types.go @@ -64,11 +64,6 @@ type ImageCatalogList struct { Items []ImageCatalog `json:"items"` } -// GetObjectMeta returns the ObjectMeta of the ImageCatalog -func (c *ImageCatalog) GetObjectMeta() *metav1.ObjectMeta { - return &c.ObjectMeta -} - // GetSpec returns the Spec of the ImageCatalog func (c *ImageCatalog) GetSpec() *ImageCatalogSpec { return &c.Spec From 2d8ad34a289e797806a15237d01acd3598963374 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Thu, 26 Sep 2024 11:38:37 +0200 Subject: [PATCH 009/836] feat(database): add support for database creation from templates (#5565) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #5521 Signed-off-by: Niccolò Fei Signed-off-by: Gabriele Quaresima Co-authored-by: Gabriele Quaresima --- api/v1/database_types.go | 5 +++++ config/crd/bases/postgresql.cnpg.io_databases.yaml | 7 +++++++ docs/src/cloudnative-pg.v1.md | 7 +++++++ internal/management/controller/database_controller_sql.go | 3 +++ .../management/controller/database_controller_sql_test.go | 8 +++++--- 5 files changed, 27 insertions(+), 3 deletions(-) diff --git a/api/v1/database_types.go b/api/v1/database_types.go index f185a5a635..8cb52ad810 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -47,6 +47,11 @@ type DatabaseSpec struct { // The owner Owner string `json:"owner"` + // The name of the template from which to create the new database + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="template is immutable" + Template string `json:"template,omitempty"` + // The encoding (cannot be changed) // 
+kubebuilder:validation:XValidation:rule="self == oldSelf",message="encoding is immutable" // +optional diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index 9a09276061..b348c25dd9 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -108,6 +108,13 @@ spec: tablespace: description: The default tablespace of this database type: string + template: + description: The name of the template from which to create the new + database + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf required: - cluster - name diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 72aa2d1b8f..868b609256 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2300,6 +2300,13 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
 <td>
    <p>The owner</p>
 </td>
+</tr>
+<tr><td><code>template</code><br/>
+<code>string</code>
+</td>
+<td>
+   <p>The name of the template from which to create the new database</p>
+</td>
 </tr>
 <tr><td><code>encoding</code><br/>
string diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index b8875e8a73..1cf32d83a0 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -60,6 +60,9 @@ func createDatabase( if len(obj.Spec.Owner) > 0 { sqlCreateDatabase += fmt.Sprintf(" OWNER %s", pgx.Identifier{obj.Spec.Owner}.Sanitize()) } + if len(obj.Spec.Template) > 0 { + sqlCreateDatabase += fmt.Sprintf(" TEMPLATE %s", pgx.Identifier{obj.Spec.Template}.Sanitize()) + } if len(obj.Spec.Tablespace) > 0 { sqlCreateDatabase += fmt.Sprintf(" TABLESPACE %s", pgx.Identifier{obj.Spec.Tablespace}.Sanitize()) } diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go index 3cfdf123a7..444267b36e 100644 --- a/internal/management/controller/database_controller_sql_test.go +++ b/internal/management/controller/database_controller_sql_test.go @@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package controller import ( @@ -88,17 +89,18 @@ var _ = Describe("Managed Database SQL", func() { Context("createDatabase", func() { It("should create a new Database", func(ctx SpecContext) { database.Spec.IsTemplate = ptr.To(true) + database.Spec.Template = "myTemplate" database.Spec.Tablespace = "myTablespace" database.Spec.AllowConnections = ptr.To(true) database.Spec.ConnectionLimit = ptr.To(-1) expectedValue := sqlmock.NewResult(0, 1) expectedQuery := fmt.Sprintf( - "CREATE DATABASE %s OWNER %s TABLESPACE %s "+ + "CREATE DATABASE %s OWNER %s TEMPLATE %s TABLESPACE %s "+ "ALLOW_CONNECTIONS %t CONNECTION LIMIT %d IS_TEMPLATE %t", pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), - pgx.Identifier{database.Spec.Tablespace}.Sanitize(), *database.Spec.AllowConnections, - *database.Spec.ConnectionLimit, *database.Spec.IsTemplate, + pgx.Identifier{database.Spec.Template}.Sanitize(), pgx.Identifier{database.Spec.Tablespace}.Sanitize(), + *database.Spec.AllowConnections, *database.Spec.ConnectionLimit, *database.Spec.IsTemplate, ) dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) From e12d51ece45ec260a73233016a763e258472bfcf Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Thu, 26 Sep 2024 12:09:11 +0200 Subject: [PATCH 010/836] chore(refactor): isolate API types (#5595) Signed-off-by: Leonardo Cecchi --- api/v1/backup_funcs.go | 236 +++ ...kup_types_test.go => backup_funcs_test.go} | 0 api/v1/backup_types.go | 215 --- api/v1/cluster_conditions.go | 51 + api/v1/cluster_funcs.go | 1409 ++++++++++++++++ ...er_types_test.go => cluster_funcs_test.go} | 0 api/v1/cluster_types.go | 1416 ----------------- api/v1/clusterimagecatalog_funcs.go | 22 + api/v1/clusterimagecatalog_types.go | 5 - api/v1/imagecatalog_funcs.go | 5 + api/v1/imagecatalog_types.go | 5 - api/v1/pooler_funcs.go | 57 + ...ler_types_test.go => pooler_funcs_test.go} | 0 api/v1/pooler_types.go | 40 - api/v1/scheduledbackup_funcs.go | 82 + ..._test.go => scheduledbackup_funcs_test.go} | 0 api/v1/scheduledbackup_types.go | 61 - 17 files changed, 1862 insertions(+), 1742 deletions(-) create mode 100644 api/v1/backup_funcs.go rename api/v1/{backup_types_test.go => backup_funcs_test.go} (100%) create mode 100644 api/v1/cluster_conditions.go create mode 100644 
api/v1/cluster_funcs.go rename api/v1/{cluster_types_test.go => cluster_funcs_test.go} (100%) create mode 100644 api/v1/clusterimagecatalog_funcs.go create mode 100644 api/v1/pooler_funcs.go rename api/v1/{pooler_types_test.go => pooler_funcs_test.go} (100%) create mode 100644 api/v1/scheduledbackup_funcs.go rename api/v1/{scheduledbackup_types_test.go => scheduledbackup_funcs_test.go} (100%) diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go new file mode 100644 index 0000000000..9c56e8503d --- /dev/null +++ b/api/v1/backup_funcs.go @@ -0,0 +1,236 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "sort" + "strings" + + volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// SetAsFailed marks a certain backup as invalid +func (backupStatus *BackupStatus) SetAsFailed( + err error, +) { + backupStatus.Phase = BackupPhaseFailed + + if err != nil { + backupStatus.Error = err.Error() + } else { + backupStatus.Error = "" + } +} + +// SetAsFinalizing marks a certain backup as finalizing +func (backupStatus *BackupStatus) SetAsFinalizing() { + backupStatus.Phase = BackupPhaseFinalizing + backupStatus.Error = "" +} + +// SetAsCompleted marks a certain backup as completed +func (backupStatus *BackupStatus) SetAsCompleted() { + backupStatus.Phase = BackupPhaseCompleted + backupStatus.Error = "" + backupStatus.StoppedAt = ptr.To(metav1.Now()) +} + +// SetAsStarted marks a certain backup as started +func (backupStatus *BackupStatus) SetAsStarted(podName, containerID string, method BackupMethod) { + backupStatus.Phase = BackupPhaseStarted + backupStatus.InstanceID = &InstanceID{ + PodName: podName, + ContainerID: containerID, + } + backupStatus.Method = method +} + +// SetSnapshotElements sets the Snapshots field from a list of VolumeSnapshot +func (snapshotStatus *BackupSnapshotStatus) SetSnapshotElements(snapshots []volumesnapshot.VolumeSnapshot) { + snapshotNames := make([]BackupSnapshotElementStatus, len(snapshots)) + for idx, volumeSnapshot := range snapshots { + snapshotNames[idx] = BackupSnapshotElementStatus{ + Name: volumeSnapshot.Name, + Type: volumeSnapshot.Annotations[utils.PvcRoleLabelName], + TablespaceName: volumeSnapshot.Labels[utils.TablespaceNameLabelName], + } + } + snapshotStatus.Elements = snapshotNames +} + +// IsDone check if a backup is completed or still in progress +func (backupStatus *BackupStatus) IsDone() bool { + return backupStatus.Phase == BackupPhaseCompleted || backupStatus.Phase == BackupPhaseFailed +} + +// GetOnline tells whether this backup was taken while the database +// was up +func (backupStatus *BackupStatus) GetOnline() bool { + if backupStatus.Online == nil { + return false + } + + return *backupStatus.Online +} + +// 
IsCompletedVolumeSnapshot checks if a backup is completed using the volume snapshot method. +// It returns true if the backup's method is BackupMethodVolumeSnapshot and its status phase is BackupPhaseCompleted. +// Otherwise, it returns false. +func (backup *Backup) IsCompletedVolumeSnapshot() bool { + return backup != nil && + backup.Spec.Method == BackupMethodVolumeSnapshot && + backup.Status.Phase == BackupPhaseCompleted +} + +// IsInProgress check if a certain backup is in progress or not +func (backupStatus *BackupStatus) IsInProgress() bool { + return backupStatus.Phase == BackupPhasePending || + backupStatus.Phase == BackupPhaseStarted || + backupStatus.Phase == BackupPhaseRunning +} + +// GetPendingBackupNames returns the pending backup list +func (list BackupList) GetPendingBackupNames() []string { + // Retry the backup if another backup is running + pendingBackups := make([]string, 0, len(list.Items)) + for _, concurrentBackup := range list.Items { + if concurrentBackup.Status.IsDone() { + continue + } + if !concurrentBackup.Status.IsInProgress() { + pendingBackups = append(pendingBackups, concurrentBackup.Name) + } + } + + return pendingBackups +} + +// CanExecuteBackup control if we can start a reconciliation loop for a certain backup. +// +// A reconciliation loop can start if: +// - there's no backup running, and if the first of the sorted list of backups +// - the current backup is running and is the first running backup of the list +// +// As a side effect, this function will sort the backup list +func (list *BackupList) CanExecuteBackup(backupName string) bool { + var foundRunningBackup bool + + list.SortByName() + + for _, concurrentBackup := range list.Items { + if concurrentBackup.Status.IsInProgress() { + if backupName == concurrentBackup.Name && !foundRunningBackup { + return true + } + + foundRunningBackup = true + if backupName != concurrentBackup.Name { + return false + } + } + } + + pendingBackups := list.GetPendingBackupNames() + if len(pendingBackups) > 0 && pendingBackups[0] != backupName { + return false + } + + return true +} + +// SortByName sorts the backup items in alphabetical order +func (list *BackupList) SortByName() { + // Sort the list of backups in alphabetical order + sort.Slice(list.Items, func(i, j int) bool { + return strings.Compare(list.Items[i].Name, list.Items[j].Name) <= 0 + }) +} + +// SortByReverseCreationTime sorts the backup items in reverse creation time (starting from the latest one) +func (list *BackupList) SortByReverseCreationTime() { + // Sort the list of backups in reverse creation time + sort.Slice(list.Items, func(i, j int) bool { + return list.Items[i].CreationTimestamp.Time.Compare(list.Items[j].CreationTimestamp.Time) > 0 + }) +} + +// GetStatus gets the backup status +func (backup *Backup) GetStatus() *BackupStatus { + return &backup.Status +} + +// GetMetadata get the metadata +func (backup *Backup) GetMetadata() *metav1.ObjectMeta { + return &backup.ObjectMeta +} + +// GetName get the backup name +func (backup *Backup) GetName() string { + return backup.Name +} + +// GetNamespace get the backup namespace +func (backup *Backup) GetNamespace() string { + return backup.Namespace +} + +// GetAssignedInstance fetches the instance that was assigned to the backup execution +func (backup *Backup) GetAssignedInstance(ctx context.Context, cli client.Client) (*corev1.Pod, error) { + if backup.Status.InstanceID == nil || len(backup.Status.InstanceID.PodName) == 0 { + return nil, nil + } + + var previouslyElectedPod corev1.Pod + if 
err := cli.Get( + ctx, + client.ObjectKey{Namespace: backup.Namespace, Name: backup.Status.InstanceID.PodName}, + &previouslyElectedPod, + ); err != nil { + return nil, err + } + + return &previouslyElectedPod, nil +} + +// GetVolumeSnapshotConfiguration overrides the configuration value with the ones specified +// in the backup, if present. +func (backup *Backup) GetVolumeSnapshotConfiguration( + clusterConfig VolumeSnapshotConfiguration, +) VolumeSnapshotConfiguration { + config := clusterConfig + if backup.Spec.Online != nil { + config.Online = backup.Spec.Online + } + + if backup.Spec.OnlineConfiguration != nil { + config.OnlineConfiguration = *backup.Spec.OnlineConfiguration + } + + return config +} + +// IsEmpty checks if the plugin configuration is empty or not +func (configuration *BackupPluginConfiguration) IsEmpty() bool { + return configuration == nil || len(configuration.Name) == 0 +} diff --git a/api/v1/backup_types_test.go b/api/v1/backup_funcs_test.go similarity index 100% rename from api/v1/backup_types_test.go rename to api/v1/backup_funcs_test.go diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go index 2701b47502..739e9a731b 100644 --- a/api/v1/backup_types.go +++ b/api/v1/backup_types.go @@ -17,18 +17,8 @@ limitations under the License. package v1 import ( - "context" - "sort" - "strings" - barmanApi "github.com/cloudnative-pg/barman-cloud/pkg/api" - volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // BackupPhase is the phase of the backup @@ -345,211 +335,6 @@ type BackupList struct { Items []Backup `json:"items"` } -// SetAsFailed marks a certain backup as invalid -func (backupStatus *BackupStatus) SetAsFailed( - err error, -) { - backupStatus.Phase = BackupPhaseFailed - - if err != nil { - backupStatus.Error = err.Error() - } else { - backupStatus.Error = "" - } -} - -// SetAsFinalizing marks a certain backup as finalizing -func (backupStatus *BackupStatus) SetAsFinalizing() { - backupStatus.Phase = BackupPhaseFinalizing - backupStatus.Error = "" -} - -// SetAsCompleted marks a certain backup as completed -func (backupStatus *BackupStatus) SetAsCompleted() { - backupStatus.Phase = BackupPhaseCompleted - backupStatus.Error = "" - backupStatus.StoppedAt = ptr.To(metav1.Now()) -} - -// SetAsStarted marks a certain backup as started -func (backupStatus *BackupStatus) SetAsStarted(podName, containerID string, method BackupMethod) { - backupStatus.Phase = BackupPhaseStarted - backupStatus.InstanceID = &InstanceID{ - PodName: podName, - ContainerID: containerID, - } - backupStatus.Method = method -} - -// SetSnapshotElements sets the Snapshots field from a list of VolumeSnapshot -func (snapshotStatus *BackupSnapshotStatus) SetSnapshotElements(snapshots []volumesnapshot.VolumeSnapshot) { - snapshotNames := make([]BackupSnapshotElementStatus, len(snapshots)) - for idx, volumeSnapshot := range snapshots { - snapshotNames[idx] = BackupSnapshotElementStatus{ - Name: volumeSnapshot.Name, - Type: volumeSnapshot.Annotations[utils.PvcRoleLabelName], - TablespaceName: volumeSnapshot.Labels[utils.TablespaceNameLabelName], - } - } - snapshotStatus.Elements = snapshotNames -} - -// IsDone check if a backup is completed or still in progress -func (backupStatus *BackupStatus) IsDone() bool { - return backupStatus.Phase == 
BackupPhaseCompleted || backupStatus.Phase == BackupPhaseFailed -} - -// GetOnline tells whether this backup was taken while the database -// was up -func (backupStatus *BackupStatus) GetOnline() bool { - if backupStatus.Online == nil { - return false - } - - return *backupStatus.Online -} - -// IsCompletedVolumeSnapshot checks if a backup is completed using the volume snapshot method. -// It returns true if the backup's method is BackupMethodVolumeSnapshot and its status phase is BackupPhaseCompleted. -// Otherwise, it returns false. -func (backup *Backup) IsCompletedVolumeSnapshot() bool { - return backup != nil && - backup.Spec.Method == BackupMethodVolumeSnapshot && - backup.Status.Phase == BackupPhaseCompleted -} - -// IsInProgress check if a certain backup is in progress or not -func (backupStatus *BackupStatus) IsInProgress() bool { - return backupStatus.Phase == BackupPhasePending || - backupStatus.Phase == BackupPhaseStarted || - backupStatus.Phase == BackupPhaseRunning -} - -// GetPendingBackupNames returns the pending backup list -func (list BackupList) GetPendingBackupNames() []string { - // Retry the backup if another backup is running - pendingBackups := make([]string, 0, len(list.Items)) - for _, concurrentBackup := range list.Items { - if concurrentBackup.Status.IsDone() { - continue - } - if !concurrentBackup.Status.IsInProgress() { - pendingBackups = append(pendingBackups, concurrentBackup.Name) - } - } - - return pendingBackups -} - -// CanExecuteBackup control if we can start a reconciliation loop for a certain backup. -// -// A reconciliation loop can start if: -// - there's no backup running, and if the first of the sorted list of backups -// - the current backup is running and is the first running backup of the list -// -// As a side effect, this function will sort the backup list -func (list *BackupList) CanExecuteBackup(backupName string) bool { - var foundRunningBackup bool - - list.SortByName() - - for _, concurrentBackup := range list.Items { - if concurrentBackup.Status.IsInProgress() { - if backupName == concurrentBackup.Name && !foundRunningBackup { - return true - } - - foundRunningBackup = true - if backupName != concurrentBackup.Name { - return false - } - } - } - - pendingBackups := list.GetPendingBackupNames() - if len(pendingBackups) > 0 && pendingBackups[0] != backupName { - return false - } - - return true -} - -// SortByName sorts the backup items in alphabetical order -func (list *BackupList) SortByName() { - // Sort the list of backups in alphabetical order - sort.Slice(list.Items, func(i, j int) bool { - return strings.Compare(list.Items[i].Name, list.Items[j].Name) <= 0 - }) -} - -// SortByReverseCreationTime sorts the backup items in reverse creation time (starting from the latest one) -func (list *BackupList) SortByReverseCreationTime() { - // Sort the list of backups in reverse creation time - sort.Slice(list.Items, func(i, j int) bool { - return list.Items[i].CreationTimestamp.Time.Compare(list.Items[j].CreationTimestamp.Time) > 0 - }) -} - -// GetStatus gets the backup status -func (backup *Backup) GetStatus() *BackupStatus { - return &backup.Status -} - -// GetMetadata get the metadata -func (backup *Backup) GetMetadata() *metav1.ObjectMeta { - return &backup.ObjectMeta -} - -// GetName get the backup name -func (backup *Backup) GetName() string { - return backup.Name -} - -// GetNamespace get the backup namespace -func (backup *Backup) GetNamespace() string { - return backup.Namespace -} - -// GetAssignedInstance fetches the instance that 
was assigned to the backup execution -func (backup *Backup) GetAssignedInstance(ctx context.Context, cli client.Client) (*corev1.Pod, error) { - if backup.Status.InstanceID == nil || len(backup.Status.InstanceID.PodName) == 0 { - return nil, nil - } - - var previouslyElectedPod corev1.Pod - if err := cli.Get( - ctx, - client.ObjectKey{Namespace: backup.Namespace, Name: backup.Status.InstanceID.PodName}, - &previouslyElectedPod, - ); err != nil { - return nil, err - } - - return &previouslyElectedPod, nil -} - -// GetVolumeSnapshotConfiguration overrides the configuration value with the ones specified -// in the backup, if present. -func (backup *Backup) GetVolumeSnapshotConfiguration( - clusterConfig VolumeSnapshotConfiguration, -) VolumeSnapshotConfiguration { - config := clusterConfig - if backup.Spec.Online != nil { - config.Online = backup.Spec.Online - } - - if backup.Spec.OnlineConfiguration != nil { - config.OnlineConfiguration = *backup.Spec.OnlineConfiguration - } - - return config -} - -// IsEmpty checks if the plugin configuration is empty or not -func (configuration *BackupPluginConfiguration) IsEmpty() bool { - return configuration == nil || len(configuration.Name) == 0 -} - func init() { SchemeBuilder.Register(&Backup{}, &BackupList{}) } diff --git a/api/v1/cluster_conditions.go b/api/v1/cluster_conditions.go new file mode 100644 index 0000000000..9d1e83947a --- /dev/null +++ b/api/v1/cluster_conditions.go @@ -0,0 +1,51 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// A Condition that can be used to communicate the Backup progress +var ( + // BackupSucceededCondition is added to a backup + // when it was completed correctly + BackupSucceededCondition = &metav1.Condition{ + Type: string(ConditionBackup), + Status: metav1.ConditionTrue, + Reason: string(ConditionReasonLastBackupSucceeded), + Message: "Backup was successful", + } + + // BackupStartingCondition is added to a backup + // when it started + BackupStartingCondition = &metav1.Condition{ + Type: string(ConditionBackup), + Status: metav1.ConditionFalse, + Reason: string(ConditionBackupStarted), + Message: "New Backup starting up", + } + + // BuildClusterBackupFailedCondition builds + // ConditionReasonLastBackupFailed condition + BuildClusterBackupFailedCondition = func(err error) *metav1.Condition { + return &metav1.Condition{ + Type: string(ConditionBackup), + Status: metav1.ConditionFalse, + Reason: string(ConditionReasonLastBackupFailed), + Message: err.Error(), + } + } +) diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go new file mode 100644 index 0000000000..211ec3bc7f --- /dev/null +++ b/api/v1/cluster_funcs.go @@ -0,0 +1,1409 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "regexp" + "slices" + "strconv" + "strings" + "time" + + "github.com/cloudnative-pg/machinery/pkg/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" + "github.com/cloudnative-pg/cloudnative-pg/pkg/system" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" +) + +// GetOnline tells whether this volume snapshot configuration allows +// online backups +func (configuration *VolumeSnapshotConfiguration) GetOnline() bool { + if configuration.Online == nil { + return true + } + + return *configuration.Online +} + +// GetWaitForArchive tells whether to wait for archive or not +func (o OnlineConfiguration) GetWaitForArchive() bool { + if o.WaitForArchive == nil { + return true + } + + return *o.WaitForArchive +} + +// GetImmediateCheckpoint tells whether to execute an immediate checkpoint +func (o OnlineConfiguration) GetImmediateCheckpoint() bool { + if o.ImmediateCheckpoint == nil { + return false + } + + return *o.ImmediateCheckpoint +} + +// GetEnabledPluginNames gets the name of the plugins that are involved +// in the reconciliation of this cluster +func (pluginList PluginConfigurationList) GetEnabledPluginNames() (result []string) { + pluginNames := make([]string, 0, len(pluginList)) + for _, pluginDeclaration := range pluginList { + if pluginDeclaration.IsEnabled() { + pluginNames = append(pluginNames, pluginDeclaration.Name) + } + } + return pluginNames +} + +// GetShmLimit gets the `/dev/shm` memory size limit +func (e *EphemeralVolumesSizeLimitConfiguration) GetShmLimit() *resource.Quantity { + if e == nil { + return nil + } + + return e.Shm +} + +// GetTemporaryDataLimit gets the temporary storage size limit +func (e *EphemeralVolumesSizeLimitConfiguration) GetTemporaryDataLimit() *resource.Quantity { + if e == nil { + return nil + } + + return e.TemporaryData +} + +// MergeMetadata adds the passed custom annotations and labels in the service account. 
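+//
+// A minimal usage sketch (the label and annotation values here are
+// hypothetical, chosen only for illustration):
+//
+//	tpl := &ServiceAccountTemplate{
+//		Metadata: Metadata{
+//			Labels:      map[string]string{"team": "db"},
+//			Annotations: map[string]string{"owner": "dba"},
+//		},
+//	}
+//	sa := &corev1.ServiceAccount{}
+//	tpl.MergeMetadata(sa)
+//	// sa now carries both the custom label and the custom annotation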
+func (st *ServiceAccountTemplate) MergeMetadata(sa *corev1.ServiceAccount) {
+	if st == nil {
+		return
+	}
+	if sa.Labels == nil {
+		sa.Labels = map[string]string{}
+	}
+	if sa.Annotations == nil {
+		sa.Annotations = map[string]string{}
+	}
+
+	utils.MergeMap(sa.Labels, st.Metadata.Labels)
+	utils.MergeMap(sa.Annotations, st.Metadata.Annotations)
+}
+
+// MatchesTopology checks if the two topologies have
+// the same label values (labels are specified in SyncReplicaElectionConstraints.NodeLabelsAntiAffinity)
+func (topologyLabels PodTopologyLabels) MatchesTopology(instanceTopology PodTopologyLabels) bool {
+	log.Debug("matching topology", "main", topologyLabels, "second", instanceTopology)
+	for mainLabelName, mainLabelValue := range topologyLabels {
+		if mainLabelValue != instanceTopology[mainLabelName] {
+			return false
+		}
+	}
+	return true
+}
+
+// GetAvailableArchitecture returns an AvailableArchitecture given its name. It returns nil if it's not found.
+func (status *ClusterStatus) GetAvailableArchitecture(archName string) *AvailableArchitecture {
+	for _, architecture := range status.AvailableArchitectures {
+		if architecture.GoArch == archName {
+			return &architecture
+		}
+	}
+	return nil
+}
+
+// DeepCopyInto needs to be manually added for the controller-gen compiler to work correctly, given that it cannot
+// generate the DeepCopyInto for the regexp type.
+// The method is empty because we don't want to transfer the cache when invoking DeepCopyInto
+func (receiver synchronizeReplicasCache) DeepCopyInto(*synchronizeReplicasCache) {}
+
+func (r *SynchronizeReplicasConfiguration) compileRegex() []error {
+	if r == nil {
+		return nil
+	}
+	if r.compiled {
+		return r.compileErrors
+	}
+
+	var errs []error
+	for _, pattern := range r.ExcludePatterns {
+		re, err := regexp.Compile(pattern)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		r.compiledPatterns = append(r.compiledPatterns, *re)
+	}
+
+	r.compiled = true
+	r.compileErrors = errs
+	return errs
+}
+
+// GetEnabled returns false if synchronized replication slots are disabled; defaults to true
+func (r *SynchronizeReplicasConfiguration) GetEnabled() bool {
+	if r != nil && r.Enabled != nil {
+		return *r.Enabled
+	}
+	return true
+}
+
+// IsExcludedByUser returns whether a replication slot should not be reconciled on the replicas
+func (r *SynchronizeReplicasConfiguration) IsExcludedByUser(slotName string) (bool, error) {
+	if r == nil {
+		return false, nil
+	}
+
+	// this is an unexpected issue, validation should happen at webhook level
+	if errs := r.compileRegex(); len(errs) > 0 {
+		return false, errs[0]
+	}
+
+	for _, re := range r.compiledPatterns {
+		if re.MatchString(slotName) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// GetEnabled returns false if replication slots are disabled; defaults to true
+func (r *ReplicationSlotsConfiguration) GetEnabled() bool {
+	return r.SynchronizeReplicas.GetEnabled() || r.HighAvailability.GetEnabled()
+}
+
+// GetUpdateInterval returns the update interval, defaulting to DefaultReplicationSlotsUpdateInterval if empty
+func (r *ReplicationSlotsConfiguration) GetUpdateInterval() time.Duration {
+	if r == nil || r.UpdateInterval <= 0 {
+		return DefaultReplicationSlotsUpdateInterval
+	}
+	return time.Duration(r.UpdateInterval) * time.Second
+}
+
+// GetSlotPrefix returns the HA slot prefix, defaulting to DefaultReplicationSlotsHASlotPrefix if empty
+func (r *ReplicationSlotsHAConfiguration) GetSlotPrefix() string {
+	if r == nil || r.SlotPrefix == "" {
+		return 
DefaultReplicationSlotsHASlotPrefix + } + return r.SlotPrefix +} + +// GetSlotNameFromInstanceName returns the slot name, given the instance name. +// It returns an empty string if High Availability Replication Slots are disabled +func (r *ReplicationSlotsHAConfiguration) GetSlotNameFromInstanceName(instanceName string) string { + if r == nil || !r.GetEnabled() { + return "" + } + + slotName := fmt.Sprintf( + "%s%s", + r.GetSlotPrefix(), + instanceName, + ) + sanitizedName := slotNameNegativeRegex.ReplaceAllString(strings.ToLower(slotName), "_") + + return sanitizedName +} + +// GetEnabled returns false if replication slots are disabled, default is true +func (r *ReplicationSlotsHAConfiguration) GetEnabled() bool { + if r != nil && r.Enabled != nil { + return *r.Enabled + } + return true +} + +// ToPostgreSQLConfigurationKeyword returns the contained value as a valid PostgreSQL parameter to be injected +// in the 'synchronous_standby_names' field +func (s SynchronousReplicaConfigurationMethod) ToPostgreSQLConfigurationKeyword() string { + return strings.ToUpper(string(s)) +} + +func (c *CertificatesConfiguration) getServerAltDNSNames() []string { + if c == nil { + return nil + } + + return c.ServerAltDNSNames +} + +// HasElements returns true if it contains any Reference +func (s *SQLRefs) HasElements() bool { + if s == nil { + return false + } + + return len(s.ConfigMapRefs) != 0 || + len(s.SecretRefs) != 0 +} + +// GetBackupID gets the backup ID +func (target *RecoveryTarget) GetBackupID() string { + return target.BackupID +} + +// GetTargetTime gets the target time +func (target *RecoveryTarget) GetTargetTime() string { + return target.TargetTime +} + +// GetTargetLSN gets the target LSN +func (target *RecoveryTarget) GetTargetLSN() string { + return target.TargetLSN +} + +// GetTargetTLI gets the target timeline +func (target *RecoveryTarget) GetTargetTLI() string { + return target.TargetTLI +} + +// GetSizeOrNil returns the requests storage size +func (s *StorageConfiguration) GetSizeOrNil() *resource.Quantity { + if s == nil { + return nil + } + + if s.Size != "" { + quantity, err := resource.ParseQuantity(s.Size) + if err != nil { + return nil + } + + return &quantity + } + + if s.PersistentVolumeClaimTemplate != nil { + return s.PersistentVolumeClaimTemplate.Resources.Requests.Storage() + } + + return nil +} + +// AreDefaultQueriesDisabled checks whether default monitoring queries should be disabled +func (m *MonitoringConfiguration) AreDefaultQueriesDisabled() bool { + return m != nil && m.DisableDefaultQueries != nil && *m.DisableDefaultQueries +} + +// GetServerName returns the server name, defaulting to the name of the external cluster or using the one specified +// in the BarmanObjectStore +func (in ExternalCluster) GetServerName() string { + if in.BarmanObjectStore != nil && in.BarmanObjectStore.ServerName != "" { + return in.BarmanObjectStore.ServerName + } + return in.Name +} + +// IsEnabled returns true when this plugin is enabled +func (config *PluginConfiguration) IsEnabled() bool { + if config.Enabled == nil { + return true + } + return *config.Enabled +} + +// GetRoleSecretsName gets the name of the secret which is used to store the role's password +func (roleConfiguration *RoleConfiguration) GetRoleSecretsName() string { + if roleConfiguration.PasswordSecret != nil { + return roleConfiguration.PasswordSecret.Name + } + return "" +} + +// GetRoleInherit return the inherit attribute of a roleConfiguration +func (roleConfiguration *RoleConfiguration) GetRoleInherit() 
bool {
+	if roleConfiguration.Inherit != nil {
+		return *roleConfiguration.Inherit
+	}
+	return true
+}
+
+// SetManagedRoleSecretVersion adds, updates, or deletes the resource version of the managed role secret
+func (secretResourceVersion *SecretsResourceVersion) SetManagedRoleSecretVersion(secret string, version *string) {
+	if secretResourceVersion.ManagedRoleSecretVersions == nil {
+		secretResourceVersion.ManagedRoleSecretVersions = make(map[string]string)
+	}
+	if version == nil {
+		delete(secretResourceVersion.ManagedRoleSecretVersions, secret)
+	} else {
+		secretResourceVersion.ManagedRoleSecretVersions[secret] = *version
+	}
+}
+
+// SetExternalClusterSecretVersion adds, updates, or deletes the resource version of a secret used in external clusters
+func (secretResourceVersion *SecretsResourceVersion) SetExternalClusterSecretVersion(
+	secretName string,
+	version *string,
+) {
+	if secretResourceVersion.ExternalClusterSecretVersions == nil {
+		secretResourceVersion.ExternalClusterSecretVersions = make(map[string]string)
+	}
+
+	if version == nil {
+		delete(secretResourceVersion.ExternalClusterSecretVersions, secretName)
+		return
+	}
+
+	secretResourceVersion.ExternalClusterSecretVersions[secretName] = *version
+}
+
+// SetInContext records the cluster in the given context
+func (cluster *Cluster) SetInContext(ctx context.Context) context.Context {
+	return context.WithValue(ctx, utils.ContextKeyCluster, cluster)
+}
+
+// GetImageName gets the name of the image that should be used
+// to create the pods
+func (cluster *Cluster) GetImageName() string {
+	// If the image is specified in the status, use that one.
+	// It should be there since the first reconciliation
+	if len(cluster.Status.Image) > 0 {
+		return cluster.Status.Image
+	}
+
+	// Fallback to the information we have in the spec
+	if len(cluster.Spec.ImageName) > 0 {
+		return cluster.Spec.ImageName
+	}
+
+	// TODO: check: does a scenario exist in which we do have an imageCatalog
+	// and no status.image? In that case this should probably error out, not
+	// returning the default image name.
+	return configuration.Current.PostgresImageName
+}
+
+// GetPostgresqlVersion gets the PostgreSQL image version, detecting it from the
+// image name or from the ImageCatalogRef. 
+// Example: +// +// ghcr.io/cloudnative-pg/postgresql:14.0 corresponds to version 140000 +// ghcr.io/cloudnative-pg/postgresql:13.2 corresponds to version 130002 +// ghcr.io/cloudnative-pg/postgresql:9.6.3 corresponds to version 90603 +func (cluster *Cluster) GetPostgresqlVersion() (int, error) { + if cluster.Spec.ImageCatalogRef != nil { + return postgres.GetPostgresVersionFromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major)) + } + + image := cluster.GetImageName() + tag := utils.GetImageTag(image) + return postgres.GetPostgresVersionFromTag(tag) +} + +// GetPostgresqlMajorVersion gets the PostgreSQL image major version used in the Cluster +func (cluster *Cluster) GetPostgresqlMajorVersion() (int, error) { + version, err := cluster.GetPostgresqlVersion() + if err != nil { + return 0, err + } + return postgres.GetPostgresMajorVersion(version), nil +} + +// GetImagePullSecret get the name of the pull secret to use +// to download the PostgreSQL image +func (cluster *Cluster) GetImagePullSecret() string { + return cluster.Name + ClusterSecretSuffix +} + +// GetSuperuserSecretName get the secret name of the PostgreSQL superuser +func (cluster *Cluster) GetSuperuserSecretName() string { + if cluster.Spec.SuperuserSecret != nil && + cluster.Spec.SuperuserSecret.Name != "" { + return cluster.Spec.SuperuserSecret.Name + } + + return fmt.Sprintf("%v%v", cluster.Name, SuperUserSecretSuffix) +} + +// GetEnableLDAPAuth return true if bind or bind+search method are +// configured in the cluster configuration +func (cluster *Cluster) GetEnableLDAPAuth() bool { + if cluster.Spec.PostgresConfiguration.LDAP != nil && + (cluster.Spec.PostgresConfiguration.LDAP.BindAsAuth != nil || + cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth != nil) { + return true + } + return false +} + +// GetLDAPSecretName gets the secret name containing the LDAP password +func (cluster *Cluster) GetLDAPSecretName() string { + if cluster.Spec.PostgresConfiguration.LDAP != nil && + cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth != nil && + cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth.BindPassword != nil { + return cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth.BindPassword.Name + } + return "" +} + +// ContainsManagedRolesConfiguration returns true iff there are managed roles configured +func (cluster *Cluster) ContainsManagedRolesConfiguration() bool { + return cluster.Spec.Managed != nil && len(cluster.Spec.Managed.Roles) > 0 +} + +// GetExternalClusterSecrets returns the secrets used by external Clusters +func (cluster *Cluster) GetExternalClusterSecrets() *stringset.Data { + secrets := stringset.New() + + if cluster.Spec.ExternalClusters != nil { + for _, externalCluster := range cluster.Spec.ExternalClusters { + if externalCluster.Password != nil { + secrets.Put(externalCluster.Password.Name) + } + if externalCluster.SSLKey != nil { + secrets.Put(externalCluster.SSLKey.Name) + } + if externalCluster.SSLCert != nil { + secrets.Put(externalCluster.SSLCert.Name) + } + if externalCluster.SSLRootCert != nil { + secrets.Put(externalCluster.SSLRootCert.Name) + } + } + } + return secrets +} + +// UsesSecretInManagedRoles checks if the given secret name is used in a managed role +func (cluster *Cluster) UsesSecretInManagedRoles(secretName string) bool { + if !cluster.ContainsManagedRolesConfiguration() { + return false + } + for _, role := range cluster.Spec.Managed.Roles { + if role.PasswordSecret != nil && role.PasswordSecret.Name == secretName { + return true + } + } + return false +} + +// 
GetApplicationSecretName get the name of the application secret for any bootstrap type +func (cluster *Cluster) GetApplicationSecretName() string { + bootstrap := cluster.Spec.Bootstrap + if bootstrap == nil { + return fmt.Sprintf("%v%v", cluster.Name, ApplicationUserSecretSuffix) + } + recovery := bootstrap.Recovery + if recovery != nil && recovery.Secret != nil && recovery.Secret.Name != "" { + return recovery.Secret.Name + } + + pgBaseBackup := bootstrap.PgBaseBackup + if pgBaseBackup != nil && pgBaseBackup.Secret != nil && pgBaseBackup.Secret.Name != "" { + return pgBaseBackup.Secret.Name + } + + initDB := bootstrap.InitDB + if initDB != nil && initDB.Secret != nil && initDB.Secret.Name != "" { + return initDB.Secret.Name + } + + return fmt.Sprintf("%v%v", cluster.Name, ApplicationUserSecretSuffix) +} + +// GetApplicationDatabaseName get the name of the application database for a specific bootstrap +func (cluster *Cluster) GetApplicationDatabaseName() string { + bootstrap := cluster.Spec.Bootstrap + if bootstrap == nil { + return "" + } + + if bootstrap.Recovery != nil && bootstrap.Recovery.Database != "" { + return bootstrap.Recovery.Database + } + + if bootstrap.PgBaseBackup != nil && bootstrap.PgBaseBackup.Database != "" { + return bootstrap.PgBaseBackup.Database + } + + if bootstrap.InitDB != nil && bootstrap.InitDB.Database != "" { + return bootstrap.InitDB.Database + } + + return "" +} + +// GetApplicationDatabaseOwner get the owner user of the application database for a specific bootstrap +func (cluster *Cluster) GetApplicationDatabaseOwner() string { + bootstrap := cluster.Spec.Bootstrap + if bootstrap == nil { + return "" + } + + if bootstrap.Recovery != nil && bootstrap.Recovery.Owner != "" { + return bootstrap.Recovery.Owner + } + + if bootstrap.PgBaseBackup != nil && bootstrap.PgBaseBackup.Owner != "" { + return bootstrap.PgBaseBackup.Owner + } + + if bootstrap.InitDB != nil && bootstrap.InitDB.Owner != "" { + return bootstrap.InitDB.Owner + } + + return "" +} + +// GetServerCASecretName get the name of the secret containing the CA +// of the cluster +func (cluster *Cluster) GetServerCASecretName() string { + if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ServerCASecret != "" { + return cluster.Spec.Certificates.ServerCASecret + } + return fmt.Sprintf("%v%v", cluster.Name, DefaultServerCaSecretSuffix) +} + +// GetServerTLSSecretName get the name of the secret containing the +// certificate that is used for the PostgreSQL servers +func (cluster *Cluster) GetServerTLSSecretName() string { + if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ServerTLSSecret != "" { + return cluster.Spec.Certificates.ServerTLSSecret + } + return fmt.Sprintf("%v%v", cluster.Name, ServerSecretSuffix) +} + +// GetClientCASecretName get the name of the secret containing the CA +// of the cluster +func (cluster *Cluster) GetClientCASecretName() string { + if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ClientCASecret != "" { + return cluster.Spec.Certificates.ClientCASecret + } + return fmt.Sprintf("%v%v", cluster.Name, ClientCaSecretSuffix) +} + +// GetFixedInheritedAnnotations gets the annotations that should be +// inherited by all resources according the cluster spec +func (cluster *Cluster) GetFixedInheritedAnnotations() map[string]string { + if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Annotations == nil { + return nil + } + return cluster.Spec.InheritedMetadata.Annotations +} + +// GetFixedInheritedLabels gets the 
labels that should be +// inherited by all resources according the cluster spec +func (cluster *Cluster) GetFixedInheritedLabels() map[string]string { + if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Labels == nil { + return nil + } + return cluster.Spec.InheritedMetadata.Labels +} + +// GetReplicationSecretName get the name of the secret for the replication user +func (cluster *Cluster) GetReplicationSecretName() string { + if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ReplicationTLSSecret != "" { + return cluster.Spec.Certificates.ReplicationTLSSecret + } + return fmt.Sprintf("%v%v", cluster.Name, ReplicationSecretSuffix) +} + +// GetServiceAnyName return the name of the service that is used as DNS +// domain for all the nodes, even if they are not ready +func (cluster *Cluster) GetServiceAnyName() string { + return fmt.Sprintf("%v%v", cluster.Name, ServiceAnySuffix) +} + +// GetServiceReadName return the default name of the service that is used for +// read transactions (including the primary) +func (cluster *Cluster) GetServiceReadName() string { + return fmt.Sprintf("%v%v", cluster.Name, ServiceReadSuffix) +} + +// GetServiceReadOnlyName return the default name of the service that is used for +// read-only transactions (excluding the primary) +func (cluster *Cluster) GetServiceReadOnlyName() string { + return fmt.Sprintf("%v%v", cluster.Name, ServiceReadOnlySuffix) +} + +// GetServiceReadWriteName return the default name of the service that is used for +// read-write transactions +func (cluster *Cluster) GetServiceReadWriteName() string { + return fmt.Sprintf("%v%v", cluster.Name, ServiceReadWriteSuffix) +} + +// GetMaxStartDelay get the amount of time of startDelay config option +func (cluster *Cluster) GetMaxStartDelay() int32 { + if cluster.Spec.MaxStartDelay > 0 { + return cluster.Spec.MaxStartDelay + } + return DefaultStartupDelay +} + +// GetMaxStopDelay get the amount of time PostgreSQL has to stop +func (cluster *Cluster) GetMaxStopDelay() int32 { + if cluster.Spec.MaxStopDelay > 0 { + return cluster.Spec.MaxStopDelay + } + return 1800 +} + +// GetSmartShutdownTimeout is used to ensure that smart shutdown timeout is a positive integer +func (cluster *Cluster) GetSmartShutdownTimeout() int32 { + if cluster.Spec.SmartShutdownTimeout != nil { + return *cluster.Spec.SmartShutdownTimeout + } + return 180 +} + +// GetRestartTimeout is used to have a timeout for operations that involve +// a restart of a PostgreSQL instance +func (cluster *Cluster) GetRestartTimeout() int32 { + return cluster.GetMaxStopDelay() + cluster.GetMaxStartDelay() +} + +// GetMaxSwitchoverDelay get the amount of time PostgreSQL has to stop before switchover +func (cluster *Cluster) GetMaxSwitchoverDelay() int32 { + if cluster.Spec.MaxSwitchoverDelay > 0 { + return cluster.Spec.MaxSwitchoverDelay + } + return DefaultMaxSwitchoverDelay +} + +// GetPrimaryUpdateStrategy get the cluster primary update strategy, +// defaulting to unsupervised +func (cluster *Cluster) GetPrimaryUpdateStrategy() PrimaryUpdateStrategy { + strategy := cluster.Spec.PrimaryUpdateStrategy + if strategy == "" { + return PrimaryUpdateStrategyUnsupervised + } + + return strategy +} + +// GetPrimaryUpdateMethod get the cluster primary update method, +// defaulting to restart +func (cluster *Cluster) GetPrimaryUpdateMethod() PrimaryUpdateMethod { + strategy := cluster.Spec.PrimaryUpdateMethod + if strategy == "" { + return PrimaryUpdateMethodRestart + } + + return strategy +} + +// GetEnablePDB 
gets the cluster EnablePDB value, defaulting to true
+func (cluster *Cluster) GetEnablePDB() bool {
+	if cluster.Spec.EnablePDB == nil {
+		return true
+	}
+
+	return *cluster.Spec.EnablePDB
+}
+
+// IsNodeMaintenanceWindowInProgress checks whether the upgrade mode is active
+func (cluster *Cluster) IsNodeMaintenanceWindowInProgress() bool {
+	return cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.InProgress
+}
+
+// GetPgCtlTimeoutForPromotion returns the timeout to wait for an instance to be promoted
+// to primary. By default, DefaultPgCtlTimeoutForPromotion is big enough to simulate an infinite timeout
+func (cluster *Cluster) GetPgCtlTimeoutForPromotion() int32 {
+	timeout := cluster.Spec.PostgresConfiguration.PgCtlTimeoutForPromotion
+	if timeout == 0 {
+		return DefaultPgCtlTimeoutForPromotion
+	}
+	return timeout
+}
+
+// IsReusePVCEnabled checks whether PVCs should be reused during a maintenance window
+func (cluster *Cluster) IsReusePVCEnabled() bool {
+	reusePVC := true
+	if cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.ReusePVC != nil {
+		reusePVC = *cluster.Spec.NodeMaintenanceWindow.ReusePVC
+	}
+	return reusePVC
+}
+
+// IsInstanceFenced checks whether a given instance should be fenced
+func (cluster *Cluster) IsInstanceFenced(instance string) bool {
+	fencedInstances, err := utils.GetFencedInstances(cluster.Annotations)
+	if err != nil {
+		return false
+	}
+
+	if fencedInstances.Has(utils.FenceAllInstances) {
+		return true
+	}
+	return fencedInstances.Has(instance)
+}
+
+// ShouldResizeInUseVolumes is true when we should resize the PVCs we have already
+// created
+func (cluster *Cluster) ShouldResizeInUseVolumes() bool {
+	if cluster.Spec.StorageConfiguration.ResizeInUseVolumes == nil {
+		return true
+	}
+
+	return *cluster.Spec.StorageConfiguration.ResizeInUseVolumes
+}
+
+// ShouldCreateApplicationSecret returns true if for this cluster,
+// during the bootstrap phase, we need to create a secret to store application credentials
+func (cluster *Cluster) ShouldCreateApplicationSecret() bool {
+	return cluster.ShouldInitDBCreateApplicationSecret() ||
+		cluster.ShouldPgBaseBackupCreateApplicationSecret() ||
+		cluster.ShouldRecoveryCreateApplicationSecret()
+}
+
+// ShouldInitDBCreateApplicationSecret returns true if for this cluster,
+// during the bootstrap phase using initDB, we need to create a new application secret
+func (cluster *Cluster) ShouldInitDBCreateApplicationSecret() bool {
+	return cluster.ShouldInitDBCreateApplicationDatabase() &&
+		(cluster.Spec.Bootstrap.InitDB.Secret == nil ||
+			cluster.Spec.Bootstrap.InitDB.Secret.Name == "")
+}
+
+// ShouldPgBaseBackupCreateApplicationSecret returns true if for this cluster,
+// during the bootstrap phase using pg_basebackup, we need to create an application secret
+func (cluster *Cluster) ShouldPgBaseBackupCreateApplicationSecret() bool {
+	return cluster.ShouldPgBaseBackupCreateApplicationDatabase() &&
+		(cluster.Spec.Bootstrap.PgBaseBackup.Secret == nil ||
+			cluster.Spec.Bootstrap.PgBaseBackup.Secret.Name == "")
+}
+
+// ShouldRecoveryCreateApplicationSecret returns true if for this cluster,
+// during the bootstrap phase using recovery, we need to create an application secret
+func (cluster *Cluster) ShouldRecoveryCreateApplicationSecret() bool {
+	return cluster.ShouldRecoveryCreateApplicationDatabase() &&
+		(cluster.Spec.Bootstrap.Recovery.Secret == nil ||
+			cluster.Spec.Bootstrap.Recovery.Secret.Name == "")
+}
+
+// ShouldCreateApplicationDatabase returns true 
if for this cluster, +// during the bootstrap phase, we need to create an application database +func (cluster *Cluster) ShouldCreateApplicationDatabase() bool { + return cluster.ShouldInitDBCreateApplicationDatabase() || + cluster.ShouldRecoveryCreateApplicationDatabase() || + cluster.ShouldPgBaseBackupCreateApplicationDatabase() +} + +// ShouldInitDBRunPostInitApplicationSQLRefs returns true if for this cluster, +// during the bootstrap phase using initDB, we need to run post init SQL files +// for the application database from provided references. +func (cluster *Cluster) ShouldInitDBRunPostInitApplicationSQLRefs() bool { + if cluster.Spec.Bootstrap == nil { + return false + } + + if cluster.Spec.Bootstrap.InitDB == nil { + return false + } + + return cluster.Spec.Bootstrap.InitDB.PostInitApplicationSQLRefs.HasElements() +} + +// ShouldInitDBRunPostInitTemplateSQLRefs returns true if for this cluster, +// during the bootstrap phase using initDB, we need to run post init SQL files +// for the `template1` database from provided references. +func (cluster *Cluster) ShouldInitDBRunPostInitTemplateSQLRefs() bool { + if cluster.Spec.Bootstrap == nil { + return false + } + + if cluster.Spec.Bootstrap.InitDB == nil { + return false + } + + return cluster.Spec.Bootstrap.InitDB.PostInitTemplateSQLRefs.HasElements() +} + +// ShouldInitDBRunPostInitSQLRefs returns true if for this cluster, +// during the bootstrap phase using initDB, we need to run post init SQL files +// for the `postgres` database from provided references. +func (cluster *Cluster) ShouldInitDBRunPostInitSQLRefs() bool { + if cluster.Spec.Bootstrap == nil { + return false + } + + if cluster.Spec.Bootstrap.InitDB == nil { + return false + } + + return cluster.Spec.Bootstrap.InitDB.PostInitSQLRefs.HasElements() +} + +// ShouldInitDBCreateApplicationDatabase returns true if the application database needs to be created during initdb +// job +func (cluster *Cluster) ShouldInitDBCreateApplicationDatabase() bool { + if cluster.Spec.Bootstrap == nil { + return false + } + + if cluster.Spec.Bootstrap.InitDB == nil { + return false + } + + initDBParameters := cluster.Spec.Bootstrap.InitDB + return initDBParameters.Owner != "" && initDBParameters.Database != "" +} + +// ShouldPgBaseBackupCreateApplicationDatabase returns true if the application database needs to be created during the +// pg_basebackup job +func (cluster *Cluster) ShouldPgBaseBackupCreateApplicationDatabase() bool { + // we skip creating the application database if cluster is a replica + if cluster.IsReplica() { + return false + } + if cluster.Spec.Bootstrap == nil { + return false + } + + if cluster.Spec.Bootstrap.PgBaseBackup == nil { + return false + } + + pgBaseBackupParameters := cluster.Spec.Bootstrap.PgBaseBackup + return pgBaseBackupParameters.Owner != "" && pgBaseBackupParameters.Database != "" +} + +// ShouldRecoveryCreateApplicationDatabase returns true if the application database needs to be created during the +// recovery job +func (cluster *Cluster) ShouldRecoveryCreateApplicationDatabase() bool { + // we skip creating the application database if cluster is a replica + if cluster.IsReplica() { + return false + } + + if cluster.Spec.Bootstrap == nil { + return false + } + + if cluster.Spec.Bootstrap.Recovery == nil { + return false + } + + recoveryParameters := cluster.Spec.Bootstrap.Recovery + return recoveryParameters.Owner != "" && recoveryParameters.Database != "" +} + +// ShouldCreateProjectedVolume returns whether we should create the projected all in one 
volume +func (cluster *Cluster) ShouldCreateProjectedVolume() bool { + return cluster.Spec.ProjectedVolumeTemplate != nil +} + +// ShouldCreateWalArchiveVolume returns whether we should create the wal archive volume +func (cluster *Cluster) ShouldCreateWalArchiveVolume() bool { + return cluster.Spec.WalStorage != nil +} + +// ShouldPromoteFromReplicaCluster returns true if the cluster should promote +func (cluster *Cluster) ShouldPromoteFromReplicaCluster() bool { + // If there's no replica cluster configuration there's no + // promotion token too, so we don't need to promote. + if cluster.Spec.ReplicaCluster == nil { + return false + } + + // If we don't have a promotion token, we don't need to promote + if len(cluster.Spec.ReplicaCluster.PromotionToken) == 0 { + return false + } + + // If the current token was already used, there's no need to + // promote + if cluster.Spec.ReplicaCluster.PromotionToken == cluster.Status.LastPromotionToken { + return false + } + return true +} + +// ContainsTablespaces returns true if for this cluster, we need to create tablespaces +func (cluster *Cluster) ContainsTablespaces() bool { + return len(cluster.Spec.Tablespaces) != 0 +} + +// GetPostgresUID returns the UID that is being used for the "postgres" +// user +func (cluster Cluster) GetPostgresUID() int64 { + if cluster.Spec.PostgresUID == 0 { + return defaultPostgresUID + } + return cluster.Spec.PostgresUID +} + +// GetPostgresGID returns the GID that is being used for the "postgres" +// user +func (cluster Cluster) GetPostgresGID() int64 { + if cluster.Spec.PostgresGID == 0 { + return defaultPostgresGID + } + return cluster.Spec.PostgresGID +} + +// ExternalCluster gets the external server with a known name, returning +// true if the server was found and false otherwise +func (cluster Cluster) ExternalCluster(name string) (ExternalCluster, bool) { + for _, server := range cluster.Spec.ExternalClusters { + if server.Name == name { + return server, true + } + } + + return ExternalCluster{}, false +} + +// IsReplica checks if this is a replica cluster or not +func (cluster Cluster) IsReplica() bool { + // Before introducing the "primary" field, the + // "enabled" parameter was declared as a "boolean" + // and was not declared "omitempty". + // + // Legacy replica clusters will have the "replica" stanza + // and the "enabled" field set explicitly to true. + // + // The following code is designed to not change the + // previous semantics. + r := cluster.Spec.ReplicaCluster + if r == nil { + return false + } + + if r.Enabled != nil { + return *r.Enabled + } + + clusterName := r.Self + if len(clusterName) == 0 { + clusterName = cluster.Name + } + + return clusterName != r.Primary +} + +var slotNameNegativeRegex = regexp.MustCompile("[^a-z0-9_]+") + +// GetSlotNameFromInstanceName returns the slot name, given the instance name. 
+// It returns an empty string if High Availability Replication Slots are disabled +func (cluster Cluster) GetSlotNameFromInstanceName(instanceName string) string { + if cluster.Spec.ReplicationSlots == nil || + cluster.Spec.ReplicationSlots.HighAvailability == nil || + !cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() { + return "" + } + + return cluster.Spec.ReplicationSlots.HighAvailability.GetSlotNameFromInstanceName(instanceName) +} + +// GetBarmanEndpointCAForReplicaCluster checks if this is a replica cluster which needs barman endpoint CA +func (cluster Cluster) GetBarmanEndpointCAForReplicaCluster() *SecretKeySelector { + if !cluster.IsReplica() { + return nil + } + sourceName := cluster.Spec.ReplicaCluster.Source + externalCluster, found := cluster.ExternalCluster(sourceName) + if !found || externalCluster.BarmanObjectStore == nil { + return nil + } + return externalCluster.BarmanObjectStore.EndpointCA +} + +// GetClusterAltDNSNames returns all the names needed to build a valid Server Certificate +func (cluster *Cluster) GetClusterAltDNSNames() []string { + buildServiceNames := func(serviceName string, enabled bool) []string { + if !enabled { + return nil + } + return []string{ + serviceName, + fmt.Sprintf("%v.%v", serviceName, cluster.Namespace), + fmt.Sprintf("%v.%v.svc", serviceName, cluster.Namespace), + fmt.Sprintf("%v.%v.svc.cluster.local", serviceName, cluster.Namespace), + } + } + altDNSNames := slices.Concat( + buildServiceNames(cluster.GetServiceReadWriteName(), cluster.IsReadWriteServiceEnabled()), + buildServiceNames(cluster.GetServiceReadName(), cluster.IsReadServiceEnabled()), + buildServiceNames(cluster.GetServiceReadOnlyName(), cluster.IsReadOnlyServiceEnabled()), + ) + + if cluster.Spec.Managed != nil && cluster.Spec.Managed.Services != nil { + for _, service := range cluster.Spec.Managed.Services.Additional { + altDNSNames = append(altDNSNames, buildServiceNames(service.ServiceTemplate.ObjectMeta.Name, true)...) + } + } + + return append(altDNSNames, cluster.Spec.Certificates.getServerAltDNSNames()...) +} + +// UsesSecret checks whether a given secret is used by a Cluster. +// +// This function is also used to discover the set of clusters that +// should be reconciled when a certain secret changes. 
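+//
+// A minimal sketch of the intended call site (the changedSecret variable is
+// illustrative only, not part of this patch):
+//
+//	if cluster.UsesSecret(changedSecret.Name) {
+//		// this Cluster depends on the secret: enqueue a reconciliation
+//	}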
+func (cluster *Cluster) UsesSecret(secret string) bool {
+	if _, ok := cluster.Status.SecretsResourceVersion.Metrics[secret]; ok {
+		return true
+	}
+	certificates := cluster.Status.Certificates
+	switch secret {
+	case cluster.GetSuperuserSecretName(),
+		cluster.GetApplicationSecretName(),
+		certificates.ClientCASecret,
+		certificates.ReplicationTLSSecret,
+		certificates.ServerCASecret,
+		certificates.ServerTLSSecret:
+		return true
+	}
+
+	if cluster.UsesSecretInManagedRoles(secret) {
+		return true
+	}
+
+	if cluster.Spec.Backup.IsBarmanEndpointCASet() && cluster.Spec.Backup.BarmanObjectStore.EndpointCA.Name == secret {
+		return true
+	}
+
+	if endpointCA := cluster.GetBarmanEndpointCAForReplicaCluster(); endpointCA != nil && endpointCA.Name == secret {
+		return true
+	}
+
+	if cluster.Status.PoolerIntegrations != nil {
+		for _, pgBouncerSecretName := range cluster.Status.PoolerIntegrations.PgBouncerIntegration.Secrets {
+			if pgBouncerSecretName == secret {
+				return true
+			}
+		}
+	}
+
+	// watch the secrets defined in external clusters
+	return cluster.GetExternalClusterSecrets().Has(secret)
+}
+
+// UsesConfigMap checks whether a given config map is used by a Cluster
+func (cluster *Cluster) UsesConfigMap(config string) (ok bool) {
+	if _, ok := cluster.Status.ConfigMapResourceVersion.Metrics[config]; ok {
+		return true
+	}
+	return false
+}
+
+// IsPodMonitorEnabled checks if the PodMonitor object needs to be created
+func (cluster *Cluster) IsPodMonitorEnabled() bool {
+	if cluster.Spec.Monitoring != nil {
+		return cluster.Spec.Monitoring.EnablePodMonitor
+	}
+
+	return false
+}
+
+// IsMetricsTLSEnabled checks if the metrics endpoint should use TLS
+func (cluster *Cluster) IsMetricsTLSEnabled() bool {
+	if cluster.Spec.Monitoring != nil && cluster.Spec.Monitoring.TLSConfig != nil {
+		return cluster.Spec.Monitoring.TLSConfig.Enabled
+	}
+
+	return false
+}
+
+// GetEnableSuperuserAccess returns whether superuser access is enabled
+func (cluster *Cluster) GetEnableSuperuserAccess() bool {
+	if cluster.Spec.EnableSuperuserAccess != nil {
+		return *cluster.Spec.EnableSuperuserAccess
+	}
+
+	return false
+}
+
+// LogTimestampsWithMessage logs useful information about the cluster
+// timestamps together with the given message
+func (cluster *Cluster) LogTimestampsWithMessage(ctx context.Context, logMessage string) {
+	contextLogger := log.FromContext(ctx)
+
+	currentTimestamp := utils.GetCurrentTimestamp()
+	keysAndValues := []interface{}{
+		"phase", cluster.Status.Phase,
+		"currentTimestamp", currentTimestamp,
+		"targetPrimaryTimestamp", cluster.Status.TargetPrimaryTimestamp,
+		"currentPrimaryTimestamp", cluster.Status.CurrentPrimaryTimestamp,
+	}
+
+	var errs []string
+
+	// Elapsed time since the last request of promotion (TargetPrimaryTimestamp)
+	if diff, err := utils.DifferenceBetweenTimestamps(
+		currentTimestamp,
+		cluster.Status.TargetPrimaryTimestamp,
+	); err == nil {
+		keysAndValues = append(
+			keysAndValues,
+			"msPassedSinceTargetPrimaryTimestamp",
+			diff.Milliseconds(),
+		)
+	} else {
+		errs = append(errs, err.Error())
+	}
+
+	// Elapsed time since the last promotion (CurrentPrimaryTimestamp)
+	if currentPrimaryDifference, err := utils.DifferenceBetweenTimestamps(
+		currentTimestamp,
+		cluster.Status.CurrentPrimaryTimestamp,
+	); err == nil {
+		keysAndValues = append(
+			keysAndValues,
+			"msPassedSinceCurrentPrimaryTimestamp",
+			currentPrimaryDifference.Milliseconds(),
+		)
+	} else {
+		errs = append(errs, err.Error())
+	}
+
+	// Difference between the last promotion and the last request of promotion.
+	// When positive, it is the time taken by the last promotion of a standby
+	// to primary. When negative, a failover/switchover is in progress, and the
+	// value represents the last measured uptime of the primary.
+	if currentPrimaryTargetDifference, err := utils.DifferenceBetweenTimestamps(
+		cluster.Status.CurrentPrimaryTimestamp,
+		cluster.Status.TargetPrimaryTimestamp,
+	); err == nil {
+		keysAndValues = append(
+			keysAndValues,
+			"msDifferenceBetweenCurrentAndTargetPrimary",
+			currentPrimaryTargetDifference.Milliseconds(),
+		)
+	} else {
+		errs = append(errs, err.Error())
+	}
+
+	if len(errs) > 0 {
+		keysAndValues = append(keysAndValues, "timestampParsingErrors", errs)
+	}
+
+	contextLogger.Info(logMessage, keysAndValues...)
+}
+
+// SetInheritedDataAndOwnership sets the cluster as owner of the passed object and then
+// sets all the needed annotations and labels
+func (cluster *Cluster) SetInheritedDataAndOwnership(obj *metav1.ObjectMeta) {
+	cluster.SetInheritedData(obj)
+	utils.SetAsOwnedBy(obj, cluster.ObjectMeta, cluster.TypeMeta)
+}
+
+// SetInheritedData sets all the needed annotations and labels
+func (cluster *Cluster) SetInheritedData(obj *metav1.ObjectMeta) {
+	utils.InheritAnnotations(obj, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current)
+	utils.InheritLabels(obj, cluster.Labels, cluster.GetFixedInheritedLabels(), configuration.Current)
+	utils.LabelClusterName(obj, cluster.GetName())
+	utils.SetOperatorVersion(obj, versions.Version)
+}
+
+// ShouldForceLegacyBackup returns true when backups must be taken without
+// passing the name argument, even on Barman version 3.3.0+. This is needed
+// to test both backup systems in the E2E suite
+func (cluster *Cluster) ShouldForceLegacyBackup() bool {
+	return cluster.Annotations[utils.LegacyBackupAnnotationName] == "true"
+}
+
+// GetSeccompProfile returns the proper SeccompProfile set in the cluster for Pods and Containers
+func (cluster *Cluster) GetSeccompProfile() *corev1.SeccompProfile {
+	if cluster.Spec.SeccompProfile != nil {
+		return cluster.Spec.SeccompProfile
+	}
+
+	return &corev1.SeccompProfile{
+		Type: corev1.SeccompProfileTypeRuntimeDefault,
+	}
+}
+
+// GetCoredumpFilter gets the coredump filter value from the cluster annotation
+func (cluster *Cluster) GetCoredumpFilter() string {
+	value, ok := cluster.Annotations[utils.CoredumpFilter]
+	if ok {
+		return value
+	}
+	return system.DefaultCoredumpFilter
+}
+
+// IsInplaceRestartPhase returns true if the cluster is in a phase that handles the in-place restart
+func (cluster *Cluster) IsInplaceRestartPhase() bool {
+	return cluster.Status.Phase == PhaseInplacePrimaryRestart ||
+		cluster.Status.Phase == PhaseInplaceDeletePrimaryRestart
+}
+
+// GetTablespaceConfiguration returns the TablespaceConfiguration for the given name,
+// or nil if no such configuration exists
+func (cluster *Cluster) GetTablespaceConfiguration(name string) *TablespaceConfiguration {
+	for _, tbsConfig := range cluster.Spec.Tablespaces {
+		if name == tbsConfig.Name {
+			return &tbsConfig
+		}
+	}
+
+	return nil
+}
+
+// GetServerCASecretObjectKey returns a types.NamespacedName pointing to the secret
+func (cluster *Cluster) GetServerCASecretObjectKey() types.NamespacedName {
+	return types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.GetServerCASecretName()}
+}
+
+// IsBarmanBackupConfigured returns true if one of the possible backup destinations
+// is configured, false otherwise
+func (backupConfiguration *BackupConfiguration) IsBarmanBackupConfigured() bool {
return backupConfiguration != nil && backupConfiguration.BarmanObjectStore != nil && + backupConfiguration.BarmanObjectStore.BarmanCredentials.ArePopulated() +} + +// IsBarmanEndpointCASet returns true if we have a CA bundle for the endpoint +// false otherwise +func (backupConfiguration *BackupConfiguration) IsBarmanEndpointCASet() bool { + return backupConfiguration != nil && + backupConfiguration.BarmanObjectStore != nil && + backupConfiguration.BarmanObjectStore.EndpointCA != nil && + backupConfiguration.BarmanObjectStore.EndpointCA.Name != "" && + backupConfiguration.BarmanObjectStore.EndpointCA.Key != "" +} + +// UpdateBackupTimes sets the firstRecoverabilityPoint and lastSuccessfulBackup +// for the provided method, as well as the overall firstRecoverabilityPoint and +// lastSuccessfulBackup for the cluster +func (cluster *Cluster) UpdateBackupTimes( + backupMethod BackupMethod, + firstRecoverabilityPoint *time.Time, + lastSuccessfulBackup *time.Time, +) { + type comparer func(a metav1.Time, b metav1.Time) bool + // tryGetMaxTime gets either the newest or oldest time from a set of backup times, + // depending on the comparer argument passed to it + tryGetMaxTime := func(m map[BackupMethod]metav1.Time, compare comparer) string { + var maximum metav1.Time + for _, ts := range m { + if maximum.IsZero() || compare(ts, maximum) { + maximum = ts + } + } + result := "" + if !maximum.IsZero() { + result = maximum.Format(time.RFC3339) + } + + return result + } + + setTime := func(backupTimes map[BackupMethod]metav1.Time, value *time.Time) map[BackupMethod]metav1.Time { + if value == nil { + delete(backupTimes, backupMethod) + return backupTimes + } + + if backupTimes == nil { + backupTimes = make(map[BackupMethod]metav1.Time) + } + + backupTimes[backupMethod] = metav1.NewTime(*value) + return backupTimes + } + + cluster.Status.FirstRecoverabilityPointByMethod = setTime(cluster.Status.FirstRecoverabilityPointByMethod, + firstRecoverabilityPoint) + cluster.Status.FirstRecoverabilityPoint = tryGetMaxTime( + cluster.Status.FirstRecoverabilityPointByMethod, + // we pass a comparer to get the first among the recoverability points + func(a metav1.Time, b metav1.Time) bool { + return a.Before(&b) + }) + + cluster.Status.LastSuccessfulBackupByMethod = setTime(cluster.Status.LastSuccessfulBackupByMethod, + lastSuccessfulBackup) + cluster.Status.LastSuccessfulBackup = tryGetMaxTime( + cluster.Status.LastSuccessfulBackupByMethod, + // we pass a comparer to get the last among the last backup times per method + func(a metav1.Time, b metav1.Time) bool { + return b.Before(&a) + }) +} + +// IsReadServiceEnabled checks if the read service is enabled for the cluster. +// It returns false if the read service is listed in the DisabledDefaultServices slice. +func (cluster *Cluster) IsReadServiceEnabled() bool { + if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { + return true + } + + return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeR) +} + +// IsReadWriteServiceEnabled checks if the read-write service is enabled for the cluster. +// It returns false if the read-write service is listed in the DisabledDefaultServices slice. 
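+//
+// For example, assuming the usual "rw" selector name, a manifest fragment
+// such as this one (hypothetical, not part of this patch):
+//
+//	managed:
+//	  services:
+//	    disabledDefaultServices: ["rw"]
+//
+// makes this method return false.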
+func (cluster *Cluster) IsReadWriteServiceEnabled() bool { + if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { + return true + } + return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRW) +} + +// IsReadOnlyServiceEnabled checks if the read-only service is enabled for the cluster. +// It returns false if the read-only service is listed in the DisabledDefaultServices slice. +func (cluster *Cluster) IsReadOnlyServiceEnabled() bool { + if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { + return true + } + + return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRO) +} + +// BuildPostgresOptions create the list of options that +// should be added to the PostgreSQL configuration to +// recover given a certain target +func (target *RecoveryTarget) BuildPostgresOptions() string { + result := "" + + if target == nil { + return result + } + + if target.TargetTLI != "" { + result += fmt.Sprintf( + "recovery_target_timeline = '%v'\n", + target.TargetTLI) + } + if target.TargetXID != "" { + result += fmt.Sprintf( + "recovery_target_xid = '%v'\n", + target.TargetXID) + } + if target.TargetName != "" { + result += fmt.Sprintf( + "recovery_target_name = '%v'\n", + target.TargetName) + } + if target.TargetLSN != "" { + result += fmt.Sprintf( + "recovery_target_lsn = '%v'\n", + target.TargetLSN) + } + if target.TargetTime != "" { + result += fmt.Sprintf( + "recovery_target_time = '%v'\n", + utils.ConvertToPostgresFormat(target.TargetTime)) + } + if target.TargetImmediate != nil && *target.TargetImmediate { + result += "recovery_target = immediate\n" + } + if target.Exclusive != nil && *target.Exclusive { + result += "recovery_target_inclusive = false\n" + } else { + result += "recovery_target_inclusive = true\n" + } + + return result +} diff --git a/api/v1/cluster_types_test.go b/api/v1/cluster_funcs_test.go similarity index 100% rename from api/v1/cluster_types_test.go rename to api/v1/cluster_funcs_test.go diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 9c6027ad57..d259c48703 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -17,27 +17,12 @@ limitations under the License. 
package v1 import ( - "context" - "fmt" "regexp" - "slices" - "strconv" - "strings" - "time" - "github.com/cloudnative-pg/machinery/pkg/log" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" - "github.com/cloudnative-pg/cloudnative-pg/pkg/system" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) const ( @@ -179,16 +164,6 @@ type VolumeSnapshotConfiguration struct { OnlineConfiguration OnlineConfiguration `json:"onlineConfiguration,omitempty"` } -// GetOnline tells whether this volume snapshot configuration allows -// online backups -func (configuration *VolumeSnapshotConfiguration) GetOnline() bool { - if configuration.Online == nil { - return true - } - - return *configuration.Online -} - // OnlineConfiguration contains the configuration parameters for the online volume snapshot type OnlineConfiguration struct { // If false, the function will return immediately after the backup is completed, @@ -213,24 +188,6 @@ type OnlineConfiguration struct { ImmediateCheckpoint *bool `json:"immediateCheckpoint,omitempty"` } -// GetWaitForArchive tells whether to wait for archive or not -func (o OnlineConfiguration) GetWaitForArchive() bool { - if o.WaitForArchive == nil { - return true - } - - return *o.WaitForArchive -} - -// GetImmediateCheckpoint tells whether to execute an immediate checkpoint -func (o OnlineConfiguration) GetImmediateCheckpoint() bool { - if o.ImmediateCheckpoint == nil { - return false - } - - return *o.ImmediateCheckpoint -} - // ImageCatalogRef defines the reference to a major version in an ImageCatalog type ImageCatalogRef struct { // +kubebuilder:validation:XValidation:rule="self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'",message="Only image catalogs are supported" @@ -523,18 +480,6 @@ type ClusterSpec struct { // configuration parameters type PluginConfigurationList []PluginConfiguration -// GetEnabledPluginNames gets the name of the plugins that are involved -// in the reconciliation of this cluster -func (pluginList PluginConfigurationList) GetEnabledPluginNames() (result []string) { - pluginNames := make([]string, 0, len(pluginList)) - for _, pluginDeclaration := range pluginList { - if pluginDeclaration.IsEnabled() { - pluginNames = append(pluginNames, pluginDeclaration.Name) - } - } - return pluginNames -} - const ( // PhaseSwitchover when a cluster is changing the primary node PhaseSwitchover = "Switchover in progress" @@ -604,24 +549,6 @@ type EphemeralVolumesSizeLimitConfiguration struct { TemporaryData *resource.Quantity `json:"temporaryData,omitempty"` } -// GetShmLimit gets the `/dev/shm` memory size limit -func (e *EphemeralVolumesSizeLimitConfiguration) GetShmLimit() *resource.Quantity { - if e == nil { - return nil - } - - return e.Shm -} - -// GetTemporaryDataLimit gets the temporary storage size limit -func (e *EphemeralVolumesSizeLimitConfiguration) GetTemporaryDataLimit() *resource.Quantity { - if e == nil { - return nil - } - - return e.TemporaryData -} - // ServiceAccountTemplate contains the template needed to generate the service accounts type ServiceAccountTemplate struct { // Metadata are the metadata 
to be used for the generated @@ -629,37 +556,9 @@ type ServiceAccountTemplate struct { Metadata Metadata `json:"metadata"` } -// MergeMetadata adds the passed custom annotations and labels in the service account. -func (st *ServiceAccountTemplate) MergeMetadata(sa *corev1.ServiceAccount) { - if st == nil { - return - } - if sa.Labels == nil { - sa.Labels = map[string]string{} - } - if sa.Annotations == nil { - sa.Annotations = map[string]string{} - } - - utils.MergeMap(sa.Labels, st.Metadata.Labels) - utils.MergeMap(sa.Annotations, st.Metadata.Annotations) -} - // PodTopologyLabels represent the topology of a Pod. map[labelName]labelValue type PodTopologyLabels map[string]string -// MatchesTopology checks if the two topologies have -// the same label values (labels are specified in SyncReplicaElectionConstraints.NodeLabelsAntiAffinity) -func (topologyLabels PodTopologyLabels) MatchesTopology(instanceTopology PodTopologyLabels) bool { - log.Debug("matching topology", "main", topologyLabels, "second", instanceTopology) - for mainLabelName, mainLabelValue := range topologyLabels { - if mainLabelValue != instanceTopology[mainLabelName] { - return false - } - } - return true -} - // PodName is the name of a Pod type PodName string @@ -760,16 +659,6 @@ type AvailableArchitecture struct { Hash string `json:"hash"` } -// GetAvailableArchitecture returns an AvailableArchitecture given it's name. It returns nil if it's not found. -func (status *ClusterStatus) GetAvailableArchitecture(archName string) *AvailableArchitecture { - for _, architecture := range status.AvailableArchitectures { - if architecture.GoArch == archName { - return &architecture - } - } - return nil -} - // ClusterStatus defines the observed state of Cluster type ClusterStatus struct { // The total number of PVC Groups detected in the cluster. It may differ from the number of existing instance pods. @@ -1000,38 +889,6 @@ const ( ConditionClusterReady ClusterConditionType = "Ready" ) -// A Condition that can be used to communicate the Backup progress -var ( - // BackupSucceededCondition is added to a backup - // when it was completed correctly - BackupSucceededCondition = &metav1.Condition{ - Type: string(ConditionBackup), - Status: metav1.ConditionTrue, - Reason: string(ConditionReasonLastBackupSucceeded), - Message: "Backup was successful", - } - - // BackupStartingCondition is added to a backup - // when it started - BackupStartingCondition = &metav1.Condition{ - Type: string(ConditionBackup), - Status: metav1.ConditionFalse, - Reason: string(ConditionBackupStarted), - Message: "New Backup starting up", - } - - // BuildClusterBackupFailedCondition builds - // ConditionReasonLastBackupFailed condition - BuildClusterBackupFailedCondition = func(err error) *metav1.Condition { - return &metav1.Condition{ - Type: string(ConditionBackup), - Status: metav1.ConditionFalse, - Reason: string(ConditionReasonLastBackupFailed), - Message: err.Error(), - } - } -) - // ConditionStatus defines conditions of resources type ConditionStatus string @@ -1164,62 +1021,6 @@ type synchronizeReplicasCache struct { compileErrors []error `json:"-"` } -// DeepCopyInto needs to be manually added for the controller-gen compiler to work correctly, given that it cannot -// generate the DeepCopyInto for the regexp type. 
-// The method is empty because we don't want to transfer the cache when invoking DeepCopyInto -func (receiver synchronizeReplicasCache) DeepCopyInto(*synchronizeReplicasCache) {} - -func (r *SynchronizeReplicasConfiguration) compileRegex() []error { - if r == nil { - return nil - } - if r.compiled { - return r.compileErrors - } - - var errs []error - for _, pattern := range r.ExcludePatterns { - re, err := regexp.Compile(pattern) - if err != nil { - errs = append(errs, err) - continue - } - r.compiledPatterns = append(r.compiledPatterns, *re) - } - - r.compiled = true - r.compileErrors = errs - return errs -} - -// GetEnabled returns false if synchronized replication slots are disabled, defaults to true -func (r *SynchronizeReplicasConfiguration) GetEnabled() bool { - if r != nil && r.Enabled != nil { - return *r.Enabled - } - return true -} - -// IsExcludedByUser returns if a replication slot should not be reconciled on the replicas -func (r *SynchronizeReplicasConfiguration) IsExcludedByUser(slotName string) (bool, error) { - if r == nil { - return false, nil - } - - // this is an unexpected issue, validation should happen at webhook level - if errs := r.compileRegex(); len(errs) > 0 { - return false, errs[0] - } - - for _, re := range r.compiledPatterns { - if re.MatchString(slotName) { - return true, nil - } - } - - return false, nil -} - // ReplicationSlotsConfiguration encapsulates the configuration // of replication slots type ReplicationSlotsConfiguration struct { @@ -1240,19 +1041,6 @@ type ReplicationSlotsConfiguration struct { SynchronizeReplicas *SynchronizeReplicasConfiguration `json:"synchronizeReplicas,omitempty"` } -// GetEnabled returns false if replication slots are disabled, default is true -func (r *ReplicationSlotsConfiguration) GetEnabled() bool { - return r.SynchronizeReplicas.GetEnabled() || r.HighAvailability.GetEnabled() -} - -// GetUpdateInterval returns the update interval, defaulting to DefaultReplicationSlotsUpdateInterval if empty -func (r *ReplicationSlotsConfiguration) GetUpdateInterval() time.Duration { - if r == nil || r.UpdateInterval <= 0 { - return DefaultReplicationSlotsUpdateInterval - } - return time.Duration(r.UpdateInterval) * time.Second -} - // ReplicationSlotsHAConfiguration encapsulates the configuration // of the replication slots that are automatically managed by // the operator to control the streaming replication connections @@ -1282,39 +1070,6 @@ type ReplicationSlotsHAConfiguration struct { SlotPrefix string `json:"slotPrefix,omitempty"` } -// GetSlotPrefix returns the HA slot prefix, defaulting to DefaultReplicationSlotsHASlotPrefix if empty -func (r *ReplicationSlotsHAConfiguration) GetSlotPrefix() string { - if r == nil || r.SlotPrefix == "" { - return DefaultReplicationSlotsHASlotPrefix - } - return r.SlotPrefix -} - -// GetSlotNameFromInstanceName returns the slot name, given the instance name. 
-// It returns an empty string if High Availability Replication Slots are disabled -func (r *ReplicationSlotsHAConfiguration) GetSlotNameFromInstanceName(instanceName string) string { - if r == nil || !r.GetEnabled() { - return "" - } - - slotName := fmt.Sprintf( - "%s%s", - r.GetSlotPrefix(), - instanceName, - ) - sanitizedName := slotNameNegativeRegex.ReplaceAllString(strings.ToLower(slotName), "_") - - return sanitizedName -} - -// GetEnabled returns false if replication slots are disabled, default is true -func (r *ReplicationSlotsHAConfiguration) GetEnabled() bool { - if r != nil && r.Enabled != nil { - return *r.Enabled - } - return true -} - // KubernetesUpgradeStrategy tells the operator if the user want to // allocate more space while upgrading a k8s node which is hosting // the PostgreSQL Pods or just wait for the node to come up @@ -1403,12 +1158,6 @@ const ( SynchronousReplicaConfigurationMethodAny = SynchronousReplicaConfigurationMethod("any") ) -// ToPostgreSQLConfigurationKeyword returns the contained value as a valid PostgreSQL parameter to be injected -// in the 'synchronous_standby_names' field -func (s SynchronousReplicaConfigurationMethod) ToPostgreSQLConfigurationKeyword() string { - return strings.ToUpper(string(s)) -} - // SynchronousReplicaConfiguration contains the configuration of the // PostgreSQL synchronous replication feature. // Important: at this moment, also `.spec.minSyncReplicas` and `.spec.maxSyncReplicas` @@ -1624,14 +1373,6 @@ type CertificatesConfiguration struct { ServerAltDNSNames []string `json:"serverAltDNSNames,omitempty"` } -func (c *CertificatesConfiguration) getServerAltDNSNames() []string { - if c == nil { - return nil - } - - return c.ServerAltDNSNames -} - // CertificatesStatus contains configuration certificates and related expiration dates. type CertificatesStatus struct { // Needed configurations to handle server certificates, initialized with default values, if needed. @@ -1806,16 +1547,6 @@ type SQLRefs struct { ConfigMapRefs []ConfigMapKeySelector `json:"configMapRefs,omitempty"` } -// HasElements returns true if it contains any Reference -func (s *SQLRefs) HasElements() bool { - if s == nil { - return false - } - - return len(s.ConfigMapRefs) != 0 || - len(s.SecretRefs) != 0 -} - // BootstrapRecovery contains the configuration required to restore // from an existing cluster using 3 methodologies: external cluster, // volume snapshots or backup objects. 
Full recovery and Point-In-Time @@ -1965,26 +1696,6 @@ type RecoveryTarget struct { Exclusive *bool `json:"exclusive,omitempty"` } -// GetBackupID gets the backup ID -func (target *RecoveryTarget) GetBackupID() string { - return target.BackupID -} - -// GetTargetTime gets the target time -func (target *RecoveryTarget) GetTargetTime() string { - return target.TargetTime -} - -// GetTargetLSN gets the target LSN -func (target *RecoveryTarget) GetTargetLSN() string { - return target.TargetLSN -} - -// GetTargetTLI gets the target timeline -func (target *RecoveryTarget) GetTargetTLI() string { - return target.TargetTLI -} - // StorageConfiguration is the configuration used to create and reconcile PVCs, // usable for WAL volumes, PGDATA volumes, or tablespaces type StorageConfiguration struct { @@ -2011,28 +1722,6 @@ type StorageConfiguration struct { PersistentVolumeClaimTemplate *corev1.PersistentVolumeClaimSpec `json:"pvcTemplate,omitempty"` } -// GetSizeOrNil returns the requests storage size -func (s *StorageConfiguration) GetSizeOrNil() *resource.Quantity { - if s == nil { - return nil - } - - if s.Size != "" { - quantity, err := resource.ParseQuantity(s.Size) - if err != nil { - return nil - } - - return &quantity - } - - if s.PersistentVolumeClaimTemplate != nil { - return s.PersistentVolumeClaimTemplate.Resources.Requests.Storage() - } - - return nil -} - // TablespaceConfiguration is the configuration of a tablespace, and includes // the storage specification for the tablespace type TablespaceConfiguration struct { @@ -2219,11 +1908,6 @@ type MonitoringConfiguration struct { PodMonitorRelabelConfigs []monitoringv1.RelabelConfig `json:"podMonitorRelabelings,omitempty"` } -// AreDefaultQueriesDisabled checks whether default monitoring queries should be disabled -func (m *MonitoringConfiguration) AreDefaultQueriesDisabled() bool { - return m != nil && m.DisableDefaultQueries != nil && *m.DisableDefaultQueries -} - // ClusterMonitoringTLSConfiguration is the type containing the TLS configuration // for the cluster's monitoring type ClusterMonitoringTLSConfiguration struct { @@ -2274,15 +1958,6 @@ type ExternalCluster struct { BarmanObjectStore *BarmanObjectStoreConfiguration `json:"barmanObjectStore,omitempty"` } -// GetServerName returns the server name, defaulting to the name of the external cluster or using the one specified -// in the BarmanObjectStore -func (in ExternalCluster) GetServerName() string { - if in.BarmanObjectStore != nil && in.BarmanObjectStore.ServerName != "" { - return in.BarmanObjectStore.ServerName - } - return in.Name -} - // EnsureOption represents whether we should enforce the presence or absence of // a Role in a PostgreSQL instance type EnsureOption string @@ -2371,14 +2046,6 @@ type PluginConfiguration struct { Parameters map[string]string `json:"parameters,omitempty"` } -// IsEnabled returns true when this plugin is enabled -func (config *PluginConfiguration) IsEnabled() bool { - if config.Enabled == nil { - return true - } - return *config.Enabled -} - // PluginStatus is the status of a loaded plugin type PluginStatus struct { // Name is the name of the plugin @@ -2500,22 +2167,6 @@ type RoleConfiguration struct { BypassRLS bool `json:"bypassrls,omitempty"` // Row-Level Security } -// GetRoleSecretsName gets the name of the secret which is used to store the role's password -func (roleConfiguration *RoleConfiguration) GetRoleSecretsName() string { - if roleConfiguration.PasswordSecret != nil { - return roleConfiguration.PasswordSecret.Name - } - return "" -} 
- -// GetRoleInherit return the inherit attribute of a roleConfiguration -func (roleConfiguration *RoleConfiguration) GetRoleInherit() bool { - if roleConfiguration.Inherit != nil { - return *roleConfiguration.Inherit - } - return true -} - // +genclient // +kubebuilder:object:root=true // +kubebuilder:storageversion @@ -2612,1073 +2263,6 @@ type ConfigMapResourceVersion struct { Metrics map[string]string `json:"metrics,omitempty"` } -// SetManagedRoleSecretVersion Add or update or delete the resource version of the managed role secret -func (secretResourceVersion *SecretsResourceVersion) SetManagedRoleSecretVersion(secret string, version *string) { - if secretResourceVersion.ManagedRoleSecretVersions == nil { - secretResourceVersion.ManagedRoleSecretVersions = make(map[string]string) - } - if version == nil { - delete(secretResourceVersion.ManagedRoleSecretVersions, secret) - } else { - secretResourceVersion.ManagedRoleSecretVersions[secret] = *version - } -} - -// SetExternalClusterSecretVersion Add or update or delete the resource version of the secret used in external clusters -func (secretResourceVersion *SecretsResourceVersion) SetExternalClusterSecretVersion( - secretName string, - version *string, -) { - if secretResourceVersion.ExternalClusterSecretVersions == nil { - secretResourceVersion.ExternalClusterSecretVersions = make(map[string]string) - } - - if version == nil { - delete(secretResourceVersion.ExternalClusterSecretVersions, secretName) - return - } - - secretResourceVersion.ExternalClusterSecretVersions[secretName] = *version -} - -// SetInContext records the cluster in the given context -func (cluster *Cluster) SetInContext(ctx context.Context) context.Context { - return context.WithValue(ctx, utils.ContextKeyCluster, cluster) -} - -// GetImageName get the name of the image that should be used -// to create the pods -func (cluster *Cluster) GetImageName() string { - // If the image is specified in the status, use that one - // It should be there since the first reconciliation - if len(cluster.Status.Image) > 0 { - return cluster.Status.Image - } - - // Fallback to the information we have in the spec - if len(cluster.Spec.ImageName) > 0 { - return cluster.Spec.ImageName - } - - // TODO: check: does a scenario exists in which we do have an imageCatalog - // and no status.image? In that case this should probably error out, not - // returning the default image name. - return configuration.Current.PostgresImageName -} - -// GetPostgresqlVersion gets the PostgreSQL image version detecting it from the -// image name or from the ImageCatalogRef. 
-// Example: -// -// ghcr.io/cloudnative-pg/postgresql:14.0 corresponds to version 140000 -// ghcr.io/cloudnative-pg/postgresql:13.2 corresponds to version 130002 -// ghcr.io/cloudnative-pg/postgresql:9.6.3 corresponds to version 90603 -func (cluster *Cluster) GetPostgresqlVersion() (int, error) { - if cluster.Spec.ImageCatalogRef != nil { - return postgres.GetPostgresVersionFromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major)) - } - - image := cluster.GetImageName() - tag := utils.GetImageTag(image) - return postgres.GetPostgresVersionFromTag(tag) -} - -// GetPostgresqlMajorVersion gets the PostgreSQL image major version used in the Cluster -func (cluster *Cluster) GetPostgresqlMajorVersion() (int, error) { - version, err := cluster.GetPostgresqlVersion() - if err != nil { - return 0, err - } - return postgres.GetPostgresMajorVersion(version), nil -} - -// GetImagePullSecret get the name of the pull secret to use -// to download the PostgreSQL image -func (cluster *Cluster) GetImagePullSecret() string { - return cluster.Name + ClusterSecretSuffix -} - -// GetSuperuserSecretName get the secret name of the PostgreSQL superuser -func (cluster *Cluster) GetSuperuserSecretName() string { - if cluster.Spec.SuperuserSecret != nil && - cluster.Spec.SuperuserSecret.Name != "" { - return cluster.Spec.SuperuserSecret.Name - } - - return fmt.Sprintf("%v%v", cluster.Name, SuperUserSecretSuffix) -} - -// GetEnableLDAPAuth return true if bind or bind+search method are -// configured in the cluster configuration -func (cluster *Cluster) GetEnableLDAPAuth() bool { - if cluster.Spec.PostgresConfiguration.LDAP != nil && - (cluster.Spec.PostgresConfiguration.LDAP.BindAsAuth != nil || - cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth != nil) { - return true - } - return false -} - -// GetLDAPSecretName gets the secret name containing the LDAP password -func (cluster *Cluster) GetLDAPSecretName() string { - if cluster.Spec.PostgresConfiguration.LDAP != nil && - cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth != nil && - cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth.BindPassword != nil { - return cluster.Spec.PostgresConfiguration.LDAP.BindSearchAuth.BindPassword.Name - } - return "" -} - -// ContainsManagedRolesConfiguration returns true iff there are managed roles configured -func (cluster *Cluster) ContainsManagedRolesConfiguration() bool { - return cluster.Spec.Managed != nil && len(cluster.Spec.Managed.Roles) > 0 -} - -// GetExternalClusterSecrets returns the secrets used by external Clusters -func (cluster *Cluster) GetExternalClusterSecrets() *stringset.Data { - secrets := stringset.New() - - if cluster.Spec.ExternalClusters != nil { - for _, externalCluster := range cluster.Spec.ExternalClusters { - if externalCluster.Password != nil { - secrets.Put(externalCluster.Password.Name) - } - if externalCluster.SSLKey != nil { - secrets.Put(externalCluster.SSLKey.Name) - } - if externalCluster.SSLCert != nil { - secrets.Put(externalCluster.SSLCert.Name) - } - if externalCluster.SSLRootCert != nil { - secrets.Put(externalCluster.SSLRootCert.Name) - } - } - } - return secrets -} - -// UsesSecretInManagedRoles checks if the given secret name is used in a managed role -func (cluster *Cluster) UsesSecretInManagedRoles(secretName string) bool { - if !cluster.ContainsManagedRolesConfiguration() { - return false - } - for _, role := range cluster.Spec.Managed.Roles { - if role.PasswordSecret != nil && role.PasswordSecret.Name == secretName { - return true - } - } - return false -} - -// 
GetApplicationSecretName get the name of the application secret for any bootstrap type -func (cluster *Cluster) GetApplicationSecretName() string { - bootstrap := cluster.Spec.Bootstrap - if bootstrap == nil { - return fmt.Sprintf("%v%v", cluster.Name, ApplicationUserSecretSuffix) - } - recovery := bootstrap.Recovery - if recovery != nil && recovery.Secret != nil && recovery.Secret.Name != "" { - return recovery.Secret.Name - } - - pgBaseBackup := bootstrap.PgBaseBackup - if pgBaseBackup != nil && pgBaseBackup.Secret != nil && pgBaseBackup.Secret.Name != "" { - return pgBaseBackup.Secret.Name - } - - initDB := bootstrap.InitDB - if initDB != nil && initDB.Secret != nil && initDB.Secret.Name != "" { - return initDB.Secret.Name - } - - return fmt.Sprintf("%v%v", cluster.Name, ApplicationUserSecretSuffix) -} - -// GetApplicationDatabaseName get the name of the application database for a specific bootstrap -func (cluster *Cluster) GetApplicationDatabaseName() string { - bootstrap := cluster.Spec.Bootstrap - if bootstrap == nil { - return "" - } - - if bootstrap.Recovery != nil && bootstrap.Recovery.Database != "" { - return bootstrap.Recovery.Database - } - - if bootstrap.PgBaseBackup != nil && bootstrap.PgBaseBackup.Database != "" { - return bootstrap.PgBaseBackup.Database - } - - if bootstrap.InitDB != nil && bootstrap.InitDB.Database != "" { - return bootstrap.InitDB.Database - } - - return "" -} - -// GetApplicationDatabaseOwner get the owner user of the application database for a specific bootstrap -func (cluster *Cluster) GetApplicationDatabaseOwner() string { - bootstrap := cluster.Spec.Bootstrap - if bootstrap == nil { - return "" - } - - if bootstrap.Recovery != nil && bootstrap.Recovery.Owner != "" { - return bootstrap.Recovery.Owner - } - - if bootstrap.PgBaseBackup != nil && bootstrap.PgBaseBackup.Owner != "" { - return bootstrap.PgBaseBackup.Owner - } - - if bootstrap.InitDB != nil && bootstrap.InitDB.Owner != "" { - return bootstrap.InitDB.Owner - } - - return "" -} - -// GetServerCASecretName get the name of the secret containing the CA -// of the cluster -func (cluster *Cluster) GetServerCASecretName() string { - if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ServerCASecret != "" { - return cluster.Spec.Certificates.ServerCASecret - } - return fmt.Sprintf("%v%v", cluster.Name, DefaultServerCaSecretSuffix) -} - -// GetServerTLSSecretName get the name of the secret containing the -// certificate that is used for the PostgreSQL servers -func (cluster *Cluster) GetServerTLSSecretName() string { - if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ServerTLSSecret != "" { - return cluster.Spec.Certificates.ServerTLSSecret - } - return fmt.Sprintf("%v%v", cluster.Name, ServerSecretSuffix) -} - -// GetClientCASecretName get the name of the secret containing the CA -// of the cluster -func (cluster *Cluster) GetClientCASecretName() string { - if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ClientCASecret != "" { - return cluster.Spec.Certificates.ClientCASecret - } - return fmt.Sprintf("%v%v", cluster.Name, ClientCaSecretSuffix) -} - -// GetFixedInheritedAnnotations gets the annotations that should be -// inherited by all resources according the cluster spec -func (cluster *Cluster) GetFixedInheritedAnnotations() map[string]string { - if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Annotations == nil { - return nil - } - return cluster.Spec.InheritedMetadata.Annotations -} - -// GetFixedInheritedLabels gets the 
labels that should be -// inherited by all resources according the cluster spec -func (cluster *Cluster) GetFixedInheritedLabels() map[string]string { - if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Labels == nil { - return nil - } - return cluster.Spec.InheritedMetadata.Labels -} - -// GetReplicationSecretName get the name of the secret for the replication user -func (cluster *Cluster) GetReplicationSecretName() string { - if cluster.Spec.Certificates != nil && cluster.Spec.Certificates.ReplicationTLSSecret != "" { - return cluster.Spec.Certificates.ReplicationTLSSecret - } - return fmt.Sprintf("%v%v", cluster.Name, ReplicationSecretSuffix) -} - -// GetServiceAnyName return the name of the service that is used as DNS -// domain for all the nodes, even if they are not ready -func (cluster *Cluster) GetServiceAnyName() string { - return fmt.Sprintf("%v%v", cluster.Name, ServiceAnySuffix) -} - -// GetServiceReadName return the default name of the service that is used for -// read transactions (including the primary) -func (cluster *Cluster) GetServiceReadName() string { - return fmt.Sprintf("%v%v", cluster.Name, ServiceReadSuffix) -} - -// GetServiceReadOnlyName return the default name of the service that is used for -// read-only transactions (excluding the primary) -func (cluster *Cluster) GetServiceReadOnlyName() string { - return fmt.Sprintf("%v%v", cluster.Name, ServiceReadOnlySuffix) -} - -// GetServiceReadWriteName return the default name of the service that is used for -// read-write transactions -func (cluster *Cluster) GetServiceReadWriteName() string { - return fmt.Sprintf("%v%v", cluster.Name, ServiceReadWriteSuffix) -} - -// GetMaxStartDelay get the amount of time of startDelay config option -func (cluster *Cluster) GetMaxStartDelay() int32 { - if cluster.Spec.MaxStartDelay > 0 { - return cluster.Spec.MaxStartDelay - } - return DefaultStartupDelay -} - -// GetMaxStopDelay get the amount of time PostgreSQL has to stop -func (cluster *Cluster) GetMaxStopDelay() int32 { - if cluster.Spec.MaxStopDelay > 0 { - return cluster.Spec.MaxStopDelay - } - return 1800 -} - -// GetSmartShutdownTimeout is used to ensure that smart shutdown timeout is a positive integer -func (cluster *Cluster) GetSmartShutdownTimeout() int32 { - if cluster.Spec.SmartShutdownTimeout != nil { - return *cluster.Spec.SmartShutdownTimeout - } - return 180 -} - -// GetRestartTimeout is used to have a timeout for operations that involve -// a restart of a PostgreSQL instance -func (cluster *Cluster) GetRestartTimeout() int32 { - return cluster.GetMaxStopDelay() + cluster.GetMaxStartDelay() -} - -// GetMaxSwitchoverDelay get the amount of time PostgreSQL has to stop before switchover -func (cluster *Cluster) GetMaxSwitchoverDelay() int32 { - if cluster.Spec.MaxSwitchoverDelay > 0 { - return cluster.Spec.MaxSwitchoverDelay - } - return DefaultMaxSwitchoverDelay -} - -// GetPrimaryUpdateStrategy get the cluster primary update strategy, -// defaulting to unsupervised -func (cluster *Cluster) GetPrimaryUpdateStrategy() PrimaryUpdateStrategy { - strategy := cluster.Spec.PrimaryUpdateStrategy - if strategy == "" { - return PrimaryUpdateStrategyUnsupervised - } - - return strategy -} - -// GetPrimaryUpdateMethod get the cluster primary update method, -// defaulting to restart -func (cluster *Cluster) GetPrimaryUpdateMethod() PrimaryUpdateMethod { - strategy := cluster.Spec.PrimaryUpdateMethod - if strategy == "" { - return PrimaryUpdateMethodRestart - } - - return strategy -} - -// GetEnablePDB 
get the cluster EnablePDB value, defaults to true -func (cluster *Cluster) GetEnablePDB() bool { - if cluster.Spec.EnablePDB == nil { - return true - } - - return *cluster.Spec.EnablePDB -} - -// IsNodeMaintenanceWindowInProgress check if the upgrade mode is active or not -func (cluster *Cluster) IsNodeMaintenanceWindowInProgress() bool { - return cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.InProgress -} - -// GetPgCtlTimeoutForPromotion returns the timeout that should be waited for an instance to be promoted -// to primary. As default, DefaultPgCtlTimeoutForPromotion is big enough to simulate an infinite timeout -func (cluster *Cluster) GetPgCtlTimeoutForPromotion() int32 { - timeout := cluster.Spec.PostgresConfiguration.PgCtlTimeoutForPromotion - if timeout == 0 { - return DefaultPgCtlTimeoutForPromotion - } - return timeout -} - -// IsReusePVCEnabled check if in a maintenance window we should reuse PVCs -func (cluster *Cluster) IsReusePVCEnabled() bool { - reusePVC := true - if cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.ReusePVC != nil { - reusePVC = *cluster.Spec.NodeMaintenanceWindow.ReusePVC - } - return reusePVC -} - -// IsInstanceFenced check if in a given instance should be fenced -func (cluster *Cluster) IsInstanceFenced(instance string) bool { - fencedInstances, err := utils.GetFencedInstances(cluster.Annotations) - if err != nil { - return false - } - - if fencedInstances.Has(utils.FenceAllInstances) { - return true - } - return fencedInstances.Has(instance) -} - -// ShouldResizeInUseVolumes is true when we should resize PVC we already -// created -func (cluster *Cluster) ShouldResizeInUseVolumes() bool { - if cluster.Spec.StorageConfiguration.ResizeInUseVolumes == nil { - return true - } - - return *cluster.Spec.StorageConfiguration.ResizeInUseVolumes -} - -// ShouldCreateApplicationSecret returns true if for this cluster, -// during the bootstrap phase, we need to create a secret to store application credentials -func (cluster *Cluster) ShouldCreateApplicationSecret() bool { - return cluster.ShouldInitDBCreateApplicationSecret() || - cluster.ShouldPgBaseBackupCreateApplicationSecret() || - cluster.ShouldRecoveryCreateApplicationSecret() -} - -// ShouldInitDBCreateApplicationSecret returns true if for this cluster, -// during the bootstrap phase using initDB, we need to create an new application secret -func (cluster *Cluster) ShouldInitDBCreateApplicationSecret() bool { - return cluster.ShouldInitDBCreateApplicationDatabase() && - (cluster.Spec.Bootstrap.InitDB.Secret == nil || - cluster.Spec.Bootstrap.InitDB.Secret.Name == "") -} - -// ShouldPgBaseBackupCreateApplicationSecret returns true if for this cluster, -// during the bootstrap phase using pg_basebackup, we need to create an application secret -func (cluster *Cluster) ShouldPgBaseBackupCreateApplicationSecret() bool { - return cluster.ShouldPgBaseBackupCreateApplicationDatabase() && - (cluster.Spec.Bootstrap.PgBaseBackup.Secret == nil || - cluster.Spec.Bootstrap.PgBaseBackup.Secret.Name == "") -} - -// ShouldRecoveryCreateApplicationSecret returns true if for this cluster, -// during the bootstrap phase using recovery, we need to create an application secret -func (cluster *Cluster) ShouldRecoveryCreateApplicationSecret() bool { - return cluster.ShouldRecoveryCreateApplicationDatabase() && - (cluster.Spec.Bootstrap.Recovery.Secret == nil || - cluster.Spec.Bootstrap.Recovery.Secret.Name == "") -} - -// ShouldCreateApplicationDatabase returns true 
if for this cluster, -// during the bootstrap phase, we need to create an application database -func (cluster *Cluster) ShouldCreateApplicationDatabase() bool { - return cluster.ShouldInitDBCreateApplicationDatabase() || - cluster.ShouldRecoveryCreateApplicationDatabase() || - cluster.ShouldPgBaseBackupCreateApplicationDatabase() -} - -// ShouldInitDBRunPostInitApplicationSQLRefs returns true if for this cluster, -// during the bootstrap phase using initDB, we need to run post init SQL files -// for the application database from provided references. -func (cluster *Cluster) ShouldInitDBRunPostInitApplicationSQLRefs() bool { - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.InitDB == nil { - return false - } - - return cluster.Spec.Bootstrap.InitDB.PostInitApplicationSQLRefs.HasElements() -} - -// ShouldInitDBRunPostInitTemplateSQLRefs returns true if for this cluster, -// during the bootstrap phase using initDB, we need to run post init SQL files -// for the `template1` database from provided references. -func (cluster *Cluster) ShouldInitDBRunPostInitTemplateSQLRefs() bool { - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.InitDB == nil { - return false - } - - return cluster.Spec.Bootstrap.InitDB.PostInitTemplateSQLRefs.HasElements() -} - -// ShouldInitDBRunPostInitSQLRefs returns true if for this cluster, -// during the bootstrap phase using initDB, we need to run post init SQL files -// for the `postgres` database from provided references. -func (cluster *Cluster) ShouldInitDBRunPostInitSQLRefs() bool { - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.InitDB == nil { - return false - } - - return cluster.Spec.Bootstrap.InitDB.PostInitSQLRefs.HasElements() -} - -// ShouldInitDBCreateApplicationDatabase returns true if the application database needs to be created during initdb -// job -func (cluster *Cluster) ShouldInitDBCreateApplicationDatabase() bool { - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.InitDB == nil { - return false - } - - initDBParameters := cluster.Spec.Bootstrap.InitDB - return initDBParameters.Owner != "" && initDBParameters.Database != "" -} - -// ShouldPgBaseBackupCreateApplicationDatabase returns true if the application database needs to be created during the -// pg_basebackup job -func (cluster *Cluster) ShouldPgBaseBackupCreateApplicationDatabase() bool { - // we skip creating the application database if cluster is a replica - if cluster.IsReplica() { - return false - } - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.PgBaseBackup == nil { - return false - } - - pgBaseBackupParameters := cluster.Spec.Bootstrap.PgBaseBackup - return pgBaseBackupParameters.Owner != "" && pgBaseBackupParameters.Database != "" -} - -// ShouldRecoveryCreateApplicationDatabase returns true if the application database needs to be created during the -// recovery job -func (cluster *Cluster) ShouldRecoveryCreateApplicationDatabase() bool { - // we skip creating the application database if cluster is a replica - if cluster.IsReplica() { - return false - } - - if cluster.Spec.Bootstrap == nil { - return false - } - - if cluster.Spec.Bootstrap.Recovery == nil { - return false - } - - recoveryParameters := cluster.Spec.Bootstrap.Recovery - return recoveryParameters.Owner != "" && recoveryParameters.Database != "" -} - -// ShouldCreateProjectedVolume returns whether we should create the projected all in one 
volume -func (cluster *Cluster) ShouldCreateProjectedVolume() bool { - return cluster.Spec.ProjectedVolumeTemplate != nil -} - -// ShouldCreateWalArchiveVolume returns whether we should create the wal archive volume -func (cluster *Cluster) ShouldCreateWalArchiveVolume() bool { - return cluster.Spec.WalStorage != nil -} - -// ShouldPromoteFromReplicaCluster returns true if the cluster should promote -func (cluster *Cluster) ShouldPromoteFromReplicaCluster() bool { - // If there's no replica cluster configuration there's no - // promotion token too, so we don't need to promote. - if cluster.Spec.ReplicaCluster == nil { - return false - } - - // If we don't have a promotion token, we don't need to promote - if len(cluster.Spec.ReplicaCluster.PromotionToken) == 0 { - return false - } - - // If the current token was already used, there's no need to - // promote - if cluster.Spec.ReplicaCluster.PromotionToken == cluster.Status.LastPromotionToken { - return false - } - return true -} - -// ContainsTablespaces returns true if for this cluster, we need to create tablespaces -func (cluster *Cluster) ContainsTablespaces() bool { - return len(cluster.Spec.Tablespaces) != 0 -} - -// GetPostgresUID returns the UID that is being used for the "postgres" -// user -func (cluster Cluster) GetPostgresUID() int64 { - if cluster.Spec.PostgresUID == 0 { - return defaultPostgresUID - } - return cluster.Spec.PostgresUID -} - -// GetPostgresGID returns the GID that is being used for the "postgres" -// user -func (cluster Cluster) GetPostgresGID() int64 { - if cluster.Spec.PostgresGID == 0 { - return defaultPostgresGID - } - return cluster.Spec.PostgresGID -} - -// ExternalCluster gets the external server with a known name, returning -// true if the server was found and false otherwise -func (cluster Cluster) ExternalCluster(name string) (ExternalCluster, bool) { - for _, server := range cluster.Spec.ExternalClusters { - if server.Name == name { - return server, true - } - } - - return ExternalCluster{}, false -} - -// IsReplica checks if this is a replica cluster or not -func (cluster Cluster) IsReplica() bool { - // Before introducing the "primary" field, the - // "enabled" parameter was declared as a "boolean" - // and was not declared "omitempty". - // - // Legacy replica clusters will have the "replica" stanza - // and the "enabled" field set explicitly to true. - // - // The following code is designed to not change the - // previous semantics. - r := cluster.Spec.ReplicaCluster - if r == nil { - return false - } - - if r.Enabled != nil { - return *r.Enabled - } - - clusterName := r.Self - if len(clusterName) == 0 { - clusterName = cluster.Name - } - - return clusterName != r.Primary -} - -var slotNameNegativeRegex = regexp.MustCompile("[^a-z0-9_]+") - -// GetSlotNameFromInstanceName returns the slot name, given the instance name. 
-// It returns an empty string if High Availability Replication Slots are disabled -func (cluster Cluster) GetSlotNameFromInstanceName(instanceName string) string { - if cluster.Spec.ReplicationSlots == nil || - cluster.Spec.ReplicationSlots.HighAvailability == nil || - !cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() { - return "" - } - - return cluster.Spec.ReplicationSlots.HighAvailability.GetSlotNameFromInstanceName(instanceName) -} - -// GetBarmanEndpointCAForReplicaCluster checks if this is a replica cluster which needs barman endpoint CA -func (cluster Cluster) GetBarmanEndpointCAForReplicaCluster() *SecretKeySelector { - if !cluster.IsReplica() { - return nil - } - sourceName := cluster.Spec.ReplicaCluster.Source - externalCluster, found := cluster.ExternalCluster(sourceName) - if !found || externalCluster.BarmanObjectStore == nil { - return nil - } - return externalCluster.BarmanObjectStore.EndpointCA -} - -// GetClusterAltDNSNames returns all the names needed to build a valid Server Certificate -func (cluster *Cluster) GetClusterAltDNSNames() []string { - buildServiceNames := func(serviceName string, enabled bool) []string { - if !enabled { - return nil - } - return []string{ - serviceName, - fmt.Sprintf("%v.%v", serviceName, cluster.Namespace), - fmt.Sprintf("%v.%v.svc", serviceName, cluster.Namespace), - fmt.Sprintf("%v.%v.svc.cluster.local", serviceName, cluster.Namespace), - } - } - altDNSNames := slices.Concat( - buildServiceNames(cluster.GetServiceReadWriteName(), cluster.IsReadWriteServiceEnabled()), - buildServiceNames(cluster.GetServiceReadName(), cluster.IsReadServiceEnabled()), - buildServiceNames(cluster.GetServiceReadOnlyName(), cluster.IsReadOnlyServiceEnabled()), - ) - - if cluster.Spec.Managed != nil && cluster.Spec.Managed.Services != nil { - for _, service := range cluster.Spec.Managed.Services.Additional { - altDNSNames = append(altDNSNames, buildServiceNames(service.ServiceTemplate.ObjectMeta.Name, true)...) - } - } - - return append(altDNSNames, cluster.Spec.Certificates.getServerAltDNSNames()...) -} - -// UsesSecret checks whether a given secret is used by a Cluster. -// -// This function is also used to discover the set of clusters that -// should be reconciled when a certain secret changes. 
-func (cluster *Cluster) UsesSecret(secret string) bool { - if _, ok := cluster.Status.SecretsResourceVersion.Metrics[secret]; ok { - return true - } - certificates := cluster.Status.Certificates - switch secret { - case cluster.GetSuperuserSecretName(), - cluster.GetApplicationSecretName(), - certificates.ClientCASecret, - certificates.ReplicationTLSSecret, - certificates.ServerCASecret, - certificates.ServerTLSSecret: - return true - } - - if cluster.UsesSecretInManagedRoles(secret) { - return true - } - - if cluster.Spec.Backup.IsBarmanEndpointCASet() && cluster.Spec.Backup.BarmanObjectStore.EndpointCA.Name == secret { - return true - } - - if endpointCA := cluster.GetBarmanEndpointCAForReplicaCluster(); endpointCA != nil && endpointCA.Name == secret { - return true - } - - if cluster.Status.PoolerIntegrations != nil { - for _, pgBouncerSecretName := range cluster.Status.PoolerIntegrations.PgBouncerIntegration.Secrets { - if pgBouncerSecretName == secret { - return true - } - } - } - - // watch the secrets defined in external clusters - return cluster.GetExternalClusterSecrets().Has(secret) -} - -// UsesConfigMap checks whether a given secret is used by a Cluster -func (cluster *Cluster) UsesConfigMap(config string) (ok bool) { - if _, ok := cluster.Status.ConfigMapResourceVersion.Metrics[config]; ok { - return true - } - return false -} - -// IsPodMonitorEnabled checks if the PodMonitor object needs to be created -func (cluster *Cluster) IsPodMonitorEnabled() bool { - if cluster.Spec.Monitoring != nil { - return cluster.Spec.Monitoring.EnablePodMonitor - } - - return false -} - -// IsMetricsTLSEnabled checks if the metrics endpoint should use TLS -func (cluster *Cluster) IsMetricsTLSEnabled() bool { - if cluster.Spec.Monitoring != nil && cluster.Spec.Monitoring.TLSConfig != nil { - return cluster.Spec.Monitoring.TLSConfig.Enabled - } - - return false -} - -// GetEnableSuperuserAccess returns if the superuser access is enabled or not -func (cluster *Cluster) GetEnableSuperuserAccess() bool { - if cluster.Spec.EnableSuperuserAccess != nil { - return *cluster.Spec.EnableSuperuserAccess - } - - return false -} - -// LogTimestampsWithMessage prints useful information about timestamps in stdout -func (cluster *Cluster) LogTimestampsWithMessage(ctx context.Context, logMessage string) { - contextLogger := log.FromContext(ctx) - - currentTimestamp := utils.GetCurrentTimestamp() - keysAndValues := []interface{}{ - "phase", cluster.Status.Phase, - "currentTimestamp", currentTimestamp, - "targetPrimaryTimestamp", cluster.Status.TargetPrimaryTimestamp, - "currentPrimaryTimestamp", cluster.Status.CurrentPrimaryTimestamp, - } - - var errs []string - - // Elapsed time since the last request of promotion (TargetPrimaryTimestamp) - if diff, err := utils.DifferenceBetweenTimestamps( - currentTimestamp, - cluster.Status.TargetPrimaryTimestamp, - ); err == nil { - keysAndValues = append( - keysAndValues, - "msPassedSinceTargetPrimaryTimestamp", - diff.Milliseconds(), - ) - } else { - errs = append(errs, err.Error()) - } - - // Elapsed time since the last promotion (CurrentPrimaryTimestamp) - if currentPrimaryDifference, err := utils.DifferenceBetweenTimestamps( - currentTimestamp, - cluster.Status.CurrentPrimaryTimestamp, - ); err == nil { - keysAndValues = append( - keysAndValues, - "msPassedSinceCurrentPrimaryTimestamp", - currentPrimaryDifference.Milliseconds(), - ) - } else { - errs = append(errs, err.Error()) - } - - // Difference between the last promotion and the last request of promotion - // When 
positive, it is the amount of time required in the last promotion - // of a standby to a primary. If negative, it means we have a failover/switchover - // in progress, and the value represents the last measured uptime of the primary. - if currentPrimaryTargetDifference, err := utils.DifferenceBetweenTimestamps( - cluster.Status.CurrentPrimaryTimestamp, - cluster.Status.TargetPrimaryTimestamp, - ); err == nil { - keysAndValues = append( - keysAndValues, - "msDifferenceBetweenCurrentAndTargetPrimary", - currentPrimaryTargetDifference.Milliseconds(), - ) - } else { - errs = append(errs, err.Error()) - } - - if len(errs) > 0 { - keysAndValues = append(keysAndValues, "timestampParsingErrors", errs) - } - - contextLogger.Info(logMessage, keysAndValues...) -} - -// SetInheritedDataAndOwnership sets the cluster as owner of the passed object and then -// sets all the needed annotations and labels -func (cluster *Cluster) SetInheritedDataAndOwnership(obj *metav1.ObjectMeta) { - cluster.SetInheritedData(obj) - utils.SetAsOwnedBy(obj, cluster.ObjectMeta, cluster.TypeMeta) -} - -// SetInheritedData sets all the needed annotations and labels -func (cluster *Cluster) SetInheritedData(obj *metav1.ObjectMeta) { - utils.InheritAnnotations(obj, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) - utils.InheritLabels(obj, cluster.Labels, cluster.GetFixedInheritedLabels(), configuration.Current) - utils.LabelClusterName(obj, cluster.GetName()) - utils.SetOperatorVersion(obj, versions.Version) -} - -// ShouldForceLegacyBackup if present takes a backup without passing the name argument even on barman version 3.3.0+. -// This is needed to test both backup system in the E2E suite -func (cluster *Cluster) ShouldForceLegacyBackup() bool { - return cluster.Annotations[utils.LegacyBackupAnnotationName] == "true" -} - -// GetSeccompProfile return the proper SeccompProfile set in the cluster for Pods and Containers -func (cluster *Cluster) GetSeccompProfile() *corev1.SeccompProfile { - if cluster.Spec.SeccompProfile != nil { - return cluster.Spec.SeccompProfile - } - - return &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - } -} - -// GetCoredumpFilter get the coredump filter value from the cluster annotation -func (cluster *Cluster) GetCoredumpFilter() string { - value, ok := cluster.Annotations[utils.CoredumpFilter] - if ok { - return value - } - return system.DefaultCoredumpFilter -} - -// IsInplaceRestartPhase returns true if the cluster is in a phase that handles the Inplace restart -func (cluster *Cluster) IsInplaceRestartPhase() bool { - return cluster.Status.Phase == PhaseInplacePrimaryRestart || - cluster.Status.Phase == PhaseInplaceDeletePrimaryRestart -} - -// GetTablespaceConfiguration returns the tablespaceConfiguration for the given name -// otherwise return nil -func (cluster *Cluster) GetTablespaceConfiguration(name string) *TablespaceConfiguration { - for _, tbsConfig := range cluster.Spec.Tablespaces { - if name == tbsConfig.Name { - return &tbsConfig - } - } - - return nil -} - -// GetServerCASecretObjectKey returns a types.NamespacedName pointing to the secret -func (cluster *Cluster) GetServerCASecretObjectKey() types.NamespacedName { - return types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.GetServerCASecretName()} -} - -// IsBarmanBackupConfigured returns true if one of the possible backup destination -// is configured, false otherwise -func (backupConfiguration *BackupConfiguration) IsBarmanBackupConfigured() bool { - 
return backupConfiguration != nil && backupConfiguration.BarmanObjectStore != nil && - backupConfiguration.BarmanObjectStore.BarmanCredentials.ArePopulated() -} - -// IsBarmanEndpointCASet returns true if we have a CA bundle for the endpoint -// false otherwise -func (backupConfiguration *BackupConfiguration) IsBarmanEndpointCASet() bool { - return backupConfiguration != nil && - backupConfiguration.BarmanObjectStore != nil && - backupConfiguration.BarmanObjectStore.EndpointCA != nil && - backupConfiguration.BarmanObjectStore.EndpointCA.Name != "" && - backupConfiguration.BarmanObjectStore.EndpointCA.Key != "" -} - -// UpdateBackupTimes sets the firstRecoverabilityPoint and lastSuccessfulBackup -// for the provided method, as well as the overall firstRecoverabilityPoint and -// lastSuccessfulBackup for the cluster -func (cluster *Cluster) UpdateBackupTimes( - backupMethod BackupMethod, - firstRecoverabilityPoint *time.Time, - lastSuccessfulBackup *time.Time, -) { - type comparer func(a metav1.Time, b metav1.Time) bool - // tryGetMaxTime gets either the newest or oldest time from a set of backup times, - // depending on the comparer argument passed to it - tryGetMaxTime := func(m map[BackupMethod]metav1.Time, compare comparer) string { - var maximum metav1.Time - for _, ts := range m { - if maximum.IsZero() || compare(ts, maximum) { - maximum = ts - } - } - result := "" - if !maximum.IsZero() { - result = maximum.Format(time.RFC3339) - } - - return result - } - - setTime := func(backupTimes map[BackupMethod]metav1.Time, value *time.Time) map[BackupMethod]metav1.Time { - if value == nil { - delete(backupTimes, backupMethod) - return backupTimes - } - - if backupTimes == nil { - backupTimes = make(map[BackupMethod]metav1.Time) - } - - backupTimes[backupMethod] = metav1.NewTime(*value) - return backupTimes - } - - cluster.Status.FirstRecoverabilityPointByMethod = setTime(cluster.Status.FirstRecoverabilityPointByMethod, - firstRecoverabilityPoint) - cluster.Status.FirstRecoverabilityPoint = tryGetMaxTime( - cluster.Status.FirstRecoverabilityPointByMethod, - // we pass a comparer to get the first among the recoverability points - func(a metav1.Time, b metav1.Time) bool { - return a.Before(&b) - }) - - cluster.Status.LastSuccessfulBackupByMethod = setTime(cluster.Status.LastSuccessfulBackupByMethod, - lastSuccessfulBackup) - cluster.Status.LastSuccessfulBackup = tryGetMaxTime( - cluster.Status.LastSuccessfulBackupByMethod, - // we pass a comparer to get the last among the last backup times per method - func(a metav1.Time, b metav1.Time) bool { - return b.Before(&a) - }) -} - -// IsReadServiceEnabled checks if the read service is enabled for the cluster. -// It returns false if the read service is listed in the DisabledDefaultServices slice. -func (cluster *Cluster) IsReadServiceEnabled() bool { - if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { - return true - } - - return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeR) -} - -// IsReadWriteServiceEnabled checks if the read-write service is enabled for the cluster. -// It returns false if the read-write service is listed in the DisabledDefaultServices slice. 
-func (cluster *Cluster) IsReadWriteServiceEnabled() bool { - if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { - return true - } - return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRW) -} - -// IsReadOnlyServiceEnabled checks if the read-only service is enabled for the cluster. -// It returns false if the read-only service is listed in the DisabledDefaultServices slice. -func (cluster *Cluster) IsReadOnlyServiceEnabled() bool { - if cluster.Spec.Managed == nil || cluster.Spec.Managed.Services == nil { - return true - } - - return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRO) -} - -// BuildPostgresOptions create the list of options that -// should be added to the PostgreSQL configuration to -// recover given a certain target -func (target *RecoveryTarget) BuildPostgresOptions() string { - result := "" - - if target == nil { - return result - } - - if target.TargetTLI != "" { - result += fmt.Sprintf( - "recovery_target_timeline = '%v'\n", - target.TargetTLI) - } - if target.TargetXID != "" { - result += fmt.Sprintf( - "recovery_target_xid = '%v'\n", - target.TargetXID) - } - if target.TargetName != "" { - result += fmt.Sprintf( - "recovery_target_name = '%v'\n", - target.TargetName) - } - if target.TargetLSN != "" { - result += fmt.Sprintf( - "recovery_target_lsn = '%v'\n", - target.TargetLSN) - } - if target.TargetTime != "" { - result += fmt.Sprintf( - "recovery_target_time = '%v'\n", - utils.ConvertToPostgresFormat(target.TargetTime)) - } - if target.TargetImmediate != nil && *target.TargetImmediate { - result += "recovery_target = immediate\n" - } - if target.Exclusive != nil && *target.Exclusive { - result += "recovery_target_inclusive = false\n" - } else { - result += "recovery_target_inclusive = true\n" - } - - return result -} - func init() { SchemeBuilder.Register(&Cluster{}, &ClusterList{}) } diff --git a/api/v1/clusterimagecatalog_funcs.go b/api/v1/clusterimagecatalog_funcs.go new file mode 100644 index 0000000000..a698a1ad8f --- /dev/null +++ b/api/v1/clusterimagecatalog_funcs.go @@ -0,0 +1,22 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +// GetSpec returns the Spec of the ClusterImageCatalog +func (c *ClusterImageCatalog) GetSpec() *ImageCatalogSpec { + return &c.Spec +} diff --git a/api/v1/clusterimagecatalog_types.go b/api/v1/clusterimagecatalog_types.go index 6562c19890..850822fbec 100644 --- a/api/v1/clusterimagecatalog_types.go +++ b/api/v1/clusterimagecatalog_types.go @@ -45,11 +45,6 @@ type ClusterImageCatalogList struct { Items []ClusterImageCatalog `json:"items"` } -// GetSpec returns the Spec of the ClusterImageCatalog -func (c *ClusterImageCatalog) GetSpec() *ImageCatalogSpec { - return &c.Spec -} - func init() { SchemeBuilder.Register(&ClusterImageCatalog{}, &ClusterImageCatalogList{}) } diff --git a/api/v1/imagecatalog_funcs.go b/api/v1/imagecatalog_funcs.go index 22f3a80c90..1c4420f7b8 100644 --- a/api/v1/imagecatalog_funcs.go +++ b/api/v1/imagecatalog_funcs.go @@ -16,6 +16,11 @@ limitations under the License. package v1 +// GetSpec returns the Spec of the ImageCatalog +func (c *ImageCatalog) GetSpec() *ImageCatalogSpec { + return &c.Spec +} + // FindImageForMajor finds the correct image for the selected major version func (spec *ImageCatalogSpec) FindImageForMajor(major int) (string, bool) { for _, entry := range spec.Images { diff --git a/api/v1/imagecatalog_types.go b/api/v1/imagecatalog_types.go index 6f7a9d4d50..2d5d5c13d0 100644 --- a/api/v1/imagecatalog_types.go +++ b/api/v1/imagecatalog_types.go @@ -64,11 +64,6 @@ type ImageCatalogList struct { Items []ImageCatalog `json:"items"` } -// GetSpec returns the Spec of the ImageCatalog -func (c *ImageCatalog) GetSpec() *ImageCatalogSpec { - return &c.Spec -} - func init() { SchemeBuilder.Register(&ImageCatalog{}, &ImageCatalogList{}) } diff --git a/api/v1/pooler_funcs.go b/api/v1/pooler_funcs.go new file mode 100644 index 0000000000..b3af4d83e0 --- /dev/null +++ b/api/v1/pooler_funcs.go @@ -0,0 +1,57 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// IsPaused returns whether all database should be paused or not. +func (in PgBouncerSpec) IsPaused() bool { + return in.Paused != nil && *in.Paused +} + +// GetAuthQuerySecretName returns the specified AuthQuerySecret name for PgBouncer +// if provided or the default name otherwise. +func (in *Pooler) GetAuthQuerySecretName() string { + if in.Spec.PgBouncer != nil && in.Spec.PgBouncer.AuthQuerySecret != nil { + return in.Spec.PgBouncer.AuthQuerySecret.Name + } + + return in.Spec.Cluster.Name + DefaultPgBouncerPoolerSecretSuffix +} + +// GetAuthQuery returns the specified AuthQuery name for PgBouncer +// if provided or the default name otherwise. +func (in *Pooler) GetAuthQuery() string { + if in.Spec.PgBouncer.AuthQuery != "" { + return in.Spec.PgBouncer.AuthQuery + } + + return DefaultPgBouncerPoolerAuthQuery +} + +// IsAutomatedIntegration returns whether the Pooler integration with the +// Cluster is automated or not. 
+func (in *Pooler) IsAutomatedIntegration() bool { + if in.Spec.PgBouncer == nil { + return true + } + // If the user specified an AuthQuerySecret or an AuthQuery, the integration + // is not going to be handled by the operator. + if (in.Spec.PgBouncer.AuthQuerySecret != nil && in.Spec.PgBouncer.AuthQuerySecret.Name != "") || + in.Spec.PgBouncer.AuthQuery != "" { + return false + } + return true +} diff --git a/api/v1/pooler_types_test.go b/api/v1/pooler_funcs_test.go similarity index 100% rename from api/v1/pooler_types_test.go rename to api/v1/pooler_funcs_test.go diff --git a/api/v1/pooler_types.go b/api/v1/pooler_types.go index e366bf31a6..5fc3bdb9d1 100644 --- a/api/v1/pooler_types.go +++ b/api/v1/pooler_types.go @@ -184,11 +184,6 @@ type PgBouncerSpec struct { Paused *bool `json:"paused,omitempty"` } -// IsPaused returns whether all database should be paused or not. -func (in PgBouncerSpec) IsPaused() bool { - return in.Paused != nil && *in.Paused -} - // PoolerStatus defines the observed state of Pooler type PoolerStatus struct { // The resource version of the config object @@ -272,38 +267,3 @@ type PoolerList struct { func init() { SchemeBuilder.Register(&Pooler{}, &PoolerList{}) } - -// GetAuthQuerySecretName returns the specified AuthQuerySecret name for PgBouncer -// if provided or the default name otherwise. -func (in *Pooler) GetAuthQuerySecretName() string { - if in.Spec.PgBouncer != nil && in.Spec.PgBouncer.AuthQuerySecret != nil { - return in.Spec.PgBouncer.AuthQuerySecret.Name - } - - return in.Spec.Cluster.Name + DefaultPgBouncerPoolerSecretSuffix -} - -// GetAuthQuery returns the specified AuthQuery name for PgBouncer -// if provided or the default name otherwise. -func (in *Pooler) GetAuthQuery() string { - if in.Spec.PgBouncer.AuthQuery != "" { - return in.Spec.PgBouncer.AuthQuery - } - - return DefaultPgBouncerPoolerAuthQuery -} - -// IsAutomatedIntegration returns whether the Pooler integration with the -// Cluster is automated or not. -func (in *Pooler) IsAutomatedIntegration() bool { - if in.Spec.PgBouncer == nil { - return true - } - // If the user specified an AuthQuerySecret or an AuthQuery, the integration - // is not going to be handled by the operator. - if (in.Spec.PgBouncer.AuthQuerySecret != nil && in.Spec.PgBouncer.AuthQuerySecret.Name != "") || - in.Spec.PgBouncer.AuthQuery != "" { - return false - } - return true -} diff --git a/api/v1/scheduledbackup_funcs.go b/api/v1/scheduledbackup_funcs.go new file mode 100644 index 0000000000..6acf6c88c9 --- /dev/null +++ b/api/v1/scheduledbackup_funcs.go @@ -0,0 +1,82 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// IsSuspended checks if a scheduled backup has been suspended or not
+func (scheduledBackup ScheduledBackup) IsSuspended() bool {
+	if scheduledBackup.Spec.Suspend == nil {
+		return false
+	}
+
+	return *scheduledBackup.Spec.Suspend
+}
+
+// IsImmediate checks if a backup has to be issued immediately upon creation or not
+func (scheduledBackup ScheduledBackup) IsImmediate() bool {
+	if scheduledBackup.Spec.Immediate == nil {
+		return false
+	}
+
+	return *scheduledBackup.Spec.Immediate
+}
+
+// GetName gets the scheduled backup name
+func (scheduledBackup *ScheduledBackup) GetName() string {
+	return scheduledBackup.Name
+}
+
+// GetNamespace gets the scheduled backup namespace
+func (scheduledBackup *ScheduledBackup) GetNamespace() string {
+	return scheduledBackup.Namespace
+}
+
+// GetSchedule gets the cron-like schedule of this scheduled backup
+func (scheduledBackup *ScheduledBackup) GetSchedule() string {
+	return scheduledBackup.Spec.Schedule
+}
+
+// GetStatus gets the status that the caller may update
+func (scheduledBackup *ScheduledBackup) GetStatus() *ScheduledBackupStatus {
+	return &scheduledBackup.Status
+}
+
+// CreateBackup creates a backup from this scheduled backup
+func (scheduledBackup *ScheduledBackup) CreateBackup(name string) *Backup {
+	backup := Backup{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: scheduledBackup.Namespace,
+		},
+		Spec: BackupSpec{
+			Cluster:             scheduledBackup.Spec.Cluster,
+			Target:              scheduledBackup.Spec.Target,
+			Method:              scheduledBackup.Spec.Method,
+			Online:              scheduledBackup.Spec.Online,
+			OnlineConfiguration: scheduledBackup.Spec.OnlineConfiguration,
+			PluginConfiguration: scheduledBackup.Spec.PluginConfiguration,
+		},
+	}
+	utils.InheritAnnotations(&backup.ObjectMeta, scheduledBackup.Annotations, nil, configuration.Current)
+	return &backup
+}
diff --git a/api/v1/scheduledbackup_types_test.go b/api/v1/scheduledbackup_funcs_test.go
similarity index 100%
rename from api/v1/scheduledbackup_types_test.go
rename to api/v1/scheduledbackup_funcs_test.go
diff --git a/api/v1/scheduledbackup_types.go b/api/v1/scheduledbackup_types.go
index e75ce2f2bc..1929db5d95 100644
--- a/api/v1/scheduledbackup_types.go
+++ b/api/v1/scheduledbackup_types.go
@@ -18,9 +18,6 @@ package v1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
-	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
 )
 
 // ScheduledBackupSpec defines the desired state of ScheduledBackup
@@ -133,64 +130,6 @@ type ScheduledBackupList struct {
 	Items []ScheduledBackup `json:"items"`
 }
 
-// IsSuspended check if a scheduled backup has been suspended or not
-func (scheduledBackup ScheduledBackup) IsSuspended() bool {
-	if scheduledBackup.Spec.Suspend == nil {
-		return false
-	}
-
-	return *scheduledBackup.Spec.Suspend
-}
-
-// IsImmediate check if a backup has to be issued immediately upon creation or not
-func (scheduledBackup ScheduledBackup) IsImmediate() bool {
-	if scheduledBackup.Spec.Immediate == nil {
-		return false
-	}
-
-	return *scheduledBackup.Spec.Immediate
-}
-
-// GetName gets the scheduled backup name
-func (scheduledBackup *ScheduledBackup) GetName() string {
-	return scheduledBackup.Name
-}
-
-// GetNamespace gets the scheduled backup name
-func (scheduledBackup *ScheduledBackup) GetNamespace() string {
-	return scheduledBackup.Namespace
-}
-
-// GetSchedule get the cron-like schedule of this scheduled backup
-func (scheduledBackup *ScheduledBackup) GetSchedule() string {
-	return scheduledBackup.Spec.Schedule
-}
-
-// GetStatus gets the status that the caller may update
-func (scheduledBackup *ScheduledBackup) GetStatus() *ScheduledBackupStatus {
-	return &scheduledBackup.Status
-}
-
-// CreateBackup creates a backup from this scheduled backup
-func (scheduledBackup *ScheduledBackup) CreateBackup(name string) *Backup {
-	backup := Backup{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: scheduledBackup.Namespace,
-		},
-		Spec: BackupSpec{
-			Cluster:             scheduledBackup.Spec.Cluster,
-			Target:              scheduledBackup.Spec.Target,
-			Method:              scheduledBackup.Spec.Method,
-			Online:              scheduledBackup.Spec.Online,
-			OnlineConfiguration: scheduledBackup.Spec.OnlineConfiguration,
-			PluginConfiguration: scheduledBackup.Spec.PluginConfiguration,
-		},
-	}
-	utils.InheritAnnotations(&backup.ObjectMeta, scheduledBackup.Annotations, nil, configuration.Current)
-	return &backup
-}
-
 func init() {
 	SchemeBuilder.Register(&ScheduledBackup{}, &ScheduledBackupList{})
 }

From a40da6f02679e69602abf4891fc5dac7dd1eccd6 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Thu, 26 Sep 2024 12:21:38 +0200
Subject: [PATCH 011/836] fix: use patch while setting the new primary
 instance (#5559)

This patch removes an unnecessary update of the status sub-resource
that could fail and trigger a retry due to a change in the last
observed generation.

Signed-off-by: Armando Ruocco
---
 internal/controller/cluster_status.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go
index f766fdd807..c039cc29a5 100644
--- a/internal/controller/cluster_status.go
+++ b/internal/controller/cluster_status.go
@@ -717,9 +717,10 @@ func (r *ClusterReconciler) setPrimaryInstance(
 	cluster *apiv1.Cluster,
 	podName string,
 ) error {
+	origCluster := cluster.DeepCopy()
 	cluster.Status.TargetPrimary = podName
 	cluster.Status.TargetPrimaryTimestamp = utils.GetCurrentTimestamp()
-	return r.Status().Update(ctx, cluster)
+	return r.Status().Patch(ctx, cluster, client.MergeFrom(origCluster))
 }
 
 // RegisterPhase update phase in the status cluster with the

From cf403a829d496a57dc46d04817b80c1ba381782c Mon Sep 17 00:00:00 2001
From: Jeremy Schneider
Date: Fri, 27 Sep 2024 02:10:10 -0700
Subject: [PATCH 012/836] fix(docs): broken disk-full link (#5650)

This link moved with the Postgres 17 release today

Signed-off-by: Jeremy Schneider
---
 docs/src/instance_manager.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md
index d285838ce7..c8c924f4fe 100644
--- a/docs/src/instance_manager.md
+++ b/docs/src/instance_manager.md
@@ -102,7 +102,7 @@ Please refer to the ["Failover" section](failover.md) for details.
 ## Disk Full Failure
 
 Storage exhaustion is a well known issue for PostgreSQL clusters.
-The [PostgreSQL documentation](https://www.postgresql.org/docs/current/disk-full.html)
+The [PostgreSQL documentation](https://www.postgresql.org/docs/current/diskusage.html#DISK-FULL)
 highlights the possible failure scenarios and the importance of monitoring
 disk usage to prevent it from becoming full.
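
For background on the `Status().Patch` change in patch 011 above, here is a
minimal, self-contained sketch of the pattern (not part of any patch in this
series; the helper name `setTargetPrimary` is hypothetical):

```go
package controller

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// setTargetPrimary sketches the approach used by setPrimaryInstance:
// take a deep copy, mutate the status, and send only the resulting diff
// as a merge patch instead of issuing a full Update.
func setTargetPrimary(ctx context.Context, c client.Client, cluster *apiv1.Cluster, podName string) error {
	origCluster := cluster.DeepCopy()
	cluster.Status.TargetPrimary = podName

	// client.MergeFrom computes a minimal JSON merge patch between
	// origCluster and the modified object; unlike a full Update, the
	// write does not fail when the resourceVersion observed at read
	// time has become stale in the meantime.
	return c.Status().Patch(ctx, cluster, client.MergeFrom(origCluster))
}
```
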
From af6b367a0864e8927b8f3e08488831af07fdf3d6 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Fri, 27 Sep 2024 11:18:20 +0200
Subject: [PATCH 013/836] feat: spread Pod rollout (#5391)

By default, all PostgreSQL clusters are rolled out simultaneously, which
may lead to a spike in resource usage, especially when managing multiple
clusters. This patch introduces two configuration options at the
operator level that let you add delays between cluster roll-outs or even
between instances within the same cluster, helping to distribute
resource usage over time:

- `CLUSTERS_ROLLOUT_DELAY`: Defines the number of seconds to wait
  between roll-outs of Pods of different PostgreSQL clusters
  (default: `0`).
- `INSTANCES_ROLLOUT_DELAY`: Defines the number of seconds to wait
  between roll-outs of Pods of individual instances within the same
  PostgreSQL cluster (default: `0`).

Closes: #5384

Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Signed-off-by: Gabriele Bartolini
Signed-off-by: Gabriele Quaresima
Co-authored-by: Armando Ruocco
Co-authored-by: Gabriele Bartolini
Co-authored-by: Gabriele Quaresima
---
 api/v1/cluster_types.go                       |   4 +
 docs/src/installation_upgrade.md              |  30 +++-
 docs/src/operator_capability_levels.md        |  30 ++--
 docs/src/operator_conf.md                     |  36 ++--
 internal/configuration/configuration.go       |  25 +++
 internal/configuration/configuration_test.go  |  22 +++
 internal/controller/cluster_controller.go     |  22 +++
 internal/controller/cluster_upgrade.go        |  42 ++++-
 internal/controller/cluster_upgrade_test.go   |  44 +++++
 internal/controller/rollout/doc.go            |  20 +++
 internal/controller/rollout/rollout.go        | 110 ++++++++++++
 internal/controller/rollout/rollout_test.go   | 164 ++++++++++++++++++
 internal/controller/rollout/suite_test.go     |  30 ++++
 .../fixtures/configmap-support/configmap.yaml |   2 +
 14 files changed, 546 insertions(+), 35 deletions(-)
 create mode 100644 internal/controller/rollout/doc.go
 create mode 100644 internal/controller/rollout/rollout.go
 create mode 100644 internal/controller/rollout/rollout_test.go
 create mode 100644 internal/controller/rollout/suite_test.go

diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index d259c48703..87171977b8 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -496,6 +496,10 @@ const (
 	// PhaseUpgrade upgrade in process
 	PhaseUpgrade = "Upgrading cluster"
 
+	// PhaseUpgradeDelayed is set when a cluster needs to be upgraded
+	// but the operation is being delayed by the operator configuration
+	PhaseUpgradeDelayed = "Cluster upgrade delayed"
+
 	// PhaseWaitingForUser set the status to wait for an action from the user
 	PhaseWaitingForUser = "Waiting for user action"
 
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index c21dbb09db..e158ffeba1 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -152,13 +152,14 @@ by applying the manifest of the newer version
 for plain Kubernetes installations, or using the native package manager of the
 used distribution (please follow the instructions in the above sections).
 
-The second step is automatically executed after having updated the controller,
-by default triggering a rolling update of every deployed PostgreSQL instance to
-use the new instance manager. The rolling update procedure culminates with a
-switchover, which is controlled by the `primaryUpdateStrategy` option, by
-default set to `unsupervised`. 
When set to `supervised`, users need to complete -the rolling update by manually promoting a new instance through the `cnpg` -plugin for `kubectl`. + +The second step is automatically triggered after updating the controller. By +default, this initiates a rolling update of every deployed PostgreSQL cluster, +upgrading one instance at a time to use the new instance manager. The rolling +update concludes with a switchover, which is governed by the +`primaryUpdateStrategy` option. The default value, `unsupervised`, completes +the switchover automatically. If set to `supervised`, the user must manually +promote the new primary instance using the `cnpg` plugin for `kubectl`. !!! Seealso "Rolling updates" This process is discussed in-depth on the [Rolling Updates](rolling_update.md) page. @@ -173,6 +174,21 @@ the instance manager. This approach does not require a restart of the PostgreSQL instance, thereby avoiding a switchover within the cluster. This feature, which is disabled by default, is described in detail below. +### Spread Upgrades + +By default, all PostgreSQL clusters are rolled out simultaneously, which may +lead to a spike in resource usage, especially when managing multiple clusters. +CloudNativePG provides two configuration options at the [operator level](operator_conf.md) +that allow you to introduce delays between cluster roll-outs or even between +instances within the same cluster, helping to distribute resource usage over +time: + +- `CLUSTERS_ROLLOUT_DELAY`: Defines the number of seconds to wait between + roll-outs of different PostgreSQL clusters (default: `0`). +- `INSTANCES_ROLLOUT_DELAY`: Defines the number of seconds to wait between + roll-outs of individual instances within the same PostgreSQL cluster (default: + `0`). + ### In-place updates of the instance manager By default, CloudNativePG issues a rolling update of the cluster diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index 4c6aed31f7..ce7f31e8c0 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -75,7 +75,7 @@ of the CloudNativePG deployment in your Kubernetes infrastructure. ### Self-contained instance manager Instead of relying on an external tool to -coordinate PostgreSQL instances in the Kubernetes cluster pods, +coordinate PostgreSQL instances in the Kubernetes cluster pods, such as Patroni or Stolon, the operator injects the operator executable inside each pod, in a file named `/controller/manager`. The application is used to control the underlying @@ -223,7 +223,7 @@ includes integration with cert-manager. ### Certificate authentication for streaming replication -To authorize streaming replication connections from the standby servers, +To authorize streaming replication connections from the standby servers, the operator relies on TLS client certificate authentication. This method is used instead of relying on a password (and therefore a secret). @@ -285,16 +285,20 @@ workload, in this case PostgreSQL servers. This includes PostgreSQL minor release updates (security and bug fixes normally) as well as major online upgrades. -### Upgrade of the operator +### Operator Upgrade -You can upgrade the operator seamlessly as a new deployment. Because of the instance -manager's injection, a change in the -operator doesn't require a change in the operand. -The operator can manage older versions of the operand. +Upgrading the operator is seamless and can be done as a new deployment. 
After +upgrading the controller, a rolling update of all deployed PostgreSQL clusters +is initiated. You can choose to update all clusters simultaneously or +distribute their upgrades over time. -CloudNativePG also supports [in-place updates of the instance manager](installation_upgrade.md#in-place-updates-of-the-instance-manager) -following an upgrade of the operator. In-place updates don't require a rolling -update (and subsequent switchover) of the cluster. +Thanks to the instance manager's injection, upgrading the operator does not +require changes to the operand, allowing the operator to manage older versions +of it. + +Additionally, CloudNativePG supports [in-place updates of the instance manager](installation_upgrade.md#in-place-updates-of-the-instance-manager) +following an operator upgrade. In-place updates do not require a rolling update +or a subsequent switchover of the cluster. ### Upgrade of the managed workload @@ -355,8 +359,8 @@ user action. The operator transparently sets the `archive_command` to rely on `barman-cloud-wal-archive` to ship WAL files to the defined endpoint. You can decide the compression algorithm, as well as the number of parallel jobs to concurrently upload WAL files -in the archive. In addition, `Instance Manager` checks -the correctness of the archive destination by performing the `barman-cloud-check-wal-archive` +in the archive. In addition, `Instance Manager` checks +the correctness of the archive destination by performing the `barman-cloud-check-wal-archive` command before beginning to ship the first set of WAL files. ### PostgreSQL backups @@ -373,7 +377,7 @@ Base backups can be saved on: Base backups are defined at the cluster level, declaratively, through the `backup` parameter in the cluster definition. -You can define base backups in two ways: +You can define base backups in two ways: - On-demand, through the `Backup` custom resource definition - Scheduled, through the `ScheduledBackup`custom resource definition, using a cron-like syntax diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index 26bf16678a..72033f6016 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -35,17 +35,19 @@ The operator looks for the following environment variables to be defined in the Name | Description ---- | ----------- -`INHERITED_ANNOTATIONS` | list of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods -`INHERITED_LABELS` | list of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods -`PULL_SECRET_NAME` | name of an additional pull secret to be defined in the operator's namespace and to be used to download images -`ENABLE_AZURE_PVC_UPDATES` | Enables to delete Postgres pod if its PVC is stuck in Resizing condition. 
This feature is mainly for the Azure environment (default `false`) -`ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES` | when set to `true`, enables in-place updates of the instance manager after an update of the operator, avoiding rolling updates of the cluster (default `false`) -`MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters -`MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters `CERTIFICATE_DURATION` | Determines the lifetime of the generated certificates in days. Default is 90. +`CLUSTERS_ROLLOUT_DELAY` | The duration (in seconds) to wait between the roll-outs of different clusters during an operator upgrade. This setting controls the timing of upgrades across clusters, spreading them out to reduce system impact. The default value is `0` which means no delay between PostgreSQL cluster upgrades. +`CREATE_ANY_SERVICE` | When set to `true`, will create `-any` service for the cluster. Default is `false` +`ENABLE_AZURE_PVC_UPDATES` | Enables to delete Postgres pod if its PVC is stuck in Resizing condition. This feature is mainly for the Azure environment (default `false`) +`ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES` | When set to `true`, enables in-place updates of the instance manager after an update of the operator, avoiding rolling updates of the cluster (default `false`) `EXPIRING_CHECK_THRESHOLD` | Determines the threshold, in days, for identifying a certificate as expiring. Default is 7. -`CREATE_ANY_SERVICE` | when set to `true`, will create `-any` service for the cluster. Default is `false` `INCLUDE_PLUGINS` | A comma-separated list of plugins to be always included in the Cluster's reconciliation. +`INHERITED_ANNOTATIONS` | List of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods +`INHERITED_LABELS` | List of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods +`INSTANCES_ROLLOUT_DELAY` | The duration (in seconds) to wait between roll-outs of individual PostgreSQL instances within the same cluster during an operator upgrade. The default value is `0`, meaning no delay between upgrades of instances in the same PostgreSQL cluster. +`MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters +`MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters +`PULL_SECRET_NAME` | Name of an additional pull secret to be defined in the operator's namespace and to be used to download images Values in `INHERITED_ANNOTATIONS` and `INHERITED_LABELS` support path-like wildcards. For example, the value `example.com/*` will match both the value `example.com/one` and `example.com/two`. @@ -62,9 +64,10 @@ will ignore the configuration parameter. 
The example below customizes the behavior of the operator, by defining the label/annotation names to be inherited by the resources created by -any `Cluster` object that is deployed at a later time, and by enabling +any `Cluster` object that is deployed at a later time, by enabling [in-place updates for the instance -manager](installation_upgrade.md#in-place-updates-of-the-instance-manager). +manager](installation_upgrade.md#in-place-updates-of-the-instance-manager), +and by spreading upgrades. ```yaml apiVersion: v1 @@ -73,9 +76,11 @@ metadata: name: cnpg-controller-manager-config namespace: cnpg-system data: + CLUSTERS_ROLLOUT_DELAY: '60' + ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true' INHERITED_ANNOTATIONS: categories INHERITED_LABELS: environment, workload, app - ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true' + INSTANCES_ROLLOUT_DELAY: '10' ``` ## Defining an operator secret @@ -84,7 +89,8 @@ The example below customizes the behavior of the operator, by defining the label/annotation names to be inherited by the resources created by any `Cluster` object that is deployed at a later time, and by enabling [in-place updates for the instance -manager](installation_upgrade.md#in-place-updates-of-the-instance-manager). +manager](installation_upgrade.md#in-place-updates-of-the-instance-manager), +and by spreading upgrades. ```yaml apiVersion: v1 @@ -94,9 +100,11 @@ metadata: namespace: cnpg-system type: Opaque stringData: + CLUSTERS_ROLLOUT_DELAY: '60' + ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true' INHERITED_ANNOTATIONS: categories INHERITED_LABELS: environment, workload, app - ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES: 'true' + INSTANCES_ROLLOUT_DELAY: '10' ``` ## Restarting the operator to reload configs @@ -160,7 +168,7 @@ Then on the edit page scroll down the container args and add `--pprof-server=tru - /manager ``` -Save the changes, the deployment now will execute a rollout and the new pod will have the PPROF server enabled. +Save the changes, the deployment now will execute a roll-out and the new pod will have the PPROF server enabled. Once the pod is running you can exec inside the container by doing: diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go index b2b2a7e6d6..a70989aa55 100644 --- a/internal/configuration/configuration.go +++ b/internal/configuration/configuration.go @@ -21,6 +21,7 @@ package configuration import ( "path" "strings" + "time" "github.com/cloudnative-pg/machinery/pkg/log" @@ -108,6 +109,19 @@ type Data struct { // the -any service. Defaults to false. CreateAnyService bool `json:"createAnyService" env:"CREATE_ANY_SERVICE"` + // The duration (in seconds) to wait between the roll-outs of different + // clusters during an operator upgrade. This setting controls the + // timing of upgrades across clusters, spreading them out to reduce + // system impact. The default value is 0, which means no delay between + // PostgreSQL cluster upgrades. + ClustersRolloutDelay int `json:"clustersRolloutDelay" env:"CLUSTERS_ROLLOUT_DELAY"` + + // The duration (in seconds) to wait between roll-outs of individual + // PostgreSQL instances within the same cluster during an operator + // upgrade. The default value is 0, meaning no delay between upgrades + // of instances in the same PostgreSQL cluster. 
+ InstancesRolloutDelay int `json:"instancesRolloutDelay" env:"INSTANCES_ROLLOUT_DELAY"` + // IncludePlugins is a comma-separated list of plugins to always be // included in the Cluster reconciliation IncludePlugins string `json:"includePlugins" env:"INCLUDE_PLUGINS"` @@ -154,6 +168,17 @@ func (config *Data) IsLabelInherited(name string) bool { return evaluateGlobPatterns(config.InheritedLabels, name) } +// GetClustersRolloutDelay gets the delay between roll-outs of different clusters +func (config *Data) GetClustersRolloutDelay() time.Duration { + return time.Duration(config.ClustersRolloutDelay) * time.Second +} + +// GetInstancesRolloutDelay gets the delay between roll-outs of pods belonging +// to the same cluster +func (config *Data) GetInstancesRolloutDelay() time.Duration { + return time.Duration(config.InstancesRolloutDelay) * time.Second +} + // WatchedNamespaces get the list of additional watched namespaces. // The result is a list of namespaces specified in the WATCHED_NAMESPACE where // each namespace is separated by comma diff --git a/internal/configuration/configuration_test.go b/internal/configuration/configuration_test.go index 51c5be0d1e..97c5250334 100644 --- a/internal/configuration/configuration_test.go +++ b/internal/configuration/configuration_test.go @@ -17,6 +17,8 @@ limitations under the License. package configuration import ( + "time" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -145,4 +147,24 @@ var _ = Describe("Annotation and label inheritance", func() { }).GetIncludePlugins()).To(ContainElements("a", "b", "c")) }) }) + + It("returns correct delay for clusters rollout", func() { + config := Data{ClustersRolloutDelay: 10} + Expect(config.GetClustersRolloutDelay()).To(Equal(10 * time.Second)) + }) + + It("returns zero as default delay for clusters rollout when not set", func() { + config := Data{} + Expect(config.GetClustersRolloutDelay()).To(BeZero()) + }) + + It("returns correct delay for instances rollout", func() { + config := Data{InstancesRolloutDelay: 5} + Expect(config.GetInstancesRolloutDelay()).To(Equal(5 * time.Second)) + }) + + It("returns zero as default delay for instances rollout when not set", func() { + config := Data{} + Expect(config.GetInstancesRolloutDelay()).To(BeZero()) + }) }) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 4a446d21ea..00818e33f0 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -47,6 +47,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/operatorclient" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + rolloutManager "github.com/cloudnative-pg/cloudnative-pg/internal/controller/rollout" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" @@ -78,6 +79,8 @@ type ClusterReconciler struct { Recorder record.EventRecorder InstanceClient instance.Client Plugins repository.Interface + + rolloutManager *rolloutManager.Manager } // NewClusterReconciler creates a new ClusterReconciler initializing it @@ -93,6 +96,10 @@ func NewClusterReconciler( Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("cloudnative-pg"), Plugins: plugins, + rolloutManager: rolloutManager.New( + configuration.Current.GetClustersRolloutDelay(), + 
configuration.Current.GetInstancesRolloutDelay(),
+		),
 	}
 }
 
@@ -924,6 +931,21 @@ func (r *ClusterReconciler) handleRollingUpdate(
 			"not connected via streaming replication, waiting for 5 seconds",
 		)
 		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+	case errors.Is(err, errRolloutDelayed):
+		contextLogger.Warning(
+			"A Pod needs to be rolled out, but the rollout is being delayed",
+		)
+		if err := r.RegisterPhase(
+			ctx,
+			cluster,
+			apiv1.PhaseUpgradeDelayed,
+			"The cluster needs to be updated, but the operator is configured to delay "+
+				"the operation",
+		); err != nil {
+			return ctrl.Result{}, err
+		}
+
+		return ctrl.Result{RequeueAfter: 15 * time.Second}, nil
 	case err != nil:
 		return ctrl.Result{}, err
 	case done:
diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go
index 8e7d3c0245..cd3e6ac43a 100644
--- a/internal/controller/cluster_upgrade.go
+++ b/internal/controller/cluster_upgrade.go
@@ -42,6 +42,10 @@ import (
 // instance is not connected via streaming replication
 var errLogShippingReplicaElected = errors.New("log shipping replica elected as a new post-switchover primary")
 
+// errRolloutDelayed is raised when a pod rollout has been delayed because
+// of the operator configuration
+var errRolloutDelayed = errors.New("pod rollout delayed")
+
 type rolloutReason = string
 
 func (r *ClusterReconciler) rolloutRequiredInstances(
@@ -72,6 +76,19 @@ func (r *ClusterReconciler) rolloutRequiredInstances(
 			continue
 		}
 
+		managerResult := r.rolloutManager.CoordinateRollout(client.ObjectKeyFromObject(cluster), postgresqlStatus.Pod.Name)
+		if !managerResult.RolloutAllowed {
+			r.Recorder.Eventf(
+				cluster,
+				"Normal",
+				"RolloutDelayed",
+				"Rollout of pod %s has been delayed for %s",
+				postgresqlStatus.Pod.Name,
+				managerResult.TimeToWait.String(),
+			)
+			return false, errRolloutDelayed
+		}
+
 		restartMessage := fmt.Sprintf("Restarting instance %s, because: %s",
 			postgresqlStatus.Pod.Name, podRollout.reason)
 		if err := r.RegisterPhase(ctx, cluster, apiv1.PhaseUpgrade, restartMessage); err != nil {
@@ -105,6 +122,21 @@ func (r *ClusterReconciler) rolloutRequiredInstances(
 		return false, nil
 	}
 
+	managerResult := r.rolloutManager.CoordinateRollout(
+		client.ObjectKeyFromObject(cluster),
+		primaryPostgresqlStatus.Pod.Name)
+	if !managerResult.RolloutAllowed {
+		r.Recorder.Eventf(
+			cluster,
+			"Normal",
+			"RolloutDelayed",
+			"Rollout of pod %s has been delayed for %s",
+			primaryPostgresqlStatus.Pod.Name,
+			managerResult.TimeToWait.String(),
+		)
+		return false, errRolloutDelayed
+	}
+
 	return r.updatePrimaryPod(ctx, cluster, podList, *primaryPostgresqlStatus.Pod,
 		podRollout.canBeInPlace, podRollout.primaryForceRecreate, podRollout.reason)
 }
@@ -236,7 +268,11 @@ type rollout struct {
 	required             bool
 	canBeInPlace         bool
 	primaryForceRecreate bool
-	reason               string
+
+	needsChangeOperatorImage bool
+	needsChangeOperandImage  bool
+
+	reason string
 }
 
 type rolloutChecker func(
@@ -260,6 +296,7 @@ func isInstanceNeedingRollout(
 			required: true,
 			reason: fmt.Sprintf("pod '%s' is not reporting the executable hash",
 				status.Pod.Name),
+			needsChangeOperatorImage: true,
 		}
 	}
 
@@ -469,6 +506,7 @@ func checkPodImageIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout,
 		required: true,
 		reason: fmt.Sprintf("the instance is using a different image: %s -> %s",
 			pgCurrentImageName, targetImageName),
+		needsChangeOperandImage: true,
 	}, nil
 }
 
@@ -491,6 +529,7 @@ func checkPodInitContainerIsOutdated(pod *corev1.Pod, _ *apiv1.Cluster) (rollout
 		required: true,
 		reason: fmt.Sprintf("the instance is 
using an old init container image: %s -> %s", opCurrentImageName, configuration.Current.OperatorImageName), + needsChangeOperatorImage: true, }, nil } @@ -598,6 +637,7 @@ func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, e required: true, reason: fmt.Sprintf("the instance is using an old init container image: %s -> %s", opCurrentImageName, configuration.Current.OperatorImageName), + needsChangeOperatorImage: true, }, nil } diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index 7898a8adf2..2037ce1245 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -81,6 +81,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(BeEquivalentTo("the instance is using a different image: postgres:13.10 -> postgres:13.11")) + Expect(rollout.needsChangeOperandImage).To(BeTrue()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("requires rollout when a restart annotation has been added to the cluster", func(ctx SpecContext) { @@ -99,6 +101,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("cluster has been explicitly restarted via annotation")) Expect(rollout.canBeInPlace).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) rollout = isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeFalse()) @@ -157,6 +161,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout = isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("Postgres needs a restart to apply some configuration changes")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("requires pod rollout if executable does not have a hash", func(ctx SpecContext) { @@ -170,6 +176,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("pod 'test-1' is not reporting the executable hash")) Expect(rollout.canBeInPlace).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeTrue()) }) It("checkPodSpecIsOutdated should not return any error", func() { @@ -202,6 +210,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(BeEquivalentTo("Postgres needs a restart to apply some configuration changes")) Expect(rollout.canBeInPlace).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) When("the PodSpec annotation is not available", func() { @@ -220,6 +230,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("scheduler name changed")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) }) @@ -242,6 +254,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("scheduler-name")) + 
Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) When("cluster has resources specified", func() { @@ -272,6 +286,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("original and target PodSpec differ in containers")) Expect(rollout.reason).To(ContainSubstring("container postgres differs in resources")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should trigger a rollout when the cluster has Resources deleted from spec", func(ctx SpecContext) { pod := specs.PodWithExistingStorage(clusterWithResources, 1) @@ -288,6 +304,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("original and target PodSpec differ in containers")) Expect(rollout.reason).To(ContainSubstring("container postgres differs in resources")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) }) @@ -313,6 +331,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("environment variable configuration hash changed")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should not trigger a rollout on operator changes with inplace upgrades", func(ctx SpecContext) { @@ -361,6 +381,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.reason).To(ContainSubstring("the instance is using an old init container image")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeTrue()) }) }) @@ -386,6 +408,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(ContainSubstring("original and target PodSpec differ in containers")) Expect(rollout.reason).To(ContainSubstring("container postgres differs in environment")) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should not trigger a rollout on operator changes with inplace upgrades", func(ctx SpecContext) { @@ -432,6 +456,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &cluster) Expect(rollout.reason).To(ContainSubstring("the instance is using an old init container image")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeTrue()) }) }) @@ -532,6 +558,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("topology-spread-constraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when the LabelSelector maps are different", func(ctx SpecContext) { @@ -547,6 +575,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) 
Expect(rollout.reason).To(ContainSubstring("topology-spread-constraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when TopologySpreadConstraints is nil in one of the objects", func(ctx SpecContext) { @@ -560,6 +590,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("topology-spread-constraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should not require rollout if pod and spec both lack TopologySpreadConstraints", func(ctx SpecContext) { @@ -575,6 +607,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(BeEmpty()) Expect(rollout.required).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) }) @@ -588,6 +622,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(BeEmpty()) Expect(rollout.required).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when the cluster and pod do not have "+ @@ -603,6 +639,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("does not have up-to-date TopologySpreadConstraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when the LabelSelector maps are different", func(ctx SpecContext) { @@ -620,6 +658,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("does not have up-to-date TopologySpreadConstraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should require rollout when TopologySpreadConstraints is nil in one of the objects", func(ctx SpecContext) { @@ -635,6 +675,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(ContainSubstring("does not have up-to-date TopologySpreadConstraints")) Expect(rollout.required).To(BeTrue()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should not require rollout if pod and spec both lack TopologySpreadConstraints", func(ctx SpecContext) { @@ -651,6 +693,8 @@ var _ = Describe("Test pod rollout due to topology", func() { rollout := isInstanceNeedingRollout(ctx, status, cluster) Expect(rollout.reason).To(BeEmpty()) Expect(rollout.required).To(BeFalse()) + Expect(rollout.needsChangeOperandImage).To(BeFalse()) + Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) }) }) diff --git a/internal/controller/rollout/doc.go b/internal/controller/rollout/doc.go new file mode 100644 index 0000000000..a36ea9c5f3 --- /dev/null +++ 
b/internal/controller/rollout/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollout contains the rollout manager, allowing +// CloudNative-PG to spread Pod rollouts depending on +// the passed configuration +package rollout diff --git a/internal/controller/rollout/rollout.go b/internal/controller/rollout/rollout.go new file mode 100644 index 0000000000..cfe930fb6b --- /dev/null +++ b/internal/controller/rollout/rollout.go @@ -0,0 +1,110 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rollout + +import ( + "sync" + "time" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// The type of functions returning a moment in time +type timeFunc func() time.Time + +// Manager is the rollout manager. It is safe to use +// concurrently +type Manager struct { + m sync.Mutex + + // The amount of time we wait between rollouts of + // different clusters + clusterRolloutDelay time.Duration + + // The amount of time we wait between instances of + // the same cluster + instanceRolloutDelay time.Duration + + // This is used to get the current time. 
Mainly
+	// used by the unit tests to inject a fake time
+	timeProvider timeFunc
+
+	// The following data is relative to the last
+	// rollout
+	lastInstance string
+	lastCluster  client.ObjectKey
+	lastUpdate   time.Time
+}
+
+// Result is the output of the rollout manager, telling the
+// operator how much time we need to wait to roll out a Pod
+type Result struct {
+	// This is true when the Pod can be rolled out immediately
+	RolloutAllowed bool
+
+	// This is set with the amount of time the operator needs
+	// to wait to roll out that Pod
+	TimeToWait time.Duration
+}
+
+// New creates a new rollout manager with the passed configuration
+func New(clusterRolloutDelay, instancesRolloutDelay time.Duration) *Manager {
+	return &Manager{
+		timeProvider:         time.Now,
+		clusterRolloutDelay:  clusterRolloutDelay,
+		instanceRolloutDelay: instancesRolloutDelay,
+	}
+}
+
+// CoordinateRollout is called to check whether this rollout is allowed or not
+// by the manager
+func (manager *Manager) CoordinateRollout(
+	cluster client.ObjectKey,
+	instanceName string,
+) Result {
+	manager.m.Lock()
+	defer manager.m.Unlock()
+
+	if manager.lastCluster == cluster {
+		return manager.coordinateRolloutWithTime(cluster, instanceName, manager.instanceRolloutDelay)
+	}
+	return manager.coordinateRolloutWithTime(cluster, instanceName, manager.clusterRolloutDelay)
+}
+
+func (manager *Manager) coordinateRolloutWithTime(
+	cluster client.ObjectKey,
+	instanceName string,
+	t time.Duration,
+) Result {
+	now := manager.timeProvider()
+	timeSinceLastRollout := now.Sub(manager.lastUpdate)
+
+	if manager.lastUpdate.IsZero() || timeSinceLastRollout >= t {
+		manager.lastCluster = cluster
+		manager.lastInstance = instanceName
+		manager.lastUpdate = now
+		return Result{
+			RolloutAllowed: true,
+			TimeToWait:     0,
+		}
+	}
+
+	return Result{
+		RolloutAllowed: false,
+		TimeToWait:     t - timeSinceLastRollout,
+	}
+}
diff --git a/internal/controller/rollout/rollout_test.go b/internal/controller/rollout/rollout_test.go
new file mode 100644
index 0000000000..bafaf17666
--- /dev/null
+++ b/internal/controller/rollout/rollout_test.go
@@ -0,0 +1,164 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rollout
+
+import (
+	"time"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Rollout manager", func() {
+	It("should coordinate rollouts when delays are set", func() {
+		startTime := time.Now()
+		currentTime := startTime
+
+		const (
+			clustersRolloutDelay  = 10 * time.Minute
+			instancesRolloutDelay = 5 * time.Minute
+		)
+
+		m := New(clustersRolloutDelay, instancesRolloutDelay)
+		m.timeProvider = func() time.Time {
+			return currentTime
+		}
+
+		By("allowing the first rollout immediately", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-example",
+			}, "cluster-example-1")
+
+			Expect(result.RolloutAllowed).To(BeTrue())
+			Expect(result.TimeToWait).To(BeZero())
+		})
+
+		By("waiting for one minute", func() {
+			currentTime = currentTime.Add(1 * time.Minute)
+		})
+
+		By("checking that a rollout of an instance is not allowed", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-example",
+			}, "cluster-example-2")
+
+			Expect(result.RolloutAllowed).To(BeFalse())
+			Expect(result.TimeToWait).To(Equal(4 * time.Minute))
+			Expect(m.lastUpdate).To(Equal(startTime))
+		})
+
+		By("checking that a rollout of a cluster is not allowed", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-bis",
+			}, "cluster-bis-1")
+
+			Expect(result.RolloutAllowed).To(BeFalse())
+			Expect(result.TimeToWait).To(Equal(9 * time.Minute))
+			Expect(m.lastUpdate).To(Equal(startTime))
+		})
+
+		By("waiting for five minutes", func() {
+			currentTime = currentTime.Add(5 * time.Minute)
+		})
+
+		By("checking that a rollout of a cluster is still not allowed", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-bis",
+			}, "cluster-bis-1")
+
+			Expect(result.RolloutAllowed).To(BeFalse())
+			Expect(result.TimeToWait).To(Equal(4 * time.Minute))
+			Expect(m.lastUpdate).To(Equal(startTime))
+		})
+
+		By("checking that a rollout of an instance is allowed", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-example",
+			}, "cluster-example-2")
+
+			Expect(result.RolloutAllowed).To(BeTrue())
+			Expect(result.TimeToWait).To(BeZero())
+			Expect(m.lastUpdate).To(Equal(currentTime))
+		})
+
+		By("waiting for another eleven minutes", func() {
+			currentTime = currentTime.Add(11 * time.Minute)
+		})
+
+		By("checking that a rollout of a cluster is allowed", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-bis",
+			}, "cluster-bis-1")
+
+			Expect(result.RolloutAllowed).To(BeTrue())
+			Expect(result.TimeToWait).To(BeZero())
+			Expect(m.lastUpdate).To(Equal(currentTime))
+		})
+	})
+
+	It("should allow all rollouts when delays are not set", func() {
+		m := New(0, 0)
+
+		By("allowing the first rollout immediately", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-example",
+			}, "cluster-example-1")
+
+			Expect(result.RolloutAllowed).To(BeTrue())
+			Expect(result.TimeToWait).To(BeZero())
+		})
+
+		By("allowing a rollout of an instance", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-example",
+			}, "cluster-example-2")
+
+			Expect(result.RolloutAllowed).To(BeTrue())
+			Expect(result.TimeToWait).To(BeZero())
+		})
+
+		By("allowing a rollout of a cluster", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-bis",
+			}, "cluster-bis-1")
+
+			Expect(result.RolloutAllowed).To(BeTrue())
+			Expect(result.TimeToWait).To(BeZero())
+		})
+
+		By("allowing a rollout of another instance", func() {
+			result := m.CoordinateRollout(client.ObjectKey{
+				Namespace: "default",
+				Name:      "cluster-example",
+			}, "cluster-example-3")
+
+			Expect(result.RolloutAllowed).To(BeTrue())
+			Expect(result.TimeToWait).To(BeZero())
+		})
+	})
+})
diff --git a/internal/controller/rollout/suite_test.go b/internal/controller/rollout/suite_test.go
new file mode 100644
index 0000000000..3a8ee45027
--- /dev/null
+++ b/internal/controller/rollout/suite_test.go
@@ -0,0 +1,30 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rollout
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+func TestRollout(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Rollout manager suite")
+}
diff --git a/tests/e2e/fixtures/configmap-support/configmap.yaml b/tests/e2e/fixtures/configmap-support/configmap.yaml
index 99d5f9d09f..13bc3f3fd1 100644
--- a/tests/e2e/fixtures/configmap-support/configmap.yaml
+++ b/tests/e2e/fixtures/configmap-support/configmap.yaml
@@ -4,6 +4,8 @@ data:
   # wrong example2.com on purpose to check overwriting via secret works fine
   INHERITED_LABELS: environment, example2.com/*
   MONITORING_QUERIES_CONFIGMAP: ""
+  CLUSTERS_ROLLOUT_DELAY: '1'
+  INSTANCES_ROLLOUT_DELAY: '1'
 kind: ConfigMap
 metadata:
   name: cnpg-controller-manager-config

From b277d16d17855a1b3efc6838c9ceb9fe99fe0348 Mon Sep 17 00:00:00 2001
From: Jeremy Schneider
Date: Fri, 27 Sep 2024 02:26:18 -0700
Subject: [PATCH 014/836] fix(docs): example `xid_age` alert (#5570)

Because autovacuum_freeze_max_age defaults to 200 million, autovacuum
generally does not advance the oldest xid on tables and databases until
they reach this age. Accordingly, any alert threshold below 200 million
is not useful. A more reasonable default alert would be something like
300 million.

Note that vacuum_failsafe_age doesn't start until 1.6 billion by
default. If someone is decreasing the failsafe age (for example, to
under a billion), then it might even make sense to raise the alerting
threshold above the failsafe age.
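
For reference (not part of this patch), the age that the alert tracks can
be inspected directly in PostgreSQL; the following query is essentially
what the `cnpg_pg_database_xid_age` metric exposes:

```sql
-- Age, in transactions, of the oldest unfrozen XID per database;
-- the alert fires once the largest value crosses the threshold.
SELECT datname, age(datfrozenxid) AS xid_age
FROM pg_database
ORDER BY xid_age DESC;
```
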
Signed-off-by: Jeremy Schneider --- docs/src/samples/monitoring/alerts.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/samples/monitoring/alerts.yaml b/docs/src/samples/monitoring/alerts.yaml index a4b89b96fc..1fe4051708 100644 --- a/docs/src/samples/monitoring/alerts.yaml +++ b/docs/src/samples/monitoring/alerts.yaml @@ -21,10 +21,10 @@ groups: severity: warning - alert: PGDatabase annotations: - description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }} + description: Over 300,000,000 transactions from frozen xid on pod {{ $labels.pod }} summary: Number of transactions from the frozen XID to the current one expr: |- - cnpg_pg_database_xid_age > 150000000 + cnpg_pg_database_xid_age > 300000000 for: 1m labels: severity: warning From 21246066518c795f68bec9fbd64f1d30c44bec91 Mon Sep 17 00:00:00 2001 From: TillHein <12776762+TillHein@users.noreply.github.com> Date: Fri, 27 Sep 2024 11:31:22 +0200 Subject: [PATCH 015/836] fix(docs): increase `xid_age` alerting limit (#5226) The default `xid_age` value for a forced auto-vacuum is set to 200.000.000. The new values reflect these defaults. ``` #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum ``` Issues with 'xid' typically occur around the age of 200000000, providing plenty of time to respond to the elevated value alert. Signed-off-by: Till Hein Co-authored-by: Till Hein --- docs/src/samples/monitoring/prometheusrule.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/samples/monitoring/prometheusrule.yaml b/docs/src/samples/monitoring/prometheusrule.yaml index 3c72759637..b74c66590b 100644 --- a/docs/src/samples/monitoring/prometheusrule.yaml +++ b/docs/src/samples/monitoring/prometheusrule.yaml @@ -29,7 +29,7 @@ spec: description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }} summary: Number of transactions from the frozen XID to the current one expr: |- - cnpg_pg_database_xid_age > 150000000 + cnpg_pg_database_xid_age > 300000000 for: 1m labels: severity: warning From a7e3eca01001fab59a59039966da849d9319f5d3 Mon Sep 17 00:00:00 2001 From: Peggie Date: Fri, 27 Sep 2024 11:34:20 +0200 Subject: [PATCH 016/836] feat: Public Cloud K8S versions update (#5644) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 4 ++-- .github/eks_versions.json | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index b6064884d3..24b881347c 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,6 +1,6 @@ [ - "1.30.3", - "1.29.7", + "1.30.4", + "1.29.8", "1.28.9", "1.27.9" ] diff --git a/.github/eks_versions.json b/.github/eks_versions.json index 9fbe6428a3..537d9f2b64 100644 --- a/.github/eks_versions.json +++ b/.github/eks_versions.json @@ -1,4 +1,5 @@ [ + "1.31", "1.30", "1.29", "1.28", From 6e28aa8081f2f6b9e3c09575e20744acc5deb46b Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 27 Sep 2024 11:36:21 +0200 Subject: [PATCH 017/836] chore: add names to controllers (#5641) Controller-runtime v0.19.0 errors when a controller manager detects multiple controllers with the same name; this is done to improve the clarity of the log messages. This patch adds a name for every controller in the operator, complying with this rule. 
For more information, see https://github.com/kubernetes-sigs/controller-runtime/pull/2902 Closes #5640 Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- internal/cmd/manager/instance/run/cmd.go | 6 ++---- internal/controller/backup_controller.go | 1 + internal/controller/cluster_controller.go | 1 + internal/controller/plugin_controller.go | 1 + internal/controller/pooler_controller.go | 1 + internal/controller/scheduledbackup_controller.go | 1 + internal/management/controller/database_controller.go | 1 + internal/management/controller/externalservers/manager.go | 1 + internal/management/controller/tablespaces/manager.go | 1 + 9 files changed, 10 insertions(+), 4 deletions(-) diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index f1007407ca..d4f78f0f21 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -193,6 +193,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { reconciler := controller.NewInstanceReconciler(instance, mgr.GetClient(), metricsExporter) err = ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). + Named("instance-cluster"). Complete(reconciler) if err != nil { setupLog.Error(err, "unable to create instance controller") @@ -202,10 +203,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { // database reconciler dbReconciler := controller.NewDatabaseReconciler(mgr, instance) - err = ctrl.NewControllerManagedBy(mgr). - For(&apiv1.Database{}). - Complete(dbReconciler) - if err != nil { + if err := dbReconciler.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create database controller") return err } diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index b1d0745f44..424ee3966c 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -664,6 +664,7 @@ func (r *BackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manage controllerBuilder := ctrl.NewControllerManagedBy(mgr). For(&apiv1.Backup{}). + Named("backup"). Watches(&apiv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.mapClustersToBackup()), builder.WithPredicates(clustersWithBackupPredicate), diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 00818e33f0..44a09a3cc2 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -989,6 +989,7 @@ func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag return ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). + Named("cluster"). Owns(&corev1.Pod{}). Owns(&batchv1.Job{}). Owns(&corev1.Service{}). diff --git a/internal/controller/plugin_controller.go b/internal/controller/plugin_controller.go index b29f0ca7e8..7d4e606e2e 100644 --- a/internal/controller/plugin_controller.go +++ b/internal/controller/plugin_controller.go @@ -225,6 +225,7 @@ func (r *PluginReconciler) SetupWithManager(mgr ctrl.Manager, operatorNamespace return ctrl.NewControllerManagedBy(mgr). For(&corev1.Service{}). + Named("plugin"). WithEventFilter(pluginServicesPredicate). 
Complete(r) } diff --git a/internal/controller/pooler_controller.go b/internal/controller/pooler_controller.go index 8be6af3ec6..af54dbb344 100644 --- a/internal/controller/pooler_controller.go +++ b/internal/controller/pooler_controller.go @@ -129,6 +129,7 @@ func (r *PoolerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr func (r *PoolerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&apiv1.Pooler{}). + Named("pooler"). Owns(&v1.Deployment{}). Owns(&corev1.Service{}). Owns(&corev1.ServiceAccount{}). diff --git a/internal/controller/scheduledbackup_controller.go b/internal/controller/scheduledbackup_controller.go index 45c71ad5a8..538d53193e 100644 --- a/internal/controller/scheduledbackup_controller.go +++ b/internal/controller/scheduledbackup_controller.go @@ -353,5 +353,6 @@ func (r *ScheduledBackupReconciler) SetupWithManager(ctx context.Context, mgr ct return ctrl.NewControllerManagedBy(mgr). For(&apiv1.ScheduledBackup{}). + Named("scheduled-backup"). Complete(r) } diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index 3ffb963530..da078659d2 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -237,6 +237,7 @@ func NewDatabaseReconciler( func (r *DatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&apiv1.Database{}). + Named("instance-database"). Complete(r) } diff --git a/internal/management/controller/externalservers/manager.go b/internal/management/controller/externalservers/manager.go index 496ddcba21..0c6eb00687 100644 --- a/internal/management/controller/externalservers/manager.go +++ b/internal/management/controller/externalservers/manager.go @@ -46,6 +46,7 @@ func NewReconciler(instance *postgres.Instance, client client.Client) *Reconcile func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). + Named("instance-external-server"). Complete(r) } diff --git a/internal/management/controller/tablespaces/manager.go b/internal/management/controller/tablespaces/manager.go index 9e561b7b5a..a484432d1e 100644 --- a/internal/management/controller/tablespaces/manager.go +++ b/internal/management/controller/tablespaces/manager.go @@ -47,6 +47,7 @@ func NewTablespaceReconciler(instance *postgres.Instance, client client.Client) func (r *TablespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). + Named("instance-tablespaces"). Complete(r) } From 8e62b477b2edd3ed2fabbf9b5c683fbe44011e5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Fri, 27 Sep 2024 11:42:34 +0200 Subject: [PATCH 018/836] ci: avoid testing deprecated versions of AKS (#5638) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The last [release of AKS](https://github.com/Azure/AKS/releases/tag/2024-08-27) deprecated v1.27, meaning that now you need to [Enable the long-term support plan](https://learn.microsoft.com/en-gb/azure/aks/long-term-support#enable-long-term-support) to get access to this version. Right now the cloud provider E2E test for AKS 1.27 are failing during the creation with: ``` ERROR: (K8sVersionNotSupported) Managed cluster ***-3482-aks1279PostgreSQL17rc1 is on version 1.27.9 which is not supported in this region. 
Please use [az aks get-versions] command to get the supported version list in this region. For more information, please check https://aka.ms/supported-version-list Code: K8sVersionNotSupported Message: Managed cluster ***-3482-aks1279PostgreSQL17rc1 is on version 1.27.9 which is not supported in this region. Please use [az aks get-versions] command to get the supported version list in this region. For more information, please check https://aka.ms/supported-version-list ``` This patch makes it so that we only test versions that are currently supported (i.e., `KubernetesOfficial`), effectively excluding the ones that are only available under `AKSLongTermSupport`. Signed-off-by: Niccolò Fei --- .github/workflows/k8s-versions-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index fd5a05ca2c..3230320766 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -57,7 +57,7 @@ jobs: name: Get updated AKS versions run: | az aks get-versions --location westeurope \ - --query 'reverse(sort(values[? isPreview != `true`].patchVersions.keys(@)[]))' -o tsv | \ + --query "reverse(sort(values[? isPreview != 'true' && contains(capabilities.supportPlan, 'KubernetesOfficial')].patchVersions.keys(@)[]))" -o tsv | \ sort -urk 1,1.5 | \ awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \ jq -Rn '[inputs]' | tee .github/aks_versions.json From 708b58b33672d5230028ba6f2d9b7652d55e0b04 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 27 Sep 2024 11:55:46 +0200 Subject: [PATCH 019/836] chore: remove `deleteOldCustomQueriesConfigmap` (#5555) The code was used to clean up `custom monitoring queries configmaps from older versions (v1.10 and v1.11)` and is no longer used. Closes #5554 Signed-off-by: Armando Ruocco --- internal/controller/cluster_controller.go | 74 ----------------- internal/controller/cluster_create.go | 6 -- 2 files changed, 80 deletions(-) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 44a09a3cc2..2e208ac696 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -1374,77 +1374,3 @@ func (r *ClusterReconciler) markPVCReadyForCompletedJobs( return nil } - -// TODO: only required to cleanup custom monitoring queries configmaps from older versions (v1.10 and v1.11) -// that could have been copied with the source configmap name instead of the new default one. -// Should be removed in future releases.
-func (r *ClusterReconciler) deleteOldCustomQueriesConfigmap(ctx context.Context, cluster *apiv1.Cluster) { - contextLogger := log.FromContext(ctx) - - // if the cluster didn't have default monitoring queries, do nothing - if cluster.Spec.Monitoring.AreDefaultQueriesDisabled() || - configuration.Current.MonitoringQueriesConfigmap == "" || - configuration.Current.MonitoringQueriesConfigmap == apiv1.DefaultMonitoringConfigMapName { - return - } - - // otherwise, remove the old default monitoring queries configmap from the cluster and delete it, if present - oldCmID := -1 - for idx, cm := range cluster.Spec.Monitoring.CustomQueriesConfigMap { - if cm.Name == configuration.Current.MonitoringQueriesConfigmap && - cm.Key == apiv1.DefaultMonitoringKey { - oldCmID = idx - break - } - } - - // if we didn't find it, do nothing - if oldCmID < 0 { - return - } - - // if we found it, we are going to get it and check it was actually created by the operator or was already deleted - var oldCm corev1.ConfigMap - err := r.Get(ctx, types.NamespacedName{ - Name: configuration.Current.MonitoringQueriesConfigmap, - Namespace: cluster.Namespace, - }, &oldCm) - // if we found it, we check the annotation the operator should have set to be sure it was created by us - if err == nil { // nolint:nestif - // if it was, we delete it and proceed to remove it from the cluster monitoring spec - if _, ok := oldCm.Annotations[utils.OperatorVersionAnnotationName]; ok { - err = r.Delete(ctx, &oldCm) - // if there is any error except the cm was already deleted, we return - if err != nil && !apierrs.IsNotFound(err) { - contextLogger.Warning("error while deleting old default monitoring custom queries configmap", - "err", err, - "configmap", configuration.Current.MonitoringQueriesConfigmap) - return - } - } else { - // it exists, but it's not handled by the operator, we do nothing - contextLogger.Warning("A configmap with the same name as the old default monitoring queries "+ - "configmap exists, but doesn't have the required annotation, so it won't be deleted, "+ - "nor removed from the cluster monitoring spec", - "configmap", oldCm.Name) - return - } - } else if !apierrs.IsNotFound(err) { - // if there is any error except the cm was already deleted, we return - contextLogger.Warning("error while getting old default monitoring custom queries configmap", - "err", err, - "configmap", configuration.Current.MonitoringQueriesConfigmap) - return - } - // both if it exists or not, if we are here we should delete it from the list of custom queries configmaps - oldCluster := cluster.DeepCopy() - cluster.Spec.Monitoring.CustomQueriesConfigMap = append(cluster.Spec.Monitoring.CustomQueriesConfigMap[:oldCmID], - cluster.Spec.Monitoring.CustomQueriesConfigMap[oldCmID+1:]...) 
- err = r.Patch(ctx, cluster, client.MergeFrom(oldCluster)) - if err != nil { - log.Warning("had an error while removing the old custom monitoring queries configmap from "+ - "the monitoring section in the cluster", - "err", err, - "configmap", configuration.Current.MonitoringQueriesConfigmap) - } -} diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 239cff3e0e..acda6f82fa 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -98,12 +98,6 @@ func (r *ClusterReconciler) createPostgresClusterObjects(ctx context.Context, cl return err } - // TODO: only required to cleanup custom monitoring queries configmaps from older versions (v1.10 and v1.11) - // that could have been copied with the source configmap name instead of the new default one. - // Should be removed in future releases. - // should never return an error, not a requirement, just a nice to have - r.deleteOldCustomQueriesConfigmap(ctx, cluster) - return nil } From 4e37e23cf9f38294524cbdce495bc62fd7883224 Mon Sep 17 00:00:00 2001 From: Zekiye Aydemir Date: Fri, 27 Sep 2024 13:31:22 +0300 Subject: [PATCH 020/836] feat: enable configuration of `full_page_writes` (#5516) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch enables users to customize the `full_page_writes` GUC in PostgreSQL. It is set to `on` by default, as per the PostgreSQL recommendation. Closes #5506 Signed-off-by: zekiyeaydemir Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. --- docs/src/postgresql_conf.md | 3 +-- pkg/postgres/configuration.go | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index c85573a4d0..b3e76b6c74 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -66,6 +66,7 @@ The **global default parameters** are: ```text archive_mode = 'on' dynamic_shared_memory_type = 'posix' +full_page_writes = 'on' logging_collector = 'on' log_destination = 'csvlog' log_directory = '/controller/log' @@ -111,7 +112,6 @@ The following parameters are **fixed** and exclusively controlled by the operato ```text archive_command = '/controller/manager wal-archive %p' -full_page_writes = 'on' hot_standby = 'true' listen_addresses = '*' port = '5432' @@ -592,7 +592,6 @@ Users are not allowed to set the following configuration parameters in the - `data_sync_retry` - `event_source` - `external_pid_file` -- `full_page_writes` - `hba_file` - `hot_standby` - `ident_file` diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 6423097d33..4095888839 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -431,7 +431,6 @@ var ( // The following parameters need a reload to be applied "archive_cleanup_command": blockedConfigurationParameter, "archive_command": fixedConfigurationParameter, - "full_page_writes": fixedConfigurationParameter, "log_destination": blockedConfigurationParameter, "log_directory": blockedConfigurationParameter, "log_file_mode": blockedConfigurationParameter, @@ -467,6 +466,7 @@ var ( CnpgConfigurationSettings = ConfigurationSettings{ GlobalDefaultSettings: SettingsCollection{ "archive_timeout": "5min", + "full_page_writes": "on", "max_parallel_workers": "32", "max_worker_processes": "32", "max_replication_slots": "32", @@ -512,7 +512,6 @@ var ( "archive_command": fmt.Sprintf( "/controller/manager wal-archive --log-destination %s/%s.json %%p", LogPath, LogFileName), "port":
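// Context for this hunk: "full_page_writes" is no longer marked as a
// fixedConfigurationParameter and is dropped from the mandatory settings below,
// moving instead into GlobalDefaultSettings, which is what makes the GUC
// user-configurable while keeping "on" as the default. A minimal sketch, with
// hypothetical map names, of the precedence this change relies on when the final
// PostgreSQL configuration is assembled (defaults first, user parameters on top,
// fixed settings always winning):
//
//	settings := map[string]string{}
//	maps.Copy(settings, globalDefaults) // e.g. "full_page_writes": "on"; users may override
//	maps.Copy(settings, userParameters) // taken from spec.postgresql.parameters
//	maps.Copy(settings, fixedSettings)  // e.g. "archive_command"; never overridable
//
// maps.Copy is the standard library helper from the "maps" package (Go 1.21+).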
fmt.Sprint(ServerPort), - "full_page_writes": "on", "ssl": "on", "ssl_cert_file": ServerCertificateLocation, "ssl_key_file": ServerKeyLocation, From 0c61f4cee71e3c7b4fd5632c9835848bb07a431b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Sep 2024 15:54:15 +0200 Subject: [PATCH 021/836] fix(deps): update module sigs.k8s.io/controller-runtime to v0.19.0 (main) (#5402) https://github.com/kubernetes-sigs/controller-runtime `v0.18.4` -> `v0.19.0` k8s.io/api `v0.30.3` -> `v0.31.0` k8s.io/apiextensions-apiserver `v0.30.3` -> `v0.31.0` k8s.io/apimachinery `v0.30.3` -> `v0.31.0` k8s.io/client-go `v0.30.3` -> `v0.31.0` github.com/moby/spdystream `v0.2.0` -> `v0.4.0` google.golang.org/genproto/googleapis/rpc `v0.0.0-20240528184218-531527333157` -> `v0.0.0-20240701130421-f6361c86f094` Signed-off-by: Jonathan Gonzalez V. Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Jonathan Gonzalez V. --- .../bases/postgresql.cnpg.io_clusters.yaml | 39 ++-- .../crd/bases/postgresql.cnpg.io_poolers.yaml | 176 +++++++++++++----- go.mod | 16 +- go.sum | 33 ++-- 4 files changed, 180 insertions(+), 84 deletions(-) diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index ae2bdc9475..128d44e47d 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -146,7 +146,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -161,7 +161,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -327,7 +327,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -342,7 +342,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -505,7 +505,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -520,7 +520,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -686,7 +686,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -701,7 +701,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -2457,7 +2457,7 @@ spec: set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -4146,10 +4146,13 @@ spec: format: int32 type: integer sources: - description: sources is the list of volume projections + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. items: - description: Projection that may be projected along with other - supported volume types + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. properties: clusterTrustBundle: description: |- @@ -4589,6 +4592,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -4899,7 +4908,7 @@ spec: set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -5157,7 +5166,7 @@ spec: set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -5569,7 +5578,7 @@ spec: set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index 7e5ffcfd45..e09e39b615 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -1061,7 +1061,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1076,7 +1076,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1244,7 +1244,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1259,7 +1259,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1425,7 +1425,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1440,7 +1440,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1608,7 +1608,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1623,7 +1623,7 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -2592,6 +2592,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -2715,7 +2721,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -4074,6 +4080,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -4185,7 +4197,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. @@ -5593,6 +5605,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -5716,7 +5734,7 @@ spec: procMount: description: |- procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for + The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. 
Note that this field cannot be set when spec.os.name is windows. @@ -6149,9 +6167,11 @@ spec: x-kubernetes-list-type: map nodeName: description: |- - NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - the scheduler simply schedules this pod onto that node, assuming that it fits resource - requirements. + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename type: string nodeSelector: additionalProperties: @@ -6184,6 +6204,7 @@ spec: - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile @@ -6277,7 +6298,10 @@ spec: This field is immutable. items: description: |- - PodResourceClaim references exactly one ResourceClaim through a ClaimSource. + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. properties: @@ -6286,30 +6310,32 @@ spec: Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. type: string - source: - description: Source describes where to find the ResourceClaim. - properties: - resourceClaimName: - description: |- - ResourceClaimName is the name of a ResourceClaim object in the same - namespace as this pod. - type: string - resourceClaimTemplateName: - description: |- - ResourceClaimTemplateName is the name of a ResourceClaimTemplate - object in the same namespace as this pod. + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. - The template will be used to create a new ResourceClaim, which will - be bound to this pod. When this pod is deleted, the ResourceClaim - will also be deleted. The pod name and resource name, along with a - generated component, will be used to form a unique name for the - ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - This field is immutable and no changes will be made to the - corresponding ResourceClaim by the control plane after creating the - ResourceClaim. 
- type: string - type: object + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string required: - name type: object @@ -6493,18 +6519,28 @@ spec: type: object supplementalGroups: description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer type: array x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported @@ -6884,6 +6920,7 @@ spec: the blob storage type: string fsType: + default: ext4 description: |- fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. @@ -6897,6 +6934,7 @@ spec: set). defaults to shared' type: string readOnly: + default: false description: |- readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. @@ -7506,7 +7544,7 @@ spec: set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7729,6 +7767,41 @@ spec: required: - path type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. 
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object iscsi: description: |- iscsi represents an ISCSI Disk resource that is attached to a @@ -7760,6 +7833,7 @@ spec: description: iqn is the target iSCSI Qualified Name. type: string iscsiInterface: + default: default description: |- iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). @@ -7911,10 +7985,13 @@ spec: format: int32 type: integer sources: - description: sources is the list of volume projections + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. items: - description: Projection that may be projected - along with other supported volume types + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. properties: clusterTrustBundle: description: |- @@ -8299,6 +8376,7 @@ spec: More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: string keyring: + default: /etc/ceph/keyring description: |- keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
@@ -8313,6 +8391,7 @@ spec: type: array x-kubernetes-list-type: atomic pool: + default: rbd description: |- pool is the rados pool name. Default is rbd. @@ -8343,6 +8422,7 @@ spec: type: object x-kubernetes-map-type: atomic user: + default: admin description: |- user is the rados user name. Default is admin. @@ -8357,6 +8437,7 @@ spec: volume attached and mounted on Kubernetes nodes. properties: fsType: + default: xfs description: |- fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. @@ -8397,6 +8478,7 @@ spec: communication with Gateway, default false type: boolean storageMode: + default: ThinProvisioned description: |- storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. diff --git a/go.mod b/go.mod index 1541ca8dc3..4872e9310d 100644 --- a/go.mod +++ b/go.mod @@ -39,13 +39,13 @@ require ( golang.org/x/term v0.24.0 google.golang.org/grpc v1.65.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.30.3 - k8s.io/apiextensions-apiserver v0.30.3 - k8s.io/apimachinery v0.30.3 + k8s.io/api v0.31.0 + k8s.io/apiextensions-apiserver v0.31.0 + k8s.io/apimachinery v0.31.0 k8s.io/cli-runtime v0.30.3 - k8s.io/client-go v0.30.3 + k8s.io/client-go v0.31.0 k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 - sigs.k8s.io/controller-runtime v0.18.4 + sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 ) @@ -58,6 +58,7 @@ require ( github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -86,7 +87,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.4.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -99,6 +100,7 @@ require ( github.com/prometheus/common v0.59.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.starlark.net v0.0.0-20240411212711-9b43f0afd521 // indirect go.uber.org/zap v1.27.0 // indirect @@ -112,7 +114,7 @@ require ( golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 7d2722b6f1..ff707b2573 100644 --- a/go.sum +++ b/go.sum @@ -41,6 +41,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 
v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -77,7 +79,6 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= @@ -130,8 +131,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -191,6 +192,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -255,8 +258,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod 
h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= @@ -275,24 +278,24 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= -k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= -k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= -k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= -k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= -k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= +k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= +k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/cli-runtime v0.30.3 h1:aG69oRzJuP2Q4o8dm+f5WJIX4ZBEwrvdID0+MXyUY6k= k8s.io/cli-runtime v0.30.3/go.mod h1:hwrrRdd9P84CXSKzhHxrOivAR9BRnkMt0OeP5mj7X30= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= +k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.1 
h1:MYJBOP/yQ3/5tp4/sf6HiiMfNNyO97LmtnirH9SLNr4= From be423b21831b53089e4d21ba73c050cf1bd95ac5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Sep 2024 18:59:06 +0200 Subject: [PATCH 022/836] fix(deps): update kubernetes patches (main) (#5658) https://redirect.github.com/kubernetes/api `v0.31.0` -> `v0.31.1` https://redirect.github.com/kubernetes/apiextensions-apiserver `v0.31.0` -> `v0.31.1` https://redirect.github.com/kubernetes/apimachinery `v0.31.0` -> `v0.31.1` https://redirect.github.com/kubernetes/client-go `v0.31.0` -> `v0.31.1` https://redirect.github.com/kubernetes/utils `702e33f` -> `49e7df5` --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 4872e9310d..77c1df4a40 100644 --- a/go.mod +++ b/go.mod @@ -39,12 +39,12 @@ require ( golang.org/x/term v0.24.0 google.golang.org/grpc v1.65.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.31.0 - k8s.io/apiextensions-apiserver v0.31.0 - k8s.io/apimachinery v0.31.0 + k8s.io/api v0.31.1 + k8s.io/apiextensions-apiserver v0.31.1 + k8s.io/apimachinery v0.31.1 k8s.io/cli-runtime v0.30.3 - k8s.io/client-go v0.31.0 - k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 + k8s.io/client-go v0.31.1 + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index ff707b2573..1c961cd36b 100644 --- a/go.sum +++ b/go.sum @@ -278,22 +278,22 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/cli-runtime v0.30.3 h1:aG69oRzJuP2Q4o8dm+f5WJIX4ZBEwrvdID0+MXyUY6k= k8s.io/cli-runtime v0.30.3/go.mod h1:hwrrRdd9P84CXSKzhHxrOivAR9BRnkMt0OeP5mj7X30= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 
h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= -k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= -k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= From 7e3dba4280153e8693b0b6a405141fdfc5df31f7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 28 Sep 2024 15:25:43 +0200 Subject: [PATCH 023/836] fix(deps): update module k8s.io/cli-runtime to v0.31.1 (main) (#5303) https://github.com/kubernetes/cli-runtime `v0.30.3` -> `v0.31.1` sigs.k8s.io/kustomize/api `v0.17.1` -> `v0.17.2` sigs.k8s.io/kustomize/kyaml `v0.17.0` -> `v0.17.1` --- go.mod | 7 +++---- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 77c1df4a40..8e5341a1db 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( k8s.io/api v0.31.1 k8s.io/apiextensions-apiserver v0.31.1 k8s.io/apimachinery v0.31.1 - k8s.io/cli-runtime v0.30.3 + k8s.io/cli-runtime v0.31.1 k8s.io/client-go v0.31.1 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 sigs.k8s.io/controller-runtime v0.19.0 @@ -55,7 +55,6 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect - github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect @@ -122,7 +121,7 @@ require ( k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.17.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.0 // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 1c961cd36b..c0d62b12f0 100644 --- a/go.sum +++ b/go.sum @@ -284,8 +284,8 @@ k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/ k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/cli-runtime v0.30.3 h1:aG69oRzJuP2Q4o8dm+f5WJIX4ZBEwrvdID0+MXyUY6k= -k8s.io/cli-runtime v0.30.3/go.mod h1:hwrrRdd9P84CXSKzhHxrOivAR9BRnkMt0OeP5mj7X30= +k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= +k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/klog/v2 v2.130.1 
h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= @@ -298,10 +298,10 @@ sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.17.1 h1:MYJBOP/yQ3/5tp4/sf6HiiMfNNyO97LmtnirH9SLNr4= -sigs.k8s.io/kustomize/api v0.17.1/go.mod h1:ffn5491s2EiNrJSmgqcWGzQUVhc/pB0OKNI0HsT/0tA= -sigs.k8s.io/kustomize/kyaml v0.17.0 h1:G2bWs03V9Ur2PinHLzTUJ8Ded+30SzXZKiO92SRDs3c= -sigs.k8s.io/kustomize/kyaml v0.17.0/go.mod h1:6lxkYF1Cv9Ic8g/N7I86cvxNc5iinUo/P2vKsHNmpyE= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From 049db84ed82ba334ad4d46d6ffac0df6066534fa Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 28 Sep 2024 17:59:24 +0200 Subject: [PATCH 024/836] chore(deps): update dependency ubuntu to v24 (main) (#5660) --- .github/workflows/backport.yml | 6 ++-- .github/workflows/chatops.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 34 +++++++++---------- .github/workflows/continuous-integration.yml | 32 ++++++++--------- .github/workflows/k8s-versions-check.yml | 2 +- .../latest-postgres-version-check.yml | 2 +- .github/workflows/release-pr.yml | 2 +- .github/workflows/release-publish.yml | 12 +++---- .github/workflows/release-tag.yml | 2 +- .github/workflows/require-labels.yml | 2 +- .github/workflows/snyk.yml | 2 +- .github/workflows/spellcheck.yml | 4 +-- 13 files changed, 52 insertions(+), 52 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 3cb706ce64..7cdf956314 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -20,7 +20,7 @@ jobs: github.event.pull_request.merged == false && !contains(github.event.pull_request.labels.*.name, 'backport-requested') && !contains(github.event.pull_request.labels.*.name, 'do not backport') - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Label the pull request @@ -69,7 +69,7 @@ jobs: contains(github.event.pull_request.labels.*.name, 'backport-requested :arrow_backward:') ) && !contains(github.event.pull_request.labels.*.name, 'do not backport') - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: fail-fast: false matrix: @@ -141,7 +141,7 @@ jobs: env: PR: ${{ github.event.pull_request.number }} COMMIT: ${{ needs.back-porting-pr.outputs.commit }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: create ticket uses: dacbd/create-issue-action@v2 diff --git a/.github/workflows/chatops.yml b/.github/workflows/chatops.yml index b2058ca48e..eac136d990 100644 --- a/.github/workflows/chatops.yml +++ b/.github/workflows/chatops.yml @@ -14,7 +14,7 @@ jobs: if: | 
github.event.issue.pull_request && startsWith(github.event.comment.body, '/ok-to-merge') - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check User Permission id: checkUser diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d2e84c807b..aa638e4e5a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -36,7 +36,7 @@ env: jobs: duplicate_runs: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 name: Skip duplicate runs continue-on-error: true outputs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index a2a9e8f77f..07120f15b0 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -63,7 +63,7 @@ jobs: # Trigger the workflow on release-* branches for smoke testing whenever it's a scheduled run. # Note: this is a workaround since we can't directly schedule-run a workflow from a non default branch smoke_test_release_branches: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 name: smoke test release-* branches when it's a scheduled run if: github.event_name == 'schedule' strategy: @@ -84,7 +84,7 @@ jobs: github.event.issue.pull_request && startsWith(github.event.comment.body, '/test') name: Retrieve command - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: github_ref: ${{ steps.refs.outputs.head_sha }} depth: ${{ env.DEPTH }} @@ -161,7 +161,7 @@ jobs: name: Parse arguments if: | github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: github_ref: ${{ github.ref }} depth: ${{ env.DEPTH }} @@ -199,7 +199,7 @@ jobs: needs: - check_commenter - test_arguments - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: | ( needs.check_commenter.result == 'success' || @@ -242,7 +242,7 @@ jobs: if: | always() && !cancelled() && needs.evaluate_options.result == 'success' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 permissions: contents: read packages: write @@ -506,7 +506,7 @@ jobs: needs.buildx.result == 'success' && needs.buildx.outputs.upload_artifacts == 'true' && github.repository_owner == 'cloudnative-pg' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout artifact @@ -572,7 +572,7 @@ jobs: if: | (always() && !cancelled()) && needs.buildx.result == 'success' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: image: ${{ needs.buildx.outputs.image }} localMatrix: ${{ steps.generate-jobs.outputs.localMatrix }} @@ -620,7 +620,7 @@ jobs: strategy: fail-fast: false matrix: ${{ fromJSON(needs.generate-jobs.outputs.localMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -822,7 +822,7 @@ jobs: - buildx - generate-jobs - evaluate_options - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: azure_storage_account: ${{ steps.setup.outputs.azure_storage_account }} steps: @@ -868,7 +868,7 @@ jobs: fail-fast: false max-parallel: 8 matrix: ${{ fromJSON(needs.generate-jobs.outputs.aksMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -1168,7 +1168,7 @@ jobs: - generate-jobs - e2e-aks-setup - e2e-aks - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: AZURE_STORAGE_ACCOUNT: ${{ 
needs.e2e-aks-setup.outputs.azure_storage_account }} steps: @@ -1209,7 +1209,7 @@ jobs: fail-fast: false max-parallel: 6 matrix: ${{ fromJSON(needs.generate-jobs.outputs.eksMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -1597,7 +1597,7 @@ jobs: fail-fast: false max-parallel: 6 matrix: ${{ fromJSON(needs.generate-jobs.outputs.gkeMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -1894,7 +1894,7 @@ jobs: fail-fast: false max-parallel: 6 matrix: ${{ fromJSON(needs.generate-jobs.outputs.openshiftMatrix) }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} @@ -2107,7 +2107,7 @@ jobs: needs.e2e-openshift.result == 'success' || needs.e2e-openshift.result == 'failure' )) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Create a directory for the artifacts run: mkdir test-artifacts @@ -2190,7 +2190,7 @@ jobs: needs.e2e-local.result == 'success' && github.event_name == 'issue_comment' && needs.evaluate_options.outputs.test_level == '4' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check preconditions id: get_pr_number_and_labels @@ -2219,7 +2219,7 @@ jobs: always() && needs.e2e-local.result == 'failure' && github.event_name == 'issue_comment' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check preconditions id: get_pr_number_and_labels diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 427ccc631e..212f877293 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -46,7 +46,7 @@ jobs: # Trigger the workflow on release-* branches for smoke testing whenever it's a scheduled run. # Note: this is a workaround since we can't directly schedule-run a workflow from a non default branch smoke_test_release_branches: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 name: smoke test release-* branches when it's a scheduled run if: github.event_name == 'schedule' strategy: @@ -65,7 +65,7 @@ jobs: # 1. it's on 'main' branch # 2. 
it's triggered by events in the 'do_not_skip' list duplicate_runs: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 name: Skip duplicate runs continue-on-error: true outputs: @@ -86,7 +86,7 @@ jobs: name: Check changed files needs: duplicate_runs if: ${{ needs.duplicate_runs.outputs.should_skip != 'true' }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: docs-changed: ${{ steps.filter.outputs.docs-changed }} operator-changed: ${{ steps.filter.outputs.operator-changed }} @@ -145,7 +145,7 @@ jobs: - change-triage # We need always run linter as go linter is a required check if: needs.duplicate_runs.outputs.should_skip != 'true' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code uses: actions/checkout@v4 @@ -175,7 +175,7 @@ jobs: if: | needs.duplicate_runs.outputs.should_skip != 'true' && needs.change-triage.outputs.renovate-changed == 'true' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code uses: actions/checkout@v4 @@ -194,7 +194,7 @@ jobs: needs.change-triage.outputs.operator-changed == 'true' || needs.change-triage.outputs.go-code-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Run govulncheck uses: golang/govulncheck-action@v1 @@ -211,7 +211,7 @@ jobs: if: | needs.duplicate_runs.outputs.should_skip != 'true' && needs.change-triage.outputs.shell-script-changed == 'true' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 env: SHELLCHECK_OPTS: -a -S style steps: @@ -233,7 +233,7 @@ jobs: needs.change-triage.outputs.operator-changed == 'true' || needs.change-triage.outputs.go-code-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: k8sMatrix: ${{ steps.get-k8s-versions.outputs.k8s_versions }} latest_k8s_version: ${{ steps.get-k8s-versions.outputs.latest_k8s_version }} @@ -272,7 +272,7 @@ jobs: needs.change-triage.outputs.operator-changed == 'true' || needs.change-triage.outputs.go-code-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 strategy: matrix: # The Unit test is performed per multiple supported k8s versions (each job for each k8s version) as below: @@ -315,7 +315,7 @@ jobs: needs.change-triage.outputs.go-code-changed == 'true' || needs.change-triage.outputs.docs-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code uses: actions/checkout@v4 @@ -350,7 +350,7 @@ jobs: needs.change-triage.outputs.go-code-changed == 'true' || needs.change-triage.outputs.operator-changed == 'true' ) - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code uses: actions/checkout@v4 @@ -403,7 +403,7 @@ jobs: (needs.tests.result == 'success' || needs.tests.result == 'skipped') && (needs.apidoc.result == 'success' || needs.apidoc.result == 'skipped') && (needs.crd.result == 'success' || needs.crd.result == 'skipped') - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 permissions: actions: read contents: read @@ -702,7 +702,7 @@ jobs: olm-bundle: name: Create OLM bundle and catalog - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 permissions: contents: read packages: write @@ -760,7 +760,7 @@ jobs: preflight: name: Run openshift-preflight test - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - buildx - olm-bundle @@ -816,7 +816,7 @@ jobs: olm-scorecard: name: Run OLM scorecard test - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - buildx - olm-bundle @@ -867,7 +867,7 @@ jobs: matrix: test: [ kiwi, lemon, orange ] name: Run OLM ${{ matrix.test }} test - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - 
buildx - olm-bundle diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index 3230320766..4178b76391 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -33,7 +33,7 @@ env: jobs: check-public-clouds-k8s-versions: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index 88e57afde5..395bf9aaa4 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -14,7 +14,7 @@ defaults: jobs: check-latest-postgres-version: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code uses: actions/checkout@v4 diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index d5ad3a7278..29fbc22605 100644 --- a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -9,7 +9,7 @@ on: jobs: pull-request: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index ae6ee87f8a..11cf5be297 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -19,7 +19,7 @@ jobs: check-version: name: Evaluate release tag - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: is_latest: ${{ env.IS_LATEST }} is_stable: ${{ env.IS_STABLE }} @@ -49,7 +49,7 @@ jobs: release: name: Create Github release - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - check-version steps: @@ -87,7 +87,7 @@ jobs: release-binaries: name: Build containers - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - check-version outputs: @@ -260,7 +260,7 @@ jobs: olm-bundle: name: Create OLM bundle and catalog - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - check-version - release-binaries @@ -326,7 +326,7 @@ jobs: operatorhub_pr: name: Create remote PR for OperatorHub - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: - release-binaries - olm-bundle @@ -410,7 +410,7 @@ jobs: github.repository_owner == 'cloudnative-pg' env: VERSION: ${{ needs.release-binaries.outputs.version }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout artifact diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml index b9fcc094af..3708028ec6 100644 --- a/.github/workflows/release-tag.yml +++ b/.github/workflows/release-tag.yml @@ -13,7 +13,7 @@ on: jobs: tag: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index de18c2dcae..edebf5b995 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -16,7 +16,7 @@ env: jobs: require-labels: name: Require labels - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Require labels uses: docker://agilepathway/pull-request-label-checker:v1.6.55 diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 81e3bba54f..e41fca8302 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -11,7 +11,7 @@ on: jobs: security: name: Security scan - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout code uses: actions/checkout@v4 diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 8dd5b1403c..a8bbd7866c 100644 --- 
a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -8,7 +8,7 @@ jobs: # Check code for non-inclusive language woke: name: Run woke - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout uses: actions/checkout@v4 @@ -22,7 +22,7 @@ jobs: # Enforce en-us spell check spellcheck: name: Run spellcheck - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout uses: actions/checkout@v4 From 0b56b775c5af82e8c98a3e08cbacc8b7def8e2db Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 28 Sep 2024 18:01:47 +0200 Subject: [PATCH 025/836] feat: update default PostgreSQL version to 17.0 (#5504) Update default PostgreSQL version from 16.4 to 17.0 Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 4 ++-- docs/src/bootstrap.md | 10 +++++----- docs/src/declarative_hibernation.md | 2 +- docs/src/image_catalog.md | 4 ++-- docs/src/kubectl-plugin.md | 8 ++++---- docs/src/monitoring.md | 2 +- docs/src/postgis.md | 2 +- docs/src/samples/cluster-example-full.yaml | 2 +- docs/src/scheduling.md | 2 +- docs/src/ssl_connections.md | 2 +- docs/src/troubleshooting.md | 4 ++-- pkg/versions/versions.go | 2 +- 12 files changed, 22 insertions(+), 22 deletions(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index d2df502134..cf76550465 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "17": [ - "17rc1", - "17rc1-5" + "17.0", + "17.0-3" ], "16": [ "16.4", diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 53f78de012..3cf3cb41b1 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -504,7 +504,7 @@ file on the source PostgreSQL instance: host replication streaming_replica all md5 ``` -The following manifest creates a new PostgreSQL 16.4 cluster, +The following manifest creates a new PostgreSQL 17.0 cluster, called `target-db`, using the `pg_basebackup` bootstrap method to clone an external PostgreSQL cluster defined as `source-db` (in the `externalClusters` array). As you can see, the `source-db` @@ -519,7 +519,7 @@ metadata: name: target-db spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:16.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.0 bootstrap: pg_basebackup: @@ -539,7 +539,7 @@ spec: ``` All the requirements must be met for the clone operation to work, including -the same PostgreSQL version (in our case 16.4). +the same PostgreSQL version (in our case 17.0). #### TLS certificate authentication @@ -554,7 +554,7 @@ in the same Kubernetes cluster. This example can be easily adapted to cover an instance that resides outside the Kubernetes cluster. -The manifest defines a new PostgreSQL 16.4 cluster called `cluster-clone-tls`, +The manifest defines a new PostgreSQL 17.0 cluster called `cluster-clone-tls`, which is bootstrapped using the `pg_basebackup` method from the `cluster-example` external cluster. 
The host is identified by the read/write service in the same cluster, while the `streaming_replica` user is authenticated @@ -569,7 +569,7 @@ metadata: name: cluster-clone-tls spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:16.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.0 bootstrap: pg_basebackup: diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md index 84ec83844c..1b7a64f7af 100644 --- a/docs/src/declarative_hibernation.md +++ b/docs/src/declarative_hibernation.md @@ -58,7 +58,7 @@ $ kubectl cnpg status Cluster Summary Name: cluster-example Namespace: default -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0 Primary instance: cluster-example-2 Status: Cluster in healthy state Instances: 3 diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index f455fd79ba..a84890a480 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -32,7 +32,7 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 - image: ghcr.io/cloudnative-pg/postgresql:16.4 + image: ghcr.io/cloudnative-pg/postgresql:17.0 ``` **Example of a Cluster-Wide Catalog using `ClusterImageCatalog` Resource:** @@ -47,7 +47,7 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 - image: ghcr.io/cloudnative-pg/postgresql:16.4 + image: ghcr.io/cloudnative-pg/postgresql:17.0 ``` A `Cluster` resource has the flexibility to reference either an `ImageCatalog` diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 4902585ae6..01d9af2a29 100755 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -254,7 +254,7 @@ Cluster in healthy state Name: sandbox Namespace: default System ID: 7039966298120953877 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0 Primary instance: sandbox-2 Instances: 3 Ready instances: 3 @@ -299,7 +299,7 @@ Cluster in healthy state Name: sandbox Namespace: default System ID: 7039966298120953877 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0 Primary instance: sandbox-2 Instances: 3 Ready instances: 3 @@ -981,7 +981,7 @@ it from the actual pod. This means that you will be using the `postgres` user. ```shell kubectl cnpg psql cluster-example -psql (16.4 (Debian 16.4-1.pgdg110+1)) +psql (17.0 (Debian 17.0-1.pgdg110+1)) Type "help" for help. postgres=# @@ -992,7 +992,7 @@ select to work against a replica by using the `--replica` option: ```shell kubectl cnpg psql --replica cluster-example -psql (16.4 (Debian 16.4-1.pgdg110+1)) +psql (17.0 (Debian 17.0-1.pgdg110+1)) Type "help" for help. 
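Besides the documentation updates above, this patch bumps the operator's default image (`DefaultImageName` in `pkg/versions/versions.go`, further below) from 16.4 to 17.0, so a `Cluster` that sets neither `spec.imageName` nor an `imageCatalogRef` is created with PostgreSQL 17.0 from now on. As a minimal sketch — not part of the patch itself — a manifest can opt out of the new default by pinning the image explicitly; the cluster name and storage size here are illustrative:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  # Illustrative name, not taken from the patch
  name: cluster-pinned
spec:
  instances: 3
  # Without this line the operator now applies DefaultImageName,
  # i.e. ghcr.io/cloudnative-pg/postgresql:17.0
  imageName: ghcr.io/cloudnative-pg/postgresql:16.4
  storage:
    size: 1Gi  # illustrative size
```

Note that moving such a pinned cluster from 16.4 to 17.0 later is a cross-major image change, which the image validation webhook rejects ("can't upgrade between majors").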
diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md index 20581eeb54..7814949e06 100644 --- a/docs/src/monitoring.md +++ b/docs/src/monitoring.md @@ -217,7 +217,7 @@ cnpg_collector_up{cluster="cluster-example"} 1 # HELP cnpg_collector_postgres_version Postgres version # TYPE cnpg_collector_postgres_version gauge -cnpg_collector_postgres_version{cluster="cluster-example",full="16.4"} 16.4 +cnpg_collector_postgres_version{cluster="cluster-example",full="17.0"} 17.0 # HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp # TYPE cnpg_collector_last_failed_backup_timestamp gauge diff --git a/docs/src/postgis.md b/docs/src/postgis.md index ada7b8dc4f..a31fb607c7 100644 --- a/docs/src/postgis.md +++ b/docs/src/postgis.md @@ -100,7 +100,7 @@ values from the ones in this document): ```console $ kubectl exec -ti postgis-example-1 -- psql app Defaulted container "postgres" out of: postgres, bootstrap-controller (init) -psql (16.4 (Debian 16.4-1.pgdg110+1)) +psql (17.0 (Debian 17.0-1.pgdg110+1)) Type "help" for help. app=# SELECT * FROM pg_available_extensions WHERE name ~ '^postgis' ORDER BY 1; diff --git a/docs/src/samples/cluster-example-full.yaml b/docs/src/samples/cluster-example-full.yaml index 335c7ebfb7..f0fa1fe10e 100644 --- a/docs/src/samples/cluster-example-full.yaml +++ b/docs/src/samples/cluster-example-full.yaml @@ -35,7 +35,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: ghcr.io/cloudnative-pg/postgresql:16.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.0 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/docs/src/scheduling.md b/docs/src/scheduling.md index 046db44ec0..57eb71b69b 100644 --- a/docs/src/scheduling.md +++ b/docs/src/scheduling.md @@ -40,7 +40,7 @@ metadata: name: cluster-example spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:16.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.0 affinity: enablePodAntiAffinity: true # Default value diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index f26fd19329..a7826eb3c7 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -173,7 +173,7 @@ Output: version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 16.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 17.0 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index df9dc94940..2caded71b9 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -218,7 +218,7 @@ Cluster in healthy state Name: cluster-example Namespace: default System ID: 7044925089871458324 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4-3 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0-3 Primary instance: cluster-example-1 Instances: 3 Ready instances: 3 @@ -294,7 +294,7 @@ kubectl describe cluster -n | grep "Image Name" Output: ```shell - Image Name: ghcr.io/cloudnative-pg/postgresql:16.4-3 + Image Name: ghcr.io/cloudnative-pg/postgresql:17.0-3 ``` !!! 
Note diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 50cea755ce..91112799a3 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,7 +23,7 @@ const ( Version = "1.24.0" // DefaultImageName is the default image used by the operator to create pods - DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:16.4" + DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.0" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.24.0" From 25281f568abe7eed3f3f161e479bdfdf391ffad2 Mon Sep 17 00:00:00 2001 From: Peggie Date: Sat, 28 Sep 2024 18:02:11 +0200 Subject: [PATCH 026/836] feat: Public Cloud K8S versions update (#5670) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index 24b881347c..4c3493a7b5 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,6 +1,5 @@ [ "1.30.4", "1.29.8", - "1.28.9", - "1.27.9" + "1.28.9" ] From a1b487fbc9b2d89b36d1e6f7a9e049799d7c0220 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 1 Oct 2024 10:25:43 +0200 Subject: [PATCH 027/836] fix: remove `pg_database_size` from status probe (#5689) The instance manager status probe was using `pg_database_size()` which scans the whole `PGDATA` to compute the current size of every database. Instead of doing that in the status probe, causing a scan in each reconciliation loop, we now do that only when requested by the `kubectl cnpg status` plugin (the only place where this information was actually used). Closes: #5686 Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- internal/cmd/plugin/status/status.go | 57 +++++++++++++++++++++++----- pkg/management/postgres/probes.go | 6 +-- pkg/postgres/status.go | 1 - 3 files changed, 50 insertions(+), 14 deletions(-) diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 15285733fc..3836696d2d 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -66,6 +66,9 @@ type PostgresqlStatus struct { // ErrorList store the possible errors while getting the PostgreSQL status ErrorList []error + + // The size of the cluster + TotalClusterSize string } func (fullStatus *PostgresqlStatus) getReplicationSlotList() postgres.PgReplicationSlotList { @@ -99,6 +102,10 @@ func getPrintableIntegerPointer(i *int) string { func Status(ctx context.Context, clusterName string, verbose bool, format plugin.OutputFormat) error { var cluster apiv1.Cluster var errs []error + + // Create a Kubernetes client suitable for calling the "Exec" subresource + clientInterface := kubernetes.NewForConfigOrDie(plugin.Config) + // Get the Cluster object err := plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, &cluster) if err != nil { @@ -112,12 +119,12 @@ func Status(ctx context.Context, clusterName string, verbose bool, format plugin } errs = append(errs, status.ErrorList...) 
- status.printBasicInfo() + status.printBasicInfo(ctx, clientInterface) status.printHibernationInfo() status.printDemotionTokenInfo() status.printPromotionTokenInfo() if verbose { - errs = append(errs, status.printPostgresConfiguration(ctx)...) + errs = append(errs, status.printPostgresConfiguration(ctx, clientInterface)...) } status.printCertificatesStatus() status.printBackupStatus() @@ -188,9 +195,33 @@ func listFencedInstances(fencedInstances *stringset.Data) string { return strings.Join(fencedInstances.ToList(), ", ") } -func (fullStatus *PostgresqlStatus) printBasicInfo() { +func (fullStatus *PostgresqlStatus) getClusterSize(ctx context.Context, client kubernetes.Interface) (string, error) { + timeout := time.Second * 10 + + // Compute the disk space through `du` + output, _, err := utils.ExecCommand( + ctx, + client, + plugin.Config, + fullStatus.PrimaryPod, + specs.PostgresContainerName, + &timeout, + "du", + "-sLh", + specs.PgDataPath) + if err != nil { + return "", err + } + + size, _, _ := strings.Cut(output, "\t") + return size, nil +} + +func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, client kubernetes.Interface) { summary := tabby.New() + clusterSize, clusterSizeErr := fullStatus.getClusterSize(ctx, client) + cluster := fullStatus.Cluster if cluster.IsReplica() { @@ -214,6 +245,7 @@ func (fullStatus *PostgresqlStatus) printBasicInfo() { summary.AddLine("Name:", cluster.Name) summary.AddLine("Namespace:", cluster.Namespace) + if primaryInstanceStatus != nil { summary.AddLine("System ID:", primaryInstanceStatus.SystemID) } @@ -257,6 +289,13 @@ func (fullStatus *PostgresqlStatus) printBasicInfo() { fmt.Println(aurora.Red("Switchover in progress")) } } + + if clusterSizeErr != nil { + summary.AddLine("Size:", aurora.Red(clusterSizeErr.Error())) + } else { + summary.AddLine("Size:", clusterSize) + } + if !cluster.IsReplica() && primaryInstanceStatus != nil { lsnInfo := fmt.Sprintf( "%s (Timeline: %d - WAL File: %s)", @@ -395,13 +434,15 @@ func (fullStatus *PostgresqlStatus) getStatus(isPrimaryFenced bool, cluster *api } } -func (fullStatus *PostgresqlStatus) printPostgresConfiguration(ctx context.Context) []error { +func (fullStatus *PostgresqlStatus) printPostgresConfiguration( + ctx context.Context, + client kubernetes.Interface, +) []error { timeout := time.Second * 10 - clientInterface := kubernetes.NewForConfigOrDie(plugin.Config) var errs []error // Read PostgreSQL configuration from custom.conf - customConf, _, err := utils.ExecCommand(ctx, clientInterface, plugin.Config, fullStatus.PrimaryPod, + customConf, _, err := utils.ExecCommand(ctx, client, plugin.Config, fullStatus.PrimaryPod, specs.PostgresContainerName, &timeout, "cat", @@ -411,7 +452,7 @@ func (fullStatus *PostgresqlStatus) printPostgresConfiguration(ctx context.Conte } // Read PostgreSQL HBA Rules from pg_hba.conf - pgHBAConf, _, err := utils.ExecCommand(ctx, clientInterface, plugin.Config, fullStatus.PrimaryPod, + pgHBAConf, _, err := utils.ExecCommand(ctx, client, plugin.Config, fullStatus.PrimaryPod, specs.PostgresContainerName, &timeout, "cat", path.Join(specs.PgDataPath, constants.PostgresqlHBARulesFile)) if err != nil { @@ -662,7 +703,6 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { fmt.Println(aurora.Green("Instances status")) status.AddHeader( "Name", - "Database Size", "Current LSN", // For standby use "Replay LSN" "Replication role", "Status", @@ -693,7 +733,6 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { replicaRole := 
getReplicaRole(instance, fullStatus) status.AddLine( instance.Pod.Name, - instance.TotalInstanceSize, getCurrentLSN(instance), replicaRole, statusMsg, diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go index b7e5852364..77a819fcd0 100644 --- a/pkg/management/postgres/probes.go +++ b/pkg/management/postgres/probes.go @@ -92,10 +92,8 @@ func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err er -- True if this is a primary instance NOT pg_is_in_recovery() as primary, -- True if at least one column requires a restart - EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart), - -- The size of database in human readable format - (SELECT pg_size_pretty(SUM(pg_database_size(oid))) FROM pg_database)`) - err = row.Scan(&result.SystemID, &result.IsPrimary, &result.PendingRestart, &result.TotalInstanceSize) + EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)`) + err = row.Scan(&result.SystemID, &result.IsPrimary, &result.PendingRestart) if err != nil { return result, err } diff --git a/pkg/postgres/status.go b/pkg/postgres/status.go index 14d2601a3a..fc530c7593 100644 --- a/pkg/postgres/status.go +++ b/pkg/postgres/status.go @@ -43,7 +43,6 @@ type PostgresqlStatus struct { IsArchivingWAL bool `json:"isArchivingWAL,omitempty"` Node string `json:"node"` Pod *corev1.Pod `json:"pod"` - TotalInstanceSize string `json:"totalInstanceSize"` // populated when MightBeUnavailable reported a healthy status even if it found an error MightBeUnavailableMaskedError string `json:"mightBeUnavailableMaskedError,omitempty"` From 829808376542bb9eb0a42c42db7e20cdef4d238e Mon Sep 17 00:00:00 2001 From: smiyc <36233521+smiyc@users.noreply.github.com> Date: Tue, 1 Oct 2024 10:49:14 +0200 Subject: [PATCH 028/836] fix(plugin): handle `potential` sync in `status` command (#5533) closes #5525 Signed-off-by: Daniel Chambre --- internal/cmd/plugin/status/status.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 3836696d2d..41489ea976 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -697,6 +697,7 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { // else: // if it is paused, print "Standby (paused)" // else if SyncState = sync/quorum print "Standby (sync)" + // else if SyncState = potential print "Standby (potential sync)" // else print "Standby (async)" status := tabby.New() @@ -852,6 +853,8 @@ func getReplicaRole(instance postgres.PostgresqlStatus, fullStatus *PostgresqlSt switch state.SyncState { case "quorum", "sync": return "Standby (sync)" + case "potential": + return "Standby (potential sync)" case "async": return "Standby (async)" default: From 0b87a9562751bb41273e66e6486d05fbe9ca88e5 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 1 Oct 2024 18:41:21 +0200 Subject: [PATCH 029/836] refactor: use PostgreSQL functions from machinery (#5684) Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Gabriele Bartolini Signed-off-by: Leonardo Cecchi Signed-off-by: Francesco Canovai Signed-off-by: Marco Nenciarini Co-authored-by: Jonathan Gonzalez V. 
Co-authored-by: Gabriele Bartolini Co-authored-by: Francesco Canovai Co-authored-by: Marco Nenciarini --- api/v1/cluster_funcs.go | 25 ++--- api/v1/cluster_funcs_test.go | 13 +-- api/v1/cluster_webhook.go | 26 +++--- api/v1/cluster_webhook_test.go | 17 +++- docs/src/supported_releases.md | 2 +- go.mod | 4 +- go.sum | 8 +- .../cmd/manager/instance/pgbasebackup/cmd.go | 2 +- .../management/controller/instance_startup.go | 6 +- pkg/management/postgres/configuration.go | 4 +- pkg/management/postgres/instance.go | 5 +- pkg/management/postgres/join.go | 2 +- pkg/postgres/configuration.go | 39 ++++---- pkg/postgres/configuration_test.go | 46 +++++----- pkg/postgres/version.go | 92 ------------------- pkg/postgres/version_test.go | 82 ----------------- pkg/utils/imagename.go | 88 ------------------ pkg/utils/imagename_test.go | 49 ---------- tests/e2e/cluster_microservice_test.go | 14 +-- tests/e2e/rolling_update_test.go | 35 +++---- tests/utils/environment.go | 11 ++- tests/utils/version.go | 17 ++-- 22 files changed, 142 insertions(+), 445 deletions(-) delete mode 100644 pkg/postgres/version.go delete mode 100644 pkg/postgres/version_test.go delete mode 100644 pkg/utils/imagename.go delete mode 100644 pkg/utils/imagename_test.go diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 211ec3bc7f..3fc919188c 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -25,14 +25,15 @@ import ( "strings" "time" + "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" "github.com/cloudnative-pg/cloudnative-pg/pkg/system" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -399,26 +400,16 @@ func (cluster *Cluster) GetImageName() string { // image name or from the ImageCatalogRef. 
// Example: // -// ghcr.io/cloudnative-pg/postgresql:14.0 corresponds to version 140000 -// ghcr.io/cloudnative-pg/postgresql:13.2 corresponds to version 130002 -// ghcr.io/cloudnative-pg/postgresql:9.6.3 corresponds to version 90603 -func (cluster *Cluster) GetPostgresqlVersion() (int, error) { +// ghcr.io/cloudnative-pg/postgresql:14.0 corresponds to version (14,0) +// ghcr.io/cloudnative-pg/postgresql:13.2 corresponds to version (13,2) +func (cluster *Cluster) GetPostgresqlVersion() (version.Data, error) { if cluster.Spec.ImageCatalogRef != nil { - return postgres.GetPostgresVersionFromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major)) + return version.FromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major)) } image := cluster.GetImageName() - tag := utils.GetImageTag(image) - return postgres.GetPostgresVersionFromTag(tag) -} - -// GetPostgresqlMajorVersion gets the PostgreSQL image major version used in the Cluster -func (cluster *Cluster) GetPostgresqlMajorVersion() (int, error) { - version, err := cluster.GetPostgresqlVersion() - if err != nil { - return 0, err - } - return postgres.GetPostgresMajorVersion(version), nil + tag := reference.New(image).Tag + return version.FromTag(tag) } // GetImagePullSecret get the name of the pull secret to use diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index 03a92ae90a..7d459faca0 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -21,6 +21,7 @@ import ( "time" barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -770,19 +771,15 @@ var _ = Describe("A config map resource version", func() { var _ = Describe("PostgreSQL version detection", func() { tests := []struct { imageName string - postgresVersion int + postgresVersion version.Data }{ { "ghcr.io/cloudnative-pg/postgresql:14.0", - 140000, + version.New(14, 0), }, { "ghcr.io/cloudnative-pg/postgresql:13.2", - 130002, - }, - { - "ghcr.io/cloudnative-pg/postgresql:9.6.3", - 90603, + version.New(13, 2), }, } @@ -802,7 +799,7 @@ var _ = Describe("PostgreSQL version detection", func() { }, Major: 16, } - Expect(cluster.GetPostgresqlVersion()).To(Equal(160000)) + Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(16, 0))) }) }) diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go index 8b82c6b9e5..16b8c73c52 100644 --- a/api/v1/cluster_webhook.go +++ b/api/v1/cluster_webhook.go @@ -24,7 +24,9 @@ import ( "strings" barmanWebhooks "github.com/cloudnative-pg/barman-cloud/pkg/api/webhooks" + "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" "github.com/cloudnative-pg/machinery/pkg/types" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" v1 "k8s.io/api/core/v1" @@ -126,7 +128,7 @@ func (r *Cluster) setDefaults(preserveUserSettings bool) { // validateImageName function info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - MajorVersion: psqlVersion, + Version: psqlVersion, UserSettings: r.Spec.PostgresConfiguration.Parameters, IsReplicaCluster: r.IsReplica(), PreserveFixedSettingsFromUser: preserveUserSettings, @@ -965,7 +967,7 @@ func (r *Cluster) validateImageName() field.ErrorList { } // We have to check if the image has a valid tag - tag := 
utils.GetImageTag(r.Spec.ImageName) + tag := reference.New(r.Spec.ImageName).Tag switch tag { case "latest": result = append( @@ -982,7 +984,7 @@ func (r *Cluster) validateImageName() field.ErrorList { r.Spec.ImageName, "Can't use just the image sha as we can't detect upgrades")) default: - _, err := postgres.GetPostgresVersionFromTag(tag) + _, err := version.FromTag(tag) if err != nil { result = append( result, @@ -1094,7 +1096,7 @@ func (r *Cluster) validateConfiguration() field.ErrorList { // validateImageName function return result } - if pgVersion < 110000 { + if pgVersion.Major() < 11 { result = append(result, field.Invalid( field.NewPath("spec", "imageName"), @@ -1103,7 +1105,7 @@ func (r *Cluster) validateConfiguration() field.ErrorList { } info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - MajorVersion: pgVersion, + Version: pgVersion, UserSettings: r.Spec.PostgresConfiguration.Parameters, IsReplicaCluster: r.IsReplica(), IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta), @@ -1369,7 +1371,7 @@ func validateSyncReplicaElectionConstraint(constraints SyncReplicaElectionConstr // to a new one. func (r *Cluster) validateImageChange(old *Cluster) field.ErrorList { var result field.ErrorList - var newMajor, oldMajor int + var newVersion, oldVersion version.Data var err error var newImagePath *field.Path if r.Spec.ImageCatalogRef != nil { @@ -1379,7 +1381,7 @@ func (r *Cluster) validateImageChange(old *Cluster) field.ErrorList { } r.Status.Image = "" - newMajor, err = r.GetPostgresqlVersion() + newVersion, err = r.GetPostgresqlVersion() if err != nil { // The validation error will be already raised by the // validateImageName function @@ -1387,23 +1389,23 @@ func (r *Cluster) validateImageChange(old *Cluster) field.ErrorList { } old.Status.Image = "" - oldMajor, err = old.GetPostgresqlVersion() + oldVersion, err = old.GetPostgresqlVersion() if err != nil { // The validation error will be already raised by the // validateImageName function return result } - status := postgres.IsUpgradePossible(oldMajor, newMajor) + status := version.IsUpgradePossible(oldVersion, newVersion) if !status { result = append( result, field.Invalid( newImagePath, - newMajor, + newVersion, fmt.Sprintf("can't upgrade between majors %v and %v", - oldMajor, newMajor))) + oldVersion, newVersion))) } return result @@ -2194,7 +2196,7 @@ func (r *Cluster) validateReplicationSlots() field.ErrorList { return nil } - if psqlVersion < 110000 { + if psqlVersion.Major() < 11 { if replicationSlots.HighAvailability.GetEnabled() { return field.ErrorList{ field.Invalid( diff --git a/api/v1/cluster_webhook_test.go b/api/v1/cluster_webhook_test.go index cb9b456696..85a28e9044 100644 --- a/api/v1/cluster_webhook_test.go +++ b/api/v1/cluster_webhook_test.go @@ -23,6 +23,8 @@ import ( "strings" "time" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + pgversion "github.com/cloudnative-pg/machinery/pkg/postgres/version" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -1440,6 +1442,9 @@ var _ = Describe("validate image name change", func() { Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) }) It("doesn't complain going from default imageName to same major imageCatalogRef", func() { + defaultImageRef := reference.New(versions.DefaultImageName) + version, err := pgversion.FromTag(defaultImageRef.Tag) + Expect(err).ToNot(HaveOccurred()) 
clusterOld := Cluster{ Spec: ClusterSpec{}, } @@ -1450,7 +1455,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 16, + Major: int(version.Major()), }, }, } @@ -1497,7 +1502,7 @@ var _ = Describe("validate image name change", func() { } Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) }) - It("complains going from default imageName to different major imageCatalogRef", func() { + It("complains going from imageCatalogRef to different major default imageName", func() { clusterOld := Cluster{ Spec: ClusterSpec{ ImageCatalogRef: &ImageCatalogRef{ @@ -1514,7 +1519,11 @@ var _ = Describe("validate image name change", func() { } Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) }) - It("doesn't complain going from default imageName to same major imageCatalogRef", func() { + It("doesn't complain going from imageCatalogRef to same major default imageName", func() { + imageNameRef := reference.New(versions.DefaultImageName) + version, err := pgversion.FromTag(imageNameRef.Tag) + Expect(err).ToNot(HaveOccurred()) + clusterOld := Cluster{ Spec: ClusterSpec{ ImageCatalogRef: &ImageCatalogRef{ @@ -1522,7 +1531,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 16, + Major: int(version.Major()), }, }, } diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 5aee05fb84..c3d367e268 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -81,7 +81,7 @@ Git tags for versions are prefixed with `v`. | Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | |-----------------|----------------------|-------------------|---------------------|-------------------------------|---------------------------|-----------------------------| -| 1.24.x | Yes | August 22, 2024 | ~ February, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 12 - 16 | +| 1.24.x | Yes | August 22, 2024 | ~ February, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 12 - 17 | | 1.23.x | Yes | April 24, 2024 | ~ November, 2024 | 1.27, 1.28, 1.29 | 1.30, 1.31 | 12 - 16 | | main | No, development only | | | | | 12 - 16 | diff --git a/go.mod b/go.mod index 8e5341a1db..8a88225e10 100644 --- a/go.mod +++ b/go.mod @@ -6,13 +6,13 @@ toolchain go1.23.1 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/Masterminds/semver/v3 v3.2.1 + github.com/Masterminds/semver/v3 v3.3.0 github.com/avast/retry-go/v4 v4.6.0 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb - github.com/cloudnative-pg/machinery v0.0.0-20240919131343-9dd62b9257c7 + github.com/cloudnative-pg/machinery v0.0.0-20241001075747-34c8797af80f github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index c0d62b12f0..c6ecb0ceac 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod 
h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb h1:kZQk+KUCTHQMEgcH8j2/ypcG2HY58zKocmVUvX6c1IA= github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb/go.mod h1:UILpBDaWvXcYC5kY5DMaVEEQY5483CBApMuHIn0GJdg= -github.com/cloudnative-pg/machinery v0.0.0-20240919131343-9dd62b9257c7 h1:glRSFwMeX1tb1wlN6ZxihPH3nMXL9ZlwU1/xvNFB0iE= -github.com/cloudnative-pg/machinery v0.0.0-20240919131343-9dd62b9257c7/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= +github.com/cloudnative-pg/machinery v0.0.0-20241001075747-34c8797af80f h1:RgPmQJkuSu3eTdfd4T2K95RYQi57LHB2+Jfsu/faKOM= +github.com/cloudnative-pg/machinery v0.0.0-20241001075747-34c8797af80f/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go index 433770cec6..d8ccead7cf 100644 --- a/internal/cmd/manager/instance/pgbasebackup/cmd.go +++ b/internal/cmd/manager/instance/pgbasebackup/cmd.go @@ -134,7 +134,7 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", "imageName", cluster.GetImageName(), "err", err) - } else if pgVersion >= 120000 { + } else if pgVersion.Major() >= 12 { // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. // A short timeout could not be enough in case the instance is slow to send data, // like when the I/O is overloaded. diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index 1bc852613d..ea796fcb88 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -236,7 +236,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context return err } - pgMajorVersion, err := cluster.GetPostgresqlMajorVersion() + pgVersion, err := cluster.GetPostgresqlVersion() if err != nil { return err } @@ -262,7 +262,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context // The only way to check if we really need to start it up before // invoking pg_rewind is to try using pg_rewind and, on failures, // retrying after having started up the instance. 
- err = r.instance.Rewind(ctx, pgMajorVersion) + err = r.instance.Rewind(ctx, pgVersion) if err != nil { contextLogger.Info( "pg_rewind failed, starting the server to complete the crash recovery", @@ -277,7 +277,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context } // Then let's go back to the point of the new primary - err = r.instance.Rewind(ctx, pgMajorVersion) + err = r.instance.Rewind(ctx, pgVersion) if err != nil { return err } diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 209777fc5d..08bb4e1aad 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -100,7 +100,7 @@ func (instance *Instance) GeneratePostgresqlHBA(cluster *apiv1.Cluster, ldapBind // See: // https://www.postgresql.org/docs/14/release-14.html defaultAuthenticationMethod := "scram-sha-256" - if version < 140000 { + if version.Major() < 14 { defaultAuthenticationMethod = "md5" } @@ -429,7 +429,7 @@ func createPostgresqlConfiguration(cluster *apiv1.Cluster, preserveUserSettings info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - MajorVersion: fromVersion, + Version: fromVersion, UserSettings: cluster.Spec.PostgresConfiguration.Parameters, IncludingSharedPreloadLibraries: true, AdditionalSharedPreloadLibraries: cluster.Spec.PostgresConfiguration.AdditionalLibraries, diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 035d2ad736..0496445681 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -36,6 +36,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" "go.uber.org/atomic" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" @@ -982,7 +983,7 @@ func (instance *Instance) removePgControlFileBackup() error { // Rewind uses pg_rewind to align this data directory with the contents of the primary node. // If postgres major version is >= 13, add "--restore-target-wal" option -func (instance *Instance) Rewind(ctx context.Context, postgresMajorVersion int) error { +func (instance *Instance) Rewind(ctx context.Context, postgresVersion version.Data) error { contextLogger := log.FromContext(ctx) // Signal the liveness probe that we are running pg_rewind before starting postgres @@ -1002,7 +1003,7 @@ func (instance *Instance) Rewind(ctx context.Context, postgresMajorVersion int) // As PostgreSQL 13 introduces support of restore from the WAL archive in pg_rewind, // let’s automatically use it, if possible - if postgresMajorVersion >= 130000 { + if postgresVersion.Major() >= 13 { options = append(options, "--restore-target-wal") } diff --git a/pkg/management/postgres/join.go b/pkg/management/postgres/join.go index 419d4ccfd1..fb3cfc7914 100644 --- a/pkg/management/postgres/join.go +++ b/pkg/management/postgres/join.go @@ -79,7 +79,7 @@ func (info InitInfo) Join(cluster *apiv1.Cluster) error { "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", "imageName", cluster.GetImageName(), "err", err) - } else if pgVersion >= 120000 { + } else if pgVersion.Major() >= 12 { // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. 
// A short timeout could not be enough in case the instance is slow to send data, // like when the I/O is overloaded. diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 4095888839..9bd377c26e 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -25,6 +25,8 @@ import ( "strings" "text/template" "time" + + "github.com/cloudnative-pg/machinery/pkg/postgres/version" ) // WalLevelValue a value that is assigned to the 'wal_level' configuration field @@ -241,15 +243,15 @@ var hbaTemplate = template.Must(template.New("pg_hba.conf").Parse(hbaTemplateStr var identTemplate = template.Must(template.New("pg_ident.conf").Parse(identTemplateString)) // MajorVersionRangeUnlimited is used to represent an unbound limit in a MajorVersionRange -const MajorVersionRangeUnlimited = 0 +var MajorVersionRangeUnlimited = version.Data{} -// MajorVersionRange is used to represent a range of PostgreSQL versions -type MajorVersionRange = struct { +// VersionRange is used to represent a range of PostgreSQL versions +type VersionRange struct { // The minimum limit of PostgreSQL major version, extreme included - Min int + Min version.Data // The maximum limit of PostgreSQL version, extreme excluded, or MajorVersionRangeUnlimited - Max int + Max version.Data } // SettingsCollection is a collection of PostgreSQL settings @@ -265,7 +267,7 @@ type ConfigurationSettings struct { // The following settings are like GlobalPostgresSettings // but are relative only to certain PostgreSQL versions - DefaultSettings map[MajorVersionRange]SettingsCollection + DefaultSettings map[VersionRange]SettingsCollection // The following settings are applied to the final PostgreSQL configuration, // even if the user specified something different @@ -284,8 +286,8 @@ type ConfigurationInfo struct { // The database settings to be used Settings ConfigurationSettings - // The major version - MajorVersion int + // The PostgreSQL version + Version version.Data // The list of user-level settings UserSettings map[string]string @@ -487,19 +489,19 @@ var ( // the parameter cannot be changed without a restart. 
SharedPreloadLibraries: "", }, - DefaultSettings: map[MajorVersionRange]SettingsCollection{ - {MajorVersionRangeUnlimited, 120000}: { + DefaultSettings: map[VersionRange]SettingsCollection{ + {MajorVersionRangeUnlimited, version.New(12, 0)}: { "wal_keep_segments": "32", }, - {120000, 130000}: { + {version.New(12, 0), version.New(13, 0)}: { "wal_keep_segments": "32", "shared_memory_type": "mmap", }, - {130000, MajorVersionRangeUnlimited}: { + {version.New(13, 0), MajorVersionRangeUnlimited}: { "wal_keep_size": "512MB", "shared_memory_type": "mmap", }, - {120000, MajorVersionRangeUnlimited}: { + {version.New(12, 0), MajorVersionRangeUnlimited}: { "ssl_max_protocol_version": "TLSv1.3", "ssl_min_protocol_version": "TLSv1.3", }, @@ -645,9 +647,7 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { configuration.OverwriteConfig(key, value) } - // IMPORTANT: yes, this field is called MajorVersion but actually - // it's just the PostgreSQL version number - if info.MajorVersion >= 170000 { + if info.Version.Major() >= 17 { configuration.OverwriteConfig("allow_alter_system", info.getAlterSystemEnabledValue()) } } @@ -716,8 +716,11 @@ func setDefaultConfigurations(info ConfigurationInfo, configuration *PgConfigura // apply settings relative to a certain PostgreSQL version for constraints, settings := range info.Settings.DefaultSettings { - if constraints.Min == MajorVersionRangeUnlimited || (constraints.Min <= info.MajorVersion) { - if constraints.Max == MajorVersionRangeUnlimited || (info.MajorVersion < constraints.Max) { + if constraints.Min == MajorVersionRangeUnlimited || + constraints.Min == info.Version || + constraints.Min.Less(info.Version) { + if constraints.Max == MajorVersionRangeUnlimited || + info.Version.Less(constraints.Max) { for key, value := range settings { configuration.OverwriteConfig(key, value) } diff --git a/pkg/postgres/configuration_test.go b/pkg/postgres/configuration_test.go index 986d3ccf2b..adc3891582 100644 --- a/pkg/postgres/configuration_test.go +++ b/pkg/postgres/configuration_test.go @@ -20,6 +20,8 @@ import ( "strings" "time" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -32,7 +34,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("apply the default settings", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + Version: version.New(10, 0), UserSettings: settings, IncludingMandatory: true, } @@ -43,8 +45,8 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("enforce the mandatory values", func() { info := ConfigurationInfo{ - Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + Settings: CnpgConfigurationSettings, + Version: version.New(10, 0), UserSettings: map[string]string{ "hot_standby": "off", }, @@ -57,7 +59,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("generate a config file", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + Version: version.New(10, 0), UserSettings: settings, IncludingMandatory: true, } @@ -82,7 +84,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("will use appropriate settings", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + Version: version.New(10, 0), UserSettings: settings, IncludingMandatory: true, } @@ -95,7 +97,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("will use appropriate settings", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), UserSettings: settings, IncludingMandatory: true, } @@ -110,7 +112,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("will set archive_mode to always", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), UserSettings: settings, IncludingMandatory: true, IsReplicaCluster: true, @@ -124,7 +126,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("will set archive_mode to on", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), UserSettings: settings, IncludingMandatory: true, IsReplicaCluster: false, @@ -137,7 +139,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("adds shared_preload_library correctly", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), IncludingMandatory: true, IncludingSharedPreloadLibraries: true, AdditionalSharedPreloadLibraries: []string{"some_library", "another_library", ""}, @@ -151,8 +153,8 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("checks if PreserveFixedSettingsFromUser works properly", func() { info := ConfigurationInfo{ - Settings: CnpgConfigurationSettings, - MajorVersion: 100000, + Settings: CnpgConfigurationSettings, + Version: version.New(10, 0), UserSettings: map[string]string{ "ssl": "off", "recovery_target_name": "test", @@ -195,7 +197,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("can properly set allow_alter_system to on", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: true, - MajorVersion: 170000, + Version: version.New(17, 0), IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -205,7 +207,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("can properly set allow_alter_system to off", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: false, - MajorVersion: 180000, + Version: 
version.New(18, 0), IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -217,7 +219,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("should not set allow_alter_system", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: false, - MajorVersion: 140000, + Version: version.New(14, 0), IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -228,7 +230,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("should not set allow_alter_system", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: true, - MajorVersion: 140000, + Version: version.New(14, 0), IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -307,7 +309,7 @@ var _ = Describe("pgaudit", func() { It("adds pgaudit to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), UserSettings: map[string]string{"pgaudit.something": "something"}, IncludingSharedPreloadLibraries: true, IncludingMandatory: true, @@ -324,7 +326,7 @@ var _ = Describe("pgaudit", func() { It("adds pg_stat_statements to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), UserSettings: map[string]string{"pg_stat_statements.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, @@ -340,8 +342,8 @@ var _ = Describe("pgaudit", func() { It("adds pg_stat_statements and pgaudit to shared_preload_library", func() { info := ConfigurationInfo{ - Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Settings: CnpgConfigurationSettings, + Version: version.New(13, 0), UserSettings: map[string]string{ "pg_stat_statements.something": "something", "pgaudit.somethingelse": "somethingelse", @@ -361,7 +363,7 @@ var _ = Describe("pg_failover_slots", func() { It("adds pg_failover_slots to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, @@ -378,7 +380,7 @@ var _ = Describe("recovery_min_apply_delay", func() { It("is not added when zero", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, @@ -391,7 +393,7 @@ var _ = Describe("recovery_min_apply_delay", func() { It("is added to the configuration when specified", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - MajorVersion: 130000, + Version: version.New(13, 0), UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, diff --git a/pkg/postgres/version.go b/pkg/postgres/version.go deleted file mode 100644 index 2384b1003a..0000000000 --- a/pkg/postgres/version.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package postgres - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -const firstMajorWithoutMinor = 10 - -var semanticVersionRegex = regexp.MustCompile(`^(\d\.?)+`) - -// GetPostgresVersionFromTag parse a PostgreSQL version string returning -// a major version ID. Example: -// -// GetPostgresVersionFromTag("9.5.3") == 90503 -// GetPostgresVersionFromTag("10.2") == 100002 -// GetPostgresVersionFromTag("15beta1") == 150000 -func GetPostgresVersionFromTag(version string) (int, error) { - if !semanticVersionRegex.MatchString(version) { - return 0, - fmt.Errorf("version not starting with a semantic version regex (%v): %s", semanticVersionRegex, version) - } - - if versionOnly := semanticVersionRegex.FindString(version); versionOnly != "" { - version = versionOnly - } - - splitVersion := strings.Split(version, ".") - - idx := 0 - majorVersion, err := strconv.Atoi(splitVersion[idx]) - if err != nil { - return 0, fmt.Errorf("wrong PostgreSQL major in version %v", version) - } - parsedVersion := majorVersion * 10000 - idx++ - - if majorVersion < firstMajorWithoutMinor { - if len(splitVersion) <= idx { - return 0, fmt.Errorf("missing PostgreSQL minor in version %v", version) - } - minorVersion, err := strconv.Atoi(splitVersion[idx]) - if err != nil || minorVersion >= 100 { - return 0, fmt.Errorf("wrong PostgreSQL minor in version %v", version) - } - parsedVersion += minorVersion * 100 - idx++ - } - - if len(splitVersion) > idx { - patchLevel, err := strconv.Atoi(splitVersion[idx]) - if err != nil || patchLevel >= 100 { - return 0, fmt.Errorf("wrong PostgreSQL patch level in version %v", version) - } - parsedVersion += patchLevel - } - - return parsedVersion, nil -} - -// GetPostgresMajorVersion gets only the Major version from a PostgreSQL version string. -// Example: -// -// GetPostgresMajorVersion("90503") == 90500 -// GetPostgresMajorVersion("100002") == 100000 -func GetPostgresMajorVersion(parsedVersion int) int { - return parsedVersion - parsedVersion%100 -} - -// IsUpgradePossible detect if it's possible to upgrade from fromVersion to -// toVersion -func IsUpgradePossible(fromVersion, toVersion int) bool { - return GetPostgresMajorVersion(fromVersion) == GetPostgresMajorVersion(toVersion) -} diff --git a/pkg/postgres/version_test.go b/pkg/postgres/version_test.go deleted file mode 100644 index 3ef75e84db..0000000000 --- a/pkg/postgres/version_test.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package postgres - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("PostgreSQL version handling", func() { - Describe("parsing", func() { - It("should parse versions < 10", func() { - Expect(GetPostgresVersionFromTag("9.5.3")).To(Equal(90503)) - Expect(GetPostgresVersionFromTag("9.4")).To(Equal(90400)) - }) - - It("should parse versions >= 10", func() { - Expect(GetPostgresVersionFromTag("10.3")).To(Equal(100003)) - Expect(GetPostgresVersionFromTag("12.3")).To(Equal(120003)) - }) - - It("should ignore extra components", func() { - Expect(GetPostgresVersionFromTag("3.4.3.2.5")).To(Equal(30403)) - Expect(GetPostgresVersionFromTag("10.11.12")).To(Equal(100011)) - Expect(GetPostgresVersionFromTag("9.4_beautiful")).To(Equal(90400)) - Expect(GetPostgresVersionFromTag("11-1")).To(Equal(110000)) - Expect(GetPostgresVersionFromTag("15beta1")).To(Equal(150000)) - }) - - It("should gracefully handle errors", func() { - _, err := GetPostgresVersionFromTag("") - Expect(err).To(HaveOccurred()) - - _, err = GetPostgresVersionFromTag("8") - Expect(err).To(HaveOccurred()) - - _, err = GetPostgresVersionFromTag("9.five") - Expect(err).To(HaveOccurred()) - - _, err = GetPostgresVersionFromTag("10.old") - Expect(err).To(HaveOccurred()) - }) - }) - - Describe("major version extraction", func() { - It("should extract the major version for PostgreSQL >= 10", func() { - Expect(GetPostgresMajorVersion(100003)).To(Equal(100000)) - }) - - It("should extract the major version for PostgreSQL < 10", func() { - Expect(GetPostgresMajorVersion(90504)).To(Equal(90500)) - Expect(GetPostgresMajorVersion(90400)).To(Equal(90400)) - }) - }) - - Describe("detect whenever a version upgrade is possible using the numeric version", func() { - It("succeed when the major version is the same", func() { - Expect(IsUpgradePossible(100000, 100003)).To(BeTrue()) - Expect(IsUpgradePossible(90302, 90303)).To(BeTrue()) - }) - - It("prevent upgrading to a different major version", func() { - Expect(IsUpgradePossible(100003, 110003)).To(BeFalse()) - Expect(IsUpgradePossible(90604, 100000)).To(BeFalse()) - Expect(IsUpgradePossible(90503, 900604)).To(BeFalse()) - }) - }) -}) diff --git a/pkg/utils/imagename.go b/pkg/utils/imagename.go deleted file mode 100644 index 0a9e04c4d2..0000000000 --- a/pkg/utils/imagename.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "fmt" - "regexp" - "strings" -) - -var ( - digestRegex = regexp.MustCompile(`@sha256:(?P[a-fA-F0-9]+)$`) - tagRegex = regexp.MustCompile(`:(?P[^/]+)$`) - hostRegex = regexp.MustCompile(`^[^./:]+((\.[^./:]+)+(:[0-9]+)?|:[0-9]+)/`) -) - -// Reference . 
-type Reference struct { - Name string - Tag string - Digest string -} - -// GetNormalizedName returns the normalized name of a reference -func (r *Reference) GetNormalizedName() (name string) { - name = r.Name - if r.Tag != "" { - name = fmt.Sprintf("%s:%s", name, r.Tag) - } - if r.Digest != "" { - name = fmt.Sprintf("%s@sha256:%s", name, r.Digest) - } - return name -} - -// NewReference parses the image name and returns an error if the name is invalid. -func NewReference(name string) *Reference { - reference := &Reference{} - - if !strings.Contains(name, "/") { - name = "docker.io/library/" + name - } else if !hostRegex.MatchString(name) { - name = "docker.io/" + name - } - - if digestRegex.MatchString(name) { - res := digestRegex.FindStringSubmatch(name) - reference.Digest = res[1] // digest capture group index - name = strings.TrimSuffix(name, res[0]) - } - - if tagRegex.MatchString(name) { - res := tagRegex.FindStringSubmatch(name) - reference.Tag = res[1] // tag capture group index - name = strings.TrimSuffix(name, res[0]) - } else if reference.Digest == "" { - reference.Tag = "latest" - } - - // everything else is the name - reference.Name = name - - return reference -} - -// GetImageTag gets the image tag from a full image string. -// Example: -// -// GetImageTag("postgres") == "latest" -// GetImageTag("ghcr.io/cloudnative-pg/postgresql:12.3") == "12.3" -func GetImageTag(imageName string) string { - ref := NewReference(imageName) - return ref.Tag -} diff --git a/pkg/utils/imagename_test.go b/pkg/utils/imagename_test.go deleted file mode 100644 index 15bf1bec38..0000000000 --- a/pkg/utils/imagename_test.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("image name management", func() { - It("should normalize image names", func() { - Expect(NewReference("postgres").GetNormalizedName()).To( - Equal("docker.io/library/postgres:latest")) - Expect(NewReference("myimage/postgres").GetNormalizedName()).To( - Equal("docker.io/myimage/postgres:latest")) - Expect(NewReference("localhost:5000/postgres").GetNormalizedName()).To( - Equal("localhost:5000/postgres:latest")) - Expect(NewReference("registry.localhost:5000/postgres:14.4").GetNormalizedName()).To( - Equal("registry.localhost:5000/postgres:14.4")) - Expect(NewReference("ghcr.io/cloudnative-pg/postgresql:34").GetNormalizedName()).To( - Equal("ghcr.io/cloudnative-pg/postgresql:34")) - }) - - It("should extract tag names", func() { - Expect(GetImageTag("postgres")).To(Equal("latest")) - Expect(GetImageTag("postgres:34.3")).To(Equal("34.3")) - Expect(GetImageTag("postgres:13@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866")). - To(Equal("13")) - }) - - It("should not extract a tag name", func() { - Expect(GetImageTag("postgres@sha256:cff94dd382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866")). 
- To(BeEmpty()) - }) -}) diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index 571db88a81..7252bf13b3 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -22,13 +22,13 @@ import ( "strings" "time" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/types" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" "github.com/cloudnative-pg/cloudnative-pg/tests" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" @@ -172,15 +172,15 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin // shouldSkip skip this test if the current POSTGRES_IMG is already the latest major func shouldSkip(postgresImage string) bool { // Get the current tag - currentImageReference := utils.NewReference(postgresImage) - currentImageVersion, err := postgres.GetPostgresVersionFromTag(currentImageReference.Tag) + currentImageReference := reference.New(postgresImage) + currentImageVersion, err := version.FromTag(currentImageReference.Tag) Expect(err).ToNot(HaveOccurred()) // Get the default tag - defaultImageReference := utils.NewReference(versions.DefaultImageName) - defaultImageVersion, err := postgres.GetPostgresVersionFromTag(defaultImageReference.Tag) + defaultImageReference := reference.New(versions.DefaultImageName) + defaultImageVersion, err := version.FromTag(defaultImageReference.Tag) Expect(err).ToNot(HaveOccurred()) - return currentImageVersion >= defaultImageVersion + return currentImageVersion.Major() >= defaultImageVersion.Major() } // assertCreateTableWithDataOnSourceCluster will create on the source Cluster, as postgres superUser: diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go index 9c61427c56..ec50ba5119 100644 --- a/tests/e2e/rolling_update_test.go +++ b/tests/e2e/rolling_update_test.go @@ -19,6 +19,8 @@ package e2e import ( "os" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -27,7 +29,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" @@ -308,7 +309,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun }) } - newImageCatalog := func(namespace string, name string, major int, image string) *apiv1.ImageCatalog { + newImageCatalog := func(namespace string, name string, major uint64, image string) *apiv1.ImageCatalog { imgCat := &apiv1.ImageCatalog{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -318,7 +319,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Images: []apiv1.CatalogImage{ { Image: image, - Major: major, + Major: int(major), }, }, }, @@ -328,7 +329,7 @@ var _ = Describe("Rolling updates", 
Label(tests.LabelPostgresConfiguration), fun } newImageCatalogCluster := func( - namespace string, name string, major int, instances int, storageClass string, + namespace string, name string, major uint64, instances int, storageClass string, ) *apiv1.Cluster { cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -343,7 +344,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Name: name, Kind: "ImageCatalog", }, - Major: major, + Major: int(major), }, PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ @@ -376,7 +377,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun return cluster } - newClusterImageCatalog := func(name string, major int, image string) *apiv1.ClusterImageCatalog { + newClusterImageCatalog := func(name string, major uint64, image string) *apiv1.ClusterImageCatalog { imgCat := &apiv1.ClusterImageCatalog{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -385,7 +386,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Images: []apiv1.CatalogImage{ { Image: image, - Major: major, + Major: int(major), }, }, }, @@ -522,7 +523,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun var storageClass string var preRollingImg string var updatedImageName string - var major int + var pgVersion version.Data BeforeEach(func() { storageClass = os.Getenv("E2E_DEFAULT_STORAGE_CLASS") preRollingImg = os.Getenv("E2E_PRE_ROLLING_UPDATE_IMG") @@ -533,11 +534,11 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // We automate the extraction of the major version from the image, because we don't want to keep maintaining // the major version in the test - version, err := postgres.GetPostgresVersionFromTag(utils.GetImageTag(preRollingImg)) + var err error + pgVersion, err = version.FromTag(reference.New(preRollingImg).Tag) if err != nil { Expect(err).ToNot(HaveOccurred()) } - major = postgres.GetPostgresMajorVersion(version) / 10000 }) Context("ImageCatalog", func() { @@ -558,8 +559,8 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Expect(err).ToNot(HaveOccurred()) // Create a new image catalog and a new cluster - catalog := newImageCatalog(namespace, clusterName, major, preRollingImg) - cluster := newImageCatalogCluster(namespace, clusterName, major, 3, storageClass) + catalog := newImageCatalog(namespace, clusterName, pgVersion.Major(), preRollingImg) + cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 3, storageClass) AssertRollingUpdateWithImageCatalog(cluster, catalog, updatedImageName, true) }) @@ -577,8 +578,8 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) Expect(err).ToNot(HaveOccurred()) - catalog := newImageCatalog(namespace, clusterName, major, preRollingImg) - cluster := newImageCatalogCluster(namespace, clusterName, major, 1, storageClass) + catalog := newImageCatalog(namespace, clusterName, pgVersion.Major(), preRollingImg) + cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 1, storageClass) AssertRollingUpdateWithImageCatalog(cluster, catalog, updatedImageName, false) }) }) @@ -589,7 +590,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun ) var catalog *apiv1.ClusterImageCatalog BeforeEach(func() { - catalog = newClusterImageCatalog(clusterName, major, 
preRollingImg) + catalog = newClusterImageCatalog(clusterName, pgVersion.Major(), preRollingImg) }) AfterEach(func() { err := env.Client.Delete(env.Ctx, catalog) @@ -613,7 +614,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) Expect(err).ToNot(HaveOccurred()) - cluster := newImageCatalogCluster(namespace, clusterName, major, 3, storageClass) + cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 3, storageClass) cluster.Spec.ImageCatalogRef.Kind = "ClusterImageCatalog" AssertRollingUpdateWithImageCatalog(cluster, catalog, updatedImageName, true) }) @@ -631,7 +632,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) Expect(err).ToNot(HaveOccurred()) - cluster := newImageCatalogCluster(namespace, clusterName, major, 1, storageClass) + cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 1, storageClass) cluster.Spec.ImageCatalogRef.Kind = "ClusterImageCatalog" AssertRollingUpdateWithImageCatalog(cluster, catalog, updatedImageName, false) }) diff --git a/tests/utils/environment.go b/tests/utils/environment.go index ea9cefe4bb..90e019327e 100644 --- a/tests/utils/environment.go +++ b/tests/utils/environment.go @@ -25,7 +25,9 @@ import ( "sync" "time" + "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" "github.com/go-logr/logr" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -46,7 +48,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" @@ -78,7 +79,7 @@ type TestingEnvironment struct { Scheme *runtime.Scheme PreserveNamespaces []string Log logr.Logger - PostgresVersion int + PostgresVersion uint64 createdNamespaces *uniqueStringSlice AzureConfiguration AzureConfiguration SternLogDir string @@ -137,12 +138,12 @@ func NewTestingEnvironment() (*TestingEnvironment, error) { if postgresImageFromUser, exist := os.LookupEnv("POSTGRES_IMG"); exist { postgresImage = postgresImageFromUser } - imageReference := utils.NewReference(postgresImage) - postgresImageVersion, err := postgres.GetPostgresVersionFromTag(imageReference.Tag) + imageReference := reference.New(postgresImage) + postgresImageVersion, err := version.FromTag(imageReference.Tag) if err != nil { return nil, err } - env.PostgresVersion = postgresImageVersion / 10000 + env.PostgresVersion = postgresImageVersion.Major() env.Client, err = client.New(env.RestClientConfig, client.Options{Scheme: env.Scheme}) if err != nil { diff --git a/tests/utils/version.go b/tests/utils/version.go index 1c814faf8c..2416df4b08 100644 --- a/tests/utils/version.go +++ b/tests/utils/version.go @@ -19,32 +19,33 @@ package utils import ( "fmt" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" + 
"github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) // BumpPostgresImageMajorVersion returns a postgresImage incrementing the major version of the argument (if available) func BumpPostgresImageMajorVersion(postgresImage string) (string, error) { - imageReference := utils.NewReference(postgresImage) + imageReference := reference.New(postgresImage) - postgresImageVersion, err := postgres.GetPostgresVersionFromTag(imageReference.Tag) + postgresImageVersion, err := version.FromTag(imageReference.Tag) if err != nil { return "", err } - targetPostgresImageVersionInt := postgresImageVersion + 1_00_00 + targetPostgresImageMajorVersionInt := postgresImageVersion.Major() + 1 - defaultImageVersion, err := postgres.GetPostgresVersionFromTag(utils.GetImageTag(versions.DefaultImageName)) + defaultImageVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) if err != nil { return "", err } - if targetPostgresImageVersionInt >= defaultImageVersion { + if targetPostgresImageMajorVersionInt >= defaultImageVersion.Major() { return postgresImage, nil } - imageReference.Tag = fmt.Sprintf("%d", postgresImageVersion/10000+1) + imageReference.Tag = fmt.Sprintf("%d", postgresImageVersion.Major()+1) return imageReference.GetNormalizedName(), nil } From 306cf515ae5e7054823b1e2d87f98b524606ba08 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 19:28:44 +0200 Subject: [PATCH 030/836] fix(deps): update all non-major go dependencies (main) (#5323) https://github.com/cloudnative-pg/cnpg-i `5844b83` -> `7e24b2e` https://github.com/cloudnative-pg/machinery `34c8797` -> `0e5ba4f` https://github.com/goreleaser/goreleaser `v2.2.0` -> `v2.3.2` https://github.com/jackc/pgx `v5.6.0` -> `v5.7.1` https://github.com/jackc/puddle `v2.2.1` -> `v2.2.2` https://github.com/prometheus/client_golang `v1.20.3` -> `v1.20.4` https://github.com/grpc/grpc-go `v1.65.0` -> `v1.67.1` github.com/jackc/pgservicefil `v0.0.0-20221227161230-091c0ba34f0a` -> `v0.0.0-20240606120523-5a60cdf6a761 google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 -> v0.0.0-20240814211410-ddb44dafa142` --- Makefile | 2 +- go.mod | 16 ++++++++-------- go.sum | 32 ++++++++++++++++---------------- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Makefile b/Makefile index 25c7e19161..9fba4183a6 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.4.3 CONTROLLER_TOOLS_VERSION ?= v0.16.3 -GORELEASER_VERSION ?= v2.2.0 +GORELEASER_VERSION ?= v2.3.2 SPELLCHECK_VERSION ?= 0.42.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 diff --git a/go.mod b/go.mod index 8a88225e10..1c06369baf 100644 --- a/go.mod +++ b/go.mod @@ -11,15 +11,15 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a - github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb - github.com/cloudnative-pg/machinery v0.0.0-20241001075747-34c8797af80f + github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 + github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 - github.com/jackc/pgx/v5 v5.6.0 - github.com/jackc/puddle/v2 v2.2.1 + github.com/jackc/pgx/v5 v5.7.1 + github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 github.com/lib/pq v1.10.9 @@ -28,7 +28,7 @@ require ( github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 - github.com/prometheus/client_golang v1.20.3 + github.com/prometheus/client_golang v1.20.4 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 github.com/spf13/cobra v1.8.1 @@ -37,7 +37,7 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 golang.org/x/term v0.24.0 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.67.1 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.1 k8s.io/apiextensions-apiserver v0.31.1 @@ -78,7 +78,7 @@ require ( github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.9 // indirect @@ -113,7 +113,7 @@ require ( golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index c6ecb0ceac..f8365e307d 100644 --- a/go.sum +++ b/go.sum @@ -20,10 +20,10 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1ML9Eibfq3helbT9GtU0EstqFtG91k/MPO9azY5ME= github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= -github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb h1:kZQk+KUCTHQMEgcH8j2/ypcG2HY58zKocmVUvX6c1IA= -github.com/cloudnative-pg/cnpg-i v0.0.0-20240820123829-5844b833f4eb/go.mod h1:UILpBDaWvXcYC5kY5DMaVEEQY5483CBApMuHIn0GJdg= -github.com/cloudnative-pg/machinery v0.0.0-20241001075747-34c8797af80f h1:RgPmQJkuSu3eTdfd4T2K95RYQi57LHB2+Jfsu/faKOM= -github.com/cloudnative-pg/machinery v0.0.0-20241001075747-34c8797af80f/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8= +github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1 h1:qrxfp0vR+zqC+L1yTdQTqRHvnLLcVk4CdWB1RwLd8UE= +github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty 
v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -91,12 +91,12 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= -github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -159,8 +159,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 h1:6UsAv+jAevuGO2yZFU/BukV4o9NKnFMOuoouSA4G0ns= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= @@ -258,10 +258,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 
h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
 google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
 google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

From 89d840aefa666d1bdc2826908f5e850475b2141b Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 2 Oct 2024 22:18:23 +0200
Subject: [PATCH 031/836] chore(deps): update dependency go to v1.23.2 (main)
 (#5699)

---
 go.mod | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/go.mod b/go.mod
index 1c06369baf..d430eac802 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/cloudnative-pg/cloudnative-pg
 
 go 1.22.0
 
-toolchain go1.23.1
+toolchain go1.23.2
 
 require (
 	github.com/DATA-DOG/go-sqlmock v1.5.2

From e3f8b2bbcbff191a2de966df68d2553945d67ab3 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 3 Oct 2024 09:40:22 +0200
Subject: [PATCH 032/836] chore: add a new way to connect to the clusters from
 tests (#5509)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We've been using a dedicated pod running a psql client to connect to the
clusters. This change implements a port-forward that the tests can use to
connect locally to the cluster pods, removing the need for the psql pod.

Closes #5513
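
For illustration, a minimal sketch of the port-forward technique that the
new ForwardPSQLConnection helper is built around. This is not the helper
itself: the function name, the use of the pgx stdlib driver, and the DSN
settings below are assumptions made for the example.

```go
package forwardexample

import (
	"database/sql"
	"fmt"
	"net/http"

	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" database/sql driver
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/portforward"
	"k8s.io/client-go/transport/spdy"
)

// forwardAndConnect tunnels a random free local port to port 5432 of the
// given pod and opens a database/sql connection through the tunnel.
// Closing the returned stop channel tears the tunnel down.
func forwardAndConnect(
	cfg *rest.Config, cs kubernetes.Interface,
	namespace, podName, user, password, dbname string,
) (*sql.DB, chan struct{}, error) {
	// Build the URL of the pod's "portforward" subresource.
	req := cs.CoreV1().RESTClient().Post().
		Resource("pods").Namespace(namespace).Name(podName).
		SubResource("portforward")

	transport, upgrader, err := spdy.RoundTripperFor(cfg)
	if err != nil {
		return nil, nil, err
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL())

	stopCh := make(chan struct{})
	readyCh := make(chan struct{})
	// "0:5432" maps a random free local port to the pod's PostgreSQL port.
	fw, err := portforward.New(dialer, []string{"0:5432"}, stopCh, readyCh, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	go func() { _ = fw.ForwardPorts() }()
	<-readyCh // the local listener is up once readyCh is closed

	ports, err := fw.GetPorts()
	if err != nil {
		close(stopCh)
		return nil, nil, err
	}
	dsn := fmt.Sprintf("host=localhost port=%d user=%s password=%s dbname=%s sslmode=require",
		ports[0].Local, user, password, dbname)
	db, err := sql.Open("pgx", dsn)
	if err != nil {
		close(stopCh)
		return nil, nil, err
	}
	return db, stopCh, nil
}
```

The patch wraps this pattern so the tests can simply call
ForwardPSQLConnection and later forward.Close() when they are done.

---------

Signed-off-by: Jonathan Gonzalez V.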
Signed-off-by: Niccolò Fei Signed-off-by: Francesco Canovai Co-authored-by: Niccolò Fei Co-authored-by: Francesco Canovai --- tests/e2e/asserts_test.go | 427 +++++++++++++--------- tests/e2e/backup_restore_test.go | 132 ++++--- tests/e2e/cluster_microservice_test.go | 15 +- tests/e2e/cluster_monolithic_test.go | 156 ++++---- tests/e2e/cluster_setup_test.go | 61 ++-- tests/e2e/configuration_update_test.go | 52 +-- tests/e2e/connection_test.go | 10 +- tests/e2e/declarative_hibernation_test.go | 4 +- tests/e2e/drain_node_test.go | 20 +- tests/e2e/hibernation_test.go | 8 +- tests/e2e/managed_roles_test.go | 36 +- tests/e2e/metrics_test.go | 33 +- tests/e2e/operator_unavailable_test.go | 12 +- tests/e2e/pg_basebackup_test.go | 43 ++- tests/e2e/pg_data_corruption_test.go | 4 +- tests/e2e/pgbouncer_test.go | 4 +- tests/e2e/replica_mode_cluster_test.go | 15 +- tests/e2e/suite_test.go | 19 +- tests/e2e/tablespaces_test.go | 47 ++- tests/e2e/update_user_test.go | 19 +- tests/e2e/volume_snapshot_test.go | 73 +++- tests/utils/psql_client.go | 113 ------ tests/utils/psql_connection.go | 227 ++++++++++++ tests/utils/time.go | 35 +- 24 files changed, 912 insertions(+), 653 deletions(-) delete mode 100644 tests/utils/psql_client.go create mode 100644 tests/utils/psql_connection.go diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index aaf97e0e68..badcfe420c 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "database/sql" "errors" "fmt" "os" @@ -376,13 +377,13 @@ func AssertUpdateSecret(field string, value string, secretName string, namespace // AssertConnection is used if a connection from a pod to a postgresql // database works func AssertConnection(host string, user string, dbname string, - password string, queryingPod corev1.Pod, timeout int, env *testsUtils.TestingEnvironment, + password string, queryingPod *corev1.Pod, timeout int, env *testsUtils.TestingEnvironment, ) { By(fmt.Sprintf("connecting to the %v service as %v", host, user), func() { Eventually(func() string { dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require", host, user, dbname, password) commandTimeout := time.Second * 10 - stdout, _, err := env.ExecCommand(env.Ctx, queryingPod, specs.PostgresContainerName, &commandTimeout, + stdout, _, err := env.ExecCommand(env.Ctx, *queryingPod, specs.PostgresContainerName, &commandTimeout, "psql", dsn, "-tAc", "SELECT 1") if err != nil { return "" @@ -432,51 +433,46 @@ func AssertDatabaseIsReady(namespace, clusterName, dbName string) { } // AssertCreateTestData create test on the "app" database -func AssertCreateTestData(namespace, clusterName, tableName string, pod *corev1.Pod) { +func AssertCreateTestData(env *testsUtils.TestingEnvironment, namespace, clusterName, tableName string) { AssertDatabaseIsReady(namespace, clusterName, testsUtils.AppDBName) By(fmt.Sprintf("creating test data in cluster %v", clusterName), func() { + forward, conn, err := testsUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + testsUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + Expect(err).ToNot(HaveOccurred()) + query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName) - Eventually(func() error { - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) - if err != nil { - return err - } - return nil - }, RetryTimeout, 
PollingTime).Should(BeNil()) + _, err = conn.Exec(query) + Expect(err).ToNot(HaveOccurred()) + forward.Close() }) } // AssertCreateTestDataWithDatabaseName create test data in a given database. func AssertCreateTestDataWithDatabaseName( + env *testsUtils.TestingEnvironment, namespace, clusterName, databaseName, tableName string, - pod *corev1.Pod, ) { By(fmt.Sprintf("creating test data in cluster %v", clusterName), func() { + forward, conn, err := testsUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + databaseName, + apiv1.ApplicationUserSecretSuffix, + ) + Expect(err).ToNot(HaveOccurred()) query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName) - Eventually(func() error { - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - databaseName, - query, - ) - if err != nil { - return err - } - return nil - }, RetryTimeout, PollingTime).Should(BeNil()) + _, err = conn.Exec(query) + Expect(err).ToNot(HaveOccurred()) + forward.Close() }) } @@ -488,25 +484,26 @@ type TableLocator struct { } // AssertCreateTestDataInTablespace create test data. -func AssertCreateTestDataInTablespace(tl TableLocator, pod *corev1.Pod) { +func AssertCreateTestDataInTablespace(env *testsUtils.TestingEnvironment, tl TableLocator) { AssertDatabaseIsReady(tl.Namespace, tl.ClusterName, testsUtils.AppDBName) By(fmt.Sprintf("creating test data in tablespace %q", tl.Tablespace), func() { + forward, conn, err := testsUtils.ForwardPSQLConnection( + env, + tl.Namespace, + tl.ClusterName, + testsUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v TABLESPACE %v AS VALUES (1),(2);", tl.TableName, tl.Tablespace) - Eventually(func() error { - _, _, err := env.ExecCommandWithPsqlClient( - tl.Namespace, - tl.ClusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) - if err != nil { - return err - } - return nil - }, RetryTimeout, PollingTime).Should(BeNil()) + + _, err = conn.Exec(query) + Expect(err).ToNot(HaveOccurred()) }) } @@ -533,41 +530,33 @@ func AssertCreateTestDataLargeObject(namespace, clusterName string, oid int, dat // insertRecordIntoTableWithDatabaseName insert an entry into a table func insertRecordIntoTableWithDatabaseName( + env *testsUtils.TestingEnvironment, namespace, clusterName, databaseName, tableName string, value int, - pod *corev1.Pod, ) { - query := fmt.Sprintf("INSERT INTO %v VALUES (%v);", tableName, value) - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.RunQueryFromPod( - pod, - host, + forward, conn, err := testsUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, databaseName, - appUser, - appUserPass, - query, - env) + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + _, err = conn.Exec(fmt.Sprintf("INSERT INTO %s VALUES (%d);", tableName, value)) Expect(err).ToNot(HaveOccurred()) } // insertRecordIntoTable insert an entry into a table -func insertRecordIntoTable(namespace, clusterName, tableName string, value int, pod *corev1.Pod) { - query := fmt.Sprintf("INSERT INTO %v VALUES (%v);", tableName, value) - _, _, 
err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) - Expect(err).NotTo(HaveOccurred()) +func insertRecordIntoTable(tableName string, value int, conn *sql.DB) { + _, err := conn.Exec(fmt.Sprintf("INSERT INTO %s VALUES (%d)", tableName, value)) + Expect(err).ToNot(HaveOccurred()) } // AssertDatabaseExists assert if database exists @@ -638,23 +627,28 @@ func AssertDataExpectedCountWithDatabaseName(namespace, podName, databaseName st } // AssertDataExpectedCount verifies that an expected amount of rows exists on the table -func AssertDataExpectedCount(namespace, clusterName, tableName string, expectedValue int, pod *corev1.Pod) { +func AssertDataExpectedCount( + env *testsUtils.TestingEnvironment, + namespace, + clusterName, + tableName string, + expectedValue int, +) { By(fmt.Sprintf("verifying test data in table %v", tableName), func() { - query := fmt.Sprintf("select count(*) from %v", tableName) - Eventually(func() (int, error) { - stdout, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query) - if err != nil { - return 0, err - } - nRows, err := strconv.Atoi(strings.Trim(stdout, "\n")) - return nRows, err - }, 300).Should(BeEquivalentTo(expectedValue)) + row, err := testsUtils.RunQueryRowOverForward( + env, + namespace, + clusterName, + testsUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName), + ) + Expect(err).ToNot(HaveOccurred()) + + var nRows int + err = row.Scan(&nRows) + Expect(err).ToNot(HaveOccurred()) + Expect(nRows).Should(BeEquivalentTo(expectedValue)) }) } @@ -997,7 +991,6 @@ func AssertReplicaModeCluster( srcClusterDBName, replicaClusterSample, testTableName string, - pod *corev1.Pod, ) { var primaryReplicaCluster *corev1.Pod commandTimeout := time.Second * 10 @@ -1005,13 +998,7 @@ func AssertReplicaModeCluster( AssertDatabaseIsReady(namespace, srcClusterName, srcClusterDBName) - AssertCreateTestDataWithDatabaseName( - namespace, - srcClusterName, - srcClusterDBName, - testTableName, - pod, - ) + AssertCreateTestDataWithDatabaseName(env, namespace, srcClusterName, srcClusterDBName, testTableName) By("creating replica cluster", func() { replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample) @@ -1034,7 +1021,7 @@ func AssertReplicaModeCluster( }) By("writing some new data to the source cluster", func() { - insertRecordIntoTableWithDatabaseName(namespace, srcClusterName, srcClusterDBName, testTableName, 3, pod) + insertRecordIntoTableWithDatabaseName(env, namespace, srcClusterName, srcClusterDBName, testTableName, 3) }) By("checking new data have been copied correctly in replica cluster", func() { @@ -1131,7 +1118,7 @@ func AssertDetachReplicaModeCluster( }) By("writing some new data to the source cluster", func() { - AssertCreateTestDataWithDatabaseName(namespace, srcClusterName, srcDatabaseName, testTableName, psqlClientPod) + AssertCreateTestDataWithDatabaseName(env, namespace, srcClusterName, srcDatabaseName, testTableName) }) By("verifying that replica cluster was not modified", func() { @@ -1358,17 +1345,21 @@ func AssertCustomMetricsResourcesExist(namespace, sampleFile string, configMapsC }) } -func AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBName, tableName string, pod *corev1.Pod) { +func AssertCreationOfTestDataForTargetDB( + env *testsUtils.TestingEnvironment, + namespace, + 
clusterName, + targetDBName, + tableName string, +) { By(fmt.Sprintf("creating target database '%v' and table '%v'", targetDBName, tableName), func() { - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - // We need to gather the cluster primary to create the database via superuser currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) + appUser, _, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + Expect(err).ToNot(HaveOccurred()) + // Create database commandTimeout := time.Second * 10 createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER %v", targetDBName, appUser) @@ -1381,31 +1372,27 @@ func AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBName, t ) Expect(err).ToNot(HaveOccurred()) - // Create table on target database - createTableQuery := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (id int);", tableName) - _, _, err = testsUtils.RunQueryFromPod( - pod, - host, - targetDBName, - appUser, - appUserPass, - createTableQuery, + forward, conn, err := testsUtils.ForwardPSQLConnection( env, + namespace, + clusterName, + targetDBName, + apiv1.ApplicationUserSecretSuffix, ) Expect(err).ToNot(HaveOccurred()) + // Create table on target database + createTableQuery := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (id int);", tableName) + _, err = conn.Exec(createTableQuery) + Expect(err).ToNot(HaveOccurred()) + // Grant a permission grantRoleQuery := "GRANT SELECT ON all tables in schema public to pg_monitor;" - _, _, err = testsUtils.RunQueryFromPod( - pod, - host, - targetDBName, - appUser, - appUserPass, - grantRoleQuery, - env, - ) + _, err = conn.Exec(grantRoleQuery) Expect(err).ToNot(HaveOccurred()) + + // Close the connection and forward + forward.Close() }) } @@ -1437,7 +1424,7 @@ func AssertApplicationDatabaseConnection( // rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) rwService := testsUtils.CreateServiceFQDN(namespace, testsUtils.GetReadWriteServiceName(clusterName)) - AssertConnection(rwService, appUser, appDB, appPassword, *pod, 60, env) + AssertConnection(rwService, appUser, appDB, appPassword, pod, 60, env) }) } @@ -1570,7 +1557,7 @@ func AssertROSASTokenUnableToWrite(containerName string, id string, key string) Expect(err).To(HaveOccurred()) } -func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, tableName string, pod *corev1.Pod) { +func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, tableName string) { By("Async Replication into external cluster", func() { restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) Expect(err).ToNot(HaveOccurred()) @@ -1582,30 +1569,56 @@ func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) // Test data should be present on restored primary - // NOTE: We use the credentials from the `source-cluster` for the psql connection - // given that this is a replica cluster restoredPrimary, err := env.GetClusterPrimary(namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) + + // We need the credentials from the source cluster because the replica cluster + // doesn't create the credentials on its own namespace 
appUser, appUserPass, err := testsUtils.GetCredentials( - sourceClusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + sourceClusterName, + namespace, + apiv1.ApplicationUserSecretSuffix, + env, + ) Expect(err).ToNot(HaveOccurred()) - rwService := testsUtils.CreateServiceFQDN(namespace, testsUtils.GetReadWriteServiceName(restoredClusterName)) - query := "SELECT count(*) FROM " + tableName - out, _, err := testsUtils.RunQueryFromPod( - restoredPrimary, - rwService, + + forwardRestored, connRestored, err := testsUtils.ForwardPSQLConnectionWithCreds( + env, + namespace, + restoredClusterName, testsUtils.AppDBName, appUser, appUserPass, - query, + ) + defer func() { + _ = connRestored.Close() + forwardRestored.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + row := connRestored.QueryRow(fmt.Sprintf("SELECT count(*) FROM %s", tableName)) + var countString string + err = row.Scan(&countString) + Expect(err).ToNot(HaveOccurred()) + Expect(countString).To(BeEquivalentTo("2")) + + forwardSource, connSource, err := testsUtils.ForwardPSQLConnection( env, + namespace, + sourceClusterName, + testsUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, ) - Expect(strings.Trim(out, "\n"), err).To(BeEquivalentTo("2")) + defer func() { + _ = connSource.Close() + forwardSource.Close() + }() + Expect(err).ToNot(HaveOccurred()) // Insert new data in the source cluster - insertRecordIntoTable(namespace, sourceClusterName, tableName, 3, pod) + insertRecordIntoTable(tableName, 3, connSource) AssertArchiveWalOnMinio(namespace, sourceClusterName, sourceClusterName) - AssertDataExpectedCount(namespace, sourceClusterName, tableName, 3, pod) + AssertDataExpectedCount(env, namespace, sourceClusterName, tableName, 3) cluster, err := env.GetCluster(namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) @@ -1616,7 +1629,7 @@ func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, }) } -func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableName string, pod *corev1.Pod) { +func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableName string) { restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) Expect(err).ToNot(HaveOccurred()) @@ -1627,20 +1640,24 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) // Test data should be present on restored primary - AssertDataExpectedCount(namespace, restoredClusterName, tableName, 2, pod) + AssertDataExpectedCount(env, namespace, restoredClusterName, tableName, 2) }) By("Ensuring the restored cluster is on timeline 2", func() { - out, _, err := env.ExecCommandWithPsqlClient( + row, err := testsUtils.RunQueryRowOverForward( + env, namespace, restoredClusterName, - pod, - apiv1.ApplicationUserSecretSuffix, testsUtils.AppDBName, - "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", + apiv1.ApplicationUserSecretSuffix, + "SELECT substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", ) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Trim(out, "\n"), err).To(Equal("00000002")) + + var timeline string + err = row.Scan(&timeline) + Expect(err).ToNot(HaveOccurred()) + Expect(timeline).To(BeEquivalentTo("00000002")) }) // Restored standby should be attached to restored primary @@ -1653,6 +1670,8 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN secretName := restoredClusterName + 
apiv1.ApplicationUserSecretSuffix
 	By("checking the restored cluster with pre-defined app password connectable", func() {
+		primaryPod, err := env.GetClusterPrimary(namespace, restoredClusterName)
+		Expect(err).ToNot(HaveOccurred())
 		AssertApplicationDatabaseConnection(
 			namespace,
 			restoredClusterName,
@@ -1660,12 +1679,16 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN
 			testsUtils.AppDBName,
 			appUserPass,
 			secretName,
-			pod)
+			primaryPod)
 	})

 	By("update user application password for restored cluster and verify connectivity", func() {
 		const newPassword = "eeh2Zahohx" //nolint:gosec
 		AssertUpdateSecret("password", newPassword, secretName, namespace, restoredClusterName, 30, env)
+
+		primaryPod, err := env.GetClusterPrimary(namespace, restoredClusterName)
+		Expect(err).ToNot(HaveOccurred())
+
 		AssertApplicationDatabaseConnection(
 			namespace,
 			restoredClusterName,
@@ -1673,11 +1696,11 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN
 			testsUtils.AppDBName,
 			newPassword,
 			secretName,
-			pod)
+			primaryPod)
 	})
 }

-func AssertClusterRestore(namespace, restoreClusterFile, tableName string, pod *corev1.Pod) {
+func AssertClusterRestore(namespace, restoreClusterFile, tableName string) {
 	restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile)
 	Expect(err).ToNot(HaveOccurred())

@@ -1689,7 +1712,7 @@ func AssertClusterRestore(namespace, restoreClusterFile, tableName string, pod *

 	// Test data should be present on restored primary
 	primary := restoredClusterName + "-1"
-	AssertDataExpectedCount(namespace, restoredClusterName, tableName, 2, pod)
+	AssertDataExpectedCount(env, namespace, restoredClusterName, tableName, 2)

 	// Restored primary should be on timeline 2
 	out, _, err := env.ExecQueryInInstancePod(
@@ -1837,7 +1860,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) {
 	})
 }

-func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, tableName, lsn string, pod *corev1.Pod) {
+func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, tableName, lsn string) {
 	// We give more time than the usual 600s, since the recovery is slower
 	AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env)

@@ -1848,16 +1871,20 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
 	By("Ensuring the restored cluster is on timeline 3", func() {
 		// Restored primary should be on timeline 3
-		stdOut, _, err := env.ExecCommandWithPsqlClient(
+		row, err := testsUtils.RunQueryRowOverForward(
+			env,
 			namespace,
 			clusterName,
-			pod,
-			apiv1.ApplicationUserSecretSuffix,
 			testsUtils.AppDBName,
+			apiv1.ApplicationUserSecretSuffix,
 			"select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)",
 		)
 		Expect(err).ToNot(HaveOccurred())
-		Expect(strings.Trim(stdOut, "\n"), err).To(Equal(lsn))
+
+		var currentWalLsn string
+		err = row.Scan(&currentWalLsn)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(currentWalLsn).To(Equal(lsn))

 		// Restored standby should be attached to restored primary
 		Expect(testsUtils.CountReplicas(env, primaryInfo)).To(BeEquivalentTo(2))
@@ -1865,13 +1892,16 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta

 	By(fmt.Sprintf("after restore, the 3rd entry should not exist in table '%v'", tableName), func() {
 		// Only 2 entries should be present
-		AssertDataExpectedCount(namespace, clusterName, tableName, 2, pod)
+		AssertDataExpectedCount(env, namespace, clusterName, tableName, 2)
 	})

 	// Gather credentials
 	appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
 	Expect(err).ToNot(HaveOccurred())

+	primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+	Expect(err).ToNot(HaveOccurred())
+
 	By("checking the restored cluster with auto generated app password connectable", func() {
 		AssertApplicationDatabaseConnection(
 			namespace,
@@ -1880,7 +1910,7 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
 			testsUtils.AppDBName,
 			appUserPass,
 			secretName,
-			pod)
+			primaryPod)
 	})

 	By("update user application password for restored cluster and verify connectivity", func() {
@@ -1893,31 +1923,32 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta
 			testsUtils.AppDBName,
 			newPassword,
 			secretName,
-			pod)
+			primaryPod)
 	})
 }

-func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn string, pod *corev1.Pod) {
-	primaryInfo := &corev1.Pod{}
-	var err error
-
+func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn string) {
 	By("restoring a backup cluster with PITR in a new cluster", func() {
 		// We give more time than the usual 600s, since the recovery is slower
 		AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env)

-		primaryInfo, err = env.GetClusterPrimary(namespace, clusterName)
+		primaryInfo, err := env.GetClusterPrimary(namespace, clusterName)
 		Expect(err).ToNot(HaveOccurred())

 		// Restored primary should be on timeline 3
-		stdOut, _, err := env.ExecCommandWithPsqlClient(
+		row, err := testsUtils.RunQueryRowOverForward(
+			env,
 			namespace,
 			clusterName,
-			pod,
-			apiv1.ApplicationUserSecretSuffix,
 			testsUtils.AppDBName,
+			apiv1.ApplicationUserSecretSuffix,
 			"select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)",
 		)
 		Expect(err).ToNot(HaveOccurred())
-		Expect(strings.Trim(stdOut, "\n"), err).To(Equal(lsn))
+
+		var currentWalLsn string
+		err = row.Scan(&currentWalLsn)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(currentWalLsn).To(Equal(lsn))

 		// Restored standby should be attached to restored primary
 		Expect(testsUtils.CountReplicas(env, primaryInfo)).To(BeEquivalentTo(2))
@@ -1925,7 +1956,7 @@ func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn str

 	By(fmt.Sprintf("after restore, the 3rd entry should not exist in table '%v'", tableName), func() {
 		// Only 2 entries should be present
-		AssertDataExpectedCount(namespace, clusterName, tableName, 2, pod)
+		AssertDataExpectedCount(env, namespace, clusterName, tableName, 2)
 	})
 }

@@ -2001,7 +2032,6 @@ func prepareClusterForPITROnMinio(
 	backupSampleFile string,
 	expectedVal int,
 	currentTimestamp *string,
-	pod *corev1.Pod,
 ) {
 	const tableNamePitr = "for_restore"
@@ -2021,16 +2051,28 @@ func prepareClusterForPITROnMinio(
 	})

 	// Write a table and insert 2 entries on the "app" database
-	AssertCreateTestData(namespace, clusterName, tableNamePitr, pod)
+	AssertCreateTestData(env, namespace, clusterName, tableNamePitr)

 	By("getting currentTimestamp", func() {
-		ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env, pod)
+		ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env)
 		*currentTimestamp = ts
 		Expect(err).ToNot(HaveOccurred())
 	})

 	By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() {
-		insertRecordIntoTable(namespace, clusterName, tableNamePitr, 3, pod)
+		forward, conn, err := testsUtils.ForwardPSQLConnection(
+			env,
+			namespace,
+			clusterName,
+			testsUtils.AppDBName,
+			apiv1.ApplicationUserSecretSuffix,
+		)
+		Expect(err).ToNot(HaveOccurred())
+		defer func() {
+			forward.Close()
+		}()
+
+		insertRecordIntoTable(tableNamePitr, 3, conn)
 	})
 	AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
 	AssertArchiveConditionMet(namespace, clusterName, "5m")
@@ -2044,7 +2086,6 @@ func prepareClusterForPITROnAzureBlob(
 	azureConfig testsUtils.AzureConfiguration,
 	expectedVal int,
 	currentTimestamp *string,
-	pod *corev1.Pod,
 ) {
 	const tableNamePitr = "for_restore"
 	By("backing up a cluster and verifying it exists on Azure Blob", func() {
@@ -2061,16 +2102,27 @@ func prepareClusterForPITROnAzureBlob(
 	})

 	// Write a table and insert 2 entries on the "app" database
-	AssertCreateTestData(namespace, clusterName, tableNamePitr, pod)
+	AssertCreateTestData(env, namespace, clusterName, tableNamePitr)

 	By("getting currentTimestamp", func() {
-		ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env, pod)
+		ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env)
 		*currentTimestamp = ts
 		Expect(err).ToNot(HaveOccurred())
 	})

 	By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() {
-		insertRecordIntoTable(namespace, clusterName, tableNamePitr, 3, pod)
+		forward, conn, err := testsUtils.ForwardPSQLConnection(
+			env,
+			namespace,
+			clusterName,
+			testsUtils.AppDBName,
+			apiv1.ApplicationUserSecretSuffix,
+		)
+		Expect(err).ToNot(HaveOccurred())
+		defer func() {
+			forward.Close()
+		}()
+		insertRecordIntoTable(tableNamePitr, 3, conn)
 	})
 	AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration)
 	AssertArchiveConditionMet(namespace, clusterName, "5m")
@@ -2108,12 +2160,11 @@ func prepareClusterBackupOnAzurite(
 	clusterSampleFile,
 	backupFile,
 	tableName string,
-	pod *corev1.Pod,
 ) {
 	// Setting up Azurite and az cli along with Postgresql cluster
 	prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile)
 	// Write a table and some data on the "app" database
-	AssertCreateTestData(namespace, clusterName, tableName, pod)
+	AssertCreateTestData(env, namespace, clusterName, tableName)
 	AssertArchiveWalOnAzurite(namespace, clusterName)

 	By("backing up a cluster and verifying it exists on azurite", func() {
@@ -2137,7 +2188,6 @@ func prepareClusterForPITROnAzurite(
 	clusterName,
 	backupSampleFile string,
 	currentTimestamp *string,
-	pod *corev1.Pod,
 ) {
 	By("backing up a cluster and verifying it exists on azurite", func() {
 		// We create a Backup
@@ -2154,16 +2204,27 @@ func prepareClusterForPITROnAzurite(
 	})

 	// Write a table and insert 2 entries on the "app" database
-	AssertCreateTestData(namespace, clusterName, "for_restore", pod)
+	AssertCreateTestData(env, namespace, clusterName, "for_restore")

 	By("getting currentTimestamp", func() {
-		ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env, pod)
+		ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env)
 		*currentTimestamp = ts
 		Expect(err).ToNot(HaveOccurred())
 	})

 	By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() {
-		insertRecordIntoTable(namespace, clusterName, "for_restore", 3, pod)
+		forward, conn, err := testsUtils.ForwardPSQLConnection(
+			env,
+			namespace,
+			clusterName,
+			testsUtils.AppDBName,
+			apiv1.ApplicationUserSecretSuffix,
+		)
+		Expect(err).ToNot(HaveOccurred())
+		defer func() {
+			forward.Close()
+		}()
+		insertRecordIntoTable("for_restore", 3, conn)
 	})
 	AssertArchiveWalOnAzurite(namespace, clusterName)
 }
@@ -2254,15 +2315,17 @@ func assertReadWriteConnectionUsingPgBouncerService(
 	appUser,
generatedAppUserPassword, err := testsUtils.GetCredentials( clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) Expect(err).ToNot(HaveOccurred()) - AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, *psqlClientPod, 180, env) + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, primaryPod, 180, env) // verify that, if pooler type setup read write then it will allow both read and // write operations or if pooler type setup read only then it will allow only read operations if isPoolerRW { - AssertWritesToPrimarySucceeds(psqlClientPod, poolerService, "app", appUser, + AssertWritesToPrimarySucceeds(primaryPod, poolerService, "app", appUser, generatedAppUserPassword) } else { - AssertWritesToReplicaFails(psqlClientPod, poolerService, "app", appUser, + AssertWritesToReplicaFails(primaryPod, poolerService, "app", appUser, generatedAppUserPassword) } } @@ -2595,7 +2658,7 @@ func DeleteTableUsingPgBouncerService( appUser, generatedAppUserPassword, err := testsUtils.GetCredentials( clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) Expect(err).ToNot(HaveOccurred()) - AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, *pod, 180, env) + AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, pod, 180, env) _, _, err = testsUtils.RunQueryFromPod( pod, poolerService, "app", appUser, generatedAppUserPassword, diff --git a/tests/e2e/backup_restore_test.go b/tests/e2e/backup_restore_test.go index 05ae213875..8ba08452e4 100644 --- a/tests/e2e/backup_restore_test.go +++ b/tests/e2e/backup_restore_test.go @@ -118,12 +118,12 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { backupName, err := env.GetResourceNameFromYAML(backupFile) Expect(err).ToNot(HaveOccurred()) // Create required test data - AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, clusterName, targetDBSecret, testTableName, psqlClientPod) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBSecret, testTableName) // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterName, tableName) AssertArchiveWalOnMinio(namespace, clusterName, clusterName) latestTar := minioPath(clusterName, "data.tar") @@ -217,7 +217,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { }) // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName, psqlClientPod) + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) cluster, err := env.GetCluster(namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) @@ -264,12 +264,12 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { AssertCreateCluster(namespace, targetClusterName, clusterWithMinioStandbySampleFile, env) // Create required test data - 
AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBSecret, testTableName, psqlClientPod) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) // Write a table and some data on the "app" database - AssertCreateTestData(namespace, targetClusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, targetClusterName, tableName) AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) latestTar := minioPath(targetClusterName, "data.tar") @@ -308,12 +308,12 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { AssertCreateCluster(namespace, targetClusterName, clusterWithMinioSampleFile, env) // Create required test data - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, targetClusterName, targetDBSecret, testTableName, psqlClientPod) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) // Write a table and some data on the "app" database - AssertCreateTestData(namespace, targetClusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, targetClusterName, tableName) AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) latestTar := minioPath(targetClusterName, "data.tar") @@ -361,12 +361,12 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { AssertCreateCluster(namespace, customClusterName, clusterWithMinioCustomSampleFile, env) // Create required test data - AssertCreationOfTestDataForTargetDB(namespace, customClusterName, targetDBOne, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, customClusterName, targetDBTwo, testTableName, psqlClientPod) - AssertCreationOfTestDataForTargetDB(namespace, customClusterName, targetDBSecret, testTableName, psqlClientPod) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBSecret, testTableName) // Write a table and some data on the "app" database - AssertCreateTestData(namespace, customClusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, customClusterName, tableName) AssertArchiveWalOnMinio(namespace, customClusterName, clusterServerName) @@ -387,7 +387,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { }) // Restore backup in a new cluster - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName, psqlClientPod) + AssertClusterRestore(namespace, 
clusterRestoreSampleFile, tableName) By("deleting the primary cluster", func() { err = DeleteResourcesFromFile(namespace, clusterWithMinioCustomSampleFile) @@ -429,7 +429,6 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { backupFilePITR, 3, currentTimestamp, - psqlClientPod, ) cluster, err := testUtils.CreateClusterFromBackupUsingPITR( @@ -440,9 +439,10 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { env, ) Expect(err).NotTo(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003", psqlClientPod) + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003") By("deleting the restored cluster", func() { Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) @@ -539,7 +539,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { // be there It("backs up and restore a cluster", func() { // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterName, tableName) AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) By("uploading a backup", func() { // We create a backup @@ -557,7 +557,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { }) // Restore backup in a new cluster - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName, psqlClientPod) + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) By("deleting the restored cluster", func() { err := DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) @@ -589,16 +589,22 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { env.AzureConfiguration, 2, currentTimestamp, - psqlClientPod) + ) AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - cluster, err := testUtils.CreateClusterFromBackupUsingPITR(namespace, restoredClusterName, - backupFile, *currentTimestamp, env) + cluster, err := testUtils.CreateClusterFromBackupUsingPITR( + namespace, + restoredClusterName, + backupFile, + *currentTimestamp, + env, + ) Expect(err).ToNot(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002", psqlClientPod) + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") By("deleting the restored cluster", func() { Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) }) @@ -664,12 +670,12 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { Expect(err).ToNot(HaveOccurred()) }) // Setup Azurite and az cli along with Postgresql cluster - prepareClusterBackupOnAzurite(namespace, clusterName, azuriteBlobSampleFile, backupFile, tableName, psqlClientPod) + prepareClusterBackupOnAzurite(namespace, clusterName, azuriteBlobSampleFile, backupFile, tableName) }) It("restores a backed up cluster", func() { // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreSampleFile, tableName, psqlClientPod) + 
AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreSampleFile, tableName) }) // Create a scheduled backup with the 'immediate' option enabled. @@ -693,7 +699,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { backupFilePITR = fixturesDir + "/backup/azurite/backup-pitr.yaml" ) - prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp, psqlClientPod) + prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp) cluster, err := testUtils.CreateClusterFromBackupUsingPITR( namespace, @@ -703,9 +709,10 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { env, ) Expect(err).NotTo(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002", psqlClientPod) + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") By("deleting the restored cluster", func() { Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) @@ -814,7 +821,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label Expect(err).ToNot(HaveOccurred()) // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterName, tableName) AssertArchiveWalOnMinio(namespace, clusterName, clusterName) @@ -844,10 +851,10 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label // Restoring cluster using a recovery barman object store, which is defined // in the externalClusters section - AssertClusterRestore(namespace, externalClusterFileMinio, tableName, psqlClientPod) + AssertClusterRestore(namespace, externalClusterFileMinio, tableName) // verify test data on restored external cluster - AssertDataExpectedCount(namespace, externalClusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, externalClusterName, tableName, 2) By("deleting the restored cluster", func() { err = DeleteResourcesFromFile(namespace, externalClusterFileMinio) @@ -862,14 +869,25 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label // timestamp. 
It will be used to restore the cluster from the source using PITR
 		By("getting currentTimestamp", func() {
-			ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env, psqlClientPod)
+			ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env)
 			*currentTimestamp = ts
 			Expect(err).ToNot(HaveOccurred())
 		})
 		By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() {
+			forward, conn, err := testUtils.ForwardPSQLConnection(
+				env,
+				namespace,
+				clusterName,
+				testUtils.AppDBName,
+				apiv1.ApplicationUserSecretSuffix,
+			)
+			Expect(err).ToNot(HaveOccurred())
+			defer func() {
+				forward.Close()
+			}()
 			// insert 2 more rows, entries 3 and 4, on the "app" database
-			insertRecordIntoTable(namespace, clusterName, tableName, 3, psqlClientPod)
-			insertRecordIntoTable(namespace, clusterName, tableName, 4, psqlClientPod)
+			insertRecordIntoTable(tableName, 3, conn)
+			insertRecordIntoTable(tableName, 4, conn)
 		})
 		By("creating second backup and verifying it exists on minio", func() {
 			testUtils.ExecuteBackup(namespace, sourceTakeSecondBackupFileMinio, false,
@@ -893,7 +911,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
 				externalClusterRestoreName,
 				tableName,
 				"00000002",
-				psqlClientPod)
+			)
 			By("delete restored cluster", func() {
 				Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed())
 			})
@@ -901,7 +919,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label

 		It("restore cluster from barman object using replica option in spec", func() {
 			// Write a table and some data on the "app" database
-			AssertCreateTestData(namespace, clusterName, "for_restore_repl", psqlClientPod)
+			AssertCreateTestData(env, namespace, clusterName, "for_restore_repl")

 			AssertArchiveWalOnMinio(namespace, clusterName, clusterName)

@@ -922,7 +940,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
 				clusterSourceFileMinio,
 				externalClusterFileMinioReplica,
 				"for_restore_repl",
-				psqlClientPod)
+			)
 		})
 	})

@@ -956,7 +974,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label

 		It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() {
 			// Write a table and some data on the "app" database
-			AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod)
+			AssertCreateTestData(env, namespace, clusterName, tableName)

 			AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration)
 			By("backing up a cluster and verifying it exists on azure blob storage", func() {
@@ -971,7 +989,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label

 			// Restoring cluster using a recovery barman object store, which is defined
 			// in the externalClusters section
-			AssertClusterRestore(namespace, externalClusterFileAzure, tableName, psqlClientPod)
+			AssertClusterRestore(namespace, externalClusterFileAzure, tableName)
 		})

 		It("restores a cluster with 'PITR' from barman object using "+
@@ -985,7 +1003,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
 				env.AzureConfiguration,
 				1,
 				currentTimestamp,
-				psqlClientPod)
+			)

 			restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure(
 				namespace,
@@ -1000,8 +1018,12 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label

 			// Restoring cluster using a recovery barman object store, which is defined
 			// in the externalClusters section
-			AssertClusterWasRestoredWithPITRAndApplicationDB(namespace,
externalClusterName, - tableName, "00000002", psqlClientPod) + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterName, + tableName, + "00000002", + ) By("delete restored cluster", func() { Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) @@ -1038,7 +1060,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterName, tableName) // Create a WAL on the primary and check if it arrives in the // Azure Blob Storage within a short time @@ -1055,7 +1077,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label }) // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName, psqlClientPod) + AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName) }) It("restores a cluster with 'PITR' from barman object using "+ @@ -1069,7 +1091,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label env.AzureConfiguration, 1, currentTimestamp, - psqlClientPod) + ) restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( namespace, @@ -1084,8 +1106,12 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label // Restoring cluster using a recovery barman object store, which is defined // in the externalClusters section - AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, externalClusterName, - tableName, "00000002", psqlClientPod) + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterName, + tableName, + "00000002", + ) By("delete restored cluster", func() { Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) @@ -1126,12 +1152,12 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label azuriteBlobSampleFile, backupFileAzurite, tableName, - psqlClientPod) + ) }) It("restore cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, externalClusterFileAzurite, tableName, psqlClientPod) + AssertClusterRestoreWithApplicationDB(namespace, externalClusterFileAzurite, tableName) }) It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ @@ -1141,7 +1167,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label backupFileAzuritePITR = fixturesBackupDir + "backup-azurite-pitr.yaml" ) - prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp, psqlClientPod) + prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp) // Create a cluster from a particular time using external backup. 
restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzurite( @@ -1153,7 +1179,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label externalClusterRestoreName, tableName, "00000002", - psqlClientPod) + ) By("delete restored cluster", func() { Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index 7252bf13b3..57704d213a 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -69,14 +69,17 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin data := "large object test" namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env) - AssertCreateTestData(namespace, sourceClusterName, tableName, psqlClientPod) - AssertCreateTestDataLargeObject(namespace, sourceClusterName, oid, data, psqlClientPod) + AssertCreateTestData(env, namespace, sourceClusterName, tableName) + primaryPod, err := env.GetClusterPrimary(namespace, sourceClusterName) + Expect(err).ToNot(HaveOccurred()) + AssertCreateTestDataLargeObject(namespace, sourceClusterName, oid, data, primaryPod) importedClusterName = "cluster-pgdump-large-object" cluster := AssertClusterImport(namespace, importedClusterName, sourceClusterName, "app") - AssertDataExpectedCount(namespace, importedClusterName, tableName, 2, psqlClientPod) - AssertLargeObjectValue(namespace, importedClusterName, oid, data, psqlClientPod) + AssertDataExpectedCount(env, namespace, importedClusterName, tableName, 2) + AssertLargeObjectValue(namespace, importedClusterName, oid, data, primaryPod) By("deleting the imported database", func() { Expect(testsUtils.DeleteObject(env, cluster)).To(Succeed()) }) @@ -95,7 +98,7 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin importedClusterName = "cluster-pgdump" AssertClusterImport(namespace, importedClusterName, sourceClusterName, "app") - AssertDataExpectedCount(namespace, importedClusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, importedClusterName, tableName, 2) assertTableAndDataOnImportedCluster(namespace, tableName, importedClusterName) }) @@ -317,7 +320,7 @@ func assertImportRenamesSelectedDatabase( AssertClusterStandbysAreStreaming(namespace, importedClusterName, 120) }) - AssertDataExpectedCount(namespace, importedClusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, importedClusterName, tableName, 2) By("verifying that only 'app' DB exists in the imported cluster", func() { importedPrimaryPod, err := env.GetClusterPrimary(namespace, importedClusterName) diff --git a/tests/e2e/cluster_monolithic_test.go b/tests/e2e/cluster_monolithic_test.go index e2dd3ad181..89281f09b6 100644 --- a/tests/e2e/cluster_monolithic_test.go +++ b/tests/e2e/cluster_monolithic_test.go @@ -17,9 +17,11 @@ limitations under the License. 
package e2e import ( + "database/sql" "fmt" "os" - "strings" + + "github.com/lib/pq" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" @@ -49,9 +51,9 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD databaseTwo = "db2" ) - var namespace, sourceClusterName, sourceClusterHost, - sourceClusterSuperUser, sourceClusterPass, - targetClusterHost, targetClusterSuperUser, targetClusterPass string + var namespace, sourceClusterName string + var forwardTarget *testsUtils.PSQLForwardConnection + var connTarget *sql.DB BeforeEach(func() { if testLevelEnv.Depth < int(level) { @@ -73,41 +75,30 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD AssertCreateCluster(namespace, sourceClusterName, sourceClusterFile, env) }) - By("creating several roles, one of them a superuser", func() { + By("creating several roles, one of them a superuser and source databases", func() { + forward, conn, err := testsUtils.ForwardPSQLConnection( + env, + namespace, + sourceClusterName, + testsUtils.PostgresDBName, + apiv1.SuperUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + // create 1st user with superuser role createSuperUserQuery := fmt.Sprintf("create user %v with superuser password '123';", databaseSuperUser) - sourceClusterHost, err = testsUtils.GetHostName(namespace, sourceClusterName, env) - Expect(err).ToNot(HaveOccurred()) - sourceClusterSuperUser, sourceClusterPass, err = testsUtils.GetCredentials( - sourceClusterName, namespace, apiv1.SuperUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.RunQueryFromPod( - psqlClientPod, - sourceClusterHost, - testsUtils.PostgresDBName, - sourceClusterSuperUser, - sourceClusterPass, - createSuperUserQuery, - env, - ) + _, err = conn.Exec(createSuperUserQuery) Expect(err).ToNot(HaveOccurred()) // create 2nd user createUserQuery := fmt.Sprintf("create user %v;", databaseUserTwo) - _, _, err = testsUtils.RunQueryFromPod( - psqlClientPod, - sourceClusterHost, - testsUtils.PostgresDBName, - sourceClusterSuperUser, - sourceClusterPass, - createUserQuery, - env, - ) + _, err = conn.Exec(createUserQuery) Expect(err).ToNot(HaveOccurred()) - }) - By("creating the source databases", func() { queries := []string{ fmt.Sprintf("create database %v;", databaseOne), fmt.Sprintf("alter database %v owner to %v;", databaseOne, databaseSuperUser), @@ -116,30 +107,22 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD } for _, query := range queries { - _, _, err = testsUtils.RunQueryFromPod( - psqlClientPod, - sourceClusterHost, - testsUtils.PostgresDBName, - sourceClusterSuperUser, - sourceClusterPass, - query, - env, - ) + _, err := conn.Exec(query) Expect(err).ToNot(HaveOccurred()) } // create test data and insert some records in both databases for _, database := range sourceDatabases { - query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName) - _, _, err = testsUtils.RunQueryFromPod( - psqlClientPod, - sourceClusterHost, - database, - sourceClusterSuperUser, - sourceClusterPass, - query, - env, - ) + query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", tableName) + conn, err := forward.GetPooler().Connection(database) + // We need to set the max idle connection back to a higher number + // otherwise the conn.Exec() will close the connection + // and that will produce a RST packet from PostgreSQL that will kill 
the
+			// port-forward tunnel
+			// More about the RST packet here https://www.postgresql.org/message-id/165ba87e-fa48-4eae-b1f3-f9a831b4890b%40Spark
+			Expect(err).ToNot(HaveOccurred())
+			conn.SetMaxIdleConns(3)
+			_, err = conn.Exec(query)
 				Expect(err).ToNot(HaveOccurred())
 			}
 		})
@@ -161,59 +144,60 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD
 			AssertClusterIsReady(namespace, targetClusterName, testTimeouts[testsUtils.ClusterIsReady], env)
 		})

+		By("connect to the imported cluster", func() {
+			forwardTarget, connTarget, err = testsUtils.ForwardPSQLConnection(
+				env,
+				namespace,
+				targetClusterName,
+				testsUtils.PostgresDBName,
+				apiv1.SuperUserSecretSuffix,
+			)
+			Expect(err).ToNot(HaveOccurred())
+		})
+
 		By("verifying that the specified source databases were imported", func() {
-			targetClusterHost, err = testsUtils.GetHostName(namespace, targetClusterName, env)
+			stmt, err := connTarget.Prepare("SELECT datname FROM pg_database WHERE datname IN ($1)")
 			Expect(err).ToNot(HaveOccurred())
-			targetClusterSuperUser, targetClusterPass, err = testsUtils.GetCredentials(
-				targetClusterName, namespace, apiv1.SuperUserSecretSuffix, env)
+			rows, err := stmt.QueryContext(env.Ctx, pq.Array(sourceDatabases))
 			Expect(err).ToNot(HaveOccurred())
-			for _, database := range sourceDatabases {
-				databaseEntryQuery := fmt.Sprintf("SELECT datname FROM pg_database where datname='%v'", database)
-				stdOut, _, err := testsUtils.RunQueryFromPod(
-					psqlClientPod,
-					targetClusterHost,
-					testsUtils.PostgresDBName,
-					targetClusterSuperUser,
-					targetClusterPass,
-					databaseEntryQuery,
-					env,
-				)
+			var datName string
+			for rows.Next() {
+				err = rows.Scan(&datName)
 				Expect(err).ToNot(HaveOccurred())
-				Expect(strings.Contains(stdOut, database)).Should(BeTrue())
+				Expect(sourceDatabases).Should(ContainElement(datName))
 			}
 		})

 		By(fmt.Sprintf("verifying that the source superuser '%s' became a normal user in target", databaseSuperUser), func() {
-			getSuperUserQuery := "select * from pg_user where usesuper"
-			stdOut, _, err := testsUtils.RunQueryFromPod(
-				psqlClientPod,
-				targetClusterHost,
-				testsUtils.PostgresDBName,
-				targetClusterSuperUser,
-				targetClusterPass,
-				getSuperUserQuery,
-				env,
-			)
+			row := connTarget.QueryRow(fmt.Sprintf("SELECT usesuper FROM pg_user WHERE usename='%s'", databaseSuperUser))
+			var superUser bool
+			err := row.Scan(&superUser)
 			Expect(err).ToNot(HaveOccurred())
-			Expect(strings.Contains(stdOut, databaseSuperUser)).Should(BeFalse())
+			Expect(superUser).Should(BeFalse())
 		})

 		By("verifying the test data was imported from the source databases", func() {
 			for _, database := range sourceDatabases {
-				selectQuery := fmt.Sprintf("select count(*) from %v", tableName)
-				stdOut, _, err := testsUtils.RunQueryFromPod(
-					psqlClientPod,
-					targetClusterHost,
-					database,
-					targetClusterSuperUser,
-					targetClusterPass,
-					selectQuery,
-					env,
-				)
+				selectQuery := fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName)
+				connTemp, err := forwardTarget.GetPooler().Connection(database)
 				Expect(err).ToNot(HaveOccurred())
+				// We need to set the max idle connections back to a higher number,
+				// otherwise conn.Exec() will close the connection,
+				// and that will produce an RST packet from PostgreSQL that kills the
+				// port-forward tunnel.
+				// More about the RST packet here https://www.postgresql.org/message-id/165ba87e-fa48-4eae-b1f3-f9a831b4890b%40Spark
+				connTemp.SetMaxIdleConns(3)
-				Expect(strings.TrimSpace(stdOut)).Should(BeEquivalentTo("2"))
+				row := connTemp.QueryRow(selectQuery)
+				var count int
+				err = row.Scan(&count)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(count).To(BeEquivalentTo(2))
 			}
 		})
+
+		By("closing the connection to the imported cluster", func() {
+			forwardTarget.Close()
+		})
 	})
 })
diff --git a/tests/e2e/cluster_setup_test.go b/tests/e2e/cluster_setup_test.go
index e3f6eabfaf..3515d9c3d0 100644
--- a/tests/e2e/cluster_setup_test.go
+++ b/tests/e2e/cluster_setup_test.go
@@ -39,7 +39,9 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
 		clusterName = "postgresql-storage-class"
 		level       = tests.Highest
 	)
+
 	var namespace string
+
 	BeforeEach(func() {
 		if testLevelEnv.Depth < int(level) {
 			Skip("Test depth is lower than the amount requested for this test")
@@ -73,23 +75,24 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
 		err := env.Client.Get(env.Ctx, namespacedName, pod)
 		Expect(err).ToNot(HaveOccurred())

-		// Put something in the database. We'll check later if it still exists
-		appUser, appUserPass, err := testsUtils.GetCredentials(
-			clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
-		Expect(err).NotTo(HaveOccurred())
-		host, err := testsUtils.GetHostName(namespace, clusterName, env)
-		Expect(err).NotTo(HaveOccurred())
-		query := "CREATE TABLE IF NOT EXISTS test (id bigserial PRIMARY KEY, t text);"
-		_, _, err = testsUtils.RunQueryFromPod(
-			psqlClientPod,
-			host,
-			testsUtils.AppDBName,
-			appUser,
-			appUserPass,
-			query,
+		forward, conn, err := testsUtils.ForwardPSQLConnection(
 			env,
+			namespace,
+			clusterName,
+			testsUtils.AppDBName,
+			apiv1.ApplicationUserSecretSuffix,
 		)
-		Expect(err).ToNot(HaveOccurred())
+		Expect(err).NotTo(HaveOccurred())
+
+		query := "CREATE TABLE IF NOT EXISTS test (id bigserial PRIMARY KEY, t text);"
+		_, err = conn.Exec(query)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Here we need to close the connection and close the forward: if we don't do both
+		// steps, the PostgreSQL session stays idle, and PostgreSQL will not restart in time
+		// because of the connection that wasn't closed
+		_ = conn.Close()
+		forward.Close()

 		// We kill the pid 1 process.
 		// The pod should be restarted and the count of the restarts
@@ -118,18 +121,22 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun
 			return int32(-1), nil
 		}, timeout).Should(BeEquivalentTo(restart + 1))

-		Eventually(func() (bool, error) {
-			query = "SELECT * FROM test"
-			_, _, err = env.ExecCommandWithPsqlClient(
-				namespace,
-				clusterName,
-				psqlClientPod,
-				apiv1.ApplicationUserSecretSuffix,
-				testsUtils.AppDBName,
-				query,
-			)
-			return err == nil, err
-		}, timeout).Should(BeTrue())
+		AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env)
+
+		forward, conn, err = testsUtils.ForwardPSQLConnection(
+			env,
+			namespace,
+			clusterName,
+			testsUtils.AppDBName,
+			apiv1.ApplicationUserSecretSuffix,
+		)
+		Expect(err).NotTo(HaveOccurred())
+		defer func() {
+			forward.Close()
+		}()
+
+		_, err = conn.Exec("SELECT * FROM test")
+		Expect(err).NotTo(HaveOccurred())
 	})
 })
diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go
index 28e921a492..9c0346ea49 100644
--- a/tests/e2e/configuration_update_test.go
+++ b/tests/e2e/configuration_update_test.go
@@ -483,38 +483,38 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La

 			oldPrimaryPodName = primaryPodInfo.GetName()

-			appUser, appUserPass, err := utils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
+			forward, conn, err := utils.ForwardPSQLConnection(
+				env,
+				namespace,
+				clusterName,
+				utils.AppDBName,
+				apiv1.ApplicationUserSecretSuffix,
+			)
 			Expect(err).ToNot(HaveOccurred())
-			host, err := utils.GetHostName(namespace, clusterName, env)
+			defer func() {
+				// Here we need to close the connection and close the forward: if we don't do both
+				// steps, the PostgreSQL session stays idle, and PostgreSQL will not restart in time
+				// because of the connection that wasn't closed
+				_ = conn.Close()
+				forward.Close()
+			}()
+
+			query := "SELECT TO_CHAR(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');"
+			var startTime string
+			row := conn.QueryRow(query)
+			err = row.Scan(&startTime)
 			Expect(err).ToNot(HaveOccurred())

-			query := "select to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');"
-			stdout, _, cmdErr := utils.RunQueryFromPod(
-				psqlClientPod,
-				host,
-				utils.AppDBName,
-				appUser,
-				appUserPass,
-				query,
-				env)
-			Expect(cmdErr).ToNot(HaveOccurred())
-			primaryStartTime, err = cnpgTypes.ParseTargetTime(nil, strings.Trim(stdout, "\n"))
+			primaryStartTime, err = cnpgTypes.ParseTargetTime(nil, startTime)
 			Expect(err).NotTo(HaveOccurred())

-			query = "show max_connections"
-			stdout, _, cmdErr = utils.RunQueryFromPod(
-				psqlClientPod,
-				host,
-				utils.AppDBName,
-				appUser,
-				appUserPass,
-				query,
-				env)
-			Expect(cmdErr).ToNot(HaveOccurred())
-			v, err := strconv.Atoi(strings.Trim(stdout, "\n"))
-			Expect(err).NotTo(HaveOccurred())
+			query = "show max_connections"
+			row = conn.QueryRow(query)
+			var maxConnections int
+			err = row.Scan(&maxConnections)
+			Expect(err).ToNot(HaveOccurred())

-			newMaxConnectionsValue = v + 10
+			newMaxConnectionsValue = maxConnections + 10
 		})

 		By(fmt.Sprintf("updating max_connection value to %v", newMaxConnectionsValue), func() {
diff --git a/tests/e2e/connection_test.go b/tests/e2e/connection_test.go
index 34c2df06e7..67227ca118 100644
--- a/tests/e2e/connection_test.go
+++ b/tests/e2e/connection_test.go
@@ -53,18 +53,20 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity
 		superuserPassword string,
 		env
*utils.TestingEnvironment, ) { + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) // We test -rw, -ro and -r services with the app user and the superuser rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) rService := fmt.Sprintf("%v-r.%v.svc", clusterName, namespace) roService := fmt.Sprintf("%v-ro.%v.svc", clusterName, namespace) services := []string{rwService, roService, rService} for _, service := range services { - AssertConnection(service, "postgres", appDBName, superuserPassword, *psqlClientPod, 10, env) - AssertConnection(service, appDBUser, appDBName, appPassword, *psqlClientPod, 10, env) + AssertConnection(service, "postgres", appDBName, superuserPassword, primaryPod, 10, env) + AssertConnection(service, appDBUser, appDBName, appPassword, primaryPod, 10, env) } - AssertWritesToReplicaFails(psqlClientPod, roService, appDBName, appDBUser, appPassword) - AssertWritesToPrimarySucceeds(psqlClientPod, rwService, appDBName, appDBUser, appPassword) + AssertWritesToReplicaFails(primaryPod, roService, appDBName, appDBUser, appPassword) + AssertWritesToPrimarySucceeds(primaryPod, rwService, appDBName, appDBUser, appPassword) } Context("Auto-generated passwords", func() { diff --git a/tests/e2e/declarative_hibernation_test.go b/tests/e2e/declarative_hibernation_test.go index 0f1b164af7..3b88a044a9 100644 --- a/tests/e2e/declarative_hibernation_test.go +++ b/tests/e2e/declarative_hibernation_test.go @@ -55,7 +55,7 @@ var _ = Describe("Cluster declarative hibernation", func() { By("creating a new cluster", func() { AssertCreateCluster(namespace, clusterName, sampleFileCluster, env) // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterName, tableName) }) By("hibernating the new cluster", func() { @@ -114,7 +114,7 @@ var _ = Describe("Cluster declarative hibernation", func() { }) By("verifying the data has been preserved", func() { - AssertDataExpectedCount(namespace, clusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) }) }) }) diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go index dae77e85c1..19e183db2d 100644 --- a/tests/e2e/drain_node_test.go +++ b/tests/e2e/drain_node_test.go @@ -118,9 +118,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Load test data oldPrimary := clusterName + "-1" - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + AssertCreateTestData(env, namespace, clusterName, "test") // We create a mapping between the pod names and the UIDs of // their volumes. We do not expect the UIDs to change. 
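For reference, the connection pattern that replaces the shared psql client pod throughout these hunks can be sketched as follows. It relies on the ForwardPSQLConnection helper used above; the import paths and exact signatures are assumptions inferred from the call sites in this patch, not taken from the helper's definition.

package e2e

import (
	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"

	. "github.com/onsi/gomega"
)

// execOverForward sketches how a test can run a statement against a cluster
// through a port-forward tunnel instead of a dedicated psql client pod.
func execOverForward(env *testsUtils.TestingEnvironment, namespace, clusterName, query string) {
	forward, conn, err := testsUtils.ForwardPSQLConnection(
		env,
		namespace,
		clusterName,
		testsUtils.AppDBName,
		apiv1.ApplicationUserSecretSuffix,
	)
	// Check the error before deferring Close: on failure the forward may be nil.
	Expect(err).ToNot(HaveOccurred())
	defer func() {
		// Close the idle session first so PostgreSQL can drop it cleanly,
		// then tear down the tunnel.
		_ = conn.Close()
		forward.Close()
	}()

	// Keep a few idle connections around: if database/sql closes the session
	// right after Exec, PostgreSQL answers with an RST packet that kills the
	// port-forward tunnel (see the message-id linked in the hunks above).
	conn.SetMaxIdleConns(3)

	_, err = conn.Exec(query)
	Expect(err).ToNot(HaveOccurred())
}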
@@ -181,7 +179,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) // Expect the (previously created) test data to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) + primary, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 120) @@ -232,9 +230,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Load test data oldPrimary := clusterName + "-1" - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + AssertCreateTestData(env, namespace, clusterName, "test") // We create a mapping between the pod names and the UIDs of // their volumes. We do not expect the UIDs to change. @@ -299,7 +295,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) // Expect the (previously created) test data to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) + primary, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 120) @@ -364,9 +360,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) // Load test data - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + AssertCreateTestData(env, namespace, clusterName, "test") // We uncordon a cordoned node. New pods can go there. 
By("uncordon node for pod failover", func() { @@ -403,7 +397,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) // Expect the (previously created) test data to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) + primary, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 120) @@ -441,7 +435,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Load test data primary, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + AssertCreateTestData(env, namespace, clusterName, "test") // Drain the node containing the primary pod and store the list of running pods _ = nodes.DrainPrimaryNode(namespace, clusterName, diff --git a/tests/e2e/hibernation_test.go b/tests/e2e/hibernation_test.go index 29c1ab5b2f..58f68418ee 100644 --- a/tests/e2e/hibernation_test.go +++ b/tests/e2e/hibernation_test.go @@ -225,7 +225,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu var beforeHibernationPgDataPvcUID types.UID // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterName, tableName) clusterManifest, currentPrimary := getPrimaryAndClusterManifest(namespace, clusterName) By("collecting pgWal pvc details of current primary", func() { @@ -289,7 +289,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) // Test data should be present after hibernation off - AssertDataExpectedCount(namespace, clusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) } When("cluster setup with PG-WAL volume", func() { @@ -316,7 +316,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileClusterWithOutPGWalVolume, env) // Write a table and some data on the "app" database - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterName, tableName) clusterManifest, currentPrimary := getPrimaryAndClusterManifest(namespace, clusterName) @@ -363,7 +363,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) // Test data should be present after hibernation off - AssertDataExpectedCount(namespace, clusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) }) }) When("cluster hibernation after switchover", func() { diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index c8ed83f364..0f77768921 100644 --- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -171,11 +171,14 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verifying connectivity of new managed role", func() { + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + rwService := fmt.Sprintf("%v-rw.%v.svc", 
clusterName, namespace)
 			// assert connectable use username and password defined in secrets
-			AssertConnection(rwService, username, "postgres", password, *psqlClientPod, 30, env)
+			AssertConnection(rwService, username, "postgres", password, primaryPod, 30, env)

-			AssertConnection(rwService, userWithHashedPassword, "postgres", userWithHashedPassword, *psqlClientPod, 30, env)
+			AssertConnection(rwService, userWithHashedPassword, "postgres", userWithHashedPassword, primaryPod, 30, env)
 		})

 		By("ensuring the app role has been granted createdb in the managed stanza", func() {
@@ -210,10 +213,13 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
 			)
 			Expect(err).NotTo(HaveOccurred())

+			primaryPod, err := env.GetClusterPrimary(namespace, clusterName)
+			Expect(err).ToNot(HaveOccurred())
+
 			pass := string(appUserSecret.Data["password"])
 			rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
 			// assert connectable use username and password defined in secrets
-			AssertConnection(rwService, appUsername, "postgres", pass, *psqlClientPod, 30, env)
+			AssertConnection(rwService, appUsername, "postgres", pass, primaryPod, 30, env)
 		})

 		By("Verify unrealizable role configurations are shown in the status", func() {
@@ -233,6 +239,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
 		})

 		It("can update role attributes in the spec and they are applied in the database", func() {
+			var primaryPod *corev1.Pod
+			var err error
 			expectedLogin := false
 			expectedCreateDB := false
 			expectedCreateRole := true
@@ -253,7 +261,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
 			})

 			By("Verify the role has been updated in the database", func() {
-				primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
+				primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
 				Expect(err).ToNot(HaveOccurred())

 				Eventually(func() string {
@@ -264,7 +272,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
 					stdout, _, err := env.ExecQueryInInstancePod(
 						utils.PodLocator{
 							Namespace: namespace,
-							PodName:   primaryPodInfo.Name,
+							PodName:   primaryPod.Name,
 						},
 						utils.DatabaseName("postgres"),
 						query)
@@ -279,7 +287,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
 				dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require",
 					rwService, username, "postgres", password)
 				timeout := time.Second * 10
-				_, _, err := env.ExecCommand(env.Ctx, *psqlClientPod, specs.PostgresContainerName, &timeout,
+				_, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, &timeout,
 					"psql", dsn, "-tAc", "SELECT 1")
 				Expect(err).To(HaveOccurred())
 			})
@@ -296,7 +304,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
 			By("the connectivity should succeed again", func() {
 				rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
 				// assert connectable use username and password defined in secrets
-				AssertConnection(rwService, username, "postgres", password, *psqlClientPod, 30, env)
+				AssertConnection(rwService, username, "postgres", password, primaryPod, 30, env)
 			})
 		})

@@ -515,7 +523,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
 		})

 		It("Can update role password in secrets and db and verify the connectivity", func() {
+			var primaryPod *corev1.Pod
+			var err error
 			newPassword := "ThisIsNew"
+
 			By("update password from secrets", func() {
 				var secret corev1.Secret
 				err := env.Client.Get(env.Ctx, *secretNameSpacedName, &secret)
@@ -528,21 +539,22 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic
 			})

 			By("Verify connectivity using changed password in secret", func() {
+				primaryPod, err = env.GetClusterPrimary(namespace, clusterName)
+				Expect(err).ToNot(HaveOccurred())
+
 				rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
 				// assert connectable use username and password defined in secrets
-				AssertConnection(rwService, username, "postgres", newPassword, *psqlClientPod, 30, env)
+				AssertConnection(rwService, username, "postgres", newPassword, primaryPod, 30, env)
 			})

 			By("Update password in database", func() {
-				primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName)
-				Expect(err).ToNot(HaveOccurred())
-
 				query := fmt.Sprintf("ALTER ROLE %s WITH PASSWORD %s",
 					username, pq.QuoteLiteral(newPassword))

 				_, _, err = env.ExecQueryInInstancePod(
 					utils.PodLocator{
 						Namespace: namespace,
-						PodName:   primaryPodInfo.Name,
+						PodName:   primaryPod.Name,
 					},
 					utils.DatabaseName("postgres"),
 					query)
@@ -551,7 +563,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic

 			By("Verify password in secrets is still valid", func() {
 				rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace)
-				AssertConnection(rwService, username, "postgres", newPassword, *psqlClientPod, 60, env)
+				AssertConnection(rwService, username, "postgres", newPassword, primaryPod, 60, env)
 			})
 		})
diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go
index 34df963db0..9907f59266 100644
--- a/tests/e2e/metrics_test.go
+++ b/tests/e2e/metrics_test.go
@@ -149,9 +149,9 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
 		// Create the cluster
 		AssertCreateCluster(namespace, metricsClusterName, clusterMetricsDBFile, env)
-		AssertCreationOfTestDataForTargetDB(namespace, metricsClusterName, targetDBOne, testTableName, psqlClientPod)
-		AssertCreationOfTestDataForTargetDB(namespace, metricsClusterName, targetDBTwo, testTableName, psqlClientPod)
-		AssertCreationOfTestDataForTargetDB(namespace, metricsClusterName, targetDBSecret, testTableName, psqlClientPod)
+		AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBOne, testTableName)
+		AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBTwo, testTableName)
+		AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBSecret, testTableName)

 		cluster, err := env.GetCluster(namespace, metricsClusterName)
 		Expect(err).ToNot(HaveOccurred())
@@ -287,22 +287,23 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() {
 			srcClusterDatabaseName,
 			replicaClusterSampleFile,
 			testTableName,
-			psqlClientPod)
+		)

 		By(fmt.Sprintf("grant select permission for %v table to pg_monitor", testTableName), func() {
-			cmd := fmt.Sprintf("GRANT SELECT ON %v TO pg_monitor", testTableName)
-			appUser, appUserPass, err := utils.GetCredentials(srcClusterName, namespace, apiv1.ApplicationUserSecretSuffix, env)
-			Expect(err).ToNot(HaveOccurred())
-			host, err := utils.GetHostName(namespace, srcClusterName, env)
-			Expect(err).ToNot(HaveOccurred())
-			_, _, err = utils.RunQueryFromPod(
-				psqlClientPod,
-				host,
+			forward, conn, err := utils.ForwardPSQLConnection(
+				env,
+				namespace,
+				srcClusterName,
 				srcClusterDatabaseName,
-				appUser,
-				appUserPass,
-				cmd,
-				env)
+				apiv1.ApplicationUserSecretSuffix,
+			)
+			Expect(err).ToNot(HaveOccurred())
+			defer func() {
+				forward.Close()
+			}()
+
+			cmd := fmt.Sprintf("GRANT SELECT ON %v TO pg_monitor",
testTableName) + _, err = conn.Exec(cmd) Expect(err).ToNot(HaveOccurred()) }) replicaCluster, err := env.GetCluster(namespace, replicaClusterName) diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index fb09f0aa8a..d2088c7a68 100644 --- a/tests/e2e/operator_unavailable_test.go +++ b/tests/e2e/operator_unavailable_test.go @@ -59,9 +59,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te // Load test data currentPrimary := clusterName + "-1" - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + AssertCreateTestData(env, namespace, clusterName, "test") By("scaling down operator replicas to zero", func() { err := env.ScaleOperatorDeployment(0) @@ -123,7 +121,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te }, timeout).Should(BeTrue()) }) // Expect the test data previously created to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) + primary, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) }) @@ -142,9 +140,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te // Load test data currentPrimary := clusterName + "-1" - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(namespace, clusterName, "test", primary) + AssertCreateTestData(env, namespace, clusterName, "test") operatorNamespace, err := env.GetOperatorNamespaceName() Expect(err).ToNot(HaveOccurred()) @@ -216,7 +212,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te }, timeout).Should(BeTrue()) }) // Expect the test data previously created to be available - primary, err = env.GetClusterPrimary(namespace, clusterName) + primary, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) }) diff --git a/tests/e2e/pg_basebackup_test.go b/tests/e2e/pg_basebackup_test.go index a364578c9f..59cb5542ad 100644 --- a/tests/e2e/pg_basebackup_test.go +++ b/tests/e2e/pg_basebackup_test.go @@ -52,7 +52,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun srcClusterName, err = env.GetResourceNameFromYAML(srcCluster) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, srcClusterName, srcCluster, env) - AssertCreateTestData(namespace, srcClusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, srcClusterName, tableName) }) It("using basic authentication", func() { @@ -65,9 +65,12 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun secretName := dstClusterName + apiv1.ApplicationUserSecretSuffix + primaryPod, err := env.GetClusterPrimary(namespace, dstClusterName) + Expect(err).ToNot(HaveOccurred()) + By("checking the dst cluster with auto generated app password connectable", func() { AssertApplicationDatabaseConnection(namespace, dstClusterName, - appUser, utils.AppDBName, "", secretName, psqlClientPod) + appUser, utils.AppDBName, "", secretName, primaryPod) }) By("update user application password for dst cluster and verify connectivity", func() { @@ -80,19 +83,30 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun 
utils.AppDBName, newPassword, secretName, - psqlClientPod) + primaryPod) }) By("checking data have been copied correctly", func() { - AssertDataExpectedCount(namespace, dstClusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, dstClusterName, tableName, 2) }) By("writing some new data to the dst cluster", func() { - insertRecordIntoTable(namespace, dstClusterName, tableName, 3, psqlClientPod) + forward, conn, err := utils.ForwardPSQLConnection( + env, + namespace, + dstClusterName, + utils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable(tableName, 3, conn) }) By("checking the src cluster was not modified", func() { - AssertDataExpectedCount(namespace, srcClusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, srcClusterName, tableName, 2) }) }) @@ -105,15 +119,26 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun AssertClusterIsReady(namespace, dstClusterName, testTimeouts[utils.ClusterIsReadySlow], env) By("checking data have been copied correctly", func() { - AssertDataExpectedCount(namespace, dstClusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, dstClusterName, tableName, 2) }) By("writing some new data to the dst cluster", func() { - insertRecordIntoTable(namespace, dstClusterName, tableName, 3, psqlClientPod) + forward, conn, err := utils.ForwardPSQLConnection( + env, + namespace, + dstClusterName, + utils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable(tableName, 3, conn) }) By("checking the src cluster was not modified", func() { - AssertDataExpectedCount(namespace, srcClusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, srcClusterName, tableName, 2) }) }) }) diff --git a/tests/e2e/pg_data_corruption_test.go b/tests/e2e/pg_data_corruption_test.go index 9d2ddc9e8c..996e886f12 100644 --- a/tests/e2e/pg_data_corruption_test.go +++ b/tests/e2e/pg_data_corruption_test.go @@ -58,7 +58,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( clusterName, err := env.GetResourceNameFromYAML(sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - AssertCreateTestData(namespace, clusterName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterName, tableName) By("gathering current primary pod and pvc", func() { oldPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) @@ -187,7 +187,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( }, 300).Should(BeTrue()) }) AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) - AssertDataExpectedCount(namespace, clusterName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 120) } diff --git a/tests/e2e/pgbouncer_test.go b/tests/e2e/pgbouncer_test.go index 7bb591540f..3e7f9542f1 100644 --- a/tests/e2e/pgbouncer_test.go +++ b/tests/e2e/pgbouncer_test.go @@ -50,7 +50,9 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), AssertCreateCluster(namespace, clusterName, sampleFile, env) }) JustAfterEach(func() { - DeleteTableUsingPgBouncerService(namespace, clusterName, 
poolerBasicAuthRWSampleFile, env, psqlClientPod) + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + DeleteTableUsingPgBouncerService(namespace, clusterName, poolerBasicAuthRWSampleFile, env, primaryPod) }) It("can connect to Postgres via pgbouncer service using basic authentication", func() { diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index 5b669b8347..977b0a7daf 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -85,7 +85,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSampleTLS, testTableName, - psqlClientPod) + ) replicaName, err := env.GetResourceNameFromYAML(replicaClusterSampleTLS) Expect(err).ToNot(HaveOccurred()) @@ -120,7 +120,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSampleBasicAuth, testTableName, - psqlClientPod) + ) AssertDetachReplicaModeCluster( namespace, @@ -163,7 +163,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, clusterTwoFile, testTableName, - psqlClientPod) + ) // turn the src cluster into a replica By("setting replica mode on the src cluster", func() { @@ -211,8 +211,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { }) By("creating a new data in the new source cluster", func() { - AssertCreateTestDataWithDatabaseName(namespace, clusterTwoName, sourceDBName, - "new_test_table", clusterTwoPrimary) + AssertCreateTestDataWithDatabaseName(env, namespace, clusterTwoName, sourceDBName, "new_test_table") }) By("checking that the data is present in the old src cluster", func() { @@ -256,7 +255,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSample, testTableName, - psqlClientPod) + ) // Get primary from replica cluster primaryReplicaCluster, err := env.GetClusterPrimary(replicaNamespace, replicaClusterName) @@ -341,7 +340,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSample, testTableName, - psqlClientPod) + ) }) }) @@ -412,7 +411,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { sourceDBName, replicaClusterSample, testTableName, - psqlClientPod) + ) }) }) }) diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go index b00cad1623..c70c87180c 100644 --- a/tests/e2e/suite_test.go +++ b/tests/e2e/suite_test.go @@ -25,6 +25,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/onsi/ginkgo/v2/types" "github.com/thoas/go-funk" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -42,17 +43,15 @@ import ( ) const ( - fixturesDir = "./fixtures" - RetryTimeout = utils.RetryTimeout - PollingTime = utils.PollingTime - psqlClientNamespace = "psql-client-namespace" + fixturesDir = "./fixtures" + RetryTimeout = utils.RetryTimeout + PollingTime = utils.PollingTime ) var ( env *utils.TestingEnvironment testLevelEnv *tests.TestEnvLevel testCloudVendorEnv *utils.TestEnvVendor - psqlClientPod *corev1.Pod expectedOperatorPodName string operatorPodWasRenamed bool operatorWasRestarted bool @@ -88,12 +87,8 @@ var _ = SynchronizedBeforeSuite(func() []byte { <-sternOperatorDoneChan }) - psqlPod, err := utils.GetPsqlClient(psqlClientNamespace, env) - Expect(err).ShouldNot(HaveOccurred()) - 
DeferCleanup(func() { - err := env.DeleteNamespaceAndWait(psqlClientNamespace, 300) - Expect(err).ToNot(HaveOccurred()) - }) + _ = corev1.AddToScheme(env.Scheme) + _ = appsv1.AddToScheme(env.Scheme) // Set up a global MinIO service on his own namespace err = env.CreateNamespace(minioEnv.Namespace) @@ -109,7 +104,6 @@ var _ = SynchronizedBeforeSuite(func() []byte { caSecret := minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName) minioEnv.CaSecretObj = *caSecret objs := map[string]corev1.Pod{ - "psql": *psqlPod, "minio": *minioClient, } @@ -147,7 +141,6 @@ var _ = SynchronizedBeforeSuite(func() []byte { panic(err) } - psqlClientPod = objs["psql"] minioEnv.Client = objs["minio"] }) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 899e49524a..4027611134 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -422,14 +422,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, TableName: table1, Tablespace: tablespace1, } - AssertCreateTestDataInTablespace(tl1, psqlClientPod) + AssertCreateTestDataInTablespace(env, tl1) tl2 := TableLocator{ Namespace: namespace, ClusterName: clusterName, TableName: table2, Tablespace: tablespace2, } - AssertCreateTestDataInTablespace(tl2, psqlClientPod) + AssertCreateTestDataInTablespace(env, tl2) primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) @@ -509,40 +509,49 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("verifying the correct data exists in the restored cluster", func() { - restoredPrimary, err := env.GetClusterPrimary(namespace, clusterToRestoreName) - Expect(err).ToNot(HaveOccurred()) - - AssertDataExpectedCount(namespace, clusterToRestoreName, table1, 2, restoredPrimary) - AssertDataExpectedCount(namespace, clusterToRestoreName, table2, 2, restoredPrimary) + AssertDataExpectedCount(env, namespace, clusterToRestoreName, table1, 2) + AssertDataExpectedCount(env, namespace, clusterToRestoreName, table2, 2) }) }) It(fmt.Sprintf("can create the cluster by recovery from volume snapshot backup with pitr %v", backupName), func() { By("inserting test data and creating WALs on the cluster to be snapshotted", func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterName, table1, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, table1, 4, psqlClientPod) + insertRecordIntoTable(table1, 3, conn) + insertRecordIntoTable(table1, 4, conn) - insertRecordIntoTable(namespace, clusterName, table2, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, table2, 4, psqlClientPod) + insertRecordIntoTable(table2, 3, conn) + insertRecordIntoTable(table2, 4, conn) // Because GetCurrentTimestamp() rounds down to the second and is executed // right after the creation of the test data, we wait for 1s to avoid not // including the newly created data within the recovery_target_time time.Sleep(1 * time.Second) // Get the recovery_target_time and pass it to the template engine - recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env, psqlClientPod) + recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) 
Expect(err).ToNot(HaveOccurred()) err = os.Setenv(recoveryTargetTimeEnv, recoveryTargetTime) Expect(err).ToNot(HaveOccurred()) // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterName, table1, 5, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, table1, 6, psqlClientPod) + insertRecordIntoTable(table1, 5, conn) + insertRecordIntoTable(table1, 6, conn) - insertRecordIntoTable(namespace, clusterName, table2, 5, psqlClientPod) - insertRecordIntoTable(namespace, clusterName, table2, 6, psqlClientPod) + insertRecordIntoTable(table2, 5, conn) + insertRecordIntoTable(table2, 6, conn) // Close and archive the current WAL file AssertArchiveWalOnMinio(namespace, clusterName, clusterName) @@ -580,10 +589,8 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("verifying the correct data exists in the restored cluster", func() { - recoveryPrimary, err := env.GetClusterPrimary(namespace, clusterToPITRName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCount(namespace, clusterToPITRName, table1, 4, recoveryPrimary) - AssertDataExpectedCount(namespace, clusterToPITRName, table2, 4, recoveryPrimary) + AssertDataExpectedCount(env, namespace, clusterToPITRName, table1, 4) + AssertDataExpectedCount(env, namespace, clusterToPITRName, table2, 4) }) }) }) diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go index 7cca6c938e..5d8455be05 100644 --- a/tests/e2e/update_user_test.go +++ b/tests/e2e/update_user_test.go @@ -62,10 +62,14 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC appSecretName := clusterName + apiv1.ApplicationUserSecretSuffix superUserSecretName := clusterName + apiv1.SuperUserSecretSuffix + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + By("update user application password", func() { const newPassword = "eeh2Zahohx" //nolint:gosec + AssertUpdateSecret("password", newPassword, appSecretName, namespace, clusterName, 30, env) - AssertConnection(host, testsUtils.AppUser, testsUtils.AppDBName, newPassword, *psqlClientPod, 60, env) + AssertConnection(host, testsUtils.AppUser, testsUtils.AppDBName, newPassword, primaryPod, 60, env) }) By("fail updating user application password with wrong user in secret", func() { @@ -78,7 +82,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC timeout := time.Second * 10 dsn := testsUtils.CreateDSN(host, newUser, testsUtils.AppDBName, newPassword, testsUtils.Require, 5432) - _, _, err := env.ExecCommand(env.Ctx, *psqlClientPod, + _, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, &timeout, "psql", dsn, "-tAc", "SELECT 1") Expect(err).To(HaveOccurred()) @@ -109,7 +113,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC const newPassword = "fi6uCae7" //nolint:gosec AssertUpdateSecret("password", newPassword, superUserSecretName, namespace, clusterName, 30, env) - AssertConnection(host, testsUtils.PostgresUser, testsUtils.PostgresDBName, newPassword, *psqlClientPod, 60, env) + AssertConnection(host, testsUtils.PostgresUser, testsUtils.PostgresDBName, newPassword, primaryPod, 60, env) }) }) }) @@ -138,6 +142,9 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi Name: secretName, } + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + By("ensure superuser access is 
disabled by default", func() { Eventually(func(g Gomega) { err = env.Client.Get(env.Ctx, namespacedName, &secret) @@ -145,13 +152,11 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) }, 200).Should(Succeed()) - pod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) timeout := time.Second * 10 // We should have the `postgres` user with a null password Eventually(func() string { - stdout, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &timeout, + stdout, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, &timeout, "psql", "-U", "postgres", "-tAc", "SELECT rolpassword IS NULL FROM pg_authid WHERE rolname='postgres'") if err != nil { @@ -181,7 +186,7 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi superUser, superUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.SuperUserSecretSuffix, env) Expect(err).ToNot(HaveOccurred()) - AssertConnection(host, superUser, testsUtils.PostgresDBName, superUserPass, *psqlClientPod, 60, env) + AssertConnection(host, superUser, testsUtils.PostgresDBName, superUserPass, primaryPod, 60, env) }) By("disable superuser access", func() { diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index b65ff80fbe..55718ee38c 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -242,21 +242,32 @@ var _ = Describe("Verify Volume Snapshot", By("inserting test data and creating WALs on the cluster to be snapshotted", func() { // Create a "test" table with values 1,2 - AssertCreateTestData(namespace, clusterToSnapshotName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterToSnapshotName, tableName) // Because GetCurrentTimestamp() rounds down to the second and is executed // right after the creation of the test data, we wait for 1s to avoid not // including the newly created data within the recovery_target_time time.Sleep(1 * time.Second) // Get the recovery_target_time and pass it to the template engine - recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterToSnapshotName, env, psqlClientPod) + recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterToSnapshotName, env) Expect(err).ToNot(HaveOccurred()) err = os.Setenv(recoveryTargetTimeEnv, recoveryTargetTime) Expect(err).ToNot(HaveOccurred()) + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterToSnapshotName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 4, psqlClientPod) + insertRecordIntoTable(tableName, 3, conn) + insertRecordIntoTable(tableName, 4, conn) // Close and archive the current WAL file AssertArchiveWalOnMinio(namespace, clusterToSnapshotName, clusterToSnapshotName) @@ -271,9 +282,7 @@ var _ = Describe("Verify Volume Snapshot", }) By("verifying the correct data exists in the restored cluster", func() { - restoredPrimary, err := env.GetClusterPrimary(namespace, clusterToRestoreName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCount(namespace, clusterToRestoreName, tableName, 2, 
restoredPrimary) + AssertDataExpectedCount(env, namespace, clusterToRestoreName, tableName, 2) }) }) }) @@ -351,12 +360,13 @@ var _ = Describe("Verify Volume Snapshot", By("creating the cluster on which to execute the backup", func() { AssertCreateCluster(namespace, clusterToBackupName, clusterToBackupFilePath, env) + AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadySlow], env) }) }) It("can create a declarative cold backup and restoring using it", func() { By("inserting test data", func() { - AssertCreateTestData(namespace, clusterToBackupName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterToBackupName, tableName) }) backupName, err := env.GetResourceNameFromYAML(backupFileFilePath) @@ -408,10 +418,15 @@ var _ = Describe("Verify Volume Snapshot", By("executing the restore", func() { CreateResourceFromFile(namespace, clusterToRestoreFilePath) + AssertClusterIsReady(namespace, + clusterToRestoreName, + testTimeouts[testUtils.ClusterIsReady], + env, + ) }) By("checking that the data is present on the restored cluster", func() { - AssertDataExpectedCount(namespace, clusterToRestoreName, tableName, 2, psqlClientPod) + AssertDataExpectedCount(env, namespace, clusterToRestoreName, tableName, 2) }) }) It("can take a snapshot targeting the primary", func() { @@ -592,12 +607,23 @@ var _ = Describe("Verify Volume Snapshot", }) By("inserting test data and creating WALs on the cluster to be snapshotted", func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterToSnapshotName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) // Create a "test" table with values 1,2 - AssertCreateTestData(namespace, clusterToSnapshotName, tableName, psqlClientPod) + AssertCreateTestData(env, namespace, clusterToSnapshotName, tableName) // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 3, psqlClientPod) - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 4, psqlClientPod) + insertRecordIntoTable(tableName, 3, conn) + insertRecordIntoTable(tableName, 4, conn) // Close and archive the current WAL file AssertArchiveWalOnMinio(namespace, clusterToSnapshotName, clusterToSnapshotName) @@ -658,9 +684,7 @@ var _ = Describe("Verify Volume Snapshot", }) By("verifying the correct data exists in the restored cluster", func() { - restoredPrimary, err := env.GetClusterPrimary(namespace, clusterToRestoreName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCount(namespace, clusterToRestoreName, tableName, 4, restoredPrimary) + AssertDataExpectedCount(env, namespace, clusterToRestoreName, tableName, 4) }) }) @@ -668,9 +692,20 @@ var _ = Describe("Verify Volume Snapshot", // insert some data after the snapshot is taken, we want to verify the data exists in // the new pod when cluster scaled up By("inserting more test data and creating WALs on the cluster snapshotted", func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterToSnapshotName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) // Insert 2 more rows which we expect not to be present at the end of the recovery - insertRecordIntoTable(namespace, clusterToSnapshotName, tableName, 5, psqlClientPod) - insertRecordIntoTable(namespace, 
clusterToSnapshotName, tableName, 6, psqlClientPod) + insertRecordIntoTable(tableName, 5, conn) + insertRecordIntoTable(tableName, 6, conn) // Close and archive the current WAL file AssertArchiveWalOnMinio(namespace, clusterToSnapshotName, clusterToSnapshotName) @@ -705,8 +740,8 @@ var _ = Describe("Verify Volume Snapshot", podList, err := env.GetClusterReplicas(namespace, clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Expect(podList.Items).To(HaveLen(2)) - AssertDataExpectedCount(namespace, clusterToSnapshotName, tableName, 6, &podList.Items[0]) - AssertDataExpectedCount(namespace, clusterToSnapshotName, tableName, 6, &podList.Items[1]) + AssertDataExpectedCount(env, namespace, clusterToSnapshotName, tableName, 6) + AssertDataExpectedCount(env, namespace, clusterToSnapshotName, tableName, 6) }) }) }) diff --git a/tests/utils/psql_client.go b/tests/utils/psql_client.go deleted file mode 100644 index b64f03dd34..0000000000 --- a/tests/utils/psql_client.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" -) - -// GetPsqlClient gets a psql client pod for service connectivity -func GetPsqlClient(namespace string, env *TestingEnvironment) (*corev1.Pod, error) { - _ = corev1.AddToScheme(env.Scheme) - _ = appsv1.AddToScheme(env.Scheme) - pod := &corev1.Pod{} - err := env.CreateNamespace(namespace) - if err != nil { - return pod, err - } - pod, err = createPsqlClient(namespace, env) - if err != nil { - return pod, err - } - err = PodWaitForReady(env, pod, 300) - if err != nil { - return pod, err - } - return pod, nil -} - -// createPsqlClient creates a psql client -func createPsqlClient(namespace string, env *TestingEnvironment) (*corev1.Pod, error) { - seccompProfile := &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - } - - psqlPod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace,
- Name: "psql-client-0", - Labels: map[string]string{"run": "psql"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: specs.PostgresContainerName, - Image: versions.DefaultImageName, - // override the default Entrypoint ("docker-entrypoint.sh") of the image - Command: []string{"bash", "-c"}, - // override the default Cmd ("postgres") of the image - // sleep enough time to keep the pod running until we finish the E2E tests - Args: []string{"sleep 7200"}, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - SeccompProfile: seccompProfile, - }, - }, - }, - DNSPolicy: corev1.DNSClusterFirst, - RestartPolicy: corev1.RestartPolicyAlways, - SecurityContext: &corev1.PodSecurityContext{ - SeccompProfile: seccompProfile, - }, - }, - } - - // The psql pod might be deleted by, for example, a node drain. As such we need to use - // either a StatefulSet or a Deployment to make sure the pod is always getting recreated. - // To avoid having to reference a new random name created by the Deployment each time the - // pod gets recreated, we choose to use a StatefulSet. - psqlStatefulSet := appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "psql-client", - Labels: map[string]string{"run": "psql"}, - }, - Spec: appsv1.StatefulSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"run": "psql"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: psqlPod.ObjectMeta, - Spec: psqlPod.Spec, - }, - }, - } - - err := env.Client.Create(env.Ctx, &psqlStatefulSet) - if err != nil { - return &corev1.Pod{}, err - } - - return psqlPod, nil -} diff --git a/tests/utils/psql_connection.go b/tests/utils/psql_connection.go new file mode 100644 index 0000000000..b88e07c50b --- /dev/null +++ b/tests/utils/psql_connection.go @@ -0,0 +1,227 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +import ( + "database/sql" + "fmt" + "net/http" + "os" + "strconv" + "time" + + "github.com/onsi/ginkgo/v2" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool" +) + +// PSQLForwardConnection manage the creation of a port forward to connect by psql client locally +type PSQLForwardConnection struct { + namespace string + pod string + stopChan chan struct{} + readyChan chan struct{} + pooler *pool.ConnectionPool + portForward *portforward.PortForwarder + err error +} + +// psqlForwardConnectionNew initialize and create the proper forward configuration +func psqlForwardConnectionNew(env *TestingEnvironment, namespace, pod string) (*PSQLForwardConnection, error) { + psqlc := &PSQLForwardConnection{} + if pod == "" { + return nil, fmt.Errorf("pod not provided") + } + psqlc.namespace = namespace + psqlc.pod = pod + + req := psqlc.createRequest(env) + + transport, upgrader, err := spdy.RoundTripperFor(env.RestClientConfig) + if err != nil { + return nil, err + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) + + psqlc.readyChan = make(chan struct{}, 1) + psqlc.stopChan = make(chan struct{}) + + psqlc.portForward, err = portforward.New( + dialer, + []string{"0:5432"}, + psqlc.stopChan, + psqlc.readyChan, + os.Stdout, + os.Stderr, + ) + + return psqlc, err +} + +func (psqlc *PSQLForwardConnection) createRequest(env *TestingEnvironment) *rest.Request { + return env.Interface.CoreV1(). + RESTClient(). + Post(). + Resource("pods"). + Namespace(psqlc.namespace). + Name(psqlc.pod). + SubResource("portforward") +} + +// startAndWait will begin the forward and wait to be ready +func (psqlc *PSQLForwardConnection) startAndWait() error { + go func() { + ginkgo.GinkgoWriter.Printf("Starting port-forward\n") + psqlc.err = psqlc.portForward.ForwardPorts() + if psqlc.err != nil { + ginkgo.GinkgoWriter.Printf("port-forward failed with error %s\n", psqlc.err.Error()) + return + } + }() + select { + case <-psqlc.readyChan: + ginkgo.GinkgoWriter.Printf("port-forward ready\n") + return nil + case <-psqlc.stopChan: + ginkgo.GinkgoWriter.Printf("port-forward closed\n") + return psqlc.err + } +} + +// GetPooler returns the connection Pooler +func (psqlc *PSQLForwardConnection) GetPooler() *pool.ConnectionPool { + return psqlc.pooler +} + +// getLocalPort gets the local port needed to connect to Postgres +func (psqlc *PSQLForwardConnection) getLocalPort() (string, error) { + forwardedPorts, err := psqlc.portForward.GetPorts() + if err != nil { + return "", err + } + + return strconv.Itoa(int(forwardedPorts[0].Local)), nil +} + +// Close will stop the forward and exit +func (psqlc *PSQLForwardConnection) Close() { + psqlc.portForward.Close() +} + +// createConnectionParameters return the parameters require to create a connection +// to the current forwarded port +func (psqlc *PSQLForwardConnection) createConnectionParameters(user, password string) (map[string]string, error) { + port, err := psqlc.getLocalPort() + if err != nil { + return nil, err + } + + return map[string]string{ + "host": "localhost", + "port": port, + "user": user, + "password": password, + }, nil +} + +// ForwardPSQLConnection simplifies the creation of forwarded connection to PostgreSQL cluster +func ForwardPSQLConnection( + env *TestingEnvironment, + namespace, + clusterName, + dbname, + secretSuffix 
string, +) (*PSQLForwardConnection, *sql.DB, error) { + user, pass, err := GetCredentials(clusterName, namespace, secretSuffix, env) + if err != nil { + return nil, nil, err + } + + return ForwardPSQLConnectionWithCreds(env, namespace, clusterName, dbname, user, pass) +} + +// ForwardPSQLConnectionWithCreds does the same as ForwardPSQLConnection but without trying to +// get the credentials using the cluster +func ForwardPSQLConnectionWithCreds( + env *TestingEnvironment, + namespace, + clusterName, + dbname, + userApp, + passApp string, +) (*PSQLForwardConnection, *sql.DB, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return nil, nil, err + } + + forward, err := psqlForwardConnectionNew(env, namespace, cluster.Status.CurrentPrimary) + if err != nil { + return nil, nil, err + } + + if err = forward.startAndWait(); err != nil { + return nil, nil, err + } + + connParameters, err := forward.createConnectionParameters(userApp, passApp) + if err != nil { + return nil, nil, err + } + + forward.pooler = pool.NewPostgresqlConnectionPool(configfile.CreateConnectionString(connParameters)) + conn, err := forward.pooler.Connection(dbname) + if err != nil { + return nil, nil, err + } + conn.SetMaxOpenConns(10) + conn.SetMaxIdleConns(10) + conn.SetConnMaxLifetime(time.Hour) + conn.SetConnMaxIdleTime(time.Hour) + + return forward, conn, err +} + +// RunQueryRowOverForward runs QueryRow with a given query, returning the result Row +func RunQueryRowOverForward( + env *TestingEnvironment, + namespace, + clusterName, + dbname, + secretSuffix, + query string, +) (*sql.Row, error) { + forward, conn, err := ForwardPSQLConnection( + env, + namespace, + clusterName, + dbname, + secretSuffix, + ) + if err != nil { + return nil, err + } + defer func() { + forward.Close() + }() + + return conn.QueryRow(query), nil +} diff --git a/tests/utils/time.go b/tests/utils/time.go index c89738e91e..ecce38b9bd 100644 --- a/tests/utils/time.go +++ b/tests/utils/time.go @@ -17,36 +17,27 @@ limitations under the License. 
package utils import ( - "strings" - - corev1 "k8s.io/api/core/v1" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" ) // GetCurrentTimestamp getting current time stamp from postgres server -func GetCurrentTimestamp(namespace, clusterName string, env *TestingEnvironment, podName *corev1.Pod) (string, error) { - host, err := GetHostName(namespace, clusterName, env) - if err != nil { - return "", err - } - appUser, appUserPass, err := GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - if err != nil { - return "", err - } - query := "select TO_CHAR(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS.US');" - stdOut, _, err := RunQueryFromPod( - podName, - host, - AppDBName, - appUser, - appUserPass, - query, +func GetCurrentTimestamp(namespace, clusterName string, env *TestingEnvironment) (string, error) { + row, err := RunQueryRowOverForward( env, + namespace, + clusterName, + AppDBName, + apiv1.ApplicationUserSecretSuffix, + "select TO_CHAR(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS.US');", ) if err != nil { return "", err } - currentTimestamp := strings.Trim(stdOut, "\n") + + var currentTimestamp string + if err = row.Scan(&currentTimestamp); err != nil { + return "", err + } + return currentTimestamp, nil } From 101979ec93816e3e5ee338041dca3b19c7dba159 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Thu, 3 Oct 2024 10:21:41 +0200 Subject: [PATCH 033/836] ci: sync api to api repo (#5679) Trigger the `sync-api` workflow on the `API` repository when pushing to the `main` branch. Signed-off-by: Francesco Canovai --- .github/workflows/sync-api.yml | 17 +++++++++++++++++ api/v1/cluster_funcs.go | 5 ----- api/v1/zz_api_repo_funcs_to_copy.go | 27 +++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/sync-api.yml create mode 100644 api/v1/zz_api_repo_funcs_to_copy.go diff --git a/.github/workflows/sync-api.yml b/.github/workflows/sync-api.yml new file mode 100644 index 0000000000..8160eb257e --- /dev/null +++ b/.github/workflows/sync-api.yml @@ -0,0 +1,17 @@ +name: Sync API + +on: + push: + branches: + - main + +jobs: + trigger-sync: + runs-on: ubuntu-latest + steps: + - name: Invoke repository dispatch + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.REPO_GHA_PAT }} + repository: cloudnative-pg/api + event-type: sync-api diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 3fc919188c..41206f8f21 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -136,11 +136,6 @@ func (status *ClusterStatus) GetAvailableArchitecture(archName string) *Availabl return nil } -// DeepCopyInto needs to be manually added for the controller-gen compiler to work correctly, given that it cannot -// generate the DeepCopyInto for the regexp type. -// The method is empty because we don't want to transfer the cache when invoking DeepCopyInto -func (receiver synchronizeReplicasCache) DeepCopyInto(*synchronizeReplicasCache) {} - func (r *SynchronizeReplicasConfiguration) compileRegex() []error { if r == nil { return nil diff --git a/api/v1/zz_api_repo_funcs_to_copy.go b/api/v1/zz_api_repo_funcs_to_copy.go new file mode 100644 index 0000000000..f7cbea2733 --- /dev/null +++ b/api/v1/zz_api_repo_funcs_to_copy.go @@ -0,0 +1,27 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// IMPORTANT: +// This file contains the functions that need to be copied from the api/v1 package to the cloudnative-pg/api +// repository. This is currently required because the controller-gen tool cannot generate DeepCopyInto for the +// regexp type. This will be removed once the controller-gen tool supports this feature. + +// DeepCopyInto needs to be manually added for the controller-gen compiler to work correctly, given that it cannot +// generate the DeepCopyInto for the regexp type. +// The method is empty because we don't want to transfer the cache when invoking DeepCopyInto. +func (receiver synchronizeReplicasCache) DeepCopyInto(*synchronizeReplicasCache) {} From 1900d119d8bffd372136ee5cb967974c33b925b0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 3 Oct 2024 23:13:25 +0200 Subject: [PATCH 034/836] chore(deps): update dependency redhat-openshift-ecosystem/openshift-preflight to v1.10.1 (main) (#5714) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9fba4183a6..d9577c7cd0 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ SPELLCHECK_VERSION ?= 0.42.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 OPM_VERSION ?= v1.47.0 -PREFLIGHT_VERSION ?= 1.10.0 +PREFLIGHT_VERSION ?= 1.10.1 OPENSHIFT_VERSIONS ?= v4.12-v4.17 ARCH ?= amd64 From 097f09986e01ae3cf9adee3846431be460635ca1 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 4 Oct 2024 12:09:59 +0200 Subject: [PATCH 035/836] fix(backup,plugin): add `BackupStartingCondition` condition to cluster while using the plugin method (#5697) The operator now correctly adds the `BackupStartingCondition` to the cluster resource while taking a backup with a plugin. Signed-off-by: Armando Ruocco --- pkg/management/postgres/backup.go | 1 - pkg/management/postgres/webserver/plugin_backup.go | 9 +++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go index f50fbd477a..54d2a88af9 100644 --- a/pkg/management/postgres/backup.go +++ b/pkg/management/postgres/backup.go @@ -209,7 +209,6 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { // Update backup status in cluster conditions on startup if err := b.retryWithRefreshedCluster(ctx, func() error { - // TODO: this condition is set only here, never removed or handled? 
return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) }); err != nil { b.Log.Error(err, "Error changing backup condition (backup started)") diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 0e253ab393..bbd1c993bd 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -100,6 +100,15 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { backupLog.Info("Plugin backup started") b.Recorder.Event(b.Backup, "Normal", "Starting", "Backup started") + // Update backup status in cluster conditions on startup + if err := b.retryWithRefreshedCluster(ctx, func() error { + return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) + }); err != nil { + backupLog.Error(err, "Error changing backup condition (backup started)") + // We do not terminate here because we could still have a good backup + // even if we are unable to communicate with the Kubernetes API server + } + response, err := cli.Backup( ctx, b.Cluster, From a24e34f1b4a013a72c92ce6eecd60807755d46e8 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 4 Oct 2024 16:38:26 +0200 Subject: [PATCH 036/836] docs: clarify use cases for `pg_basebackup` bootstrap (#5720) Closes #5719 Signed-off-by: Gabriele Bartolini Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi Co-authored-by: Jaime Silvela --- .wordlist-en-custom.txt | 1 + docs/src/bootstrap.md | 77 ++++++++++++++++++++++++----------------- 2 files changed, 47 insertions(+), 31 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 26de5d448c..77385b9ac0 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -500,6 +500,7 @@ allowPrivilegeEscalation allowVolumeExpansion amd angus +anonymization api apiGroup apiGroups diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 3cf3cb41b1..87525b4679 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -389,44 +389,59 @@ to the ["Recovery" section](recovery.md). ### Bootstrap from a live cluster (`pg_basebackup`) -The `pg_basebackup` bootstrap mode lets you create a new cluster (*target*) as -an exact physical copy of an existing and **binary compatible** PostgreSQL -instance (*source*), through a valid *streaming replication* connection. -The source instance can be either a primary or a standby PostgreSQL server. +The `pg_basebackup` bootstrap mode allows you to create a new cluster +(*target*) as an exact physical copy of an existing and **binary-compatible** +PostgreSQL instance (*source*) managed by CloudNativePG, using a valid +*streaming replication* connection. The source instance can either be a primary +or a standby PostgreSQL server. It’s crucial to thoroughly review the +requirements section below, as the pros and cons of PostgreSQL physical +replication fully apply. + +The primary use cases for this method include: + +- Reporting and business intelligence clusters that need to be regenerated + periodically (daily, weekly) +- Test databases containing live data that require periodic regeneration + (daily, weekly, monthly) and anonymization +- Rapid spin-up of a standalone replica cluster +- Physical migrations of CloudNativePG clusters to different namespaces or + Kubernetes clusters -The primary use case for this method is represented by **migrations** to CloudNativePG, -either from outside Kubernetes or within Kubernetes (e.g., from another operator). 
+!!! Important + Avoid using this method, based on physical replication, to migrate an + existing PostgreSQL cluster outside of Kubernetes into CloudNativePG unless you + are completely certain that all requirements are met and the operation has been + thoroughly tested. The CloudNativePG community does not endorse this approach + for such use cases and recommends using logical import instead. It is + exceedingly rare that all requirements for physical replication are met in a + way that seamlessly works with CloudNativePG. !!! Warning - The current implementation creates a *snapshot* of the origin PostgreSQL - instance when the cloning process terminates and immediately starts - the created cluster. See ["Current limitations"](#current-limitations) below for details. - -Similar to the case of the `recovery` bootstrap method, once the clone operation -completes, the operator will take ownership of the target cluster, starting from -the first instance. This includes overriding some configuration parameters, as -required by CloudNativePG, resetting the superuser password, creating -the `streaming_replica` user, managing the replicas, and so on. The resulting -cluster will be completely independent of the source instance. + In its current implementation, this method clones the source PostgreSQL + instance, thereby creating a *snapshot*. Once the cloning process has finished, + the new cluster is immediately started. + Refer to ["Current limitations"](#current-limitations) for more details. + +Similar to the `recovery` bootstrap method, once the cloning operation is +complete, the operator takes full ownership of the target cluster, starting +from the first instance. This includes overriding certain configuration +parameters as required by CloudNativePG, resetting the superuser password, +creating the `streaming_replica` user, managing replicas, and more. The +resulting cluster operates independently from the source instance. !!! Important - Configuring the network between the target instance and the source instance - goes beyond the scope of CloudNativePG documentation, as it depends - on the actual context and environment. + Configuring the network connection between the target and source instances + lies outside the scope of CloudNativePG documentation, as it depends heavily on + the specific context and environment. -The streaming replication client on the target instance, which will be -transparently managed by `pg_basebackup`, can authenticate itself on the source -instance in any of the following ways: +The streaming replication client on the target instance, managed transparently +by `pg_basebackup`, can authenticate on the source instance using one of the +following methods: -1. via [username/password](#usernamepassword-authentication) -2. via [TLS client certificate](#tls-certificate-authentication) +1. [Username/password](#usernamepassword-authentication) +2. [TLS client certificate](#tls-certificate-authentication) -The latter is the recommended one if you connect to a source managed -by CloudNativePG or configured for TLS authentication. -The first option is, however, the most common form of authentication to a -PostgreSQL server in general, and might be the easiest way if the source -instance is on a traditional environment outside Kubernetes. -Both cases are explained below. +Both authentication methods are detailed below. 
#### Requirements @@ -650,7 +665,7 @@ instance using a second connection (see the `--wal-method=stream` option for Once the backup is completed, the new instance will be started on a new timeline and diverge from the source. For this reason, it is advised to stop all write operations to the source database -before migrating to the target database in Kubernetes. +before migrating to the target database. !!! Important Before you attempt a migration, you must test both the procedure From 5d4195ca16a62bf24f989bda58a15a5f525adab7 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 4 Oct 2024 17:59:42 +0200 Subject: [PATCH 037/836] fix(backup,plugin): keep plugin connection open while running a backup (#5726) This patch keeps the plugin connection pool open when a backup is running. Before, it was closed as soon as the backup request was correctly received. Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- api/v1/backup_funcs.go | 12 +++++ pkg/management/postgres/webserver/local.go | 4 +- .../postgres/webserver/plugin_backup.go | 48 ++++++++----------- 3 files changed, 33 insertions(+), 31 deletions(-) diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go index 9c56e8503d..c41e09ee12 100644 --- a/api/v1/backup_funcs.go +++ b/api/v1/backup_funcs.go @@ -24,6 +24,7 @@ import ( volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -230,6 +231,17 @@ func (backup *Backup) GetVolumeSnapshotConfiguration( return config } +// EnsureGVKIsPresent ensures that the GroupVersionKind (GVK) metadata is present in the Backup object. +// This is necessary because informers do not automatically include metadata inside the object. +// By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object. 
+func (backup *Backup) EnsureGVKIsPresent() { + backup.SetGroupVersionKind(schema.GroupVersionKind{ + Group: GroupVersion.Group, + Version: GroupVersion.Version, + Kind: BackupKind, + }) +} + // IsEmpty checks if the plugin configuration is empty or not func (configuration *BackupPluginConfiguration) IsEmpty() bool { return configuration == nil || len(configuration.Name) == 0 diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go index 89579382d6..61e99860e8 100644 --- a/pkg/management/postgres/webserver/local.go +++ b/pkg/management/postgres/webserver/local.go @@ -227,7 +227,5 @@ func (ws *localWebserverEndpoints) startPluginBackup( cluster *apiv1.Cluster, backup *apiv1.Backup, ) { - cmd := NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder) - cmd.Start(ctx) - cmd.Close() + NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder).Start(ctx) } diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index bbd1c993bd..0c0d18acdf 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -43,8 +43,6 @@ type PluginBackupCommand struct { Backup *apiv1.Backup Client client.Client Recorder record.EventRecorder - Log log.Logger - Plugins repository.Interface } // NewPluginBackupCommand initializes a BackupCommand object, taking a physical @@ -55,23 +53,13 @@ func NewPluginBackupCommand( client client.Client, recorder record.EventRecorder, ) *PluginBackupCommand { - logger := log.WithValues( - "pluginConfiguration", backup.Spec.PluginConfiguration, - "backupName", backup.Name, - "backupNamespace", backup.Name) - - plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { - logger.Error(err, "Error while discovering plugins") - } + backup.EnsureGVKIsPresent() return &PluginBackupCommand{ Cluster: cluster, Backup: backup, Client: client, Recorder: recorder, - Log: logger, - Plugins: plugins, } } @@ -80,31 +68,33 @@ func (b *PluginBackupCommand) Start(ctx context.Context) { go b.invokeStart(ctx) } -// Close closes all the connections to the plugins -func (b *PluginBackupCommand) Close() { - b.Plugins.Close() -} - func (b *PluginBackupCommand) invokeStart(ctx context.Context) { - backupLog := b.Log.WithValues( + contextLogger := log.FromContext(ctx).WithValues( + "pluginConfiguration", b.Backup.Spec.PluginConfiguration, "backupName", b.Backup.Name, "backupNamespace", b.Backup.Name) - cli, err := pluginClient.WithPlugins(ctx, b.Plugins, b.Cluster.Spec.Plugins.GetEnabledPluginNames()...) + plugins := repository.New() + if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + contextLogger.Error(err, "Error while discovering plugins") + } + defer plugins.Close() + + cli, err := pluginClient.WithPlugins(ctx, plugins, b.Cluster.Spec.Plugins.GetEnabledPluginNames()...) 
if err != nil { b.markBackupAsFailed(ctx, err) return } // record the backup beginning - backupLog.Info("Plugin backup started") + contextLogger.Info("Plugin backup started") b.Recorder.Event(b.Backup, "Normal", "Starting", "Backup started") // Update backup status in cluster conditions on startup if err := b.retryWithRefreshedCluster(ctx, func() error { return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) }); err != nil { - backupLog.Error(err, "Error changing backup condition (backup started)") + contextLogger.Error(err, "Error changing backup condition (backup started)") // We do not terminate here because we could still have a good backup // even if we are unable to communicate with the Kubernetes API server } @@ -120,7 +110,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { return } - backupLog.Info("Backup completed") + contextLogger.Info("Backup completed") b.Recorder.Event(b.Backup, "Normal", "Completed", "Backup completed") // Set the status to completed @@ -146,28 +136,30 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { } if err := postgres.PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil { - backupLog.Error(err, "Can't set backup status as completed") + contextLogger.Error(err, "Can't set backup status as completed") } // Update backup status in cluster conditions on backup completion if err := b.retryWithRefreshedCluster(ctx, func() error { return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) }); err != nil { - b.Log.Error(err, "Can't update the cluster with the completed backup data") + contextLogger.Error(err, "Can't update the cluster with the completed backup data") } } func (b *PluginBackupCommand) markBackupAsFailed(ctx context.Context, failure error) { + contextLogger := log.FromContext(ctx) + backupStatus := b.Backup.GetStatus() // record the failure - b.Log.Error(failure, "Backup failed") + contextLogger.Error(failure, "Backup failed") b.Recorder.Event(b.Backup, "Normal", "Failed", "Backup failed") // update backup status as failed backupStatus.SetAsFailed(failure) if err := postgres.PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil { - b.Log.Error(err, "Can't mark backup as failed") + contextLogger.Error(err, "Can't mark backup as failed") // We do not terminate here because we still want to set the condition on the cluster. 
} @@ -180,7 +172,7 @@ func (b *PluginBackupCommand) markBackupAsFailed(ctx context.Context, failure er b.Cluster.Status.LastFailedBackup = utils.GetCurrentTimestampWithFormat(time.RFC3339) return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) }); failErr != nil { - b.Log.Error(failErr, "while setting cluster condition for failed backup") + contextLogger.Error(failErr, "while setting cluster condition for failed backup") } } From 920e4e5f3bfb828033034baa3197bb7c54764428 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 6 Oct 2024 21:48:43 +0200 Subject: [PATCH 038/836] chore(deps): update dependency rook/rook to v1.15.3 (main) (#5718) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 07120f15b0..d7a95ec095 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ env: GOLANG_VERSION: "1.23.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.24.0" - ROOK_VERSION: "v1.15.2" + ROOK_VERSION: "v1.15.3" EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" From d497f055a7f970c7589b1386d77773a327eb1e74 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 00:23:11 +0200 Subject: [PATCH 039/836] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.77.1 (main) (#5659) https://github.com/prometheus-operator/prometheus-operator `v0.75.2` -> `v0.77.1` go `1.22.0` -> `1.23` --- go.mod | 4 ++-- go.sum | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index d430eac802..92b74b60a5 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/cloudnative-pg/cloudnative-pg -go 1.22.0 +go 1.23 toolchain go1.23.2 @@ -27,7 +27,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 github.com/prometheus/client_golang v1.20.4 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 diff --git a/go.sum b/go.sum index f8365e307d..0d4f7caa1a 100644 --- a/go.sum +++ b/go.sum @@ -157,8 +157,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 h1:6UsAv+jAevuGO2yZFU/BukV4o9NKnFMOuoouSA4G0ns= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 h1:XGoEXT6WTTihO+MD8MAao+YaQIH905HbK0WK2lyo28k= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA= github.com/prometheus/client_golang v1.20.4 
h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= From 71e6a32789133b461c55d30c7aace33fe77fec11 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Mon, 7 Oct 2024 10:01:59 +0200 Subject: [PATCH 040/836] feat(database): add support for encoding, collate, and locale fields (#5573) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #5522 Signed-off-by: Jaime Silvela Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- .wordlist-en-custom.txt | 1 + api/v1/database_types.go | 30 ++++++++++ .../bases/postgresql.cnpg.io_databases.yaml | 36 ++++++++++++ docs/src/cloudnative-pg.v1.md | 42 ++++++++++++++ docs/src/samples/database-example-icu.yaml | 16 +++++ .../controller/database_controller_sql.go | 58 +++++++++++++++---- .../database_controller_sql_test.go | 26 +++++++++ .../declarative_database_management_test.go | 45 +++++++++++--- .../database.yaml.template | 3 + 9 files changed, 240 insertions(+), 17 deletions(-) create mode 100644 docs/src/samples/database-example-icu.yaml diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 77385b9ac0..b4188b65aa 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -796,6 +796,7 @@ http httpGet https hugepages +icu ident imageCatalogRef imageName diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 8cb52ad810..38624c528f 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -57,6 +57,36 @@ type DatabaseSpec struct { // +optional Encoding string `json:"encoding,omitempty"` + // The locale (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale is immutable" + // +optional + Locale string `json:"locale,omitempty"` + + // The locale provider (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale_provider is immutable" + // +optional + LocaleProvider string `json:"locale_provider,omitempty"` + + // The LC_COLLATE (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="lc_collate is immutable" + // +optional + LcCollate string `json:"lc_collate,omitempty"` + + // The LC_CTYPE (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="lc_ctype is immutable" + // +optional + LcCtype string `json:"lc_ctype,omitempty"` + + // The ICU_LOCALE (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icu_locale is immutable" + // +optional + IcuLocale string `json:"icu_locale,omitempty"` + + // The ICU_RULES (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icu_rules is immutable" + // +optional + IcuRules string `json:"icu_rules,omitempty"` + // True when the database is a template // +optional IsTemplate *bool `json:"isTemplate,omitempty"` diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index b348c25dd9..4ee6935904 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -93,9 +93,45 @@ spec: x-kubernetes-validations: - message: encoding is immutable rule: self == oldSelf + icu_locale: + description: The ICU_LOCALE (cannot be changed) + type: string + x-kubernetes-validations: + - message: 
icu_locale is immutable + rule: self == oldSelf + icu_rules: + description: The ICU_RULES (cannot be changed) + type: string + x-kubernetes-validations: + - message: icu_rules is immutable + rule: self == oldSelf isTemplate: description: True when the database is a template type: boolean + lc_collate: + description: The LC_COLLATE (cannot be changed) + type: string + x-kubernetes-validations: + - message: lc_collate is immutable + rule: self == oldSelf + lc_ctype: + description: The LC_CTYPE (cannot be changed) + type: string + x-kubernetes-validations: + - message: lc_ctype is immutable + rule: self == oldSelf + locale: + description: The locale (cannot be changed) + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + locale_provider: + description: The locale provider (cannot be changed) + type: string + x-kubernetes-validations: + - message: locale_provider is immutable + rule: self == oldSelf name: description: The name inside PostgreSQL type: string diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 868b609256..22eb5d401e 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2314,6 +2314,48 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-

   <p>The encoding (cannot be changed)</p>
</td>
</tr>
+<tr><td><code>locale</code><br/>
+<em>string</em>
+</td>
+<td>
+   <p>The locale (cannot be changed)</p>
+</td>
+</tr>
+<tr><td><code>locale_provider</code><br/>
+<em>string</em>
+</td>
+<td>
+   <p>The locale provider (cannot be changed)</p>
+</td>
+</tr>
+<tr><td><code>lc_collate</code><br/>
+<em>string</em>
+</td>
+<td>
+   <p>The LC_COLLATE (cannot be changed)</p>
+</td>
+</tr>
+<tr><td><code>lc_ctype</code><br/>
+<em>string</em>
+</td>
+<td>
+   <p>The LC_CTYPE (cannot be changed)</p>
+</td>
+</tr>
+<tr><td><code>icu_locale</code><br/>
+<em>string</em>
+</td>
+<td>
+   <p>The ICU_LOCALE (cannot be changed)</p>
+</td>
+</tr>
+<tr><td><code>icu_rules</code><br/>
+<em>string</em>
+</td>
+<td>
+   <p>The ICU_RULES (cannot be changed)</p>
+</td>
+</tr>
<tr><td><code>isTemplate</code><br/>
bool diff --git a/docs/src/samples/database-example-icu.yaml b/docs/src/samples/database-example-icu.yaml new file mode 100644 index 0000000000..7a6bba7e4d --- /dev/null +++ b/docs/src/samples/database-example-icu.yaml @@ -0,0 +1,16 @@ +# NOTE: this manifest will only work properly if the Postgres version supports +# ICU locales and rules (version 16 and newer) +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: db-icu +spec: + name: declarative-icu + owner: app + encoding: UTF8 + locale_provider: icu + icu_locale: en + icu_rules: fr + template: template0 + cluster: + name: cluster-example diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index 1cf32d83a0..cd01a4f926 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -20,7 +20,9 @@ import ( "context" "database/sql" "fmt" + "strings" + "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/pgx/v5" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -56,27 +58,54 @@ func createDatabase( db *sql.DB, obj *apiv1.Database, ) error { - sqlCreateDatabase := fmt.Sprintf("CREATE DATABASE %s ", pgx.Identifier{obj.Spec.Name}.Sanitize()) + var sqlCreateDatabase strings.Builder + sqlCreateDatabase.WriteString(fmt.Sprintf("CREATE DATABASE %s ", pgx.Identifier{obj.Spec.Name}.Sanitize())) if len(obj.Spec.Owner) > 0 { - sqlCreateDatabase += fmt.Sprintf(" OWNER %s", pgx.Identifier{obj.Spec.Owner}.Sanitize()) + sqlCreateDatabase.WriteString(fmt.Sprintf(" OWNER %s", pgx.Identifier{obj.Spec.Owner}.Sanitize())) } if len(obj.Spec.Template) > 0 { - sqlCreateDatabase += fmt.Sprintf(" TEMPLATE %s", pgx.Identifier{obj.Spec.Template}.Sanitize()) + sqlCreateDatabase.WriteString(fmt.Sprintf(" TEMPLATE %s", pgx.Identifier{obj.Spec.Template}.Sanitize())) } if len(obj.Spec.Tablespace) > 0 { - sqlCreateDatabase += fmt.Sprintf(" TABLESPACE %s", pgx.Identifier{obj.Spec.Tablespace}.Sanitize()) + sqlCreateDatabase.WriteString(fmt.Sprintf(" TABLESPACE %s", pgx.Identifier{obj.Spec.Tablespace}.Sanitize())) } if obj.Spec.AllowConnections != nil { - sqlCreateDatabase += fmt.Sprintf(" ALLOW_CONNECTIONS %v", *obj.Spec.AllowConnections) + sqlCreateDatabase.WriteString(fmt.Sprintf(" ALLOW_CONNECTIONS %v", *obj.Spec.AllowConnections)) } if obj.Spec.ConnectionLimit != nil { - sqlCreateDatabase += fmt.Sprintf(" CONNECTION LIMIT %v", *obj.Spec.ConnectionLimit) + sqlCreateDatabase.WriteString(fmt.Sprintf(" CONNECTION LIMIT %v", *obj.Spec.ConnectionLimit)) } if obj.Spec.IsTemplate != nil { - sqlCreateDatabase += fmt.Sprintf(" IS_TEMPLATE %v", *obj.Spec.IsTemplate) + sqlCreateDatabase.WriteString(fmt.Sprintf(" IS_TEMPLATE %v", *obj.Spec.IsTemplate)) + } + if obj.Spec.Encoding != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" ENCODING %s", pgx.Identifier{obj.Spec.Encoding}.Sanitize())) + } + if obj.Spec.Locale != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE %s", pgx.Identifier{obj.Spec.Locale}.Sanitize())) + } + if obj.Spec.LocaleProvider != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE_PROVIDER %s", pgx.Identifier{obj.Spec.LocaleProvider}.Sanitize())) + } + if obj.Spec.LcCollate != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" LC_COLLATE %s", pgx.Identifier{obj.Spec.LcCollate}.Sanitize())) + } + if obj.Spec.LcCtype != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" LC_CTYPE %s", pgx.Identifier{obj.Spec.LcCtype}.Sanitize())) + } + if obj.Spec.IcuLocale != "" { 
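		// Sketch, using the values from the docs/src/samples/database-example-icu.yaml
		// manifest above: once all the optional clauses are appended, this builder
		// yields approximately
		//
		//   CREATE DATABASE "declarative-icu" OWNER "app" TEMPLATE "template0"
		//     ENCODING "UTF8" LOCALE_PROVIDER "icu" ICU_LOCALE "en" ICU_RULES "fr"
		//
		// pgx.Identifier{...}.Sanitize() double-quotes each value, so every option
		// is emitted as a quoted identifier; the exact whitespace follows the
		// individual WriteString calls.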
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_LOCALE %s", pgx.Identifier{obj.Spec.IcuLocale}.Sanitize())) + } + if obj.Spec.IcuRules != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize())) } - _, err := db.ExecContext(ctx, sqlCreateDatabase) + contextLogger, ctx := log.SetupLogger(ctx) + + _, err := db.ExecContext(ctx, sqlCreateDatabase.String()) + if err != nil { + contextLogger.Error(err, "while creating database", "query", sqlCreateDatabase.String()) + } return err } @@ -86,6 +115,8 @@ func updateDatabase( db *sql.DB, obj *apiv1.Database, ) error { + contextLogger, ctx := log.SetupLogger(ctx) + if obj.Spec.AllowConnections != nil { changeAllowConnectionsSQL := fmt.Sprintf( "ALTER DATABASE %s WITH ALLOW_CONNECTIONS %v", @@ -93,6 +124,7 @@ func updateDatabase( *obj.Spec.AllowConnections) if _, err := db.ExecContext(ctx, changeAllowConnectionsSQL); err != nil { + contextLogger.Error(err, "while altering database", "query", changeAllowConnectionsSQL) return fmt.Errorf("while altering database %q with allow_connections %t: %w", obj.Spec.Name, *obj.Spec.AllowConnections, err) } @@ -105,6 +137,7 @@ func updateDatabase( *obj.Spec.ConnectionLimit) if _, err := db.ExecContext(ctx, changeConnectionsLimitSQL); err != nil { + contextLogger.Error(err, "while altering database", "query", changeConnectionsLimitSQL) return fmt.Errorf("while altering database %q with connection limit %d: %w", obj.Spec.Name, *obj.Spec.ConnectionLimit, err) } @@ -117,6 +150,7 @@ func updateDatabase( *obj.Spec.IsTemplate) if _, err := db.ExecContext(ctx, changeIsTemplateSQL); err != nil { + contextLogger.Error(err, "while altering database", "query", changeIsTemplateSQL) return fmt.Errorf("while altering database %q with is_template %t: %w", obj.Spec.Name, *obj.Spec.IsTemplate, err) } @@ -129,6 +163,7 @@ func updateDatabase( pgx.Identifier{obj.Spec.Owner}.Sanitize()) if _, err := db.ExecContext(ctx, changeOwnerSQL); err != nil { + contextLogger.Error(err, "while altering database", "query", changeOwnerSQL) return fmt.Errorf("while altering database %q owner %s to: %w", obj.Spec.Name, obj.Spec.Owner, err) } @@ -141,6 +176,7 @@ func updateDatabase( pgx.Identifier{obj.Spec.Tablespace}.Sanitize()) if _, err := db.ExecContext(ctx, changeTablespaceSQL); err != nil { + contextLogger.Error(err, "while altering database", "query", changeTablespaceSQL) return fmt.Errorf("while altering database %q tablespace %s: %w", obj.Spec.Name, obj.Spec.Tablespace, err) } @@ -154,11 +190,13 @@ func dropDatabase( db *sql.DB, obj *apiv1.Database, ) error { + contextLogger, ctx := log.SetupLogger(ctx) + query := fmt.Sprintf("DROP DATABASE IF EXISTS %s", pgx.Identifier{obj.Spec.Name}.Sanitize()) _, err := db.ExecContext( ctx, - fmt.Sprintf("DROP DATABASE IF EXISTS %s", pgx.Identifier{obj.Spec.Name}.Sanitize()), - ) + query) if err != nil { + contextLogger.Error(err, "while dropping database", "query", query) return fmt.Errorf("while dropping database %q: %w", obj.Spec.Name, err) } diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go index 444267b36e..b95a13e076 100644 --- a/internal/management/controller/database_controller_sql_test.go +++ b/internal/management/controller/database_controller_sql_test.go @@ -107,6 +107,32 @@ var _ = Describe("Managed Database SQL", func() { err = createDatabase(ctx, db, database) Expect(err).ToNot(HaveOccurred()) }) + + It("should create a new Database with locale and 
encoding kind fields", func(ctx SpecContext) { + database.Spec.Locale = "POSIX" + database.Spec.LocaleProvider = "icu" + database.Spec.LcCtype = "en_US.utf8" + database.Spec.LcCollate = "C" + database.Spec.Encoding = "LATIN1" + database.Spec.IcuLocale = "en" + database.Spec.IcuRules = "fr" + + expectedValue := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s "+ + "ENCODING %s LOCALE %s LOCALE_PROVIDER %s LC_COLLATE %s LC_CTYPE %s "+ + "ICU_LOCALE %s ICU_RULES %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), + pgx.Identifier{database.Spec.Encoding}.Sanitize(), pgx.Identifier{database.Spec.Locale}.Sanitize(), + pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), pgx.Identifier{database.Spec.LcCollate}.Sanitize(), + pgx.Identifier{database.Spec.LcCtype}.Sanitize(), + pgx.Identifier{database.Spec.IcuLocale}.Sanitize(), pgx.Identifier{database.Spec.IcuRules}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) + + err = createDatabase(ctx, db, database) + Expect(err).ToNot(HaveOccurred()) + }) }) Context("updateDatabase", func() { diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index ee9f59942a..6861d17a33 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "fmt" "time" "k8s.io/apimachinery/pkg/types" @@ -48,16 +49,15 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke Context("plain vanilla cluster", Ordered, func() { const ( namespacePrefix = "declarative-db" - databaseCrdName = "db-declarative" dbname = "declarative" ) var ( - clusterName, namespace string - database *apiv1.Database + clusterName, namespace, databaseObjectName string + database *apiv1.Database + err error ) BeforeAll(func() { - var err error // Create a cluster in a namespace we'll delete after the test namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) Expect(err).ToNot(HaveOccurred()) @@ -88,11 +88,28 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke }, 300).Should(Succeed()) } + assertDatabaseHasExpectedFields := func(namespace, primaryPod string, db apiv1.Database) { + query := fmt.Sprintf("select count(*) from pg_database where datname = '%s' "+ + "and encoding = %s and datctype = '%s' and datcollate = '%s'", + db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate) + Eventually(func(g Gomega) { + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: namespace, + PodName: primaryPod, + }, + "postgres", + query) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(stdout).Should(ContainSubstring("1")) + }, 30).Should(Succeed()) + } + When("Database CRD reclaim policy is set to retain (default) inside spec", func() { It("can add a declarative database", func() { By("applying Database CRD manifest", func() { CreateResourceFromFile(namespace, databaseManifest) - _, err := env.GetResourceNameFromYAML(databaseManifest) + databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest) Expect(err).NotTo(HaveOccurred()) }) By("ensuring the Database CRD succeeded reconciliation", func() { @@ -100,7 +117,7 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke database = &apiv1.Database{} databaseNamespacedName := types.NamespacedName{ Namespace: namespace, - 
Name: databaseCrdName, + Name: databaseObjectName, } Eventually(func(g Gomega) { @@ -110,11 +127,25 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) - By("verifying new database has been added", func() { + By("verifying new database has been created with the expected fields", func() { primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) assertDatabaseExists(namespace, primaryPodInfo.Name, dbname, true) + + // NOTE: the `pg_database` table in Postgres does not contain fields + // for the owner nor the template. + // Its fields are dependent on the version of Postgres, so we pick + // a subset that is available to check even on PG v12 + expectedDatabaseFields := apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + Name: "declarative", + LcCtype: "en_US.utf8", + LcCollate: "C", // this is the default value + Encoding: "0", // corresponds to SQL_ASCII + }, + } + assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, expectedDatabaseFields) }) }) diff --git a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template index afa83d0ccd..3ded03c50a 100644 --- a/tests/e2e/fixtures/declarative_databases/database.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template @@ -5,5 +5,8 @@ metadata: spec: name: declarative owner: app + lc_ctype: "en_US.utf8" + encoding: SQL_ASCII + template: template0 cluster: name: cluster-with-declarative-databases From d0a14c66a76f35fd45eb08096e07ee2bb7aeebfc Mon Sep 17 00:00:00 2001 From: Gabriele Quaresima Date: Mon, 7 Oct 2024 10:31:01 +0200 Subject: [PATCH 041/836] feat(database): prevent creation of Database objects on reserved PG databases (#5630) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #5627 Signed-off-by: Gabriele Quaresima Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- api/v1/database_types.go | 3 +++ config/crd/bases/postgresql.cnpg.io_databases.yaml | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 38624c528f..dd7bd58cf5 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -42,6 +42,9 @@ type DatabaseSpec struct { // The name inside PostgreSQL // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" + // +kubebuilder:validation:XValidation:rule="self != 'postgres'",message="the name postgres is reserved" + // +kubebuilder:validation:XValidation:rule="self != 'template0'",message="the name template0 is reserved" + // +kubebuilder:validation:XValidation:rule="self != 'template1'",message="the name template1 is reserved" Name string `json:"name"` // The owner diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index 4ee6935904..f49202505e 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -138,6 +138,12 @@ spec: x-kubernetes-validations: - message: name is immutable rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' owner: description: The owner type: string From 33d8a9471efc05171d3777cab9cc7ff69b13ca24 Mon Sep 17 00:00:00 
2001 From: Leonardo Cecchi Date: Mon, 7 Oct 2024 11:13:43 +0200 Subject: [PATCH 042/836] chore: move stringset to machinery (#5728) Signed-off-by: Leonardo Cecchi Signed-off-by: Francesco Canovai Co-authored-by: Francesco Canovai --- api/v1/cluster_funcs.go | 2 +- api/v1/cluster_funcs_test.go | 2 +- api/v1/cluster_webhook.go | 2 +- api/v1/pooler_webhook.go | 3 +- go.mod | 2 +- go.sum | 4 +- internal/cmd/plugin/status/status.go | 2 +- pkg/configfile/configfile.go | 3 +- pkg/multicache/multinamespaced_cache.go | 3 +- pkg/specs/roles.go | 2 +- pkg/stringset/stringset.go | 113 ------------------------ pkg/stringset/stringset_test.go | 76 ---------------- pkg/stringset/suite_test.go | 33 ------- pkg/utils/fencing.go | 3 +- 14 files changed, 12 insertions(+), 238 deletions(-) delete mode 100644 pkg/stringset/stringset.go delete mode 100644 pkg/stringset/stringset_test.go delete mode 100644 pkg/stringset/suite_test.go diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 41206f8f21..f3caf8d59c 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -28,13 +28,13 @@ import ( "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/cloudnative-pg/machinery/pkg/postgres/version" + "github.com/cloudnative-pg/machinery/pkg/stringset" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" "github.com/cloudnative-pg/cloudnative-pg/pkg/system" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index 7d459faca0..8d6f0950ac 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -22,12 +22,12 @@ import ( barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog" "github.com/cloudnative-pg/machinery/pkg/postgres/version" + "github.com/cloudnative-pg/machinery/pkg/stringset" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . 
"github.com/onsi/ginkgo/v2" diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go index 16b8c73c52..197358d786 100644 --- a/api/v1/cluster_webhook.go +++ b/api/v1/cluster_webhook.go @@ -27,6 +27,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/cloudnative-pg/machinery/pkg/postgres/version" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/cloudnative-pg/machinery/pkg/types" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" v1 "k8s.io/api/core/v1" @@ -44,7 +45,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) diff --git a/api/v1/pooler_webhook.go b/api/v1/pooler_webhook.go index c82205ed0c..24241a836a 100644 --- a/api/v1/pooler_webhook.go +++ b/api/v1/pooler_webhook.go @@ -20,6 +20,7 @@ import ( "fmt" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -27,8 +28,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) var ( diff --git a/go.mod b/go.mod index 92b74b60a5..b3734a9ea5 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 - github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1 + github.com/cloudnative-pg/machinery v0.0.0-20241007084552-267a543ce26f github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index 0d4f7caa1a..92f5c0833b 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8= -github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1 h1:qrxfp0vR+zqC+L1yTdQTqRHvnLLcVk4CdWB1RwLd8UE= -github.com/cloudnative-pg/machinery v0.0.0-20241001153943-0e5ba4f9a0e1/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= +github.com/cloudnative-pg/machinery v0.0.0-20241007084552-267a543ce26f h1:tdh7vyJBadzToa2pYYC5gERr35kum4N2571VWtXnkPk= +github.com/cloudnative-pg/machinery v0.0.0-20241007084552-267a543ce26f/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 41489ea976..1889975c55 100644 --- 
a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -27,6 +27,7 @@ import ( "time" "github.com/cheynewallace/tabby" + "github.com/cloudnative-pg/machinery/pkg/stringset" types "github.com/cloudnative-pg/machinery/pkg/types" "github.com/logrusorgru/aurora/v4" corev1 "k8s.io/api/core/v1" @@ -44,7 +45,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) diff --git a/pkg/configfile/configfile.go b/pkg/configfile/configfile.go index 14ac64bcc1..9b5aa1b584 100644 --- a/pkg/configfile/configfile.go +++ b/pkg/configfile/configfile.go @@ -23,9 +23,8 @@ import ( "strings" "github.com/cloudnative-pg/machinery/pkg/fileutils" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/lib/pq" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) // UpdatePostgresConfigurationFile search and replace options in a Postgres configuration file. diff --git a/pkg/multicache/multinamespaced_cache.go b/pkg/multicache/multinamespaced_cache.go index 80f69eecfb..a3222085db 100644 --- a/pkg/multicache/multinamespaced_cache.go +++ b/pkg/multicache/multinamespaced_cache.go @@ -24,12 +24,11 @@ import ( "fmt" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) type multiNamespaceCache struct { diff --git a/pkg/specs/roles.go b/pkg/specs/roles.go index e48a3b6ad4..f0d9bf4cb1 100644 --- a/pkg/specs/roles.go +++ b/pkg/specs/roles.go @@ -19,11 +19,11 @@ package specs import ( "slices" + "github.com/cloudnative-pg/machinery/pkg/stringset" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) // CreateRole create a role with the permissions needed by the instance manager diff --git a/pkg/stringset/stringset.go b/pkg/stringset/stringset.go deleted file mode 100644 index f5678ec4a0..0000000000 --- a/pkg/stringset/stringset.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package stringset implements a basic set of strings -package stringset - -import ( - "slices" -) - -// Data represent a set of strings -type Data struct { - innerMap map[string]struct{} -} - -// New create a new empty set of strings -func New() *Data { - return &Data{ - innerMap: make(map[string]struct{}), - } -} - -// From create a empty set of strings given -// a slice of strings -func From(strings []string) *Data { - result := New() - for _, value := range strings { - result.Put(value) - } - return result -} - -// FromKeys create a string set from the -// keys of a map -func FromKeys[T any](v map[string]T) *Data { - result := New() - for key := range v { - result.Put(key) - } - return result -} - -// Put a string in the set -func (set *Data) Put(key string) { - set.innerMap[key] = struct{}{} -} - -// Delete deletes a string from the set. If the string doesn't exist -// this is a no-op -func (set *Data) Delete(key string) { - delete(set.innerMap, key) -} - -// Has checks if a string is in the set or not -func (set *Data) Has(key string) bool { - _, ok := set.innerMap[key] - return ok -} - -// Len returns the map of the set -func (set *Data) Len() int { - return len(set.innerMap) -} - -// ToList returns the strings contained in this set as -// a string slice -func (set *Data) ToList() (result []string) { - result = make([]string, 0, len(set.innerMap)) - for key := range set.innerMap { - result = append(result, key) - } - return -} - -// ToSortedList returns the string container in this set -// as a sorted string slice -func (set *Data) ToSortedList() []string { - result := set.ToList() - slices.Sort(result) - return result -} - -// Eq compares two string sets for equality -func (set *Data) Eq(other *Data) bool { - if set == nil || other == nil { - return false - } - - if set.Len() != other.Len() { - return false - } - - for key := range set.innerMap { - if !other.Has(key) { - return false - } - } - - return true -} diff --git a/pkg/stringset/stringset_test.go b/pkg/stringset/stringset_test.go deleted file mode 100644 index abf6e5548b..0000000000 --- a/pkg/stringset/stringset_test.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package stringset - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("String set", func() { - It("starts as an empty set", func() { - Expect(New().Len()).To(Equal(0)) - }) - - It("starts with a list of strings", func() { - Expect(From([]string{"one", "two"}).Len()).To(Equal(2)) - Expect(From([]string{"one", "two", "two"}).Len()).To(Equal(2)) - }) - - It("store string keys", func() { - set := New() - Expect(set.Has("test")).To(BeFalse()) - Expect(set.Has("test2")).To(BeFalse()) - - set.Put("test") - Expect(set.Has("test")).To(BeTrue()) - Expect(set.Has("test2")).To(BeFalse()) - }) - - It("removes string keys", func() { - set := From([]string{"one", "two"}) - set.Delete("one") - Expect(set.ToList()).To(Equal([]string{"two"})) - }) - - It("constructs a string slice given a set", func() { - Expect(From([]string{"one", "two"}).ToList()).To(ContainElements("one", "two")) - }) - - It("compares two string set for equality", func() { - Expect(From([]string{"one", "two"}).Eq(From([]string{"one", "two"}))).To(BeTrue()) - Expect(From([]string{"one", "two"}).Eq(From([]string{"two", "three"}))).To(BeFalse()) - Expect(From([]string{"one", "two"}).Eq(From([]string{"one", "two", "three"}))).To(BeFalse()) - Expect(From([]string{"one", "two", "three"}).Eq(From([]string{"one", "two"}))).To(BeFalse()) - }) - - It("constructs a sorted string slice given a set", func() { - Expect(From([]string{"one", "two", "three", "four"}).ToSortedList()).To( - HaveExactElements("four", "one", "three", "two")) - Expect(New().ToList()).To(BeEmpty()) - }) - - It("constructs a string set from a map having string as keys", func() { - Expect(FromKeys(map[string]int{ - "one": 1, - "two": 2, - "three": 3, - }).ToSortedList()).To( - HaveExactElements("one", "three", "two"), - ) - }) -}) diff --git a/pkg/stringset/suite_test.go b/pkg/stringset/suite_test.go deleted file mode 100644 index bb29e64601..0000000000 --- a/pkg/stringset/suite_test.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package stringset - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
- -func TestConfigFile(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecs(t, "Configuration File Parsing Suite") -} diff --git a/pkg/utils/fencing.go b/pkg/utils/fencing.go index 7cedb1cbf5..c7ec6c37aa 100644 --- a/pkg/utils/fencing.go +++ b/pkg/utils/fencing.go @@ -24,12 +24,11 @@ import ( "slices" "sort" + "github.com/cloudnative-pg/machinery/pkg/stringset" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/stringset" ) var ( From 1f2841e7c1b0dee3c572a9d8dfaf82345fc2b35d Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Mon, 7 Oct 2024 11:46:30 +0200 Subject: [PATCH 043/836] docs: provide guidelines for commercial support (#5712) Define standard guidelines in the `SUPPORT.md` file for professional organisations and individuals who want to be listed as support providers for CloudNativePG. Closes #5706 Signed-off-by: Gabriele Bartolini --- README.md | 1 + SUPPORT.md | 30 ++++++++++++++++++++++++++++++ docs/mkdocs.yml | 1 - docs/src/commercial_support.md | 12 ------------ 4 files changed, 31 insertions(+), 13 deletions(-) create mode 100644 SUPPORT.md delete mode 100644 docs/src/commercial_support.md diff --git a/README.md b/README.md index 50fd765c9b..43b055e250 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ CloudNativePG was originally built and sponsored by [EDB](https://www.enterprise - [Governance policies](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md) - [Contributing](CONTRIBUTING.md) - [Adopters](ADOPTERS.md) +- [Commercial Support](https://cloudnative-pg.io/support/) - [License](LICENSE) ## Getting Started diff --git a/SUPPORT.md b/SUPPORT.md new file mode 100644 index 0000000000..06f152bdbf --- /dev/null +++ b/SUPPORT.md @@ -0,0 +1,30 @@ +# Commercial Support for CloudNativePG + +CloudNativePG is an independent open-source project and does not officially +endorse any specific company or service provider. + +However, to assist users in finding professional support, the +"[Commercial Support](https://cloudnative-pg.io/support/)" +page offers an alphabetical list of companies and individuals providing +CloudNativePG-related products or services. + +*Please note that the CloudNativePG authors are not responsible for the accuracy +or content provided by the listed companies or individuals.* + +## How to Get Listed + +To have your company or personal services featured on this list, please submit +a [pull request to the CloudNativePG website](https://github.com/cloudnative-pg/cloudnative-pg.github.io) +by adding a `.md` file in the [`content/support` folder](https://github.com/cloudnative-pg/cloudnative-pg.github.io/tree/main/content/support) +containing the following information: + +1. **Organisation Name**: Clearly specify the name of your company or entity. +2. **Organisation Logo**: Provide your company logo in SVG format. +3. **Website Link**: Include a link to your homepage or a dedicated landing + page that explicitly mentions CloudNativePG support and includes at least one + link back to [cloudnative-pg.io](https://cloudnative-pg.io). 
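A purely illustrative sketch of such an entry (the file name, front-matter keys, and
organisation are hypothetical; check the existing files under `content/support` for
the actual schema) could live at `content/support/example-co.md`:

    ---
    title: "Example Co"
    logo: "example-co.svg"
    url: "https://www.example.com/cloudnativepg-support"
    ---

    Example Co offers consulting and 24/7 support subscriptions for
    CloudNativePG and PostgreSQL on Kubernetes.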
+ +[CloudNativePG maintainers will vet each submission](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md#voting) +and reserve the right to reject your application or request changes if your website +doesn’t clearly mention CloudNativePG support or if it doesn't include at least +one link back to [cloudnative-pg.io](https://cloudnative-pg.io). diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 8ea186759c..b3dd9c55f7 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -63,7 +63,6 @@ nav: - samples.md - networking.md - benchmarking.md - - commercial_support.md - faq.md - cloudnative-pg.v1.md - supported_releases.md diff --git a/docs/src/commercial_support.md b/docs/src/commercial_support.md deleted file mode 100644 index 1b9ca9da55..0000000000 --- a/docs/src/commercial_support.md +++ /dev/null @@ -1,12 +0,0 @@ -# Commercial support - -CloudNativePG is an independent open source project that doesn't endorse any -company. - -The ["Support" page](https://cloudnative-pg.io/support/) website lists -third-party companies and individuals that provide products or services related -to CloudNativePG. - -If you're providing commercial support for CloudNativePG, add yourself or your -organization to that list. - From 28a8dcbbbb05cd12a99d4da87284e96592addcb2 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Mon, 7 Oct 2024 11:47:31 +0200 Subject: [PATCH 044/836] docs: mention OpenShift certified operator by EDB (#5724) Closes #5723 Signed-off-by: Gabriele Bartolini --- docs/src/installation_upgrade.md | 7 +++++-- docs/src/supported_releases.md | 12 ++++++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index e158ffeba1..c2fb1a3ac7 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -86,10 +86,13 @@ The operator can be installed using the provided [Helm chart](https://github.com ### Using OLM -CloudNativePG can also be installed using the -[Operator Lifecycle Manager (OLM)](https://olm.operatorframework.io/docs/) +CloudNativePG can also be installed via the [Operator Lifecycle Manager (OLM)](https://olm.operatorframework.io/docs/) directly from [OperatorHub.io](https://operatorhub.io/operator/cloudnative-pg). +For deployments on Red Hat OpenShift, EDB offers and fully supports a certified +version of CloudNativePG, available through the +[Red Hat OpenShift Container Platform](https://catalog.redhat.com/software/container-stacks/detail/653fd4035eece8598f66d97b). + ## Details about the deployment In Kubernetes, the operator is by default installed in the `cnpg-system` diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index c3d367e268..c8d9434545 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -88,10 +88,14 @@ Git tags for versions are prefixed with `v`. The list of supported Kubernetes versions in the table depends on what the CloudNativePG maintainers think is reasonable to support and to test. -At the moment, the CloudNativePG community doesn't support or test any -additional Kubernetes distribution, like Red Hat OpenShift. This might change -in the future and, in that case, that would be reflected in an official policy -written by the CloudNativePG maintainers. +Currently, the CloudNativePG community does not officially support or test any +Kubernetes distributions beyond the standard/vanilla one - such as Red Hat +OpenShift. 
This may change in the future, and if it does, the CloudNativePG +maintainers will update the official policy accordingly. + +If you plan to deploy CloudNativePG on Red Hat OpenShift, you can use the +[certified operator provided by EDB](https://catalog.redhat.com/software/container-stacks/detail/653fd4035eece8598f66d97b), +which comes with full support from EDB. ### Supported PostgreSQL versions From cd5a706a6008d7ecbc0af5b8e9c5c5b10038b820 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 7 Oct 2024 12:29:44 +0200 Subject: [PATCH 045/836] chore: move time management functions to machinery (#5727) Signed-off-by: Leonardo Cecchi --- api/v1/cluster_funcs.go | 11 +-- go.mod | 2 +- go.sum | 4 +- internal/cmd/plugin/backup/cmd.go | 3 +- internal/cmd/plugin/promote/promote.go | 4 +- internal/cmd/plugin/reload/reload.go | 3 +- internal/controller/cluster_status.go | 5 +- internal/controller/replicas.go | 7 +- .../controller/scheduledbackup_controller.go | 3 +- .../controller/instance_controller.go | 6 +- .../management/controller/instance_startup.go | 4 +- pkg/management/postgres/backup.go | 6 +- .../postgres/webserver/plugin_backup.go | 4 +- pkg/utils/strings.go | 27 ------ pkg/utils/time.go | 80 ----------------- pkg/utils/time_test.go | 88 ------------------- tests/e2e/tablespaces_test.go | 3 +- 17 files changed, 36 insertions(+), 224 deletions(-) delete mode 100644 pkg/utils/strings.go delete mode 100644 pkg/utils/time.go delete mode 100644 pkg/utils/time_test.go diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index f3caf8d59c..a90dbd48bc 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -27,6 +27,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "github.com/cloudnative-pg/machinery/pkg/postgres/version" "github.com/cloudnative-pg/machinery/pkg/stringset" corev1 "k8s.io/api/core/v1" @@ -1116,7 +1117,7 @@ func (cluster *Cluster) GetEnableSuperuserAccess() bool { func (cluster *Cluster) LogTimestampsWithMessage(ctx context.Context, logMessage string) { contextLogger := log.FromContext(ctx) - currentTimestamp := utils.GetCurrentTimestamp() + currentTimestamp := pgTime.GetCurrentTimestamp() keysAndValues := []interface{}{ "phase", cluster.Status.Phase, "currentTimestamp", currentTimestamp, @@ -1127,7 +1128,7 @@ func (cluster *Cluster) LogTimestampsWithMessage(ctx context.Context, logMessage var errs []string // Elapsed time since the last request of promotion (TargetPrimaryTimestamp) - if diff, err := utils.DifferenceBetweenTimestamps( + if diff, err := pgTime.DifferenceBetweenTimestamps( currentTimestamp, cluster.Status.TargetPrimaryTimestamp, ); err == nil { @@ -1141,7 +1142,7 @@ func (cluster *Cluster) LogTimestampsWithMessage(ctx context.Context, logMessage } // Elapsed time since the last promotion (CurrentPrimaryTimestamp) - if currentPrimaryDifference, err := utils.DifferenceBetweenTimestamps( + if currentPrimaryDifference, err := pgTime.DifferenceBetweenTimestamps( currentTimestamp, cluster.Status.CurrentPrimaryTimestamp, ); err == nil { @@ -1158,7 +1159,7 @@ func (cluster *Cluster) LogTimestampsWithMessage(ctx context.Context, logMessage // When positive, it is the amount of time required in the last promotion // of a standby to a primary. If negative, it means we have a failover/switchover // in progress, and the value represents the last measured uptime of the primary. 
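	// Worked example (sketch): with TargetPrimaryTimestamp = 10:00:00 and
	// CurrentPrimaryTimestamp = 10:00:05, the difference is +5s, i.e. the
	// last promotion took five seconds to complete. With
	// CurrentPrimaryTimestamp = 09:55:00 (the promotion requested at
	// 10:00:00 has not completed yet), the difference is -5m, i.e. the
	// outgoing primary had been primary for five minutes when the
	// switchover/failover was requested.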
- if currentPrimaryTargetDifference, err := utils.DifferenceBetweenTimestamps( + if currentPrimaryTargetDifference, err := pgTime.DifferenceBetweenTimestamps( cluster.Status.CurrentPrimaryTimestamp, cluster.Status.TargetPrimaryTimestamp, ); err == nil { @@ -1380,7 +1381,7 @@ func (target *RecoveryTarget) BuildPostgresOptions() string { if target.TargetTime != "" { result += fmt.Sprintf( "recovery_target_time = '%v'\n", - utils.ConvertToPostgresFormat(target.TargetTime)) + pgTime.ConvertToPostgresFormat(target.TargetTime)) } if target.TargetImmediate != nil && *target.TargetImmediate { result += "recovery_target = immediate\n" diff --git a/go.mod b/go.mod index b3734a9ea5..dd3d77f58a 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 - github.com/cloudnative-pg/machinery v0.0.0-20241007084552-267a543ce26f + github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index 92f5c0833b..b6aa799796 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8= -github.com/cloudnative-pg/machinery v0.0.0-20241007084552-267a543ce26f h1:tdh7vyJBadzToa2pYYC5gERr35kum4N2571VWtXnkPk= -github.com/cloudnative-pg/machinery v0.0.0-20241007084552-267a543ce26f/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= +github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 h1:DHaSe0PoLnIQFWIpRqB9RiBlNzbdLuVbiCtc9tN+FL0= +github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go index 8e770b5e51..cf11010fcf 100644 --- a/internal/cmd/plugin/backup/cmd.go +++ b/internal/cmd/plugin/backup/cmd.go @@ -23,6 +23,7 @@ import ( "strconv" "time" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" @@ -74,7 +75,7 @@ func NewCmd() *cobra.Command { backupName = fmt.Sprintf( "%s-%s", clusterName, - utils.ToCompactISO8601(time.Now()), + pgTime.ToCompactISO8601(time.Now()), ) } diff --git a/internal/cmd/plugin/promote/promote.go b/internal/cmd/plugin/promote/promote.go index 7b4ea964fa..0e45c4a45b 100644 --- a/internal/cmd/plugin/promote/promote.go +++ b/internal/cmd/plugin/promote/promote.go @@ -21,13 +21,13 @@ import ( "context" "fmt" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" v1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" 
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // Promote command implementation @@ -57,7 +57,7 @@ func Promote(ctx context.Context, clusterName string, serverName string) error { // The Pod exists, let's update status fields origCluster := cluster.DeepCopy() cluster.Status.TargetPrimary = serverName - cluster.Status.TargetPrimaryTimestamp = utils.GetCurrentTimestamp() + cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp() if err := status.RegisterPhaseWithOrigCluster( ctx, plugin.Client, diff --git a/internal/cmd/plugin/reload/reload.go b/internal/cmd/plugin/reload/reload.go index 17967bbf55..236910207f 100644 --- a/internal/cmd/plugin/reload/reload.go +++ b/internal/cmd/plugin/reload/reload.go @@ -21,6 +21,7 @@ import ( "context" "fmt" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -42,7 +43,7 @@ func Reload(ctx context.Context, clusterName string) error { if clusterRestarted.Annotations == nil { clusterRestarted.Annotations = make(map[string]string) } - clusterRestarted.Annotations[utils.ClusterReloadAnnotationName] = utils.GetCurrentTimestamp() + clusterRestarted.Annotations[utils.ClusterReloadAnnotationName] = pgTime.GetCurrentTimestamp() clusterRestarted.ManagedFields = nil err = plugin.Client.Patch(ctx, clusterRestarted, client.MergeFrom(&cluster)) diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index c039cc29a5..3be647985d 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -24,6 +24,7 @@ import ( "sort" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -297,7 +298,7 @@ func (r *ClusterReconciler) updateResourceStatus( "targetPrimary", cluster.Status.TargetPrimary, "instances", resources.instances) cluster.Status.TargetPrimary = cluster.Status.CurrentPrimary - cluster.Status.TargetPrimaryTimestamp = utils.GetCurrentTimestamp() + cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp() } } @@ -719,7 +720,7 @@ func (r *ClusterReconciler) setPrimaryInstance( ) error { origCluster := cluster.DeepCopy() cluster.Status.TargetPrimary = podName - cluster.Status.TargetPrimaryTimestamp = utils.GetCurrentTimestamp() + cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp() return r.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)) } diff --git a/internal/controller/replicas.go b/internal/controller/replicas.go index 7e5a28bac8..24f3ab2c14 100644 --- a/internal/controller/replicas.go +++ b/internal/controller/replicas.go @@ -23,6 +23,7 @@ import ( "time" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -350,13 +351,13 @@ func (r *ClusterReconciler) evaluateFailoverDelay( } if cluster.Status.CurrentPrimaryFailingSinceTimestamp == "" { - cluster.Status.CurrentPrimaryFailingSinceTimestamp = utils.GetCurrentTimestamp() + cluster.Status.CurrentPrimaryFailingSinceTimestamp = pgTime.GetCurrentTimestamp() if err := r.Status().Update(ctx, cluster); err != nil { return err } } - primaryFailingSince, err := 
utils.DifferenceBetweenTimestamps( - utils.GetCurrentTimestamp(), + primaryFailingSince, err := pgTime.DifferenceBetweenTimestamps( + pgTime.GetCurrentTimestamp(), cluster.Status.CurrentPrimaryFailingSinceTimestamp, ) if err != nil { diff --git a/internal/controller/scheduledbackup_controller.go b/internal/controller/scheduledbackup_controller.go index 538d53193e..d3a96d1f55 100644 --- a/internal/controller/scheduledbackup_controller.go +++ b/internal/controller/scheduledbackup_controller.go @@ -24,6 +24,7 @@ import ( "time" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "github.com/robfig/cron" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -238,7 +239,7 @@ func createBackup( // So we have no backup running, let's create a backup. // Let's have deterministic names to avoid creating the job two // times - name := fmt.Sprintf("%s-%s", scheduledBackup.GetName(), utils.ToCompactISO8601(backupTime)) + name := fmt.Sprintf("%s-%s", scheduledBackup.GetName(), pgTime.ToCompactISO8601(backupTime)) backup := scheduledBackup.CreateBackup(name) metadata := &backup.ObjectMeta if metadata.Labels == nil { diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index eaa211fb13..518ce96207 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -30,6 +30,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -58,7 +59,6 @@ import ( externalcluster "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch" clusterstatus "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" "github.com/cloudnative-pg/cloudnative-pg/pkg/system" - pkgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) const ( @@ -1208,7 +1208,7 @@ func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv // if the currentPrimary doesn't match the PodName we set the correct value. 
if cluster.Status.CurrentPrimary != r.instance.PodName { cluster.Status.CurrentPrimary = r.instance.PodName - cluster.Status.CurrentPrimaryTimestamp = pkgUtils.GetCurrentTimestamp() + cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() if err := r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)); err != nil { return err @@ -1277,7 +1277,7 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary( oldCluster := cluster.DeepCopy() cluster.Status.CurrentPrimary = r.instance.PodName - cluster.Status.CurrentPrimaryTimestamp = pkgUtils.GetCurrentTimestamp() + cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() if r.instance.RequiresDesignatedPrimaryTransition { externalcluster.SetDesignatedPrimaryTransitionCompleted(cluster) } diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index ea796fcb88..160d8687b6 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -25,6 +25,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" @@ -33,7 +34,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/controller" postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - pkgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // refreshServerCertificateFiles gets the latest server certificates files from the @@ -204,7 +204,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context oldCluster := cluster.DeepCopy() cluster.Status.CurrentPrimary = r.instance.PodName - cluster.Status.CurrentPrimaryTimestamp = pkgUtils.GetCurrentTimestamp() + cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() return r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)) } return nil diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go index 54d2a88af9..72f6b635fa 100644 --- a/pkg/management/postgres/backup.go +++ b/pkg/management/postgres/backup.go @@ -31,6 +31,7 @@ import ( barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -43,7 +44,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" // this is needed to correctly open the sql connection with the pgx driver _ "github.com/jackc/pgx/v5/stdlib" @@ -190,7 +190,7 @@ func (b *BackupCommand) run(ctx context.Context) { meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(err)) - b.Cluster.Status.LastFailedBackup = utils.GetCurrentTimestampWithFormat(time.RFC3339) + b.Cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) }); failErr != nil { b.Log.Error(failErr, "while setting 
cluster condition for failed backup") @@ -341,7 +341,7 @@ func (b *BackupCommand) setupBackupStatus() { backupStatus := b.Backup.GetStatus() if b.Capabilities.ShouldExecuteBackupWithName(b.Cluster) { - backupStatus.BackupName = fmt.Sprintf("backup-%v", utils.ToCompactISO8601(time.Now())) + backupStatus.BackupName = fmt.Sprintf("backup-%v", pgTime.ToCompactISO8601(time.Now())) } backupStatus.BarmanCredentials = barmanConfiguration.BarmanCredentials backupStatus.EndpointCA = barmanConfiguration.EndpointCA diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 0c0d18acdf..0619cff770 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -21,6 +21,7 @@ import ( "time" "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" @@ -34,7 +35,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // PluginBackupCommand represent a backup command that is being executed @@ -169,7 +169,7 @@ func (b *PluginBackupCommand) markBackupAsFailed(ctx context.Context, failure er meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(failure)) - b.Cluster.Status.LastFailedBackup = utils.GetCurrentTimestampWithFormat(time.RFC3339) + b.Cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) }); failErr != nil { contextLogger.Error(failErr, "while setting cluster condition for failed backup") diff --git a/pkg/utils/strings.go b/pkg/utils/strings.go deleted file mode 100644 index 2ee8b0d1f4..0000000000 --- a/pkg/utils/strings.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -// StringInSlice looks for a search string inside the string slice -func StringInSlice(slice []string, search string) bool { - for _, s := range slice { - if s == search { - return true - } - } - return false -} diff --git a/pkg/utils/time.go b/pkg/utils/time.go deleted file mode 100644 index ee0cb79ab7..0000000000 --- a/pkg/utils/time.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ConvertToPostgresFormat converts timestamps to PostgreSQL time format, if needed. -// e.g. "2006-01-02T15:04:05Z07:00" --> "2006-01-02 15:04:05.000000Z07:00" -// If the conversion fails, the input timestamp is returned as it is. -func ConvertToPostgresFormat(timestamp string) string { - if t, err := time.Parse(metav1.RFC3339Micro, timestamp); err == nil { - return t.Format("2006-01-02 15:04:05.000000Z07:00") - } - - if t, err := time.Parse(time.RFC3339, timestamp); err == nil { - return t.Format("2006-01-02 15:04:05.000000Z07:00") - } - - return timestamp -} - -// GetCurrentTimestamp returns the current timestamp as a string in RFC3339Micro format -func GetCurrentTimestamp() string { - t := time.Now() - return t.Format(metav1.RFC3339Micro) -} - -// GetCurrentTimestampWithFormat returns the current timestamp as a string with the specified format -func GetCurrentTimestampWithFormat(format string) string { - t := time.Now() - return t.Format(format) -} - -// DifferenceBetweenTimestamps returns the time.Duration difference between two timestamps strings in time.RFC3339. -func DifferenceBetweenTimestamps(first, second string) (time.Duration, error) { - parsedTimestamp, err := time.Parse(metav1.RFC3339Micro, first) - if err != nil { - return 0, err - } - - parsedTimestampTwo, err := time.Parse(metav1.RFC3339Micro, second) - if err != nil { - return 0, err - } - - return parsedTimestamp.Sub(parsedTimestampTwo), nil -} - -// ToCompactISO8601 converts a time.Time into a compacted version of the ISO8601 timestamp, -// removing any separators for brevity. -// -// For example: -// -// Given: 2022-01-02 15:04:05 (UTC) -// Returns: 20220102150405 -// -// This compact format is useful for generating concise, yet human-readable timestamps that -// can serve as suffixes for backup-related objects or any other contexts where space or -// character count might be a concern. -func ToCompactISO8601(t time.Time) string { - return t.Format("20060102150405") -} diff --git a/pkg/utils/time_test.go b/pkg/utils/time_test.go deleted file mode 100644 index a46d6c3d7f..0000000000 --- a/pkg/utils/time_test.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Time conversion", func() { - It("properly works given a string in RFC3339 format", func() { - res := ConvertToPostgresFormat("2021-09-01T10:22:47+03:00") - Expect(res).To(BeEquivalentTo("2021-09-01 10:22:47.000000+03:00")) - }) - It("return same input string if not in RFC3339 format", func() { - res := ConvertToPostgresFormat("2001-09-29 01:02:03") - Expect(res).To(BeEquivalentTo("2001-09-29 01:02:03")) - }) -}) - -var _ = Describe("Parsing targetTime", func() { - It("should calculate correctly the difference between two timestamps", func() { - By("having the first time bigger than the second", func() { - time1 := "2022-07-06T13:11:09.000000Z" - time2 := "2022-07-06T13:11:07.000000Z" - expectedSecondDifference := float64(2) - difference, err := DifferenceBetweenTimestamps(time1, time2) - Expect(err).ToNot(HaveOccurred()) - Expect(difference.Seconds()).To(Equal(expectedSecondDifference)) - }) - By("having the first time smaller than the second", func() { - time1 := "2022-07-06T13:11:07.000000Z" - time2 := "2022-07-06T13:11:09.000000Z" - expectedSecondDifference := float64(-2) - difference, err := DifferenceBetweenTimestamps(time1, time2) - Expect(err).ToNot(HaveOccurred()) - Expect(difference.Seconds()).To(Equal(expectedSecondDifference)) - }) - By("having first or second time wrong", func() { - time1 := "2022-07-06T13:12:09.000000Z" - - _, err := DifferenceBetweenTimestamps(time1, "") - Expect(err).To(HaveOccurred()) - - _, err = DifferenceBetweenTimestamps("", time1) - Expect(err).To(HaveOccurred()) - }) - }) - - It("should be RFC3339Micro format", func() { - time1 := GetCurrentTimestamp() - - _, err := time.Parse(metav1.RFC3339Micro, time1) - Expect(err).ToNot(HaveOccurred()) - }) -}) - -var _ = Describe("ToCompactISO8601", func() { - It("should return a string in the expected format for a given time", func() { - testTime := time.Date(2022, 0o1, 0o2, 15, 0o4, 0o5, 0, time.UTC) - compactISO8601 := ToCompactISO8601(testTime) - Expect(compactISO8601).To(Equal("20220102150405")) - }) - - It("should return a string of length 14", func() { - testTime := time.Now() - compactISO8601 := ToCompactISO8601(testTime) - Expect(compactISO8601).To(HaveLen(14)) - }) -}) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 4027611134..b3093e5ab0 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -28,6 +28,7 @@ import ( "strings" "time" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -440,7 +441,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) }) - backupName = clusterName + utils.GetCurrentTimestampWithFormat("20060102150405") + backupName = clusterName + pgTime.GetCurrentTimestampWithFormat("20060102150405") By("creating a volumeSnapshot and waiting until it's completed", func() { err := testUtils.CreateOnDemandBackupViaKubectlPlugin( namespace, From 90bc4ff37a9146ee1698126d20341deea907bc4c Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Mon, 7 Oct 2024 17:56:40 +0200 Subject: [PATCH 046/836] refactor: use getters for main fields in postgres.Instance (#5685) Closes: #5544 Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Quaresima Signed-off-by: Armando Ruocco Co-authored-by: Gabriele Quaresima Co-authored-by: Armando Ruocco --- 
internal/cmd/manager/instance/join/cmd.go | 13 +++-- internal/cmd/manager/instance/run/cmd.go | 14 +++--- internal/management/controller/cache.go | 2 +- .../controller/database_controller_test.go | 9 ++-- .../controller/externalservers/manager.go | 4 +- .../controller/instance_controller.go | 48 +++++++++++-------- .../management/controller/instance_startup.go | 20 ++++---- internal/management/controller/manager.go | 6 +-- .../management/controller/roles/runnable.go | 12 ++--- .../controller/roles/runnable_test.go | 4 +- .../controller/slots/runner/runner.go | 2 +- .../controller/tablespaces/controller_test.go | 4 +- .../controller/tablespaces/manager.go | 4 +- pkg/management/postgres/instance.go | 38 +++++++++++---- pkg/management/postgres/instance_replica.go | 6 +-- pkg/management/postgres/probes.go | 4 +- pkg/management/postgres/probes_test.go | 5 +- pkg/management/postgres/restore.go | 6 +-- pkg/management/postgres/webserver/local.go | 11 +++-- .../webserver/metricserver/pg_collector.go | 6 +-- pkg/management/postgres/webserver/remote.go | 8 +++- pkg/management/upgrade/upgrade.go | 3 +- 22 files changed, 127 insertions(+), 102 deletions(-) diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go index 424ff8bb41..449f92563a 100644 --- a/internal/cmd/manager/instance/join/cmd.go +++ b/internal/cmd/manager/instance/join/cmd.go @@ -53,14 +53,13 @@ func NewCmd() *cobra.Command { }, RunE: func(cmd *cobra.Command, _ []string) error { ctx := cmd.Context() - instance := postgres.NewInstance() - - // The following are needed to correctly + // The fields in the instance are needed to correctly // download the secret containing the TLS // certificates - instance.Namespace = namespace - instance.PodName = podName - instance.ClusterName = clusterName + instance := postgres.NewInstance(). + WithNamespace(namespace). + WithPodName(podName). + WithClusterName(clusterName) info := postgres.InitInfo{ PgData: pgData, @@ -112,7 +111,7 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg // Download the cluster definition from the API server var cluster apiv1.Cluster if err := reconciler.GetClient().Get(ctx, - ctrl.ObjectKey{Namespace: instance.Namespace, Name: instance.ClusterName}, + ctrl.ObjectKey{Namespace: instance.GetNamespaceName(), Name: instance.GetClusterName()}, &cluster, ); err != nil { log.Error(err, "Error while getting cluster") diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index d4f78f0f21..1d37ac850a 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -88,12 +88,12 @@ func NewCmd() *cobra.Command { }, RunE: func(cmd *cobra.Command, _ []string) error { ctx := log.IntoContext(cmd.Context(), log.GetLogger()) - instance := postgres.NewInstance() + instance := postgres.NewInstance(). + WithPodName(podName). + WithClusterName(clusterName). 
+ WithNamespace(namespace) instance.PgData = pgData - instance.Namespace = namespace - instance.PodName = podName - instance.ClusterName = clusterName instance.StatusPortTLS = statusPortTLS instance.MetricsPortTLS = metricsPortTLS @@ -152,14 +152,14 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { Cache: cache.Options{ ByObject: map[client.Object]cache.ByObject{ &apiv1.Cluster{}: { - Field: fields.OneTermEqualSelector("metadata.name", instance.ClusterName), + Field: fields.OneTermEqualSelector("metadata.name", instance.GetClusterName()), Namespaces: map[string]cache.Config{ - instance.Namespace: {}, + instance.GetNamespaceName(): {}, }, }, &apiv1.Database{}: { Namespaces: map[string]cache.Config{ - instance.Namespace: {}, + instance.GetNamespaceName(): {}, }, }, }, diff --git a/internal/management/controller/cache.go b/internal/management/controller/cache.go index bc031de24d..4d3b06a2b6 100644 --- a/internal/management/controller/cache.go +++ b/internal/management/controller/cache.go @@ -47,7 +47,7 @@ func (r *InstanceReconciler) updateCacheFromCluster(ctx context.Context, cluster } func (r *InstanceReconciler) updateWALRestoreSettingsCache(ctx context.Context, cluster *apiv1.Cluster) { - _, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.PodName) + _, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.GetPodName()) if errors.Is(err, walrestore.ErrNoBackupConfigured) { cache.Delete(cache.WALRestoreKey) return diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index 99a1231fe9..81a8373b86 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -86,11 +86,10 @@ var _ = Describe("Managed Database status", func() { db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - pgInstance := &postgres.Instance{ - Namespace: "default", - PodName: "cluster-example-1", - ClusterName: "cluster-example", - } + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-example-1"). 
+ WithClusterName("cluster-example") f := fakeInstanceData{ Instance: pgInstance, diff --git a/internal/management/controller/externalservers/manager.go b/internal/management/controller/externalservers/manager.go index 0c6eb00687..db488336aa 100644 --- a/internal/management/controller/externalservers/manager.go +++ b/internal/management/controller/externalservers/manager.go @@ -55,8 +55,8 @@ func (r *Reconciler) getCluster(ctx context.Context) (*apiv1.Cluster, error) { var cluster apiv1.Cluster err := r.client.Get(ctx, types.NamespacedName{ - Namespace: r.instance.Namespace, - Name: r.instance.ClusterName, + Namespace: r.instance.GetNamespaceName(), + Name: r.instance.GetClusterName(), }, &cluster) if err != nil { diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 518ce96207..358a6e20ad 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -239,14 +239,14 @@ func (r *InstanceReconciler) Reconcile( if result, err := reconciler.ReconcileReplicationSlots( ctx, - r.instance.PodName, + r.instance.GetPodName(), infrastructure.NewPostgresManager(r.instance.ConnectionPool()), cluster, ); err != nil || !result.IsZero() { return result, err } - if r.instance.PodName == cluster.Status.CurrentPrimary { + if r.instance.GetPodName() == cluster.Status.CurrentPrimary { result, err := roles.Reconcile(ctx, r.instance, cluster, r.client) if err != nil || !result.IsZero() { return result, err @@ -296,7 +296,7 @@ func (r *InstanceReconciler) Reconcile( } func (r *InstanceReconciler) configureSlotReplicator(cluster *apiv1.Cluster) { - switch r.instance.PodName { + switch r.instance.GetPodName() { case cluster.Status.CurrentPrimary, cluster.Status.TargetPrimary: r.instance.ConfigureSlotReplicator(nil) default: @@ -308,7 +308,7 @@ func (r *InstanceReconciler) restartPrimaryInplaceIfRequested( ctx context.Context, cluster *apiv1.Cluster, ) (bool, error) { - isPrimary := cluster.Status.CurrentPrimary == r.instance.PodName + isPrimary := cluster.Status.CurrentPrimary == r.instance.GetPodName() restartRequested := isPrimary && cluster.Status.Phase == apiv1.PhaseInplacePrimaryRestart if restartRequested { if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { @@ -366,7 +366,7 @@ func (r *InstanceReconciler) refreshConfigurationFiles( } func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv1.Cluster) *reconcile.Result { - fencingRequired := cluster.IsInstanceFenced(r.instance.PodName) + fencingRequired := cluster.IsInstanceFenced(r.instance.GetPodName()) isFenced := r.instance.IsFenced() switch { case !isFenced && fencingRequired: @@ -411,7 +411,7 @@ func (r *InstanceReconciler) initialize(ctx context.Context, cluster *apiv1.Clus return err } - r.instance.SetFencing(cluster.IsInstanceFenced(r.instance.PodName)) + r.instance.SetFencing(cluster.IsInstanceFenced(r.instance.GetPodName())) return nil } @@ -428,7 +428,8 @@ func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster) // we use a file as a flag to ensure the pod has been restarted already. I.e. 
on // newly created pod we don't need to check the enforced parameters - filename := path.Join(r.instance.PgData, fmt.Sprintf("%s-%s", constants.Startup, r.instance.PodName)) + filename := path.Join(r.instance.PgData, fmt.Sprintf("%s-%s", + constants.Startup, r.instance.GetPodName())) exists, err := fileutils.FileExists(filename) if err != nil { return err @@ -482,7 +483,7 @@ func (r *InstanceReconciler) reconcileOldPrimary( ) (restarted bool, err error) { contextLogger := log.FromContext(ctx) - if cluster.Status.TargetPrimary == r.instance.PodName { + if cluster.Status.TargetPrimary == r.instance.GetPodName() { return false, nil } @@ -744,7 +745,7 @@ func (r *InstanceReconciler) reconcileClusterRoleWithoutDB( return false, err } // Reconcile replica role - if cluster.Status.TargetPrimary != r.instance.PodName { + if cluster.Status.TargetPrimary != r.instance.GetPodName() { if !isPrimary { // We need to ensure that this instance is replicating from the correct server return r.instance.RefreshReplicaConfiguration(ctx, cluster, r.client) @@ -767,7 +768,7 @@ func (r *InstanceReconciler) reconcileMetrics( exporter := r.metricsServerExporter // We should never reset the SwitchoverRequired metrics as it needs the primary instance restarts, // however, if the cluster is healthy we make sure it is set to 0. - if cluster.Status.CurrentPrimary == r.instance.PodName { + if cluster.Status.CurrentPrimary == r.instance.GetPodName() { if cluster.Status.Phase == apiv1.PhaseWaitingForUser { exporter.Metrics.SwitchoverRequired.Set(1) } else { @@ -814,7 +815,7 @@ func (r *InstanceReconciler) reconcileMonitoringQueries( var configMap corev1.ConfigMap err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: reference.Name}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: reference.Name}, &configMap) if err != nil { contextLogger.Warning("Unable to get configMap containing custom monitoring queries", @@ -841,7 +842,12 @@ func (r *InstanceReconciler) reconcileMonitoringQueries( for _, reference := range cluster.Spec.Monitoring.CustomQueriesSecret { var secret corev1.Secret - err := r.GetClient().Get(ctx, client.ObjectKey{Namespace: r.instance.Namespace, Name: reference.Name}, &secret) + err := r.GetClient().Get(ctx, + client.ObjectKey{ + Namespace: r.instance.GetNamespaceName(), + Name: reference.Name, + }, + &secret) if err != nil { contextLogger.Warning("Unable to get secret containing custom monitoring queries", "reference", reference, @@ -1177,7 +1183,7 @@ func (r *InstanceReconciler) refreshFileFromSecret( func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv1.Cluster) error { contextLogger := log.FromContext(ctx) - if cluster.Status.TargetPrimary != r.instance.PodName || cluster.IsReplica() { + if cluster.Status.TargetPrimary != r.instance.GetPodName() || cluster.IsReplica() { return nil } @@ -1206,8 +1212,8 @@ func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv } // if the currentPrimary doesn't match the PodName we set the correct value. 
- if cluster.Status.CurrentPrimary != r.instance.PodName { - cluster.Status.CurrentPrimary = r.instance.PodName + if cluster.Status.CurrentPrimary != r.instance.GetPodName() { + cluster.Status.CurrentPrimary = r.instance.GetPodName() cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() if err := r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)); err != nil { @@ -1238,7 +1244,7 @@ func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv func (r *InstanceReconciler) handlePromotion(ctx context.Context, cluster *apiv1.Cluster) error { contextLogger := log.FromContext(ctx) contextLogger.Info("I'm the target primary, wait for the wal_receiver to be terminated") - if r.instance.PodName != cluster.Status.CurrentPrimary { + if r.instance.GetPodName() != cluster.Status.CurrentPrimary { // if the cluster is not replicating it means it's doing a failover and // we have to wait for wal receivers to be down err := r.waitForWalReceiverDown() @@ -1262,7 +1268,7 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary( cluster *apiv1.Cluster, ) (changed bool, err error) { // If I'm already the current designated primary everything is ok. - if cluster.Status.CurrentPrimary == r.instance.PodName && !r.instance.RequiresDesignatedPrimaryTransition { + if cluster.Status.CurrentPrimary == r.instance.GetPodName() && !r.instance.RequiresDesignatedPrimaryTransition { return false, nil } @@ -1276,7 +1282,7 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary( log.FromContext(ctx).Info("Setting myself as the current designated primary") oldCluster := cluster.DeepCopy() - cluster.Status.CurrentPrimary = r.instance.PodName + cluster.Status.CurrentPrimary = r.instance.GetPodName() cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() if r.instance.RequiresDesignatedPrimaryTransition { externalcluster.SetDesignatedPrimaryTransitionCompleted(cluster) @@ -1350,7 +1356,7 @@ func (r *InstanceReconciler) reconcileUser(ctx context.Context, username string, var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: secretName}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: secretName}, &secret) if err != nil { if apierrors.IsNotFound(err) { @@ -1393,7 +1399,7 @@ func (r *InstanceReconciler) refreshPGHBA(ctx context.Context, cluster *apiv1.Cl err := r.GetClient().Get(ctx, types.NamespacedName{ Name: ldapSecretName, - Namespace: r.instance.Namespace, + Namespace: r.instance.GetNamespaceName(), }, &ldapBindPasswordSecret) if err != nil { return false, err @@ -1454,7 +1460,7 @@ func (r *InstanceReconciler) dropStaleReplicationConnections( return ctrl.Result{}, nil } - if cluster.Status.CurrentPrimary == r.instance.PodName { + if cluster.Status.CurrentPrimary == r.instance.GetPodName() { return ctrl.Result{}, nil } diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index 160d8687b6..caeeda9c55 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -48,7 +48,7 @@ func (r *InstanceReconciler) refreshServerCertificateFiles(ctx context.Context, func() error { err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ServerTLSSecret}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ServerTLSSecret}, &secret) if err != nil { 
contextLogger.Info("Error accessing server TLS Certificate. Retrying with exponential backoff.", @@ -86,7 +86,7 @@ func (r *InstanceReconciler) refreshReplicationUserCertificate( var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ReplicationTLSSecret}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ReplicationTLSSecret}, &secret) if err != nil { return false, err @@ -105,7 +105,7 @@ func (r *InstanceReconciler) refreshClientCA(ctx context.Context, cluster *apiv1 var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ClientCASecret}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ClientCASecret}, &secret) if err != nil { return false, err @@ -120,7 +120,7 @@ func (r *InstanceReconciler) refreshServerCA(ctx context.Context, cluster *apiv1 var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: cluster.Status.Certificates.ServerCASecret}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ServerCASecret}, &secret) if err != nil { return false, err @@ -148,7 +148,7 @@ func (r *InstanceReconciler) refreshBarmanEndpointCA(ctx context.Context, cluste var secret corev1.Secret err := r.GetClient().Get( ctx, - client.ObjectKey{Namespace: r.instance.Namespace, Name: secretKeySelector.Name}, + client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: secretKeySelector.Name}, &secret) if err != nil { return false, err @@ -194,7 +194,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context "of the cluster is resumed, demoting immediately") return r.instance.Demote(ctx, cluster) - case targetPrimary == r.instance.PodName: + case targetPrimary == r.instance.GetPodName(): if currentPrimary == "" { // This means that this cluster has been just started up and the // current primary still need to be written @@ -203,7 +203,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context "targetPrimary", targetPrimary) oldCluster := cluster.DeepCopy() - cluster.Status.CurrentPrimary = r.instance.PodName + cluster.Status.CurrentPrimary = r.instance.GetPodName() cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() return r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)) } @@ -349,12 +349,12 @@ func (r *InstanceReconciler) ReconcileTablespaces( mountPoint := specs.MountForTablespace(tbsName) if tbsMount, err := fileutils.FileExists(mountPoint); err != nil { contextLogger.Error(err, "while checking for mountpoint", "instance", - r.instance.PodName, "tablespace", tbsName) + r.instance.GetPodName(), "tablespace", tbsName) return err } else if !tbsMount { contextLogger.Error(fmt.Errorf("mountpoint not found"), "mountpoint for tablespaces is missing", - "instance", r.instance.PodName, "tablespace", tbsName) + "instance", r.instance.GetPodName(), "tablespace", tbsName) continue } @@ -369,7 +369,7 @@ func (r *InstanceReconciler) ReconcileTablespaces( if err != nil { contextLogger.Error(err, "could not create data dir in tablespace mount", - "instance", r.instance.PodName, "tablespace", tbsName) + "instance", r.instance.GetPodName(), "tablespace", tbsName) return fmt.Errorf("while creating data dir in tablespace %s: %w", mountPoint, err) } } diff --git 
a/internal/management/controller/manager.go b/internal/management/controller/manager.go index 9ea68245ae..b1c01130d7 100644 --- a/internal/management/controller/manager.go +++ b/internal/management/controller/manager.go @@ -85,8 +85,8 @@ func (r *InstanceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, er var cluster apiv1.Cluster err := r.GetClient().Get(ctx, types.NamespacedName{ - Namespace: r.instance.Namespace, - Name: r.instance.ClusterName, + Namespace: r.instance.GetNamespaceName(), + Name: r.instance.GetClusterName(), }, &cluster) if err != nil { @@ -102,7 +102,7 @@ func (r *InstanceReconciler) GetSecret(ctx context.Context, name string) (*corev err := r.GetClient().Get(ctx, types.NamespacedName{ Name: name, - Namespace: r.instance.Namespace, + Namespace: r.instance.GetNamespaceName(), }, &secret) if err != nil { return nil, fmt.Errorf("while getting secret: %w", err) diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go index dce73d51b1..1eed8f037d 100644 --- a/internal/management/controller/roles/runnable.go +++ b/internal/management/controller/roles/runnable.go @@ -138,8 +138,8 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed var remoteCluster apiv1.Cluster if err = sr.client.Get(ctx, types.NamespacedName{ - Name: sr.instance.ClusterName, - Namespace: sr.instance.Namespace, + Name: sr.instance.GetClusterName(), + Namespace: sr.instance.GetNamespaceName(), }, &remoteCluster); err != nil { return err } @@ -154,8 +154,8 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed } if err = sr.client.Get(ctx, types.NamespacedName{ - Name: sr.instance.ClusterName, - Namespace: sr.instance.Namespace, + Name: sr.instance.GetClusterName(), + Namespace: sr.instance.GetNamespaceName(), }, &remoteCluster); err != nil { return err } @@ -181,7 +181,7 @@ func (sr *RoleSynchronizer) synchronizeRoles( storedPasswordState map[string]apiv1.PasswordState, ) (map[string]apiv1.PasswordState, map[string][]string, error) { latestSecretResourceVersion, err := getPasswordSecretResourceVersion( - ctx, sr.client, config.Roles, sr.instance.Namespace) + ctx, sr.client, config.Roles, sr.instance.GetNamespaceName()) if err != nil { return nil, nil, err } @@ -320,7 +320,7 @@ func (sr *RoleSynchronizer) applyRoleCreateUpdate( fmt.Errorf("cannot reconcile: password both provided and disabled: %s", role.PasswordSecret.Name) case role.PasswordSecret != nil && !role.DisablePassword: - passwordSecret, err := getPassword(ctx, sr.client, role, sr.instance.Namespace) + passwordSecret, err := getPassword(ctx, sr.client, role, sr.instance.GetNamespaceName()) if err != nil { return apiv1.PasswordState{}, err } diff --git a/internal/management/controller/roles/runnable_test.go b/internal/management/controller/roles/runnable_test.go index ddd5f2414f..4ba41763c0 100644 --- a/internal/management/controller/roles/runnable_test.go +++ b/internal/management/controller/roles/runnable_test.go @@ -229,9 +229,7 @@ func (m *mockRoleManagerWithError) GetParentRoles(_ context.Context, role Databa var _ = Describe("Role synchronizer tests", func() { roleSynchronizer := RoleSynchronizer{ - instance: &postgres.Instance{ - Namespace: "myPod", - }, + instance: postgres.NewInstance().WithNamespace("myPod"), } When("role configurations are realizable", func() { diff --git a/internal/management/controller/slots/runner/runner.go b/internal/management/controller/slots/runner/runner.go index ac49f9b33e..221a5195e0 
100644 --- a/internal/management/controller/slots/runner/runner.go +++ b/internal/management/controller/slots/runner/runner.go @@ -112,7 +112,7 @@ func (sr *Replicator) reconcile(ctx context.Context, config *apiv1.ReplicationSl ctx, infrastructure.NewPostgresManager(primaryPool), infrastructure.NewPostgresManager(localPool), - sr.instance.PodName, + sr.instance.GetPodName(), config, ) return err diff --git a/internal/management/controller/tablespaces/controller_test.go b/internal/management/controller/tablespaces/controller_test.go index 01cc58234a..4c5bf682ec 100644 --- a/internal/management/controller/tablespaces/controller_test.go +++ b/internal/management/controller/tablespaces/controller_test.go @@ -81,9 +81,7 @@ func (mst mockTablespaceStorageManager) getStorageLocation(tablespaceName string var _ = Describe("Tablespace synchronizer tests", func() { tablespaceReconciler := TablespaceReconciler{ - instance: &postgres.Instance{ - Namespace: "myPod", - }, + instance: postgres.NewInstance().WithNamespace("myPod"), } When("tablespace configurations are realizable", func() { diff --git a/internal/management/controller/tablespaces/manager.go b/internal/management/controller/tablespaces/manager.go index a484432d1e..1b793189dc 100644 --- a/internal/management/controller/tablespaces/manager.go +++ b/internal/management/controller/tablespaces/manager.go @@ -56,8 +56,8 @@ func (r *TablespaceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, var cluster apiv1.Cluster err := r.GetClient().Get(ctx, types.NamespacedName{ - Namespace: r.instance.Namespace, - Name: r.instance.ClusterName, + Namespace: r.instance.GetNamespaceName(), + Name: r.instance.GetClusterName(), }, &cluster) if err != nil { diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 0496445681..54d1be07d2 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -142,13 +142,13 @@ type Instance struct { primaryPool *pool.ConnectionPool // The namespace of the k8s object representing this cluster - Namespace string + namespace string // The name of the Pod where the controller is executing - PodName string + podName string - // The name of the cluster of which this Pod is belonging - ClusterName string + // The name of the cluster this instance belongs in + clusterName string // The sha256 of the config. 
It is computed on the config string, before // adding the PostgreSQL CNPGConfigSha256 parameter @@ -367,6 +367,24 @@ func NewInstance() *Instance { } } +// WithNamespace specifies the namespace for this Instance +func (instance *Instance) WithNamespace(namespace string) *Instance { + instance.namespace = namespace + return instance +} + +// WithPodName specifies the pod name for this Instance +func (instance *Instance) WithPodName(podName string) *Instance { + instance.podName = podName + return instance +} + +// WithClusterName specifies the name of the cluster this Instance belongs to +func (instance *Instance) WithClusterName(clusterName string) *Instance { + instance.clusterName = clusterName + return instance +} + // RetryUntilServerAvailable is the default retry configuration that is used // to wait for a successful connection to a certain server var RetryUntilServerAvailable = wait.Backoff{ @@ -777,7 +795,7 @@ func (instance *Instance) Demote(ctx context.Context, cluster *apiv1.Cluster) er contextLogger := log.FromContext(ctx) contextLogger.Info("Demoting instance", "pgpdata", instance.PgData) - slotName := cluster.GetSlotNameFromInstanceName(instance.PodName) + slotName := cluster.GetSlotNameFromInstanceName(instance.GetPodName()) _, err := UpdateReplicaConfiguration(instance.PgData, instance.GetPrimaryConnInfo(), slotName) return err } @@ -1113,19 +1131,19 @@ func (instance *Instance) GetInstanceCommandChan() <-chan InstanceCommand { return instance.instanceCommandChan } -// GetClusterName returns the name of the cluster where this instance is running +// GetClusterName returns the name of the cluster where this instance belongs func (instance *Instance) GetClusterName() string { - return instance.ClusterName + return instance.clusterName } // GetPodName returns the name of the pod where this instance is running func (instance *Instance) GetPodName() string { - return instance.PodName + return instance.podName } // GetNamespaceName returns the name of the namespace where this instance is running func (instance *Instance) GetNamespaceName() string { - return instance.Namespace + return instance.namespace } // RequestFastImmediateShutdown request the lifecycle manager to shut down @@ -1250,7 +1268,7 @@ func (instance *Instance) DropConnections() error { // GetPrimaryConnInfo returns the DSN to reach the primary func (instance *Instance) GetPrimaryConnInfo() string { - return buildPrimaryConnInfo(instance.ClusterName+"-rw", instance.PodName) + return buildPrimaryConnInfo(instance.GetClusterName()+"-rw", instance.GetPodName()) } // HandleInstanceCommandRequests execute a command requested by the reconciliation diff --git a/pkg/management/postgres/instance_replica.go b/pkg/management/postgres/instance_replica.go index 5c681b95f8..84dca8e1a0 100644 --- a/pkg/management/postgres/instance_replica.go +++ b/pkg/management/postgres/instance_replica.go @@ -51,7 +51,7 @@ func (instance *Instance) RefreshReplicaConfiguration( return changed, nil } - if cluster.IsReplica() && cluster.Status.TargetPrimary == instance.PodName { + if cluster.IsReplica() && cluster.Status.TargetPrimary == instance.GetPodName() { result, err := instance.writeReplicaConfigurationForDesignatedPrimary(ctx, cli, cluster) return changed || result, err } @@ -60,7 +60,7 @@ func (instance *Instance) RefreshReplicaConfiguration( } func (instance *Instance) writeReplicaConfigurationForReplica(cluster *apiv1.Cluster) (changed bool, err error) { - slotName := cluster.GetSlotNameFromInstanceName(instance.PodName) + slotName := 
cluster.GetSlotNameFromInstanceName(instance.GetPodName()) return UpdateReplicaConfiguration(instance.PgData, instance.GetPrimaryConnInfo(), slotName) } @@ -75,7 +75,7 @@ func (instance *Instance) writeReplicaConfigurationForDesignatedPrimary( } connectionString, err := external.ConfigureConnectionToServer( - ctx, cli, instance.Namespace, &server) + ctx, cli, instance.GetNamespaceName(), &server) if err != nil { return false, err } diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go index 77a819fcd0..fde50a9f48 100644 --- a/pkg/management/postgres/probes.go +++ b/pkg/management/postgres/probes.go @@ -53,7 +53,7 @@ func (instance *Instance) IsServerHealthy() error { // GetStatus Extract the status of this PostgreSQL database func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err error) { result = &postgres.PostgresqlStatus{ - Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: instance.PodName}}, + Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: instance.GetPodName()}}, InstanceManagerVersion: versions.Version, MightBeUnavailable: instance.MightBeUnavailable(), } @@ -468,7 +468,7 @@ func (instance *Instance) fillWalStatusFromConnection(result *postgres.Postgresq coalesce(sync_priority, 0) FROM pg_catalog.pg_stat_replication WHERE application_name ~ $1 AND usename = $2`, - fmt.Sprintf("%s-[0-9]+$", instance.ClusterName), + fmt.Sprintf("%s-[0-9]+$", instance.GetClusterName()), v1.StreamingReplicationUser, ) if err != nil { diff --git a/pkg/management/postgres/probes_test.go b/pkg/management/postgres/probes_test.go index 514949c65a..1973f861bc 100644 --- a/pkg/management/postgres/probes_test.go +++ b/pkg/management/postgres/probes_test.go @@ -98,10 +98,9 @@ var _ = Describe("probes", func() { }) It("set the information", func() { - instance := &Instance{ + instance := (&Instance{ pgVersion: &semver.Version{Major: 13}, - PodName: "test-1", - } + }).WithPodName("test-1") status := &postgres.PostgresqlStatus{ IsPrimary: false, } diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index a4cf12afd0..6067f0389c 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -790,9 +790,9 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { return fmt.Errorf("while creating a temporary data directory: %w", err) } - temporaryInstance := temporaryInitInfo.GetInstance() - temporaryInstance.Namespace = info.Namespace - temporaryInstance.ClusterName = info.ClusterName + temporaryInstance := temporaryInitInfo.GetInstance(). + WithNamespace(info.Namespace). + WithClusterName(info.ClusterName) _, err = temporaryInstance.RefreshPGHBA(cluster, "") if err != nil { diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go index 61e99860e8..f818623c44 100644 --- a/pkg/management/postgres/webserver/local.go +++ b/pkg/management/postgres/webserver/local.go @@ -81,7 +81,10 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req var cluster apiv1.Cluster err := ws.typedClient.Get( r.Context(), - client.ObjectKey{Name: ws.instance.ClusterName, Namespace: ws.instance.Namespace}, + client.ObjectKey{ + Name: ws.instance.GetClusterName(), + Namespace: ws.instance.GetNamespaceName(), + }, &cluster, ) if apierrs.IsNotFound(err) { @@ -140,8 +143,8 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. 
} if err := ws.typedClient.Get(ctx, client.ObjectKey{ - Namespace: ws.instance.Namespace, - Name: ws.instance.ClusterName, + Namespace: ws.instance.GetNamespaceName(), + Name: ws.instance.GetClusterName(), }, &cluster); err != nil { http.Error( w, @@ -151,7 +154,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. } if err := ws.typedClient.Get(ctx, client.ObjectKey{ - Namespace: ws.instance.Namespace, + Namespace: ws.instance.GetNamespaceName(), Name: backupName, }, &backup); err != nil { http.Error( diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index 9bfd6ab53f..a7a74373b3 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -358,13 +358,13 @@ func (e *Exporter) collectPgMetrics(ch chan<- prometheus.Metric) { // First, let's check the connection. No need to proceed if this fails. if err := db.Ping(); err != nil { log.Warning("Unable to collect metrics", "error", err) - e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.ClusterName).Set(0) + e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.GetClusterName()).Set(0) e.Metrics.Error.Set(1) e.Metrics.CollectionDuration.WithLabelValues("Collect.up").Set(time.Since(collectionStart).Seconds()) return } - e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.ClusterName).Set(1) + e.Metrics.PostgreSQLUp.WithLabelValues(e.instance.GetClusterName()).Set(1) e.Metrics.Error.Set(0) e.Metrics.CollectionDuration.WithLabelValues("Collect.up").Set(time.Since(collectionStart).Seconds()) @@ -541,7 +541,7 @@ func collectPGVersion(e *Exporter) error { if err != nil { return err } - e.Metrics.PgVersion.WithLabelValues(majorMinor, e.instance.ClusterName).Set(version) + e.Metrics.PgVersion.WithLabelValues(majorMinor, e.instance.GetClusterName()).Set(version) return nil } diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index 231f6dccd0..7b9d75becd 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -357,13 +357,17 @@ func (ws *remoteWebserverEndpoints) pgArchivePartial(w http.ResponseWriter, req var cluster apiv1.Cluster if err := ws.typedClient.Get(req.Context(), - client.ObjectKey{Namespace: ws.instance.Namespace, Name: ws.instance.ClusterName}, + client.ObjectKey{ + Namespace: ws.instance.GetNamespaceName(), + Name: ws.instance.GetClusterName(), + }, &cluster); err != nil { sendBadRequestJSONResponse(w, "NO_CLUSTER_FOUND", err.Error()) return } - if cluster.Status.TargetPrimary != ws.instance.PodName || cluster.Status.CurrentPrimary != ws.instance.PodName { + if cluster.Status.TargetPrimary != ws.instance.GetPodName() || + cluster.Status.CurrentPrimary != ws.instance.GetPodName() { sendBadRequestJSONResponse(w, "NOT_EXPECTED_PRIMARY", "") return } diff --git a/pkg/management/upgrade/upgrade.go b/pkg/management/upgrade/upgrade.go index 8c8ce4ff13..7c08c0bb24 100644 --- a/pkg/management/upgrade/upgrade.go +++ b/pkg/management/upgrade/upgrade.go @@ -82,7 +82,8 @@ func FromReader( } // Validate the hash of this instance manager - if err := validateInstanceManagerHash(typedClient, instance.ClusterName, instance.Namespace, + if err := validateInstanceManagerHash(typedClient, + instance.GetClusterName(), instance.GetNamespaceName(), instanceStatus.InstanceArch, newHash); err != nil { return fmt.Errorf("while validating instance manager 
binary: %w", err) } From f18665bac28805ebca49092c1eb47c9083470a45 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 8 Oct 2024 16:45:01 +0200 Subject: [PATCH 047/836] chore: abort certificate creation upon fatal error detection (#5574) Previously, the certificate renewal process attempted to renew or create a certificate upon Secret retrieval, even in cases where the Secret `Get` request failed due to permanent errors like insufficient privileges. This led to incorrect error logging and confusion. With this patch, the certificate renewal process only continues when the Secret is not found or renewal is required. Closes #5575 Signed-off-by: Armando Ruocco --- internal/controller/cluster_pki.go | 117 ++++++++++++++--------------- 1 file changed, 57 insertions(+), 60 deletions(-) diff --git a/internal/controller/cluster_pki.go b/internal/controller/cluster_pki.go index ea79ff06a5..4b00c78958 100644 --- a/internal/controller/cluster_pki.go +++ b/internal/controller/cluster_pki.go @@ -43,23 +43,7 @@ func (r *ClusterReconciler) setupPostgresPKI(ctx context.Context, cluster *apiv1 return fmt.Errorf("generating server CA certificate: %w", err) } - // This is the certificate for the server - serverCertificateName := client.ObjectKey{Namespace: cluster.GetNamespace(), Name: cluster.GetServerTLSSecretName()} - opts := x509.VerifyOptions{KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}} - err = r.ensureServerLeafCertificate( - ctx, - cluster, - serverCertificateName, - cluster.GetServiceReadWriteName(), - serverCaSecret, - certs.CertTypeServer, - cluster.GetClusterAltDNSNames(), - &opts) - if err != nil { - if apierrors.IsNotFound(err) { - return fmt.Errorf("missing specified server TLS secret %s: %w", - cluster.Status.Certificates.ServerTLSSecret, err) - } + if err = r.ensureServerLeafCertificate(ctx, cluster, serverCaSecret); err != nil { return fmt.Errorf("generating server TLS certificate: %w", err) } @@ -71,20 +55,7 @@ func (r *ClusterReconciler) setupPostgresPKI(ctx context.Context, cluster *apiv1 return fmt.Errorf("generating client CA certificate: %w", err) } - // Generating postgres client certificate - replicationSecretName := client.ObjectKey{ - Namespace: cluster.GetNamespace(), - Name: cluster.GetReplicationSecretName(), - } - err = r.ensureReplicationClientLeafCertificate( - ctx, - cluster, - replicationSecretName, - apiv1.StreamingReplicationUser, - clientCaSecret, - certs.CertTypeClient, - nil, - &x509.VerifyOptions{KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}}) + err = r.ensureReplicationClientLeafCertificate(ctx, cluster, clientCaSecret) if err != nil { if apierrors.IsNotFound(err) { return fmt.Errorf("missing specified streaming replication client TLS secret %s: %w", @@ -253,24 +224,34 @@ func (r *ClusterReconciler) renewCASecret(ctx context.Context, secret *v1.Secret func (r *ClusterReconciler) ensureServerLeafCertificate( ctx context.Context, cluster *apiv1.Cluster, - secretName client.ObjectKey, - commonName string, caSecret *v1.Secret, - usage certs.CertType, - altDNSNames []string, - opts *x509.VerifyOptions, ) error { + // This is the certificate for the server + secretName := client.ObjectKey{Namespace: cluster.GetNamespace(), Name: cluster.GetServerTLSSecretName()} + // If not specified generate/renew if cluster.Spec.Certificates == nil || cluster.Spec.Certificates.ServerTLSSecret == "" { - return r.ensureLeafCertificate(ctx, cluster, secretName, commonName, caSecret, usage, altDNSNames, nil) + return r.ensureLeafCertificate( + ctx, + cluster, + 
secretName, + cluster.GetServiceReadWriteName(), + caSecret, + certs.CertTypeServer, + cluster.GetClusterAltDNSNames(), + nil, + ) } var serverSecret v1.Secret - err := r.Get(ctx, secretName, &serverSecret) - if err != nil { + if err := r.Get(ctx, secretName, &serverSecret); apierrors.IsNotFound(err) { + return fmt.Errorf("missing specified server TLS secret %s: %w", + cluster.Status.Certificates.ServerTLSSecret, err) + } else if err != nil { return err } + opts := &x509.VerifyOptions{KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}} return validateLeafCertificate(caSecret, &serverSecret, opts) } @@ -279,24 +260,37 @@ func (r *ClusterReconciler) ensureServerLeafCertificate( func (r *ClusterReconciler) ensureReplicationClientLeafCertificate( ctx context.Context, cluster *apiv1.Cluster, - secretName client.ObjectKey, - commonName string, caSecret *v1.Secret, - usage certs.CertType, - altDNSNames []string, - opts *x509.VerifyOptions, ) error { + // Generating postgres client certificate + replicationSecretName := client.ObjectKey{ + Namespace: cluster.GetNamespace(), + Name: cluster.GetReplicationSecretName(), + } + // If not specified generate/renew if cluster.Spec.Certificates == nil || cluster.Spec.Certificates.ReplicationTLSSecret == "" { - return r.ensureLeafCertificate(ctx, cluster, secretName, commonName, caSecret, usage, altDNSNames, nil) + return r.ensureLeafCertificate( + ctx, + cluster, + replicationSecretName, + apiv1.StreamingReplicationUser, + caSecret, + certs.CertTypeClient, + nil, + nil, + ) } var replicationClientSecret v1.Secret - err := r.Get(ctx, secretName, &replicationClientSecret) - if err != nil { + if err := r.Get(ctx, replicationSecretName, &replicationClientSecret); apierrors.IsNotFound(err) { + return fmt.Errorf("missing specified replication TLS secret %s: %w", + cluster.Status.Certificates.ServerTLSSecret, err) + } else if err != nil { return err } + opts := &x509.VerifyOptions{KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}} return validateLeafCertificate(caSecret, &replicationClientSecret, opts) } @@ -329,23 +323,26 @@ func (r *ClusterReconciler) ensureLeafCertificate( ) error { var secret v1.Secret err := r.Get(ctx, secretName, &secret) - if err == nil { + switch { + case err == nil: return r.renewAndUpdateCertificate(ctx, caSecret, &secret, altDNSNames) - } - - serverSecret, err := generateCertificateFromCA(caSecret, commonName, usage, altDNSNames, secretName) - if err != nil { - return err - } + case apierrors.IsNotFound(err): + serverSecret, err := generateCertificateFromCA(caSecret, commonName, usage, altDNSNames, secretName) + if err != nil { + return err + } - utils.SetAsOwnedBy(&serverSecret.ObjectMeta, cluster.ObjectMeta, cluster.TypeMeta) - for k, v := range additionalLabels { - if serverSecret.Labels == nil { - serverSecret.Labels = make(map[string]string) + utils.SetAsOwnedBy(&serverSecret.ObjectMeta, cluster.ObjectMeta, cluster.TypeMeta) + for k, v := range additionalLabels { + if serverSecret.Labels == nil { + serverSecret.Labels = make(map[string]string) + } + serverSecret.Labels[k] = v } - serverSecret.Labels[k] = v + return r.Create(ctx, serverSecret) + default: + return err } - return r.Create(ctx, serverSecret) } // generateCertificateFromCA create a certificate secret using the provided CA secret From 9c3105fc993ab14513c72ff3932751cd02eb5b37 Mon Sep 17 00:00:00 2001 From: Gabriele Quaresima Date: Wed, 9 Oct 2024 09:38:40 +0200 Subject: [PATCH 048/836] fix(database): remove 'deleteDatabase' finalizer from orphan Database 
CRD (#5596) Closes #5593 Signed-off-by: Gabriele Quaresima Signed-off-by: Jaime Silvela Signed-off-by: Marco Nenciarini Co-authored-by: Jaime Silvela Co-authored-by: Marco Nenciarini --- internal/controller/cluster_controller.go | 8 ++ internal/controller/finalizers_delete.go | 68 +++++++++ internal/controller/finalizers_delete_test.go | 134 ++++++++++++++++++ .../controller/database_controller.go | 10 +- pkg/utils/finalizers.go | 23 +++ 5 files changed, 236 insertions(+), 7 deletions(-) create mode 100644 internal/controller/finalizers_delete.go create mode 100644 internal/controller/finalizers_delete_test.go create mode 100644 pkg/utils/finalizers.go diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 2e208ac696..0b29964e4a 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -155,6 +155,14 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct "namespace", req.Namespace, ) } + if err := r.deleteDatabaseFinalizers(ctx, req.NamespacedName); err != nil { + contextLogger.Error( + err, + "error while deleting finalizers of Databases on the cluster", + "clusterName", req.Name, + "namespace", req.Namespace, + ) + } return ctrl.Result{}, err } diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go new file mode 100644 index 0000000000..6e88bf429a --- /dev/null +++ b/internal/controller/finalizers_delete.go @@ -0,0 +1,68 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + + "github.com/cloudnative-pg/machinery/pkg/log" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// deleteDatabaseFinalizers deletes Database object finalizers when the cluster they were in has been deleted +func (r *ClusterReconciler) deleteDatabaseFinalizers(ctx context.Context, namespacedName types.NamespacedName) error { + contextLogger := log.FromContext(ctx) + + databases := apiv1.DatabaseList{} + if err := r.List(ctx, + &databases, + client.InNamespace(namespacedName.Namespace), + ); err != nil { + return err + } + + for idx := range databases.Items { + database := &databases.Items[idx] + + if database.Spec.ClusterRef.Name != namespacedName.Name { + continue + } + + origDatabase := database.DeepCopy() + if controllerutil.RemoveFinalizer(database, utils.DatabaseFinalizerName) { + contextLogger.Debug("Removing finalizer from database", + "finalizer", utils.DatabaseFinalizerName, "database", database.Name) + if err := r.Patch(ctx, database, client.MergeFrom(origDatabase)); err != nil { + contextLogger.Error( + err, + "error while removing finalizer from database", + "database", database.Name, + "oldFinalizerList", origDatabase.ObjectMeta.Finalizers, + "newFinalizerList", database.ObjectMeta.Finalizers, + ) + return err + } + } + } + + return nil +} diff --git a/internal/controller/finalizers_delete_test.go b/internal/controller/finalizers_delete_test.go new file mode 100644 index 0000000000..7354f68d83 --- /dev/null +++ b/internal/controller/finalizers_delete_test.go @@ -0,0 +1,134 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Database CRD finalizers", func() { + var ( + r ClusterReconciler + scheme *runtime.Scheme + namespacedName types.NamespacedName + ) + + BeforeEach(func() { + scheme = schemeBuilder.BuildWithAllKnownScheme() + r = ClusterReconciler{ + Scheme: scheme, + } + namespacedName = types.NamespacedName{ + Namespace: "test", + Name: "cluster", + } + }) + + It("should delete database finalizers for databases on the cluster", func(ctx SpecContext) { + databaseList := &apiv1.DatabaseList{ + Items: []apiv1.Database{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.DatabaseFinalizerName, + }, + Name: "db-1", + Namespace: "test", + }, + Spec: apiv1.DatabaseSpec{ + Name: "db-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.DatabaseFinalizerName, + }, + Name: "db-2", + Namespace: "test", + }, + Spec: apiv1.DatabaseSpec{ + Name: "db-test-2", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build() + r.Client = cli + err := r.deleteDatabaseFinalizers(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + for _, db := range databaseList.Items { + database := &apiv1.Database{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&db), database) + Expect(err).ToNot(HaveOccurred()) + Expect(database.Finalizers).To(BeZero()) + } + }) + + It("should not delete database finalizers for databases in another cluster", + func(ctx SpecContext) { + databaseList := &apiv1.DatabaseList{ + Items: []apiv1.Database{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.DatabaseFinalizerName, + }, + Name: "db-1", + Namespace: "test", + }, + Spec: apiv1.DatabaseSpec{ + Name: "db-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "another-cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build() + r.Client = cli + err := r.deleteDatabaseFinalizers(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + database := &apiv1.Database{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&databaseList.Items[0]), database) + Expect(err).ToNot(HaveOccurred()) + Expect(database.Finalizers).To(BeEquivalentTo([]string{utils.DatabaseFinalizerName})) + }) +}) diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index da078659d2..ee22286369 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -61,10 +61,6 @@ var errClusterIsReplica = fmt.Errorf("waiting for the cluster to become primary" // database reconciliation loop failures const databaseReconciliationInterval = 30 * time.Second -// databaseFinalizerName is the name of the finalizer -// triggering the deletion of the database -const databaseFinalizerName = utils.MetadataNamespace + "/deleteDatabase" - // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=databases,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=databases/status,verbs=get;update;patch @@ -136,14 +132,14 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c // Add the finalizer if we don't have it // nolint:nestif if database.DeletionTimestamp.IsZero() { - if controllerutil.AddFinalizer(&database, databaseFinalizerName) { + 
if controllerutil.AddFinalizer(&database, utils.DatabaseFinalizerName) { if err := r.Update(ctx, &database); err != nil { return ctrl.Result{}, err } } } else { // This database is being deleted - if controllerutil.ContainsFinalizer(&database, databaseFinalizerName) { + if controllerutil.ContainsFinalizer(&database, utils.DatabaseFinalizerName) { if database.Spec.ReclaimPolicy == apiv1.DatabaseReclaimDelete { if err := r.deleteDatabase(ctx, &database); err != nil { return ctrl.Result{}, err @@ -151,7 +147,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c } // remove our finalizer from the list and update it. - controllerutil.RemoveFinalizer(&database, databaseFinalizerName) + controllerutil.RemoveFinalizer(&database, utils.DatabaseFinalizerName) if err := r.Update(ctx, &database); err != nil { return ctrl.Result{}, err } diff --git a/pkg/utils/finalizers.go b/pkg/utils/finalizers.go new file mode 100644 index 0000000000..81d958df6d --- /dev/null +++ b/pkg/utils/finalizers.go @@ -0,0 +1,23 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +const ( + // DatabaseFinalizerName is the name of the finalizer + // triggering the deletion of the database + DatabaseFinalizerName = MetadataNamespace + "/deleteDatabase" +) From 0f82afcb9eb36ba10519a30efc1d0286cb30e715 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 9 Oct 2024 10:57:29 +0200 Subject: [PATCH 049/836] fix: avoid concurrent updates on the designated primary (#5755) This patch fixes a condition where a demoted replica cluster is stuck waiting for the primary to be demoted to a designated primary, due to a concurrent update on the Cluster CR by the operator and the instance manager. The instance manager now uses the update method instead of a patch, relying on the Kubernetes optimistic locking mechanism.
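For reference, the optimistic-locking pattern adopted by this patch looks roughly like the sketch below; the `refreshPrimary` helper and its arguments are illustrative only and not part of the patch:

```go
package main

import (
	"context"

	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// refreshPrimary shows the re-read/mutate/update cycle: on a Conflict
// error the closure is executed again against a fresh copy of the object.
func refreshPrimary(ctx context.Context, c client.Client, key client.ObjectKey, podName string) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Fetch the latest version so the update carries a fresh resourceVersion
		var cluster apiv1.Cluster
		if err := c.Get(ctx, key, &cluster); err != nil {
			return err
		}

		cluster.Status.CurrentPrimary = podName

		// A stale resourceVersion makes Update fail with a Conflict error,
		// which RetryOnConflict intercepts before retrying the closure
		return c.Status().Update(ctx, &cluster)
	})
}
```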
Closes: #5754 Signed-off-by: Leonardo Cecchi Co-authored-by: Armando Ruocco --- .../controller/instance_controller.go | 27 ++++++++++++++----- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 358a6e20ad..c3e65197b7 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -36,6 +36,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -1281,13 +1282,25 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary( // I'm the primary, need to inform the operator log.FromContext(ctx).Info("Setting myself as the current designated primary") - oldCluster := cluster.DeepCopy() - cluster.Status.CurrentPrimary = r.instance.GetPodName() - cluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() - if r.instance.RequiresDesignatedPrimaryTransition { - externalcluster.SetDesignatedPrimaryTransitionCompleted(cluster) - } - return changed, r.client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)) + return changed, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var livingCluster apiv1.Cluster + + err := r.client.Get(ctx, client.ObjectKeyFromObject(cluster), &livingCluster) + if err != nil { + return err + } + + updatedCluster := livingCluster.DeepCopy() + updatedCluster.Status.CurrentPrimary = r.instance.GetPodName() + updatedCluster.Status.CurrentPrimaryTimestamp = pgTime.GetCurrentTimestamp() + if r.instance.RequiresDesignatedPrimaryTransition { + externalcluster.SetDesignatedPrimaryTransitionCompleted(updatedCluster) + } + + cluster.Status = updatedCluster.Status + + return r.client.Status().Update(ctx, updatedCluster) + }) } // waitForWalReceiverDown wait until the wal receiver is down, and it's used From 3373aafb94ca9470453b201757cb0d043cab8990 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 9 Oct 2024 14:14:36 +0200 Subject: [PATCH 050/836] fix(database): SetupLogger must be called only once per reconciliation cycle (#5772) Closes #5773 Signed-off-by: Marco Nenciarini --- internal/management/controller/database_controller_sql.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index cd01a4f926..de8eaebdf3 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -58,6 +58,8 @@ func createDatabase( db *sql.DB, obj *apiv1.Database, ) error { + contextLogger := log.FromContext(ctx) + var sqlCreateDatabase strings.Builder sqlCreateDatabase.WriteString(fmt.Sprintf("CREATE DATABASE %s ", pgx.Identifier{obj.Spec.Name}.Sanitize())) if len(obj.Spec.Owner) > 0 { @@ -100,8 +102,6 @@ func createDatabase( sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize())) } - contextLogger, ctx := log.SetupLogger(ctx) - _, err := db.ExecContext(ctx, sqlCreateDatabase.String()) if err != nil { contextLogger.Error(err, "while creating database", "query", sqlCreateDatabase.String()) @@ -115,7 +115,7 @@ func updateDatabase( db *sql.DB, obj *apiv1.Database, ) error { - contextLogger, 
ctx := log.SetupLogger(ctx) + contextLogger := log.FromContext(ctx) if obj.Spec.AllowConnections != nil { changeAllowConnectionsSQL := fmt.Sprintf( @@ -190,7 +190,7 @@ func dropDatabase( db *sql.DB, obj *apiv1.Database, ) error { - contextLogger, ctx := log.SetupLogger(ctx) + contextLogger := log.FromContext(ctx) query := fmt.Sprintf("DROP DATABASE IF EXISTS %s", pgx.Identifier{obj.Spec.Name}.Sanitize()) _, err := db.ExecContext( ctx, From 2a1786589b680e1c19434fd75ab3f6fd89df9c4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Wed, 9 Oct 2024 18:11:06 +0200 Subject: [PATCH 051/836] feat(database): enforce only one Database object can manage a given PG database (#5711) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #5629 Signed-off-by: Niccolò Fei Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Quaresima Signed-off-by: Marco Nenciarini Co-authored-by: Jaime Silvela Co-authored-by: Gabriele Quaresima Co-authored-by: Marco Nenciarini --- .../controller/database_controller.go | 53 ++++++++++++++++ .../controller/database_controller_test.go | 61 ++++++++++++++++++- 2 files changed, 113 insertions(+), 1 deletion(-) diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index ee22286369..08224c9c6d 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -156,6 +156,15 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, nil } + // Make sure the target PG Database is not being managed by another Database Object + if err := r.ensureOnlyOneManager(ctx, database); err != nil { + return r.failedReconciliation( + ctx, + &database, + err, + ) + } + if err := r.reconcileDatabase( ctx, &database, @@ -173,6 +182,50 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c ) } +// ensureOnlyOneManager verifies that the target PostgreSQL Database specified by the given Database object +// is not already managed by another Database object within the same namespace and cluster. +// If another Database object is found to be managing the same PostgreSQL database, this method returns an error. 
+func (r *DatabaseReconciler) ensureOnlyOneManager( + ctx context.Context, + database apiv1.Database, +) error { + contextLogger := log.FromContext(ctx) + + if database.Status.ObservedGeneration > 0 { + return nil + } + + var databaseList apiv1.DatabaseList + if err := r.Client.List(ctx, &databaseList, + client.InNamespace(r.instance.GetNamespaceName()), + ); err != nil { + contextLogger.Error(err, "while getting database list", "namespace", r.instance.GetNamespaceName()) + return fmt.Errorf("impossible to list database objects in namespace %s: %w", + r.instance.GetNamespaceName(), err) + } + + for _, item := range databaseList.Items { + if item.Name == database.Name { + continue + } + + if item.Spec.ClusterRef.Name != r.instance.GetClusterName() { + continue + } + + if item.Status.ObservedGeneration == 0 { + continue + } + + if item.Spec.Name == database.Spec.Name { + return fmt.Errorf("database %q is already managed by Database object %q", + database.Spec.Name, item.Name) + } + } + + return nil +} + // failedReconciliation marks the reconciliation as failed and logs the corresponding error func (r *DatabaseReconciler) failedReconciliation( ctx context.Context, diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index 81a8373b86..3b252db76e 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -129,7 +129,7 @@ var _ = Describe("Managed Database status", func() { _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ Namespace: database.Namespace, - Name: database.Spec.Name, + Name: database.Name, }}) Expect(err).ToNot(HaveOccurred()) @@ -159,4 +159,63 @@ var _ = Describe("Managed Database status", func() { Expect(database.Status.Ready).To(BeFalse()) Expect(database.Status.Error).To(BeEquivalentTo(exampleError.Error())) }) + + It("marks as failed if the target Database is already being managed", func(ctx SpecContext) { + // The Database obj currently managing "test-database" + currentManager := &apiv1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: "current-manager", + Namespace: "default", + }, + Spec: apiv1.DatabaseSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "test-database", + Owner: "app", + }, + Status: apiv1.DatabaseStatus{ + Ready: true, + ObservedGeneration: 1, + }, + } + + // A new Database Object targeting the same "test-database" + dbDuplicate := &apiv1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: "db-duplicate", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.DatabaseSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "test-database", + Owner: "app", + }, + } + + Expect(fakeClient.Create(ctx, currentManager)).To(Succeed()) + Expect(fakeClient.Create(ctx, dbDuplicate)).To(Succeed()) + + // Reconcile and get the updated object + _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: dbDuplicate.Namespace, + Name: dbDuplicate.Name, + }}) + Expect(err).ToNot(HaveOccurred()) + + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: dbDuplicate.Namespace, + Name: dbDuplicate.Name, + }, dbDuplicate) + Expect(err).ToNot(HaveOccurred()) + + expectedError := fmt.Sprintf("database %q is already managed by Database object %q", + dbDuplicate.Spec.Name, currentManager.Name) + Expect(dbDuplicate.Status.Ready).To(BeFalse()) + 
Expect(dbDuplicate.Status.Error).To(BeEquivalentTo(expectedError)) + Expect(dbDuplicate.Status.ObservedGeneration).To(BeZero()) + }) }) From 12e0094213f501a19981848ca59b8d4da5e83043 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Thu, 10 Oct 2024 11:24:22 +0200 Subject: [PATCH 052/836] fix(plugin): avoid jamming JSON logs with `logs` command (#5775) `kubectl cnpg logs cluster` collects the log stream of all the Pods belonging to the selected cluster and forwards their output to a final destination stream with no coordination besides basic locking at the byte level. This resulted in multiple JSON objects being written at the same time, which made the output garbled. Tools relying on the JSON-stream format could not parse it either. With this patch, we coordinate the reading process, scan the stream line by line, and write each line separately to the final stream. Closes: #5769 Signed-off-by: Leonardo Cecchi Signed-off-by: Marco Nenciarini Signed-off-by: Francesco Canovai Co-authored-by: Marco Nenciarini Co-authored-by: Francesco Canovai --- pkg/utils/logs/cluster_logs.go | 26 ++++++++++++++++++++++---- pkg/utils/logs/cluster_logs_test.go | 4 ++-- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/pkg/utils/logs/cluster_logs.go b/pkg/utils/logs/cluster_logs.go index 84e45f47eb..30c42f6e9b 100644 --- a/pkg/utils/logs/cluster_logs.go +++ b/pkg/utils/logs/cluster_logs.go @@ -17,6 +17,7 @@ limitations under the License. package logs import ( + "bufio" "context" "io" "log" @@ -237,9 +238,26 @@ func (csr *ClusterStreamingRequest) streamInGoroutine( } }() - _, err = io.Copy(output, logStream) - if err != nil { - log.Printf("error sending logs to writer, pod %s: %v", podName, err) - return + scanner := bufio.NewScanner(logStream) + scanner.Buffer(make([]byte, 0, 4096), 1024*1024) + bufferedOutput := bufio.NewWriter(output) + +readLoop: + for scanner.Scan() { + select { + case <-ctx.Done(): + break readLoop + default: + data := scanner.Text() + if _, err := bufferedOutput.Write([]byte(data)); err != nil { + log.Printf("error writing log line to output: %v", err) + } + if err := bufferedOutput.WriteByte('\n'); err != nil { + log.Printf("error writing newline to output: %v", err) + } + if err := bufferedOutput.Flush(); err != nil { + log.Printf("error flushing output: %v", err) + } + } } } diff --git a/pkg/utils/logs/cluster_logs_test.go b/pkg/utils/logs/cluster_logs_test.go index 1622498168..ed057c71b3 100644 --- a/pkg/utils/logs/cluster_logs_test.go +++ b/pkg/utils/logs/cluster_logs_test.go @@ -71,7 +71,7 @@ var _ = Describe("Cluster logging tests", func() { }() ctx.Done() wait.Wait() - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) }) It("should catch extra logs if given the follow option", func(ctx context.Context) { @@ -98,6 +98,6 @@ var _ = Describe("Cluster logging tests", func() { time.Sleep(350 * time.Millisecond) cancel() // the fake pod will be seen twice - Expect(logBuffer.String()).To(BeEquivalentTo("fake logsfake logs")) + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\nfake logs\n")) }) }) From cf316c9266fbc750d14eecc2844ac8295664b173 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." 
Date: Thu, 10 Oct 2024 15:20:17 +0200 Subject: [PATCH 053/836] docs(fix): add missing links to the machinery and barman-cloud APIs (#5748) Running `make apidoc` threw the following error: `External link source for`. This was caused by a missing configuration needed to properly resolve the URLs in the documentation; this patch adds the proper URLs for the machinery and barman-cloud APIs. Signed-off-by: Jonathan Gonzalez V. --- docs/config.yaml | 4 ++++ docs/src/cloudnative-pg.v1.md | 40 +++++++++++++++++------------------ 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/docs/config.yaml b/docs/config.yaml index cc5cde9174..aa77638cf6 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -24,6 +24,10 @@ externalPackages: target: https://pkg.go.dev/time#Duration - match: ^k8s\.io/(api|apimachinery/pkg/apis)/ target: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#{{- lower .TypeIdentifier -}}-{{- arrIndex .PackageSegments -1 -}}-{{- arrIndex .PackageSegments -2 -}} + - match: ^github\.com/cloudnative-pg/machinery + target: https://pkg.go.dev/github.com/cloudnative-pg/machinery/pkg/api/#{{- .TypeIdentifier }} + - match: ^github\.com/cloudnative-pg/barman-cloud + target: https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api/#{{- .TypeIdentifier }} hideTypePatterns: - "ParseError$" diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 22eb5d401e..54f24f842d 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -366,7 +366,7 @@ documentation

barmanObjectStore
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration +github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration

The configuration for the barman-cloud tool suite

@@ -543,13 +543,13 @@ information that could be needed to correctly restore it.

FieldDescription LocalObjectReference
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference (Members of LocalObjectReference are embedded into this type.) No description provided. endpointCA
-github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector

EndpointCA store the CA bundle of the barman endpoint. @@ -575,7 +575,7 @@ errors with certificate issuer and barman-cloud-wal-archive.

FieldDescription cluster [Required]
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The cluster to backup

@@ -643,14 +643,14 @@ Overrides the default settings specified in the cluster '.backup.volumeSnapshot. FieldDescription BarmanCredentials
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanCredentials +github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanCredentials (Members of BarmanCredentials are embedded into this type.)

The potential credentials for each cloud provider

endpointCA
-github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector

EndpointCA store the CA bundle of the barman endpoint. @@ -912,7 +912,7 @@ by applications. Defaults to the value of the database key.

secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

Name of the secret containing the initial credentials for the @@ -1082,7 +1082,7 @@ by applications. Defaults to the value of the database key.

secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

Name of the secret containing the initial credentials for the @@ -1178,7 +1178,7 @@ by applications. Defaults to the value of the database key.

secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

Name of the secret containing the initial credentials for the @@ -1490,7 +1490,7 @@ Undefined or 0 disable synchronous replication.

superuserSecret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The secret containing the superuser password. If not defined a new @@ -1517,7 +1517,7 @@ user by setting it to NULL. Disabled by default.

imagePullSecrets
-[]github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +[]github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The list of pull secrets to be used to pull the images

@@ -2577,7 +2577,7 @@ secure and efficient password management for external clusters.

barmanObjectStore
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration +github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration

The configuration for the barman-cloud tool suite

@@ -3167,14 +3167,14 @@ Default: false.

customQueriesConfigMap
-[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector +[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector

The list of config maps containing the custom queries

customQueriesSecret
-[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector

The list of secrets containing the custom queries

@@ -3409,7 +3409,7 @@ by pgbouncer

authQuerySecret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The credentials of the user that need to be used for the authentication @@ -3707,7 +3707,7 @@ part for now.

FieldDescription cluster [Required]
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

This is the cluster reference on which the Pooler will work. @@ -4205,7 +4205,7 @@ Reference: https://www.postgresql.org/docs/current/sql-createrole.html

passwordSecret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

Secret containing the password of the role (if present) @@ -4332,14 +4332,14 @@ in their respective arrays.

FieldDescription secretRefs
-[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector +[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector

SecretRefs holds a list of references to Secrets

configMapRefs
-[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector +[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector

ConfigMapRefs holds a list of references to ConfigMaps

@@ -4386,7 +4386,7 @@ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format

cluster [Required]
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference +github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference

The cluster to backup

From 6ae24ac9435c892a8738c78cc85a6aeb9bb87086 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Thu, 10 Oct 2024 16:24:11 +0200 Subject: [PATCH 054/836] feat(database): add support for PG17 builtin_locale (#5745) Closes #5709 Signed-off-by: Jaime Silvela --- api/v1/database_types.go | 10 ++++++++++ .../bases/postgresql.cnpg.io_databases.yaml | 12 ++++++++++++ docs/src/cloudnative-pg.v1.md | 14 ++++++++++++++ .../controller/database_controller_sql.go | 7 +++++++ .../database_controller_sql_test.go | 19 +++++++++++++++++++ 5 files changed, 62 insertions(+) diff --git a/api/v1/database_types.go b/api/v1/database_types.go index dd7bd58cf5..243285dcbd 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -90,6 +90,16 @@ type DatabaseSpec struct { // +optional IcuRules string `json:"icu_rules,omitempty"` + // The BUILTIN_LOCALE (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtin_locale is immutable" + // +optional + BuiltinLocale string `json:"builtin_locale,omitempty"` + + // The COLLATION_VERSION (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collation_version is immutable" + // +optional + CollationVersion string `json:"collation_version,omitempty"` + // True when the database is a template // +optional IsTemplate *bool `json:"isTemplate,omitempty"` diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index f49202505e..7c29850d0d 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -61,6 +61,12 @@ spec: allowConnections: description: True when connections to this database are allowed type: boolean + builtin_locale: + description: The BUILTIN_LOCALE (cannot be changed) + type: string + x-kubernetes-validations: + - message: builtin_locale is immutable + rule: self == oldSelf cluster: description: The corresponding cluster properties: @@ -75,6 +81,12 @@ spec: type: string type: object x-kubernetes-map-type: atomic + collation_version: + description: The COLLATION_VERSION (cannot be changed) + type: string + x-kubernetes-validations: + - message: collation_version is immutable + rule: self == oldSelf connectionLimit: description: |- Connection limit, -1 means no limit and -2 means the diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 54f24f842d..ba0d311131 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2356,6 +2356,20 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-

The ICU_RULES (cannot be changed)

+builtin_locale
+string + + +

The BUILTIN_LOCALE (cannot be changed)

+ + +collation_version
+string + + +

The COLLATION_VERSION (cannot be changed)

+ + isTemplate
bool diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index de8eaebdf3..b55ac5a659 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -101,6 +101,13 @@ func createDatabase( if obj.Spec.IcuRules != "" { sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize())) } + if obj.Spec.BuiltinLocale != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" BUILTIN_LOCALE %s", pgx.Identifier{obj.Spec.BuiltinLocale}.Sanitize())) + } + if obj.Spec.CollationVersion != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" COLLATION_VERSION %s", + pgx.Identifier{obj.Spec.CollationVersion}.Sanitize())) + } _, err := db.ExecContext(ctx, sqlCreateDatabase.String()) if err != nil { diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go index b95a13e076..25a697db47 100644 --- a/internal/management/controller/database_controller_sql_test.go +++ b/internal/management/controller/database_controller_sql_test.go @@ -133,6 +133,25 @@ var _ = Describe("Managed Database SQL", func() { err = createDatabase(ctx, db, database) Expect(err).ToNot(HaveOccurred()) }) + + It("should create a new Database with builtin locale", func(ctx SpecContext) { + database.Spec.LocaleProvider = "builtin" + database.Spec.BuiltinLocale = "C" + database.Spec.CollationVersion = "1.2.3" + + expectedValue := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s "+ + "LOCALE_PROVIDER %s BUILTIN_LOCALE %s COLLATION_VERSION %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), + pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), pgx.Identifier{database.Spec.BuiltinLocale}.Sanitize(), + pgx.Identifier{database.Spec.CollationVersion}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) + + err = createDatabase(ctx, db, database) + Expect(err).ToNot(HaveOccurred()) + }) }) Context("updateDatabase", func() { From 19f941b755a2390b2731c641d915ddff666bf306 Mon Sep 17 00:00:00 2001 From: Gabriele Fedi <91485518+GabriFedi97@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:07:20 +0200 Subject: [PATCH 055/836] fix(cnpg-i): ensure instance manager invokes only the available plugins (#5651) The instance manager should try to load only the available plugins, since some of those declared in the Cluster spec might be available only to the operator.
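In essence, the fix intersects the set of plugins enabled in the Cluster spec with the set discovered locally; a standalone sketch of that intersection, with made-up plugin names, follows:

```go
package main

import (
	"fmt"

	"github.com/cloudnative-pg/machinery/pkg/stringset"
)

func main() {
	// Plugins discovered in the local unix-socket directory
	available := stringset.From([]string{"barman-cloud", "metrics"})

	// Plugins enabled in the Cluster spec; one of them is operator-only
	enabled := stringset.From([]string{"barman-cloud", "operator-only-plugin"})

	// Load only the plugins that are both enabled and locally available
	toLoad := available.Intersect(enabled).ToList()
	fmt.Println(toLoad) // prints [barman-cloud]
}
```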
closes #5648 --------- Signed-off-by: Gabriele Fedi Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Leonardo Cecchi Co-authored-by: Armando Ruocco --- go.mod | 2 +- go.sum | 4 ++-- internal/cmd/manager/controller/controller.go | 2 +- internal/cmd/manager/walarchive/cmd.go | 13 ++++++++++-- internal/cmd/manager/walrestore/cmd.go | 13 ++++++++++-- internal/cnpi/plugin/repository/setup.go | 20 ++++++++++++------- .../postgres/webserver/plugin_backup.go | 12 ++++++++++- 7 files changed, 50 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index dd3d77f58a..c6b261104a 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 - github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 + github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index b6aa799796..dcf0977370 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8= -github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 h1:DHaSe0PoLnIQFWIpRqB9RiBlNzbdLuVbiCtc9tN+FL0= -github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= +github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72 h1:3pgtSYhv3RDd+51bnlqICNrcVpWQQvriCOvkxtbZpaE= +github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index ea66ab17f8..b61f967844 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ -219,7 +219,7 @@ func RunController( } pluginRepository := repository.New() - if err := pluginRepository.RegisterUnixSocketPluginsInPath( + if _, err := pluginRepository.RegisterUnixSocketPluginsInPath( conf.PluginSocketDir, ); err != nil { setupLog.Error(err, "Unable to load sidecar CNPG-i plugins, skipping") diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index 616cecefa0..bfd897329d 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -29,6 +29,7 @@ import ( barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -251,12 
+252,20 @@ func archiveWALViaPlugins( contextLogger := log.FromContext(ctx) plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) + if err != nil { contextLogger.Error(err, "Error while loading local plugins") } defer plugins.Close() - client, err := pluginClient.WithPlugins(ctx, plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + availablePluginNamesSet := stringset.From(availablePluginNames) + enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames()) + + client, err := pluginClient.WithPlugins( + ctx, + plugins, + availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()..., + ) if err != nil { contextLogger.Error(err, "Error while loading required plugins") return err diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index c60c0cf194..50d237ff06 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -29,6 +29,7 @@ import ( barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/spf13/cobra" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -247,12 +248,20 @@ func restoreWALViaPlugins( contextLogger := log.FromContext(ctx) plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) + if err != nil { contextLogger.Error(err, "Error while loading local plugins") } defer plugins.Close() - client, err := pluginClient.WithPlugins(ctx, plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + availablePluginNamesSet := stringset.From(availablePluginNames) + enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames()) + + client, err := pluginClient.WithPlugins( + ctx, + plugins, + availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()..., + ) if err != nil { contextLogger.Error(err, "Error while loading required plugins") return err diff --git a/internal/cnpi/plugin/repository/setup.go b/internal/cnpi/plugin/repository/setup.go index 76da6773e8..e43b5f1091 100644 --- a/internal/cnpi/plugin/repository/setup.go +++ b/internal/cnpi/plugin/repository/setup.go @@ -25,6 +25,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/puddle/v2" + "go.uber.org/multierr" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" ) @@ -41,8 +42,9 @@ type Interface interface { RegisterRemotePlugin(name string, address string, tlsConfig *tls.Config) error // RegisterUnixSocketPluginsInPath scans the passed directory - // for plugins that are deployed with unix sockets - RegisterUnixSocketPluginsInPath(pluginsPath string) error + // for plugins that are deployed with unix sockets. + // Return the list of loaded plugin names + RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error) // GetConnection gets a connection to the plugin with specified name GetConnection(ctx context.Context, name string) (connection.Interface, error) @@ -149,30 +151,34 @@ func (r *data) RegisterRemotePlugin(name string, address string, tlsConfig *tls. 
}) } -func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) error { +func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error) { entries, err := os.ReadDir(pluginsPath) if err != nil { // There's no need to complain if the plugin folder doesn't exist if os.IsNotExist(err) { - return nil + return nil, nil } // Otherwise, this means we can't read that folder and // is a real problem - return err + return nil, err } + pluginsNames := make([]string, 0, len(entries)) + var errors error for _, entry := range entries { name := entry.Name() if err := r.registerUnixSocketPlugin( name, path.Join(pluginsPath, name), ); err != nil { - return err + errors = multierr.Append(errors, err) + } else { + pluginsNames = append(pluginsNames, name) } } - return nil + return pluginsNames, errors } // New creates a new plugin repository diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 0619cff770..d0cc5043eb 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -55,6 +55,16 @@ func NewPluginBackupCommand( ) *PluginBackupCommand { backup.EnsureGVKIsPresent() + logger := log.WithValues( + "pluginConfiguration", backup.Spec.PluginConfiguration, + "backupName", backup.Name, + "backupNamespace", backup.Name) + + plugins := repository.New() + if _, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + logger.Error(err, "Error while discovering plugins") + } + return &PluginBackupCommand{ Cluster: cluster, Backup: backup, @@ -75,7 +85,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { "backupNamespace", b.Backup.Name) plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + if _, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { contextLogger.Error(err, "Error while discovering plugins") } defer plugins.Close() From 68e6b79e32257f81b2b34f74dc88e4d58803bccb Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Thu, 10 Oct 2024 20:16:23 +0200 Subject: [PATCH 056/836] feat(plugin): compact output of `status` command (#5765) Make `--verbose` a countable option and introduce multiple levels of verbosity. The following panels have been moved to level 1 of verbosity: - Physical backups - Unmanaged Replication Slot Status - Managed roles status - Tablespaces status - Pod Disruption Budgets status The following panels have been moved to level 2: - PostgreSQL configuration - PostgreSQL HBA - Certificates Status Physical base backups are displayed when they are in progress even with verbosity 0.
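The countable flag relies on the standard pflag count-flag support used below; a minimal standalone sketch (command name and output are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "status",
		RunE: func(cmd *cobra.Command, _ []string) error {
			// Each repetition of -v bumps the counter: 0, 1, 2, ...
			verbosity, err := cmd.Flags().GetCount("verbose")
			if err != nil {
				return err
			}
			fmt.Println("verbosity level:", verbosity)
			return nil
		},
	}
	cmd.Flags().CountP("verbose", "v", "Increase verbosity to display more information")

	// Simulate "status -v -v": prints "verbosity level: 2"
	cmd.SetArgs([]string{"-v", "-v"})
	_ = cmd.Execute()
}
```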
Closes #5757 Signed-off-by: Gabriele Bartolini Signed-off-by: Leonardo Cecchi Signed-off-by: Jaime Silvela Co-authored-by: Leonardo Cecchi Co-authored-by: Jaime Silvela --- docs/src/kubectl-plugin.md | 182 ++++++++++----------------- internal/cmd/plugin/status/cmd.go | 6 +- internal/cmd/plugin/status/status.go | 56 +++++---- 3 files changed, 101 insertions(+), 143 deletions(-) diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 01d9af2a29..1a2da0c607 100755 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -250,143 +250,95 @@ kubectl cnpg status sandbox ``` ```shell -Cluster in healthy state -Name: sandbox -Namespace: default -System ID: 7039966298120953877 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0 -Primary instance: sandbox-2 -Instances: 3 -Ready instances: 3 -Current Write LSN: 3AF/EAFA6168 (Timeline: 8 - WAL File: 00000008000003AF00000075) +Cluster Summary +Name: default/sandbox +System ID: 7423474350493388827 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +Primary instance: sandbox-1 +Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 1m14s) +Status: Cluster in healthy state +Instances: 3 +Ready instances: 3 +Size: 126M +Current Write LSN: 0/604DE38 (Timeline: 1 - WAL File: 000000010000000000000006) Continuous Backup status -First Point of Recoverability: Not Available -Working WAL archiving: OK -Last Archived WAL: 00000008000003AE00000079 @ 2021-12-14T10:16:29.340047Z -Last Failed WAL: - - -Certificates Status -Certificate Name Expiration Date Days Left Until Expiration ----------------- --------------- -------------------------- -cluster-example-ca 2022-05-05 15:02:42 +0000 UTC 87.23 -cluster-example-replication 2022-05-05 15:02:42 +0000 UTC 87.23 -cluster-example-server 2022-05-05 15:02:42 +0000 UTC 87.23 +Not configured Streaming Replication status -Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority ----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- -sandbox-1 3AF/EB0524F0 3AF/EB011760 3AF/EAFEDE50 3AF/EAFEDE50 00:00:00.004461 00:00:00.007901 00:00:00.007901 streaming quorum 1 -sandbox-3 3AF/EB0524F0 3AF/EB030B00 3AF/EB030B00 3AF/EB011760 00:00:00.000977 00:00:00.004194 00:00:00.008252 streaming quorum 1 +Replication Slots Enabled +Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority Replication Slot +---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- ---------------- +sandbox-2 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00:00:00 streaming async 0 active +sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00:00:00 streaming async 0 active Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version ----- ------------- ----------- ---------------- ------ --- --------------- -sandbox-1 302 GB 3AF/E9FFFFE0 Standby (sync) OK Guaranteed 1.11.0 -sandbox-2 302 GB 3AF/EAFA6168 Primary OK Guaranteed 1.11.0 -sandbox-3 302 GB 3AF/EBAD5D18 Standby (sync) OK Guaranteed 1.11.0 +Name Current LSN Replication role Status QoS Manager Version Node +---- ----------- ---------------- ------ --- --------------- ---- +sandbox-1 0/604DE38 Primary OK BestEffort 1.24.0 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker ``` -You can also get a 
more verbose version of the status by adding -`--verbose` or just `-v` +If you require more detailed status information, use the `--verbose` option (or +`-v` for short). The level of detail increases each time the flag is repeated: ```shell kubectl cnpg status sandbox --verbose ``` ```shell -Cluster in healthy state -Name: sandbox -Namespace: default -System ID: 7039966298120953877 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0 -Primary instance: sandbox-2 -Instances: 3 -Ready instances: 3 -Current Write LSN: 3B1/61DE3158 (Timeline: 8 - WAL File: 00000008000003B100000030) - -PostgreSQL Configuration -archive_command = '/controller/manager wal-archive --log-destination /controller/log/postgres.json %p' -archive_mode = 'on' -archive_timeout = '5min' -checkpoint_completion_target = '0.9' -checkpoint_timeout = '900s' -cluster_name = 'sandbox' -dynamic_shared_memory_type = 'sysv' -full_page_writes = 'on' -hot_standby = 'true' -jit = 'on' -listen_addresses = '*' -log_autovacuum_min_duration = '1s' -log_checkpoints = 'on' -log_destination = 'csvlog' -log_directory = '/controller/log' -log_filename = 'postgres' -log_lock_waits = 'on' -log_min_duration_statement = '1000' -log_rotation_age = '0' -log_rotation_size = '0' -log_statement = 'ddl' -log_temp_files = '1024' -log_truncate_on_rotation = 'false' -logging_collector = 'on' -maintenance_work_mem = '2GB' -max_connections = '1000' -max_parallel_workers = '32' -max_replication_slots = '32' -max_wal_size = '15GB' -max_worker_processes = '32' -pg_stat_statements.max = '10000' -pg_stat_statements.track = 'all' -port = '5432' -shared_buffers = '16GB' -shared_memory_type = 'sysv' -shared_preload_libraries = 'pg_stat_statements' -ssl = 'on' -ssl_ca_file = '/controller/certificates/client-ca.crt' -ssl_cert_file = '/controller/certificates/server.crt' -ssl_key_file = '/controller/certificates/server.key' -synchronous_standby_names = 'ANY 1 ("sandbox-1","sandbox-3")' -unix_socket_directories = '/controller/run' -wal_keep_size = '512MB' -wal_level = 'logical' -wal_log_hints = 'on' -cnpg.config_sha256 = '3cfa683e23fe513afaee7c97b50ce0628e0cc634bca8b096517538a9a4428efc' - -PostgreSQL HBA Rules - -# Grant local access -local all all peer map=local - -# Require client certificate authentication for the streaming_replica user -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert -hostssl all cnpg_pooler_pgbouncer all cert - -# Otherwise use the default authentication method -host all all all scram-sha-256 - +Cluster Summary +Name: default/sandbox +System ID: 7423474350493388827 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +Primary instance: sandbox-1 +Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 2m4s) +Status: Cluster in healthy state +Instances: 3 +Ready instances: 3 +Size: 126M +Current Write LSN: 0/6053720 (Timeline: 1 - WAL File: 000000010000000000000006) Continuous Backup status -First Point of Recoverability: Not Available -Working WAL archiving: OK -Last Archived WAL: 00000008000003B00000001D @ 2021-12-14T10:20:42.272815Z -Last Failed WAL: - +Not configured + +Physical backups +No running physical backups found Streaming Replication status -Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority ----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- -sandbox-1 3B1/61E26448 3B1/61DF82F0 3B1/61DF82F0 3B1/61DF82F0 00:00:00.000333 00:00:00.000333 00:00:00.005484 streaming quorum 1 
-sandbox-3 3B1/61E26448 3B1/61E26448 3B1/61DF82F0 3B1/61DF82F0 00:00:00.000756 00:00:00.000756 00:00:00.000756 streaming quorum 1 +Replication Slots Enabled +Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority Replication Slot Slot Restart LSN Slot WAL Status Slot Safe WAL Size +---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- ---------------- ---------------- --------------- ------------------ +sandbox-2 0/6053720 0/6053720 0/6053720 0/6053720 00:00:00 00:00:00 00:00:00 streaming async 0 active 0/6053720 reserved NULL +sandbox-3 0/6053720 0/6053720 0/6053720 0/6053720 00:00:00 00:00:00 00:00:00 streaming async 0 active 0/6053720 reserved NULL + +Unmanaged Replication Slot Status +No unmanaged replication slots found + +Managed roles status +No roles managed + +Tablespaces status +No managed tablespaces + +Pod Disruption Budgets status +Name Role Expected Pods Current Healthy Minimum Desired Healthy Disruptions Allowed +---- ---- ------------- --------------- ----------------------- ------------------- +sandbox replica 2 2 1 1 +sandbox-primary primary 1 1 1 0 Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version ----- ------------- ----------- ---------------- ------ --- --------------- -sandbox-1 3B1/610204B8 Standby (sync) OK Guaranteed 1.11.0 -sandbox-2 3B1/61DE3158 Primary OK Guaranteed 1.11.0 -sandbox-3 3B1/62618470 Standby (sync) OK Guaranteed 1.11.0 +Name Current LSN Replication role Status QoS Manager Version Node +---- ----------- ---------------- ------ --- --------------- ---- +sandbox-1 0/6053720 Primary OK BestEffort 1.24.0 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker ``` +With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can +also view PostgreSQL configuration, HBA settings, and certificates. + The command also supports output in `yaml` and `json` format. ### Promote diff --git a/internal/cmd/plugin/status/cmd.go b/internal/cmd/plugin/status/cmd.go index 4ca0f70a63..a22594523d 100644 --- a/internal/cmd/plugin/status/cmd.go +++ b/internal/cmd/plugin/status/cmd.go @@ -41,15 +41,15 @@ func NewCmd() *cobra.Command { ctx := cmd.Context() clusterName := args[0] - verbose, _ := cmd.Flags().GetBool("verbose") + verbose, _ := cmd.Flags().GetCount("verbose") output, _ := cmd.Flags().GetString("output") return Status(ctx, clusterName, verbose, plugin.OutputFormat(output)) }, } - statusCmd.Flags().BoolP( - "verbose", "v", false, "Include PostgreSQL configuration, HBA rules, and full replication slots info") + statusCmd.Flags().CountP( + "verbose", "v", "Increase verbosity to display more information") statusCmd.Flags().StringP( "output", "o", "text", "Output format. 
One of text|json") diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 1889975c55..3ae465b96a 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -99,7 +99,12 @@ func getPrintableIntegerPointer(i *int) string { } // Status implements the "status" subcommand -func Status(ctx context.Context, clusterName string, verbose bool, format plugin.OutputFormat) error { +func Status( + ctx context.Context, + clusterName string, + verbosity int, + format plugin.OutputFormat, +) error { var cluster apiv1.Cluster var errs []error @@ -123,17 +128,19 @@ func Status(ctx context.Context, clusterName string, verbose bool, format plugin status.printHibernationInfo() status.printDemotionTokenInfo() status.printPromotionTokenInfo() - if verbose { + if verbosity > 1 { errs = append(errs, status.printPostgresConfiguration(ctx, clientInterface)...) + status.printCertificatesStatus() } - status.printCertificatesStatus() status.printBackupStatus() - status.printBasebackupStatus() - status.printReplicaStatus(verbose) - status.printUnmanagedReplicationSlotStatus() - status.printRoleManagerStatus() - status.printTablespacesStatus() - status.printPodDisruptionBudgetStatus() + status.printBasebackupStatus(verbosity) + status.printReplicaStatus(verbosity) + if verbosity > 0 { + status.printUnmanagedReplicationSlotStatus() + status.printRoleManagerStatus() + status.printTablespacesStatus() + status.printPodDisruptionBudgetStatus() + } status.printInstancesStatus() if len(errs) > 0 { @@ -217,10 +224,10 @@ func (fullStatus *PostgresqlStatus) getClusterSize(ctx context.Context, client k return size, nil } -func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, client kubernetes.Interface) { +func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClient kubernetes.Interface) { summary := tabby.New() - clusterSize, clusterSizeErr := fullStatus.getClusterSize(ctx, client) + clusterSize, clusterSizeErr := fullStatus.getClusterSize(ctx, k8sClient) cluster := fullStatus.Cluster @@ -243,8 +250,7 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, client k isPrimaryFenced := cluster.IsInstanceFenced(cluster.Status.CurrentPrimary) primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() - summary.AddLine("Name:", cluster.Name) - summary.AddLine("Namespace:", cluster.Namespace) + summary.AddLine("Name", client.ObjectKeyFromObject(cluster).String()) if primaryInstanceStatus != nil { summary.AddLine("System ID:", primaryInstanceStatus.SystemID) @@ -529,9 +535,9 @@ func (fullStatus *PostgresqlStatus) areReplicationSlotsEnabled() bool { fullStatus.Cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() } -func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.Tabby, verbose bool) { +func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.Tabby, verbosity int) { switch { - case fullStatus.areReplicationSlotsEnabled() && verbose: + case fullStatus.areReplicationSlotsEnabled() && verbosity > 0: table.AddHeader( "Name", "Sent LSN", @@ -549,7 +555,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.T "Slot WAL Status", "Slot Safe WAL Size", ) - case fullStatus.areReplicationSlotsEnabled() && !verbose: + case fullStatus.areReplicationSlotsEnabled() && verbosity == 0: table.AddHeader( "Name", "Sent LSN", @@ -585,7 +591,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.T func 
(fullStatus *PostgresqlStatus) addReplicationSlotsColumns( applicationName string, columns *[]interface{}, - verbose bool, + verbosity int, ) { printSlotActivity := func(isActive bool) string { if isActive { @@ -595,18 +601,18 @@ func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns( } slot := fullStatus.getPrintableReplicationSlotInfo(applicationName) switch { - case slot != nil && verbose: + case slot != nil && verbosity > 0: *columns = append(*columns, printSlotActivity(slot.Active), slot.RestartLsn, slot.WalStatus, getPrintableIntegerPointer(slot.SafeWalSize), ) - case slot != nil && !verbose: + case slot != nil && verbosity == 0: *columns = append(*columns, printSlotActivity(slot.Active), ) - case slot == nil && verbose: + case slot == nil && verbosity > 0: *columns = append(*columns, "-", "-", @@ -620,7 +626,7 @@ func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns( } } -func (fullStatus *PostgresqlStatus) printReplicaStatus(verbose bool) { +func (fullStatus *PostgresqlStatus) printReplicaStatus(verbosity int) { if fullStatus.Cluster.IsReplica() { return } @@ -650,13 +656,13 @@ func (fullStatus *PostgresqlStatus) printReplicaStatus(verbose bool) { } status := tabby.New() - fullStatus.printReplicaStatusTableHeader(status, verbose) + fullStatus.printReplicaStatusTableHeader(status, verbosity) // print Replication Slots columns only if the cluster has replication slots enabled addReplicationSlotsColumns := func(_ string, _ *[]interface{}) {} if fullStatus.areReplicationSlotsEnabled() { addReplicationSlotsColumns = func(applicationName string, columns *[]interface{}) { - fullStatus.addReplicationSlotsColumns(applicationName, columns, verbose) + fullStatus.addReplicationSlotsColumns(applicationName, columns, verbosity) } } @@ -977,7 +983,7 @@ func (fullStatus *PostgresqlStatus) printPodDisruptionBudgetStatus() { fmt.Println() } -func (fullStatus *PostgresqlStatus) printBasebackupStatus() { +func (fullStatus *PostgresqlStatus) printBasebackupStatus(verbosity int) { const header = "Physical backups" primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() @@ -988,7 +994,7 @@ func (fullStatus *PostgresqlStatus) printBasebackupStatus() { return } - if len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 { + if verbosity > 0 && len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 { fmt.Println(aurora.Green(header)) fmt.Println(aurora.Yellow("No running physical backups found").String()) fmt.Println() From c188f4b09a21d8075dd37dc2ca68b7218ca2efda Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Sat, 12 Oct 2024 10:05:51 +0200 Subject: [PATCH 057/836] fix(plugin): ensure pgadmin4 has a writable home directory (#5800) Closes: #5799 Signed-off-by: Leonardo Cecchi --- internal/cmd/plugin/pgadmin/pgadmin.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/internal/cmd/plugin/pgadmin/pgadmin.go b/internal/cmd/plugin/pgadmin/pgadmin.go index 44a23f89c4..078164a29e 100644 --- a/internal/cmd/plugin/pgadmin/pgadmin.go +++ b/internal/cmd/plugin/pgadmin/pgadmin.go @@ -26,6 +26,7 @@ import ( "github.com/sethvargo/go-password/password" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" @@ -273,6 +274,14 @@ func (cmd *command) generateDeployment() *appsv1.Deployment { Name: pgAdminPassFileVolumeName, MountPath: pgAdminPassFileVolumePath, }, + { + Name: "tmp", + MountPath: "/tmp", + }, + { + Name: "home", + 
MountPath: "/home/pgadmin", + }, }, ReadinessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ @@ -303,6 +312,21 @@ func (cmd *command) generateDeployment() *appsv1.Deployment { }, }, }, + { + Name: "home", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: ptr.To(resource.MustParse("100Mi")), + }, + }, + }, }, }, }, From d20fb269191df2484a562f5ee5cf96f76345c4fe Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 16:56:03 +0200 Subject: [PATCH 058/836] chore(deps): update spellcheck to v0.43.0 (main) (#5807) --- .github/workflows/spellcheck.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index a8bbd7866c..847ca8891b 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -28,4 +28,4 @@ jobs: uses: actions/checkout@v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@0.42.0 + uses: rojopolis/spellcheck-github-actions@0.43.0 diff --git a/Makefile b/Makefile index d9577c7cd0..61dbcda441 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions KUSTOMIZE_VERSION ?= v5.4.3 CONTROLLER_TOOLS_VERSION ?= v0.16.3 GORELEASER_VERSION ?= v2.3.2 -SPELLCHECK_VERSION ?= 0.42.0 +SPELLCHECK_VERSION ?= 0.43.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 OPM_VERSION ?= v1.47.0 From 47d82aba63108510934cbd7ddeeb117e6be22df6 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Sun, 13 Oct 2024 20:05:28 +0200 Subject: [PATCH 059/836] chore: switch to dl.k8s.io on the E2E tests (#5814) Following this https://github.com/kubernetes/k8s.io/issues/2396 we should have moved away a long time ago, now this change happened and the E2E tests are failing due to a wrong link to download the kubectl client. Signed-off-by: Jonathan Gonzalez V. 
--- hack/setup-cluster.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 00ad1bfa30..636046527e 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -299,7 +299,7 @@ install_kubectl() { local binary="${bindir}/kubectl" - curl -sL "https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION#v}/bin/${OS}/${ARCH}/kubectl" -o "${binary}" + curl -sL "https://dl.k8s.io/release/v${KUBECTL_VERSION#v}/bin/${OS}/${ARCH}/kubectl" -o "${binary}" chmod +x "${binary}" } From 37d29b4867f2719dac485c7a0c8660c7eb3573a3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 13 Oct 2024 23:00:34 +0200 Subject: [PATCH 060/836] chore(deps): update kindest/node docker tag to v1.31.1 (main) (#5756) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 3f77232917..f37c274a8d 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.31.0 +KIND_NODE_DEFAULT_VERSION=v1.31.1 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 636046527e..d8ba973fe7 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.31.0 +KIND_NODE_DEFAULT_VERSION=v1.31.1 K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.1.0 From bda9e45531e87de84c828fe71053752f1d5094eb Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 11:26:28 +0200 Subject: [PATCH 061/836] chore(deps): update module sigs.k8s.io/controller-tools to v0.16.4 (main) (#5815) --- Makefile | 2 +- config/crd/bases/postgresql.cnpg.io_backups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusters.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_databases.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_poolers.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 61dbcda441..3e363b0fd7 100644 --- a/Makefile +++ b/Makefile @@ -42,7 +42,7 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.4.3 -CONTROLLER_TOOLS_VERSION ?= v0.16.3 +CONTROLLER_TOOLS_VERSION ?= v0.16.4 GORELEASER_VERSION ?= v2.3.2 SPELLCHECK_VERSION ?= 0.43.0 WOKE_VERSION ?= 0.19.0 diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index 96be8399da..9e1b5295a4 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: 
v0.16.4 name: backups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml index 4581679377..0bbb4455be 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: clusterimagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 128d44e47d..d2f810b24e 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: clusters.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index 7c29850d0d..ea1fbfdba5 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: databases.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml index c961bf2eda..1205cd2261 100644 --- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: imagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index e09e39b615..59d32f7571 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: poolers.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml index 15fb35c0ba..6c43327c8e 100644 --- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: scheduledbackups.postgresql.cnpg.io spec: group: postgresql.cnpg.io From 8a2bdb8d05b41f787621374f03089954bf9c47ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Mon, 14 Oct 2024 12:08:50 +0200 Subject: 
[PATCH 062/836] ci: fix the registry-clean workflow (#5763) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The new version of the workflow allows us to avoid specifying the organization and to use the GitHub token instead, which means that the workflow can work on any fork. Closes #5762 Signed-off-by: Niccolò Fei --- .github/workflows/registry-clean.yml | 34 ++++++++++++---------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/.github/workflows/registry-clean.yml b/.github/workflows/registry-clean.yml index c57ff2a859..4b9d931e44 100644 --- a/.github/workflows/registry-clean.yml +++ b/.github/workflows/registry-clean.yml @@ -9,8 +9,6 @@ on: env: IMAGE_NAME: "cloudnative-pg-testing" - ORG_NAME: "cloudnative-pg" - SNOK_TOKEN: ${{ secrets.REPO_GHA_PAT }} CONTAINER_IMAGE_NAMES: "pgbouncer-testing, postgresql-testing, postgis-testing" jobs: @@ -18,25 +16,21 @@ jobs: name: delete old testing container images runs-on: ubuntu-latest steps: - # once issue https://github.com/snok/container-retention-policy/issues/33 is fixed - # we can merge the two steps into one - - name: Delete '-testing' images for ${{ env.IMAGE_NAME }} - uses: snok/container-retention-policy@v2 + - name: Delete '-testing' operator images in ${{ env.IMAGE_NAME }} + uses: snok/container-retention-policy@v3.0.0 with: image-names: ${{ env.IMAGE_NAME }} - cut-off: 5 days ago UTC - keep-at-least: 1 - account-type: org - org-name: ${{ env.ORG_NAME }} - # use the GITHUB_TOKEN when issue https://github.com/snok/container-retention-policy/issues/27 is fixed - token: ${{ env.SNOK_TOKEN }} - - name: Delete '-testing' images for containers - uses: snok/container-retention-policy@v2 + cut-off: 5d + keep-n-most-recent: 1 + account: ${{ github.repository_owner }} + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Delete '-testing' operand images + uses: snok/container-retention-policy@v3.0.0 + if: ${{ github.repository_owner == 'cloudnative-pg' }} with: image-names: ${{ env.CONTAINER_IMAGE_NAMES }} - cut-off: A week ago UTC - keep-at-least: 1 - account-type: org - org-name: ${{ env.ORG_NAME }} - # use the GITHUB_TOKEN when issue https://github.com/snok/container-retention-policy/issues/27 is fixed - token: ${{ env.SNOK_TOKEN }} + cut-off: 1w + keep-n-most-recent: 1 + account: "cloudnative-pg" + token: ${{ secrets.REPO_GHA_PAT }} From 2d0e592a00bfce74abf62655dabb1617996333d7 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 14 Oct 2024 12:29:21 +0200 Subject: [PATCH 063/836] chore(logging): avoid creating multiple loggers (#5783) With the latest version of controller-runtime the current logger can be set in the base context and then inherited by all the controllers belonging to the same manager. This patch removes the duplicate loggers in the PostgreSQL instances and in the PgBouncer Pods, reusing the appropriate one. This, in turn, allows for more consistent logging across the instance manager. Closes: #5782 --------- Signed-off-by: Leonardo Cecchi Signed-off-by: Jonathan Gonzalez V.
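The pattern this patch relies on condenses into a short, self-contained sketch. It uses controller-runtime's own log helpers rather than the project's wrappers, and the logger name and reconciler body are illustrative assumptions: the manager is created with a BaseContext that already carries the configured logger, so everything started by that manager inherits it via log.FromContext instead of building its own.

    package main

    import (
    	"context"

    	ctrl "sigs.k8s.io/controller-runtime"
    	"sigs.k8s.io/controller-runtime/pkg/log"
    	"sigs.k8s.io/controller-runtime/pkg/log/zap"
    )

    func run(ctx context.Context) error {
    	// Build one logger and store it in the base context;
    	// "example-manager" is an illustrative name.
    	logger := zap.New().WithValues("logger", "example-manager")
    	ctx = log.IntoContext(ctx, logger)

    	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
    		// Every controller and runnable context derives from this
    		// context, so the logger above is inherited, not duplicated.
    		BaseContext: func() context.Context { return ctx },
    		Logger:      logger,
    	})
    	if err != nil {
    		return err
    	}

    	// Inside any Reconcile(ctx, req), log.FromContext(ctx) now
    	// returns this same inherited logger.
    	return mgr.Start(ctx)
    }

    func main() {
    	if err := run(ctrl.SetupSignalHandler()); err != nil {
    		panic(err)
    	}
    }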
Signed-off-by: Francesco Canovai Co-authored-by: Francesco Canovai --- api/v1/cluster_funcs.go | 1 - cmd/manager/main.go | 2 + docs/src/logging.md | 1 + internal/cmd/manager/backup/cmd.go | 13 ++-- internal/cmd/manager/bootstrap/cmd.go | 7 +- .../cmd/manager/debug/architectures/cmd.go | 5 +- internal/cmd/manager/instance/initdb/cmd.go | 12 ++-- internal/cmd/manager/instance/join/cmd.go | 10 +-- .../cmd/manager/instance/pgbasebackup/cmd.go | 19 ++--- internal/cmd/manager/instance/restore/cmd.go | 13 ++-- .../manager/instance/restoresnapshot/cmd.go | 3 +- internal/cmd/manager/instance/run/cmd.go | 57 ++++++++------- .../cmd/manager/instance/run/lifecycle/run.go | 8 ++- internal/cmd/manager/instance/status/cmd.go | 21 +++--- internal/cmd/manager/pgbouncer/run/cmd.go | 52 ++++++++------ .../cmd/manager/show/walarchivequeue/cmd.go | 7 +- internal/cmd/manager/walarchive/cmd.go | 11 +-- internal/cmd/manager/walrestore/cmd.go | 2 +- internal/controller/cluster_pki.go | 10 +-- internal/controller/cluster_status.go | 3 +- internal/management/controller/cache.go | 12 ++-- .../controller/database_controller.go | 2 +- .../controller/instance_controller.go | 44 ++++++++---- .../management/controller/manager.go | 16 +++-- .../management/controller/refresh.go | 7 +- .../management/controller/refresh_test.go | 12 ++-- .../pgbouncer/metricsserver/lists.go | 10 +-- .../pgbouncer/metricsserver/metricsserver.go | 4 +- .../metricsserver/metricsserver_test.go | 4 +- .../metricsserver/pgbouncer_collector.go | 9 ++- .../pgbouncer/metricsserver/pools.go | 12 ++-- .../pgbouncer/metricsserver/pools_test.go | 3 +- .../pgbouncer/metricsserver/stats.go | 10 +-- .../pgbouncer/metricsserver/stats_test.go | 3 +- pkg/management/postgres/backup.go | 5 +- pkg/management/postgres/configuration.go | 17 +++-- pkg/management/postgres/initdb.go | 4 +- pkg/management/postgres/instance.go | 70 +++++++++++-------- pkg/management/postgres/join.go | 9 +-- pkg/management/postgres/restore.go | 44 +++++++----- .../postgres/webserver/webserver.go | 12 ++-- 41 files changed, 343 insertions(+), 223 deletions(-) diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index a90dbd48bc..fdfa88c158 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -118,7 +118,6 @@ func (st *ServiceAccountTemplate) MergeMetadata(sa *corev1.ServiceAccount) { // MatchesTopology checks if the two topologies have // the same label values (labels are specified in SyncReplicaElectionConstraints.NodeLabelsAntiAffinity) func (topologyLabels PodTopologyLabels) MatchesTopology(instanceTopology PodTopologyLabels) bool { - log.Debug("matching topology", "main", topologyLabels, "second", instanceTopology) for mainLabelName, mainLabelValue := range topologyLabels { if mainLabelValue != instanceTopology[mainLabelName] { return false diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 8fe8a46f68..9feee1c6b1 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -40,6 +40,8 @@ import ( ) func main() { + cobra.EnableTraverseRunHooks = true + logFlags := &log.Flags{} cmd := &cobra.Command{ diff --git a/docs/src/logging.md b/docs/src/logging.md index f5cfd84bda..98ba55b467 100644 --- a/docs/src/logging.md +++ b/docs/src/logging.md @@ -192,6 +192,7 @@ Therefore, all the possible `logger` values are the following: - `postgres`: from the `postgres` instance (having `msg` different than `record`) - `wal-archive`: from the `wal-archive` subcommand of the instance manager - `wal-restore`: from the `wal-restore` subcommand of the instance manager 
+- `instance-manager`: from the [PostgreSQL instance manager](./instance_manager.md) Except for `postgres`, which has the aforementioned structures, all other possible values have `msg` set to the escaped message that's diff --git a/internal/cmd/manager/backup/cmd.go b/internal/cmd/manager/backup/cmd.go index 6a5c3ec346..f0295a4965 100644 --- a/internal/cmd/manager/backup/cmd.go +++ b/internal/cmd/manager/backup/cmd.go @@ -33,18 +33,19 @@ import ( func NewCmd() *cobra.Command { cmd := cobra.Command{ Use: "backup [backup_name]", - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { + contextLogger := log.FromContext(cmd.Context()) backupURL := url.Local(url.PathPgBackup, url.LocalPort) resp, err := http.Get(backupURL + "?name=" + args[0]) if err != nil { - log.Error(err, "Error while requesting backup") + contextLogger.Error(err, "Error while requesting backup") return err } defer func() { err := resp.Body.Close() if err != nil { - log.Error(err, "Can't close the connection", + contextLogger.Error(err, "Can't close the connection", "backupURL", backupURL, "statusCode", resp.StatusCode, ) @@ -53,7 +54,7 @@ func NewCmd() *cobra.Command { body, err := io.ReadAll(resp.Body) if err != nil { - log.Error(err, "Error while reading backup response body", + contextLogger.Error(err, "Error while reading backup response body", "backupURL", backupURL, "statusCode", resp.StatusCode, ) @@ -61,7 +62,7 @@ func NewCmd() *cobra.Command { } if resp.StatusCode != 200 { - log.Info( + contextLogger.Info( "Error while requesting backup", "backupURL", backupURL, "statusCode", resp.StatusCode, @@ -72,7 +73,7 @@ func NewCmd() *cobra.Command { _, err = os.Stderr.Write(body) if err != nil { - log.Error(err, "Error while starting a backup") + contextLogger.Error(err, "Error while starting a backup") return err } diff --git a/internal/cmd/manager/bootstrap/cmd.go b/internal/cmd/manager/bootstrap/cmd.go index ed87082bf7..311064e2c5 100644 --- a/internal/cmd/manager/bootstrap/cmd.go +++ b/internal/cmd/manager/bootstrap/cmd.go @@ -33,9 +33,10 @@ func NewCmd() *cobra.Command { Use: "bootstrap [target]", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { + contextLogger := log.FromContext(cmd.Context()) dest := args[0] - log.Info("Installing the manager executable", + contextLogger.Info("Installing the manager executable", "destination", dest, "version", versions.Version, "build", versions.Info) @@ -44,13 +45,13 @@ func NewCmd() *cobra.Command { panic(err) } - log.Info("Setting 0750 permissions") + contextLogger.Info("Setting 0750 permissions") err = os.Chmod(dest, 0o750) // #nosec if err != nil { panic(err) } - log.Info("Bootstrap completed") + contextLogger.Info("Bootstrap completed") return nil }, diff --git a/internal/cmd/manager/debug/architectures/cmd.go b/internal/cmd/manager/debug/architectures/cmd.go index 59ff5fd390..d395963610 100644 --- a/internal/cmd/manager/debug/architectures/cmd.go +++ b/internal/cmd/manager/debug/architectures/cmd.go @@ -32,9 +32,10 @@ func NewCmd() *cobra.Command { cmd := cobra.Command{ Use: "show-architectures", Short: "Lists all the CPU architectures supported by this image", - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { + contextLogger := log.FromContext(cmd.Context()) if err := run(); err != nil { - log.Error(err, "Error while extracting the list of supported architectures") + contextLogger.Error(err, "Error while extracting the list of supported 
architectures") return err } diff --git a/internal/cmd/manager/instance/initdb/cmd.go b/internal/cmd/manager/instance/initdb/cmd.go index bbf59790d2..81c453544d 100644 --- a/internal/cmd/manager/instance/initdb/cmd.go +++ b/internal/cmd/manager/instance/initdb/cmd.go @@ -60,28 +60,29 @@ func NewCmd() *cobra.Command { }, RunE: func(cmd *cobra.Command, _ []string) error { ctx := cmd.Context() + contextLogger := log.FromContext(ctx) initDBFlags, err := shellquote.Split(initDBFlagsString) if err != nil { - log.Error(err, "Error while parsing initdb flags") + contextLogger.Error(err, "Error while parsing initdb flags") return err } postInitSQL, err := shellquote.Split(postInitSQLStr) if err != nil { - log.Error(err, "Error while parsing post init SQL queries") + contextLogger.Error(err, "Error while parsing post init SQL queries") return err } postInitApplicationSQL, err := shellquote.Split(postInitApplicationSQLStr) if err != nil { - log.Error(err, "Error while parsing post init template SQL queries") + contextLogger.Error(err, "Error while parsing post init template SQL queries") return err } postInitTemplateSQL, err := shellquote.Split(postInitTemplateSQLStr) if err != nil { - log.Error(err, "Error while parsing post init template SQL queries") + contextLogger.Error(err, "Error while parsing post init template SQL queries") return err } @@ -148,6 +149,7 @@ func NewCmd() *cobra.Command { } func initSubCommand(ctx context.Context, info postgres.InitInfo) error { + contextLogger := log.FromContext(ctx) err := info.CheckTargetDataDirectory(ctx) if err != nil { return err @@ -155,7 +157,7 @@ func initSubCommand(ctx context.Context, info postgres.InitInfo) error { err = info.Bootstrap(ctx) if err != nil { - log.Error(err, "Error while bootstrapping data directory") + contextLogger.Error(err, "Error while bootstrapping data directory") return err } diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go index 449f92563a..e708d79a2d 100644 --- a/internal/cmd/manager/instance/join/cmd.go +++ b/internal/cmd/manager/instance/join/cmd.go @@ -93,13 +93,15 @@ func NewCmd() *cobra.Command { } func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postgres.InitInfo) error { + contextLogger := log.FromContext(ctx) + if err := info.CheckTargetDataDirectory(ctx); err != nil { return err } client, err := management.NewControllerRuntimeClient() if err != nil { - log.Error(err, "Error creating Kubernetes client") + contextLogger.Error(err, "Error creating Kubernetes client") return err } @@ -114,7 +116,7 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg ctrl.ObjectKey{Namespace: instance.GetNamespaceName(), Name: instance.GetClusterName()}, &cluster, ); err != nil { - log.Error(err, "Error while getting cluster") + contextLogger.Error(err, "Error while getting cluster") return err } @@ -130,8 +132,8 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg reconciler.RefreshSecrets(ctx, &cluster) // Run "pg_basebackup" to download the data directory from the primary - if err := info.Join(&cluster); err != nil { - log.Error(err, "Error joining node") + if err := info.Join(ctx, &cluster); err != nil { + contextLogger.Error(err, "Error joining node") return err } diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go index d8ccead7cf..3f9d9719e1 100644 --- a/internal/cmd/manager/instance/pgbasebackup/cmd.go +++ 
b/internal/cmd/manager/instance/pgbasebackup/cmd.go @@ -57,7 +57,10 @@ func NewCmd() *cobra.Command { Namespace: namespace, }) }, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + contextLogger := log.FromContext(ctx) + client, err := management.NewControllerRuntimeClient() if err != nil { return err @@ -73,10 +76,8 @@ func NewCmd() *cobra.Command { client: client, } - ctx := context.Background() - if err = env.bootstrapUsingPgbasebackup(ctx); err != nil { - log.Error(err, "Unable to boostrap cluster") + contextLogger.Error(err, "Unable to boostrap cluster") } return err }, @@ -101,6 +102,8 @@ func NewCmd() *cobra.Command { // bootstrapUsingPgbasebackup creates a new data dir from the configuration func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + var cluster apiv1.Cluster err := env.client.Get(ctx, ctrl.ObjectKey{Namespace: env.info.Namespace, Name: env.info.ClusterName}, &cluster) if err != nil { @@ -130,7 +133,7 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { pgVersion, err := cluster.GetPostgresqlVersion() if err != nil { - log.Warning( + contextLogger.Warning( "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", "imageName", cluster.GetImageName(), "err", err) @@ -141,7 +144,7 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { connectionString += " options='-c wal_sender_timeout=0s'" } - err = postgres.ClonePgData(connectionString, env.info.PgData, env.info.PgWal) + err = postgres.ClonePgData(ctx, connectionString, env.info.PgData, env.info.PgWal) if err != nil { return err } @@ -158,11 +161,11 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { // configureInstanceAsNewPrimary sets up this instance as a new primary server, using // the configuration created by the user and setting up the global objects as needed func (env *CloneInfo) configureInstanceAsNewPrimary(ctx context.Context, cluster *apiv1.Cluster) error { - if err := env.info.WriteInitialPostgresqlConf(cluster); err != nil { + if err := env.info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { return err } - if err := env.info.WriteRestoreHbaConf(); err != nil { + if err := env.info.WriteRestoreHbaConf(ctx); err != nil { return err } diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go index 4d94211861..f2d4d63052 100644 --- a/internal/cmd/manager/instance/restore/cmd.go +++ b/internal/cmd/manager/instance/restore/cmd.go @@ -82,6 +82,7 @@ func NewCmd() *cobra.Command { } func restoreSubCommand(ctx context.Context, info postgres.InitInfo) error { + contextLogger := log.FromContext(ctx) err := info.CheckTargetDataDirectory(ctx) if err != nil { return err @@ -89,15 +90,17 @@ func restoreSubCommand(ctx context.Context, info postgres.InitInfo) error { err = info.Restore(ctx) if err != nil { - log.Error(err, "Error while restoring a backup") - cleanupDataDirectoryIfNeeded(err, info.PgData) + contextLogger.Error(err, "Error while restoring a backup") + cleanupDataDirectoryIfNeeded(ctx, err, info.PgData) return err } return nil } -func cleanupDataDirectoryIfNeeded(restoreError error, dataDirectory string) { +func cleanupDataDirectoryIfNeeded(ctx context.Context, restoreError error, dataDirectory string) { + contextLogger := log.FromContext(ctx) + var barmanError 
*barmanCommand.CloudRestoreError if !errors.As(restoreError, &barmanError) { return @@ -107,9 +110,9 @@ func cleanupDataDirectoryIfNeeded(restoreError error, dataDirectory string) { return } - log.Info("Cleaning up data directory", "directory", dataDirectory) + contextLogger.Info("Cleaning up data directory", "directory", dataDirectory) if err := fileutils.RemoveDirectory(dataDirectory); err != nil && !os.IsNotExist(err) { - log.Error( + contextLogger.Error( err, "error occurred cleaning up data directory", "directory", dataDirectory) diff --git a/internal/cmd/manager/instance/restoresnapshot/cmd.go b/internal/cmd/manager/instance/restoresnapshot/cmd.go index d3e22e890c..6d84b6f714 100644 --- a/internal/cmd/manager/instance/restoresnapshot/cmd.go +++ b/internal/cmd/manager/instance/restoresnapshot/cmd.go @@ -55,6 +55,7 @@ func NewCmd() *cobra.Command { }, RunE: func(cmd *cobra.Command, _ []string) error { ctx := cmd.Context() + contextLogger := log.FromContext(ctx) info := postgres.InitInfo{ ClusterName: clusterName, @@ -81,7 +82,7 @@ func NewCmd() *cobra.Command { err := execute(ctx, info, immediate) if err != nil { - log.Error(err, "Error while recovering Volume Snapshot backup") + contextLogger.Error(err, "Error while recovering Volume Snapshot backup") } return err }, diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index 1d37ac850a..066360ddaa 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -87,7 +87,10 @@ func NewCmd() *cobra.Command { }) }, RunE: func(cmd *cobra.Command, _ []string) error { - ctx := log.IntoContext(cmd.Context(), log.GetLogger()) + ctx := log.IntoContext( + cmd.Context(), + log.GetLogger().WithValues("logger", "instance-manager"), + ) instance := postgres.NewInstance(). WithPodName(podName). WithClusterName(clusterName). 
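The hunks around this point all follow the same convention: a command no longer logs through a package-level logger but pulls the contextual one from cmd.Context(). A minimal, self-contained example of the shape these RunE bodies take, assuming the project's machinery log package; the command name and message are illustrative:

    package main

    import (
    	"github.com/cloudnative-pg/machinery/pkg/log"
    	"github.com/spf13/cobra"
    )

    func main() {
    	cmd := &cobra.Command{
    		Use: "example",
    		RunE: func(cmd *cobra.Command, _ []string) error {
    			// The logger was stored in the context by the parent
    			// command, so no new logger is created here.
    			contextLogger := log.FromContext(cmd.Context())
    			contextLogger.Info("doing work")
    			return nil
    		},
    	}
    	if err := cmd.Execute(); err != nil {
    		panic(err)
    	}
    }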
@@ -132,18 +135,18 @@ func NewCmd() *cobra.Command { func runSubCommand(ctx context.Context, instance *postgres.Instance) error { var err error - setupLog := log.WithName("setup") - setupLog.Info("Starting CloudNativePG Instance Manager", + contextLogger := log.FromContext(ctx) + contextLogger.Info("Starting CloudNativePG Instance Manager", "version", versions.Version, "build", versions.Info) - setupLog.Info("Checking for free disk space for WALs before starting PostgreSQL") + contextLogger.Info("Checking for free disk space for WALs before starting PostgreSQL") hasDiskSpaceForWals, err := instance.CheckHasDiskSpaceForWAL(ctx) if err != nil { - setupLog.Error(err, "Error while checking if there is enough disk space for WALs, skipping") + contextLogger.Error(err, "Error while checking if there is enough disk space for WALs, skipping") } else if !hasDiskSpaceForWals { - setupLog.Info("Detected low-disk space condition, avoid starting the instance") + contextLogger.Info("Detected low-disk space condition, avoid starting the instance") return errNoFreeWALSpace } @@ -180,9 +183,13 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { Metrics: server.Options{ BindAddress: "0", // TODO: merge metrics to the manager one }, + BaseContext: func() context.Context { + return ctx + }, + Logger: contextLogger.WithValues("logging_pod", os.Getenv("POD_NAME")).GetLogger(), }) if err != nil { - setupLog.Error(err, "unable to set up overall controller manager") + contextLogger.Error(err, "unable to set up overall controller manager") return err } @@ -196,7 +203,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { Named("instance-cluster"). Complete(reconciler) if err != nil { - setupLog.Error(err, "unable to create instance controller") + contextLogger.Error(err, "unable to create instance controller") return err } postgresStartConditions = append(postgresStartConditions, reconciler.GetExecutedCondition()) @@ -204,7 +211,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { // database reconciler dbReconciler := controller.NewDatabaseReconciler(mgr, instance) if err := dbReconciler.SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create database controller") + contextLogger.Error(err, "unable to create database controller") return err } @@ -239,24 +246,24 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { postgresLifecycleManager := lifecycle.NewPostgres(ctx, instance, postgresStartConditions) if err = mgr.Add(postgresLifecycleManager); err != nil { - setupLog.Error(err, "unable to create instance runnable") + contextLogger.Error(err, "unable to create instance runnable") return err } if err = mgr.Add(lifecycle.NewPostgresOrphansReaper(instance)); err != nil { - setupLog.Error(err, "unable to create zombie reaper") + contextLogger.Error(err, "unable to create zombie reaper") return err } slotReplicator := runner.NewReplicator(instance) if err = mgr.Add(slotReplicator); err != nil { - setupLog.Error(err, "unable to create slot replicator") + contextLogger.Error(err, "unable to create slot replicator") return err } roleSynchronizer := roles.NewRoleSynchronizer(instance, reconciler.GetClient()) if err = mgr.Add(roleSynchronizer); err != nil { - setupLog.Error(err, "unable to create role synchronizer") + contextLogger.Error(err, "unable to create role synchronizer") return err } @@ -273,7 +280,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { return 
err } if err = mgr.Add(remoteSrv); err != nil { - setupLog.Error(err, "unable to add remote webserver runnable") + contextLogger.Error(err, "unable to add remote webserver runnable") return err } @@ -286,7 +293,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { return err } if err = mgr.Add(localSrv); err != nil { - setupLog.Error(err, "unable to add local webserver runnable") + contextLogger.Error(err, "unable to add local webserver runnable") return err } @@ -295,36 +302,36 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { return err } if err = mgr.Add(metricsServer); err != nil { - setupLog.Error(err, "unable to add local webserver runnable") + contextLogger.Error(err, "unable to add local webserver runnable") return err } - setupLog.Info("starting tablespace manager") + contextLogger.Info("starting tablespace manager") if err := tablespaces.NewTablespaceReconciler(instance, mgr.GetClient()). SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create tablespace reconciler") + contextLogger.Error(err, "unable to create tablespace reconciler") return err } - setupLog.Info("starting external server manager") + contextLogger.Info("starting external server manager") if err := externalservers.NewReconciler(instance, mgr.GetClient()). SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create external servers reconciler") + contextLogger.Error(err, "unable to create external servers reconciler") return err } - setupLog.Info("starting controller-runtime manager") + contextLogger.Info("starting controller-runtime manager") if err := mgr.Start(onlineUpgradeCtx); err != nil { - setupLog.Error(err, "unable to run controller-runtime manager") + contextLogger.Error(err, "unable to run controller-runtime manager") return makeUnretryableError(err) } - setupLog.Info("Checking for free disk space for WALs after PostgreSQL finished") + contextLogger.Info("Checking for free disk space for WALs after PostgreSQL finished") hasDiskSpaceForWals, err = instance.CheckHasDiskSpaceForWAL(ctx) if err != nil { - setupLog.Error(err, "Error while checking if there is enough disk space for WALs, skipping") + contextLogger.Error(err, "Error while checking if there is enough disk space for WALs, skipping") } else if !hasDiskSpaceForWals { - setupLog.Info("Detected low-disk space condition") + contextLogger.Info("Detected low-disk space condition") return errNoFreeWALSpace } diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go index 88a133dec7..6fde183430 100644 --- a/internal/cmd/manager/instance/run/lifecycle/run.go +++ b/internal/cmd/manager/instance/run/lifecycle/run.go @@ -98,7 +98,7 @@ func (i *PostgresLifecycle) runPostgresAndWait(ctx context.Context) <-chan error return err } - log.Info("postmaster started", "postMasterPID", postMasterPID) + contextLogger.Info("postmaster started", "postMasterPID", postMasterPID) // Now we'll wait for PostgreSQL to accept connections, and setup everything required // for replication and pg_rewind to work correctly. 
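These lifecycle hunks reduce to a start/wait pair around the postmaster, with the contextual logger recording both ends of the process lifetime. A reduced sketch of that flow, with the command and the log keys chosen for illustration and none of the replication setup:

    package main

    import (
    	"context"
    	"os/exec"

    	"github.com/cloudnative-pg/machinery/pkg/log"
    )

    // runAndWait mirrors the start/log/wait shape of runPostgresAndWait.
    func runAndWait(ctx context.Context, name string, args ...string) error {
    	contextLogger := log.FromContext(ctx)

    	cmd := exec.CommandContext(ctx, name, args...)
    	if err := cmd.Start(); err != nil {
    		return err
    	}
    	contextLogger.Info("process started", "pid", cmd.Process.Pid)

    	exitErr := cmd.Wait()
    	contextLogger.Info("process exited", "exitError", exitErr)
    	return exitErr
    }

    func main() {
    	_ = runAndWait(context.Background(), "true")
    }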
@@ -116,7 +116,11 @@ func (i *PostgresLifecycle) runPostgresAndWait(ctx context.Context) <-chan error defer i.instance.SetCanCheckReadiness(false) postmasterExitStatus := streamingCmd.Wait() - log.Info("postmaster exited", "postmasterExitStatus", postmasterExitStatus, "postMasterPID", postMasterPID) + contextLogger.Info( + "postmaster exited", + "postmasterExitStatus", postmasterExitStatus, + "postMasterPID", postMasterPID, + ) return postmasterExitStatus } diff --git a/internal/cmd/manager/instance/status/cmd.go b/internal/cmd/manager/instance/status/cmd.go index dc56e49b3a..d1361d71f1 100644 --- a/internal/cmd/manager/instance/status/cmd.go +++ b/internal/cmd/manager/instance/status/cmd.go @@ -49,15 +49,16 @@ func NewCmd() *cobra.Command { } func statusSubCommand(ctx context.Context) error { + contextLogger := log.FromContext(ctx) cli, err := management.NewControllerRuntimeClient() if err != nil { - log.Error(err, "while building the controller runtime client") + contextLogger.Error(err, "while building the controller runtime client") return err } cluster, err := cacheClient.GetCluster() if err != nil { - log.Error(err, "while loading the cluster from cache") + contextLogger.Error(err, "while loading the cluster from cache") return err } @@ -67,7 +68,7 @@ func statusSubCommand(ctx context.Context) error { cluster.GetServerCASecretObjectKey(), ) if err != nil { - log.Error(err, "Error while building the TLS context") + contextLogger.Error(err, "Error while building the TLS context") return err } @@ -76,14 +77,14 @@ func statusSubCommand(ctx context.Context) error { resp, err = executeRequest(ctx, "http") } if err != nil { - log.Error(err, "Error while requesting instance status") + contextLogger.Error(err, "Error while requesting instance status") return err } defer func() { err = resp.Body.Close() if err != nil { - log.Error(err, "Can't close the connection", + contextLogger.Error(err, "Can't close the connection", "statusCode", resp.StatusCode, ) } @@ -91,14 +92,14 @@ func statusSubCommand(ctx context.Context) error { body, err := io.ReadAll(resp.Body) if err != nil { - log.Error(err, "Error while reading status response body", + contextLogger.Error(err, "Error while reading status response body", "statusCode", resp.StatusCode, ) return err } if resp.StatusCode != 200 { - log.Info( + contextLogger.Info( "Error while extracting status", "statusCode", resp.StatusCode, "body", string(body), @@ -108,7 +109,7 @@ func statusSubCommand(ctx context.Context) error { _, err = os.Stdout.Write(body) if err != nil { - log.Error(err, "Error while showing status info") + contextLogger.Error(err, "Error while showing status info") return err } @@ -119,13 +120,15 @@ func executeRequest(ctx context.Context, scheme string) (*http.Response, error) const connectionTimeout = 2 * time.Second const requestTimeout = 30 * time.Second + contextLogger := log.FromContext(ctx) + statusURL := url.Build( scheme, "localhost", url.PathPgStatus, url.StatusPort, ) req, err := http.NewRequestWithContext(ctx, http.MethodGet, statusURL, nil) if err != nil { - log.Error(err, "Error while building the request") + contextLogger.Error(err, "Error while building the request") return nil, err } httpClient := resources.NewHTTPClient(connectionTimeout, requestTimeout) diff --git a/internal/cmd/manager/pgbouncer/run/cmd.go b/internal/cmd/manager/pgbouncer/run/cmd.go index 3c283478a7..3650601e3e 100644 --- a/internal/cmd/manager/pgbouncer/run/cmd.go +++ b/internal/cmd/manager/pgbouncer/run/cmd.go @@ -54,9 +54,10 @@ func NewCmd() 
*cobra.Command { cmd := &cobra.Command{ Use: "run", SilenceErrors: true, - PreRunE: func(_ *cobra.Command, _ []string) error { + PreRunE: func(cmd *cobra.Command, _ []string) error { + contextLogger := log.FromContext(cmd.Context()) if poolerNamespacedName.Name == "" || poolerNamespacedName.Namespace == "" { - log.Info( + contextLogger.Info( "pooler object key not set", "poolerNamespacedName", poolerNamespacedName) return errorMissingPoolerNamespacedName @@ -64,8 +65,14 @@ func NewCmd() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - if err := runSubCommand(cmd.Context(), poolerNamespacedName); err != nil { - log.Error(err, "Error while running manager") + ctx := log.IntoContext( + cmd.Context(), + log.GetLogger().WithValues("logger", "pgbouncer-manager"), + ) + contextLogger := log.FromContext(ctx) + + if err := runSubCommand(ctx, poolerNamespacedName); err != nil { + contextLogger.Error(err, "Error while running manager") return err } return nil @@ -91,11 +98,12 @@ func NewCmd() *cobra.Command { func runSubCommand(ctx context.Context, poolerNamespacedName types.NamespacedName) error { var err error - log.Info("Starting CloudNativePG PgBouncer Instance Manager", + contextLogger := log.FromContext(ctx) + contextLogger.Info("Starting CloudNativePG PgBouncer Instance Manager", "version", versions.Version, "build", versions.Info) - if err = startWebServer(); err != nil { + if err = startWebServer(ctx); err != nil { return fmt.Errorf("while starting the web server: %w", err) } @@ -114,10 +122,10 @@ func runSubCommand(ctx context.Context, poolerNamespacedName types.NamespacedNam pgBouncerIni := filepath.Join(config.ConfigsDir, config.PgBouncerIniFileName) pgBouncerCmd := exec.Command(pgBouncerCommandName, pgBouncerIni) //nolint:gosec stdoutWriter := &execlog.LogWriter{ - Logger: log.WithValues(execlog.PipeKey, execlog.StdOut), + Logger: contextLogger.WithValues(execlog.PipeKey, execlog.StdOut), } stderrWriter := &pgBouncerLogWriter{ - Logger: log.WithValues(execlog.PipeKey, execlog.StdErr), + Logger: contextLogger.WithValues(execlog.PipeKey, execlog.StdErr), } streamingCmd, err := execlog.RunStreamingNoWaitWithWriter( pgBouncerCmd, pgBouncerCommandName, stdoutWriter, stderrWriter) @@ -126,14 +134,14 @@ func runSubCommand(ctx context.Context, poolerNamespacedName types.NamespacedNam } startReconciler(ctx, reconciler) - registerSignalHandler(reconciler, pgBouncerCmd) + registerSignalHandler(ctx, reconciler, pgBouncerCmd) if err = streamingCmd.Wait(); err != nil { var exitError *exec.ExitError if !errors.As(err, &exitError) { - log.Error(err, "Error waiting on pgbouncer process") + contextLogger.Error(err, "Error waiting on pgbouncer process") } else { - log.Error(exitError, "pgbouncer process exited with errors") + contextLogger.Error(exitError, "pgbouncer process exited with errors") } return err } @@ -143,29 +151,30 @@ func runSubCommand(ctx context.Context, poolerNamespacedName types.NamespacedNam // registerSignalHandler handles signals from k8s, notifying postgres as // needed -func registerSignalHandler(reconciler *controller.PgBouncerReconciler, command *exec.Cmd) { +func registerSignalHandler(ctx context.Context, reconciler *controller.PgBouncerReconciler, command *exec.Cmd) { + contextLogger := log.FromContext(ctx) signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) go func() { sig := <-signals - log.Info("Received termination signal", "signal", sig) + contextLogger.Info("Received termination signal", "signal", 
sig) - log.Info("Shutting down web server") + contextLogger.Info("Shutting down web server") err := metricsserver.Shutdown() if err != nil { - log.Error(err, "Error while shutting down the metrics server") + contextLogger.Error(err, "Error while shutting down the metrics server") } else { - log.Info("Metrics server shut down") + contextLogger.Info("Metrics server shut down") } reconciler.Stop() if command != nil { - log.Info("Shutting down pgbouncer instance") + contextLogger.Info("Shutting down pgbouncer instance") err := command.Process.Signal(syscall.SIGINT) if err != nil { - log.Error(err, "Unable to send SIGINT to pgbouncer instance") + contextLogger.Error(err, "Unable to send SIGINT to pgbouncer instance") } } }() @@ -173,15 +182,16 @@ func registerSignalHandler(reconciler *controller.PgBouncerReconciler, command * // startWebServer start the web server for handling probes given // a certain PostgreSQL instance -func startWebServer() error { - if err := metricsserver.Setup(); err != nil { +func startWebServer(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + if err := metricsserver.Setup(ctx); err != nil { return err } go func() { err := metricsserver.ListenAndServe() if err != nil { - log.Error(err, "Error while starting the metrics server") + contextLogger.Error(err, "Error while starting the metrics server") } }() diff --git a/internal/cmd/manager/show/walarchivequeue/cmd.go b/internal/cmd/manager/show/walarchivequeue/cmd.go index a26cc7cabc..ddec44afd6 100644 --- a/internal/cmd/manager/show/walarchivequeue/cmd.go +++ b/internal/cmd/manager/show/walarchivequeue/cmd.go @@ -32,9 +32,12 @@ func NewCmd() *cobra.Command { cmd := cobra.Command{ Use: "wal-archive-queue", Short: "Lists all .ready wal files in " + specs.PgWalArchiveStatusPath, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + contextLogger := log.FromContext(ctx) + if err := run(); err != nil { - log.Error(err, "Error while extracting the list of .ready files") + contextLogger.Error(err, "Error while extracting the list of .ready files") } return nil diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index bfd897329d..7ff6adf958 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -97,7 +97,7 @@ func NewCmd() *cobra.Command { Message: err.Error(), } if errCond := conditions.Patch(ctx, typedClient, cluster, &condition); errCond != nil { - log.Error(errCond, "Error changing wal archiving condition (wal archiving failed)") + contextLog.Error(errCond, "Error changing wal archiving condition (wal archiving failed)") } return err } @@ -110,7 +110,7 @@ func NewCmd() *cobra.Command { Message: "Continuous archiving is working", } if errCond := conditions.Patch(ctx, typedClient, cluster, &condition); errCond != nil { - log.Error(errCond, "Error changing wal archiving condition (wal archiving succeeded)") + contextLog.Error(errCond, "Error changing wal archiving condition (wal archiving succeeded)") } return nil @@ -162,7 +162,7 @@ func run( // Request Barman Cloud to archive this WAL if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { // Backup not configured, skipping WAL - contextLog.Info("Backup not configured, skip WAL archiving via Barman Cloud", + contextLog.Debug("Backup not configured, skip WAL archiving via Barman Cloud", "walName", walName, "currentPrimary", cluster.Status.CurrentPrimary, "targetPrimary", 
cluster.Status.TargetPrimary, @@ -299,10 +299,11 @@ func checkWalArchive( walArchiver *barmanArchiver.WALArchiver, pgData string, ) error { + contextLogger := log.FromContext(ctx) checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions( ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) if err != nil { - log.Error(err, "while getting barman-cloud-wal-archive options") + contextLogger.Error(err, "while getting barman-cloud-wal-archive options") return err } @@ -311,7 +312,7 @@ func checkWalArchive( } if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil { - log.Error(err, "while barman-cloud-check-wal-archive") + contextLogger.Error(err, "while barman-cloud-check-wal-archive") return err } diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index 50d237ff06..9ca043ee0e 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -81,7 +81,7 @@ func NewCmd() *cobra.Command { case errors.Is(err, barmanRestorer.ErrWALNotFound): // Nothing to log here. The failure has already been logged. case errors.Is(err, ErrNoBackupConfigured): - contextLog.Info("tried restoring WALs, but no backup was configured") + contextLog.Debug("tried restoring WALs, but no backup was configured") case errors.Is(err, ErrEndOfWALStreamReached): contextLog.Info( "end-of-wal-stream flag found." + diff --git a/internal/controller/cluster_pki.go b/internal/controller/cluster_pki.go index 4b00c78958..1b8813752a 100644 --- a/internal/controller/cluster_pki.go +++ b/internal/controller/cluster_pki.go @@ -83,7 +83,7 @@ func (r *ClusterReconciler) ensureClientCASecret(ctx context.Context, cluster *a return nil, err } - err = r.verifyCAValidity(secret, cluster) + err = r.verifyCAValidity(ctx, secret, cluster) if err != nil { return nil, err } @@ -120,7 +120,7 @@ func (r *ClusterReconciler) ensureServerCASecret(ctx context.Context, cluster *a return nil, err } - err = r.verifyCAValidity(secret, cluster) + err = r.verifyCAValidity(ctx, secret, cluster) if err != nil { return nil, err } @@ -139,7 +139,9 @@ func (r *ClusterReconciler) ensureServerCASecret(ctx context.Context, cluster *a return &secret, nil } -func (r *ClusterReconciler) verifyCAValidity(secret v1.Secret, cluster *apiv1.Cluster) error { +func (r *ClusterReconciler) verifyCAValidity(ctx context.Context, secret v1.Secret, cluster *apiv1.Cluster) error { + contextLogger := log.FromContext(ctx) + // Verify validity of the CA and expiration (only ca.crt) publicKey, ok := secret.Data[certs.CACertKey] if !ok { @@ -156,7 +158,7 @@ func (r *ClusterReconciler) verifyCAValidity(secret v1.Secret, cluster *apiv1.Cl } else if isExpiring { r.Recorder.Event(cluster, "Warning", "SecretIsExpiring", "Checking expiring date of secret "+secret.Name) - log.Info("CA certificate is expiring or is already expired", "secret", secret.Name) + contextLogger.Info("CA certificate is expiring or is already expired", "secret", secret.Name) } return nil diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index 3be647985d..4f5a2e673a 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -243,6 +243,7 @@ func (r *ClusterReconciler) updateResourceStatus( cluster *apiv1.Cluster, resources *managedResources, ) error { + contextLogger := log.FromContext(ctx) // Retrieve the cluster key existingClusterStatus := cluster.Status @@ -317,7 +318,7 @@ func (r *ClusterReconciler) updateResourceStatus( if 
poolerIntegrations, err := r.getPoolerIntegrationsNeeded(ctx, cluster); err == nil { cluster.Status.PoolerIntegrations = poolerIntegrations } else { - log.Error(err, "while checking pooler integrations were needed, ignored") + contextLogger.Error(err, "while checking pooler integrations were needed, ignored") } // Set the current hash code of the operator binary inside the status. diff --git a/internal/management/controller/cache.go b/internal/management/controller/cache.go index 4d3b06a2b6..1d4f6960a4 100644 --- a/internal/management/controller/cache.go +++ b/internal/management/controller/cache.go @@ -47,13 +47,15 @@ func (r *InstanceReconciler) updateCacheFromCluster(ctx context.Context, cluster } func (r *InstanceReconciler) updateWALRestoreSettingsCache(ctx context.Context, cluster *apiv1.Cluster) { + contextLogger := log.FromContext(ctx) + _, env, barmanConfiguration, err := walrestore.GetRecoverConfiguration(cluster, r.instance.GetPodName()) if errors.Is(err, walrestore.ErrNoBackupConfigured) { cache.Delete(cache.WALRestoreKey) return } if err != nil { - log.Error(err, "while getting recover configuration") + contextLogger.Error(err, "while getting recover configuration") return } env = append(env, os.Environ()...) @@ -66,7 +68,7 @@ func (r *InstanceReconciler) updateWALRestoreSettingsCache(ctx context.Context, env, ) if err != nil { - log.Error(err, "while getting recover credentials") + contextLogger.Error(err, "while getting recover credentials") } cache.Store(cache.WALRestoreKey, envRestore) } @@ -79,6 +81,8 @@ func (r *InstanceReconciler) shouldUpdateWALArchiveSettingsCache( ctx context.Context, cluster *apiv1.Cluster, ) (shouldRetry bool) { + contextLogger := log.FromContext(ctx) + if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { cache.Delete(cache.WALArchiveKey) return false @@ -92,12 +96,12 @@ func (r *InstanceReconciler) shouldUpdateWALArchiveSettingsCache( cluster.Spec.Backup.BarmanObjectStore, os.Environ()) if apierrors.IsForbidden(err) { - log.Info("backup credentials don't yet have access permissions. Will retry reconciliation loop") + contextLogger.Info("backup credentials don't yet have access permissions. 
Will retry reconciliation loop") return true } if err != nil { - log.Error(err, "while getting backup credentials") + contextLogger.Error(err, "while getting backup credentials") return false } diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index 08224c9c6d..d2c4256bc7 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -66,7 +66,7 @@ const databaseReconciliationInterval = 30 * time.Second // Reconcile is the database reconciliation loop func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - contextLogger, ctx := log.SetupLogger(ctx) + contextLogger := log.FromContext(ctx) contextLogger.Debug("Reconciliation loop start") defer func() { diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index c3e65197b7..71f207bb17 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -89,7 +89,7 @@ func (r *InstanceReconciler) Reconcile( _ reconcile.Request, ) (reconcile.Result, error) { // set up a convenient contextLog object so we don't have to type request over and over again - contextLogger, ctx := log.SetupLogger(ctx) + contextLogger := log.FromContext(ctx) // if the context has already been cancelled, // trying to reconcile would just lead to misleading errors being reported @@ -289,7 +289,7 @@ func (r *InstanceReconciler) Reconcile( // operator. Without another reconciliation loop we would have an incoherent // state of electable synchronous_names inside the configuration. // (this is only relevant if syncReplicaElectionConstraint is enabled) - if requeueOnMissingPermissions || r.shouldRequeueForMissingTopology(cluster) { + if requeueOnMissingPermissions || r.shouldRequeueForMissingTopology(ctx, cluster) { return reconcile.Result{RequeueAfter: 30 * time.Second}, nil } @@ -344,7 +344,7 @@ func (r *InstanceReconciler) refreshConfigurationFiles( return false, err } - reloadIdent, err := r.instance.RefreshPGIdent(cluster.Spec.PostgresConfiguration.PgIdent) + reloadIdent, err := r.instance.RefreshPGIdent(ctx, cluster.Spec.PostgresConfiguration.PgIdent) if err != nil { return false, err } @@ -352,7 +352,7 @@ func (r *InstanceReconciler) refreshConfigurationFiles( // Reconcile PostgreSQL configuration // This doesn't need the PG connection, but it needs to reload it in case of changes - reloadConfig, err := r.instance.RefreshConfigurationFilesFromCluster(cluster, false) + reloadConfig, err := r.instance.RefreshConfigurationFilesFromCluster(ctx, cluster, false) if err != nil { return false, err } @@ -367,6 +367,8 @@ func (r *InstanceReconciler) refreshConfigurationFiles( } func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv1.Cluster) *reconcile.Result { + contextLogger := log.FromContext(ctx) + fencingRequired := cluster.IsInstanceFenced(r.instance.GetPodName()) isFenced := r.instance.IsFenced() switch { @@ -379,7 +381,7 @@ func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv timeout := time.Second * time.Duration(cluster.GetMaxStartDelay()) err := r.instance.RequestAndWaitFencingOff(ctx, timeout) if err != nil { - log.Error(err, "while waiting for the instance to be restarted after lifting the fence") + contextLogger.Error(err, "while waiting for the instance to be restarted after lifting the fence") } 
return &reconcile.Result{} } @@ -396,7 +398,7 @@ func handleErrNextLoop(err error) (reconcile.Result, error) { // initialize will handle initialization tasks func (r *InstanceReconciler) initialize(ctx context.Context, cluster *apiv1.Cluster) error { // we check there are no parameters that would prevent a follower to start - if err := r.verifyParametersForFollower(cluster); err != nil { + if err := r.verifyParametersForFollower(ctx, cluster); err != nil { return err } @@ -422,7 +424,12 @@ func (r *InstanceReconciler) initialize(ctx context.Context, cluster *apiv1.Clus // This could not be the case if the cluster spec value for one of those parameters // is decreased shortly after having been increased. The follower would be restarting // towards a high level, then write the lower value to the local config -func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster) error { +func (r *InstanceReconciler) verifyParametersForFollower( + ctx context.Context, + cluster *apiv1.Cluster, +) error { + contextLogger := log.FromContext(ctx) + if isPrimary, _ := r.instance.IsPrimary(); isPrimary { return nil } @@ -440,7 +447,7 @@ func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster) _, err := fileutils.WriteFileAtomic(filename, []byte(nil), 0o600) return err } - log.Info("Found previous run flag", "filename", filename) + contextLogger.Info("Found previous run flag", "filename", filename) controldataParams, err := postgresManagement.LoadEnforcedParametersFromPgControldata(r.instance.PgData) if err != nil { return err @@ -465,7 +472,7 @@ func (r *InstanceReconciler) verifyParametersForFollower(cluster *apiv1.Cluster) if len(options) == 0 { return nil } - log.Info("Updating some enforced parameters that would prevent the instance to start", + contextLogger.Info("Updating some enforced parameters that would prevent the instance to start", "parameters", options, "clusterParams", clusterParams) // we write the safer enforced parameter values to pod config as safety // in the face of cluster specs going up and down from nervous users @@ -1248,7 +1255,7 @@ func (r *InstanceReconciler) handlePromotion(ctx context.Context, cluster *apiv1 if r.instance.GetPodName() != cluster.Status.CurrentPrimary { // if the cluster is not replicating it means it's doing a failover and // we have to wait for wal receivers to be down - err := r.waitForWalReceiverDown() + err := r.waitForWalReceiverDown(ctx) if err != nil { return err } @@ -1305,7 +1312,9 @@ func (r *InstanceReconciler) reconcileDesignatedPrimary( // waitForWalReceiverDown wait until the wal receiver is down, and it's used // to grab all the WAL files from a replica -func (r *InstanceReconciler) waitForWalReceiverDown() error { +func (r *InstanceReconciler) waitForWalReceiverDown(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + // This is not really exponential backoff as RetryUntilWalReceiverDown // doesn't contain any increment return wait.ExponentialBackoff(RetryUntilWalReceiverDown, func() (done bool, err error) { @@ -1318,7 +1327,7 @@ func (r *InstanceReconciler) waitForWalReceiverDown() error { return true, nil } - log.Info("WAL receiver is still active, waiting") + contextLogger.Info("WAL receiver is still active, waiting") return false, nil }) } @@ -1425,10 +1434,15 @@ func (r *InstanceReconciler) refreshPGHBA(ctx context.Context, cluster *apiv1.Cl ldapBindPassword = string(ldapBindPasswordByte) } // Generate pg_hba.conf file - return r.instance.RefreshPGHBA(cluster, ldapBindPassword) + 
return r.instance.RefreshPGHBA(ctx, cluster, ldapBindPassword) } -func (r *InstanceReconciler) shouldRequeueForMissingTopology(cluster *apiv1.Cluster) shoudRequeue { +func (r *InstanceReconciler) shouldRequeueForMissingTopology( + ctx context.Context, + cluster *apiv1.Cluster, +) shoudRequeue { + contextLogger := log.FromContext(ctx) + syncReplicaConstraint := cluster.Spec.PostgresConfiguration.SyncReplicaElectionConstraint if !syncReplicaConstraint.Enabled { return false @@ -1439,7 +1453,7 @@ func (r *InstanceReconciler) shouldRequeueForMissingTopology(cluster *apiv1.Clus topologyStatus := cluster.Status.Topology if !topologyStatus.SuccessfullyExtracted || len(topologyStatus.Instances) != cluster.Spec.Instances { - log.Info("missing topology information while syncReplicaElectionConstraint are enabled, " + + contextLogger.Info("missing topology information while syncReplicaElectionConstraint are enabled, " + "will requeue to calculate correctly the synchronous names") return true } diff --git a/internal/pgbouncer/management/controller/manager.go b/internal/pgbouncer/management/controller/manager.go index 0c97a1f95c..634a34a9e1 100644 --- a/internal/pgbouncer/management/controller/manager.go +++ b/internal/pgbouncer/management/controller/manager.go @@ -63,10 +63,12 @@ func NewPgBouncerReconciler(poolerNamespacedName types.NamespacedName) (*PgBounc // Run runs the reconciliation loop for this resource func (r *PgBouncerReconciler) Run(ctx context.Context) { + contextLogger := log.FromContext(ctx) + for { // Retry with exponential back-off, unless it is a connection refused error err := retry.OnError(retry.DefaultBackoff, func(err error) bool { - log.Error(err, "Error calling Watch") + contextLogger.Error(err, "Error calling Watch") return !utilnet.IsConnectionRefused(err) }, func() error { return r.watch(ctx) @@ -81,6 +83,8 @@ func (r *PgBouncerReconciler) Run(ctx context.Context) { // watch contains the main reconciler loop func (r *PgBouncerReconciler) watch(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + var err error r.poolerWatch, err = r.client.Watch(ctx, &apiv1.PoolerList{}, &ctrl.ListOptions{ @@ -98,7 +102,7 @@ func (r *PgBouncerReconciler) watch(ctx context.Context) error { return r.Reconcile(ctx, &receivedEvent) }) if err != nil { - log.Error(err, "Reconciliation error") + contextLogger.Error(err, "Reconciliation error") } } return nil @@ -118,7 +122,7 @@ func (r *PgBouncerReconciler) GetClient() ctrl.Client { // Reconcile is the main reconciliation loop for the pgbouncer instance func (r *PgBouncerReconciler) Reconcile(ctx context.Context, event *watch.Event) error { - contextLogger, _ := log.SetupLogger(ctx) + contextLogger := log.FromContext(ctx) contextLogger.Debug( "Reconciliation loop", "eventType", event.Type, @@ -205,7 +209,7 @@ func (r *PgBouncerReconciler) writePgBouncerConfig(ctx context.Context, pooler * return false, fmt.Errorf("while generating pgbouncer configuration: %w", err) } - return refreshConfigurationFiles(configFiles) + return refreshConfigurationFiles(ctx, configFiles) } // Init ensures that all PgBouncer requirement are met. @@ -214,6 +218,8 @@ func (r *PgBouncerReconciler) writePgBouncerConfig(ctx context.Context, pooler * // 1. create the pgbouncer configuration and the required secrets // 2. 
ensure that every needed folder is existent func (r *PgBouncerReconciler) Init(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + var pooler apiv1.Pooler // Get the pooler from the API Server @@ -228,7 +234,7 @@ func (r *PgBouncerReconciler) Init(ctx context.Context) error { // Ensure we have the directory to store the controlling socket if err := fileutils.EnsureDirectoryExists(config.PgBouncerSocketDir); err != nil { - log.Error(err, "while checking socket directory existed", "dir", config.PgBouncerSocketDir) + contextLogger.Error(err, "while checking socket directory existed", "dir", config.PgBouncerSocketDir) return err } diff --git a/internal/pgbouncer/management/controller/refresh.go b/internal/pgbouncer/management/controller/refresh.go index 2193c19f4a..f64794d915 100644 --- a/internal/pgbouncer/management/controller/refresh.go +++ b/internal/pgbouncer/management/controller/refresh.go @@ -17,6 +17,7 @@ limitations under the License. package controller import ( + "context" "fmt" "github.com/cloudnative-pg/machinery/pkg/fileutils" @@ -27,16 +28,18 @@ import ( // refreshConfigurationFiles writes the configuration files, returning a // flag indicating if something is changed or not and an error status -func refreshConfigurationFiles(files config.ConfigurationFiles) (bool, error) { +func refreshConfigurationFiles(ctx context.Context, files config.ConfigurationFiles) (bool, error) { var changed bool + contextLogger := log.FromContext(ctx) + for fileName, content := range files { changedFile, err := fileutils.WriteFileAtomic(fileName, content, 0o600) if err != nil { return false, fmt.Errorf("while recreating configs:%w", err) } if changedFile { - log.Info("updated configuration file", "name", fileName) + contextLogger.Info("updated configuration file", "name", fileName) changed = true } } diff --git a/internal/pgbouncer/management/controller/refresh_test.go b/internal/pgbouncer/management/controller/refresh_test.go index 5565c5357c..13e91aaa14 100644 --- a/internal/pgbouncer/management/controller/refresh_test.go +++ b/internal/pgbouncer/management/controller/refresh_test.go @@ -45,8 +45,8 @@ var _ = Describe("RefreshConfigurationFiles", func() { }) Context("when no files are passed", func() { - It("should return false and no error", func() { - changed, err := refreshConfigurationFiles(files) + It("should return false and no error", func(ctx SpecContext) { + changed, err := refreshConfigurationFiles(ctx, files) Expect(changed).To(BeFalse()) Expect(err).NotTo(HaveOccurred()) }) @@ -58,8 +58,8 @@ var _ = Describe("RefreshConfigurationFiles", func() { files[filepath.Join(tmpDir, "config2")] = []byte("content2") }) - It("should write content to files and return true", func() { - changed, err := refreshConfigurationFiles(files) + It("should write content to files and return true", func(ctx SpecContext) { + changed, err := refreshConfigurationFiles(ctx, files) Expect(changed).To(BeTrue()) Expect(err).NotTo(HaveOccurred()) @@ -76,8 +76,8 @@ var _ = Describe("RefreshConfigurationFiles", func() { files["/proc/you-cannot-write-here.conf"] = []byte("content") }) - It("should return an error", func() { - _, err := refreshConfigurationFiles(files) + It("should return an error", func(ctx SpecContext) { + _, err := refreshConfigurationFiles(ctx, files) Expect(err).To(HaveOccurred()) }) }) diff --git a/pkg/management/pgbouncer/metricsserver/lists.go b/pkg/management/pgbouncer/metricsserver/lists.go index 25a1ad678f..802a8e317a 100644 --- a/pkg/management/pgbouncer/metricsserver/lists.go 
+++ b/pkg/management/pgbouncer/metricsserver/lists.go @@ -120,11 +120,13 @@ func NewShowListsMetrics(subsystem string) ShowListsMetrics { } func (e *Exporter) collectShowLists(ch chan<- prometheus.Metric, db *sql.DB) { + contextLogger := log.FromContext(e.ctx) + e.Metrics.ShowLists.Reset() // First, let's check the connection. No need to proceed if this fails. rows, err := db.Query("SHOW LISTS;") if err != nil { - log.Error(err, "Error while executing SHOW LISTS") + contextLogger.Error(err, "Error while executing SHOW LISTS") e.Metrics.PgbouncerUp.Set(0) e.Metrics.Error.Set(1) return @@ -135,7 +137,7 @@ func (e *Exporter) collectShowLists(ch chan<- prometheus.Metric, db *sql.DB) { defer func() { err = rows.Close() if err != nil { - log.Error(err, "while closing rows for SHOW LISTS") + contextLogger.Error(err, "while closing rows for SHOW LISTS") } }() @@ -146,14 +148,14 @@ func (e *Exporter) collectShowLists(ch chan<- prometheus.Metric, db *sql.DB) { for rows.Next() { if err = rows.Scan(&list, &item); err != nil { - log.Error(err, "Error while executing SHOW LISTS") + contextLogger.Error(err, "Error while executing SHOW LISTS") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } m, ok := e.Metrics.ShowLists[list] if !ok { e.Metrics.Error.Set(1) - log.Info("Missing metric", "query", "SHOW LISTS", "metric", list) + contextLogger.Info("Missing metric", "query", "SHOW LISTS", "metric", list) continue } m.Set(float64(item)) diff --git a/pkg/management/pgbouncer/metricsserver/metricsserver.go b/pkg/management/pgbouncer/metricsserver/metricsserver.go index 6245d6353c..61a55b8dd6 100644 --- a/pkg/management/pgbouncer/metricsserver/metricsserver.go +++ b/pkg/management/pgbouncer/metricsserver/metricsserver.go @@ -45,10 +45,10 @@ var ( // Setup configure the web statusServer for a certain PostgreSQL instance, and // must be invoked before starting the real web statusServer -func Setup() error { +func Setup(ctx context.Context) error { // create the exporter and serve it on the /metrics endpoint registry = prometheus.NewRegistry() - exporter = NewExporter() + exporter = NewExporter(ctx) if err := registry.Register(exporter); err != nil { return fmt.Errorf("while registering PgBouncer exporters: %w", err) } diff --git a/pkg/management/pgbouncer/metricsserver/metricsserver_test.go b/pkg/management/pgbouncer/metricsserver/metricsserver_test.go index 2b27ec2945..1a80045d40 100644 --- a/pkg/management/pgbouncer/metricsserver/metricsserver_test.go +++ b/pkg/management/pgbouncer/metricsserver/metricsserver_test.go @@ -29,8 +29,8 @@ var _ = Describe("MetricsServer", func() { exporter = nil }) - It("should register exporters and collectors successfully", func() { - err := Setup() + It("should register exporters and collectors successfully", func(ctx SpecContext) { + err := Setup(ctx) Expect(err).NotTo(HaveOccurred()) mfs, err := registry.Gather() diff --git a/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go b/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go index ccf2860629..e498d370f1 100644 --- a/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go +++ b/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go @@ -18,6 +18,7 @@ limitations under the License. 
package metricsserver import ( + "context" "database/sql" "fmt" "time" @@ -35,6 +36,7 @@ const PrometheusNamespace = "cnpg" // Exporter exports a set of metrics and collectors on a given postgres instance type Exporter struct { + ctx context.Context Metrics *metrics pool pool.Pooler } @@ -53,8 +55,9 @@ type metrics struct { } // NewExporter creates an exporter -func NewExporter() *Exporter { +func NewExporter(ctx context.Context) *Exporter { return &Exporter{ + ctx: ctx, Metrics: newMetrics(), } } @@ -122,6 +125,8 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { } func (e *Exporter) collectPgBouncerMetrics(ch chan<- prometheus.Metric) { + contextLogger := log.FromContext(e.ctx) + e.Metrics.CollectionsTotal.Inc() collectionStart := time.Now() defer func() { @@ -129,7 +134,7 @@ func (e *Exporter) collectPgBouncerMetrics(ch chan<- prometheus.Metric) { }() db, err := e.GetPgBouncerDB() if err != nil { - log.Error(err, "Error opening connection to PostgreSQL") + contextLogger.Error(err, "Error opening connection to PostgreSQL") e.Metrics.Error.Set(1) return } diff --git a/pkg/management/pgbouncer/metricsserver/pools.go b/pkg/management/pgbouncer/metricsserver/pools.go index 4913ab19a5..b6d95b799d 100644 --- a/pkg/management/pgbouncer/metricsserver/pools.go +++ b/pkg/management/pgbouncer/metricsserver/pools.go @@ -184,11 +184,13 @@ func NewShowPoolsMetrics(subsystem string) *ShowPoolsMetrics { } func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { + contextLogger := log.FromContext(e.ctx) + e.Metrics.ShowPools.Reset() // First, let's check the connection. No need to proceed if this fails. rows, err := db.Query("SHOW POOLS;") if err != nil { - log.Error(err, "Error while executing SHOW POOLS") + contextLogger.Error(err, "Error while executing SHOW POOLS") e.Metrics.PgbouncerUp.Set(0) e.Metrics.Error.Set(1) return @@ -199,7 +201,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { defer func() { err = rows.Close() if err != nil { - log.Error(err, "while closing rows for SHOW POOLS") + contextLogger.Error(err, "while closing rows for SHOW POOLS") } }() @@ -234,7 +236,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { cols, err := rows.Columns() if err != nil { - log.Error(err, "Error while getting number of columns") + contextLogger.Error(err, "Error while getting number of columns") e.Metrics.PgbouncerUp.Set(0) e.Metrics.Error.Set(1) return @@ -259,7 +261,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { &maxWaitUs, &poolMode, ); err != nil { - log.Error(err, "Error while executing SHOW POOLS") + contextLogger.Error(err, "Error while executing SHOW POOLS") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } @@ -277,7 +279,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { &maxWaitUs, &poolMode, ); err != nil { - log.Error(err, "Error while executing SHOW POOLS") + contextLogger.Error(err, "Error while executing SHOW POOLS") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } diff --git a/pkg/management/pgbouncer/metricsserver/pools_test.go b/pkg/management/pgbouncer/metricsserver/pools_test.go index 117a219b41..8fd3e928d3 100644 --- a/pkg/management/pgbouncer/metricsserver/pools_test.go +++ b/pkg/management/pgbouncer/metricsserver/pools_test.go @@ -53,7 +53,7 @@ var _ = Describe("Exporter", func() { } ) - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) 
{ var err error db, mock, err = sqlmock.New() Expect(err).ShouldNot(HaveOccurred()) @@ -61,6 +61,7 @@ var _ = Describe("Exporter", func() { exp = &Exporter{ Metrics: newMetrics(), pool: fakePooler{db: db}, + ctx: ctx, } registry = prometheus.NewRegistry() diff --git a/pkg/management/pgbouncer/metricsserver/stats.go b/pkg/management/pgbouncer/metricsserver/stats.go index ebd19b6a1f..4bad1c4075 100644 --- a/pkg/management/pgbouncer/metricsserver/stats.go +++ b/pkg/management/pgbouncer/metricsserver/stats.go @@ -190,11 +190,13 @@ func NewShowStatsMetrics(subsystem string) *ShowStatsMetrics { } func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { + contextLogger := log.FromContext(e.ctx) + e.Metrics.ShowStats.Reset() // First, let's check the connection. No need to proceed if this fails. rows, err := db.Query("SHOW STATS;") if err != nil { - log.Error(err, "Error while executing SHOW STATS") + contextLogger.Error(err, "Error while executing SHOW STATS") e.Metrics.PgbouncerUp.Set(0) e.Metrics.Error.Set(1) return @@ -205,7 +207,7 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { defer func() { err = rows.Close() if err != nil { - log.Error(err, "while closing rows for SHOW STATS") + contextLogger.Error(err, "while closing rows for SHOW STATS") } }() var ( @@ -234,7 +236,7 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { statCols, err := rows.Columns() if err != nil { - log.Error(err, "Error while reading SHOW STATS") + contextLogger.Error(err, "Error while reading SHOW STATS") return } @@ -280,7 +282,7 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { ) } if err != nil { - log.Error(err, "Error while executing SHOW STATS") + contextLogger.Error(err, "Error while executing SHOW STATS") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } diff --git a/pkg/management/pgbouncer/metricsserver/stats_test.go b/pkg/management/pgbouncer/metricsserver/stats_test.go index 1094cccd72..ddfcbadc4f 100644 --- a/pkg/management/pgbouncer/metricsserver/stats_test.go +++ b/pkg/management/pgbouncer/metricsserver/stats_test.go @@ -40,7 +40,7 @@ var _ = Describe("MetricsServer", func() { ch chan prometheus.Metric ) - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { var err error db, mock, err = sqlmock.New() Expect(err).NotTo(HaveOccurred()) @@ -48,6 +48,7 @@ var _ = Describe("MetricsServer", func() { exp = &Exporter{ Metrics: newMetrics(), pool: fakePooler{db: db}, + ctx: ctx, } registry = prometheus.NewRegistry() registry.MustRegister(exp.Metrics.Error) diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go index 72f6b635fa..46ff344b29 100644 --- a/pkg/management/postgres/backup.go +++ b/pkg/management/postgres/backup.go @@ -99,6 +99,7 @@ func NewBarmanBackupCommand( // Start initiates a backup for this instance using // barman-cloud-backup func (b *BackupCommand) Start(ctx context.Context) error { + contextLogger := log.FromContext(ctx) if err := b.ensureCompatibility(); err != nil { return err } @@ -111,7 +112,7 @@ func (b *BackupCommand) Start(ctx context.Context) error { } if err := ensureWalArchiveIsWorking(b.Instance); err != nil { - log.Warning("WAL archiving is not working", "err", err) + contextLogger.Warning("WAL archiving is not working", "err", err) b.Backup.GetStatus().Phase = apiv1.BackupPhaseWalArchivingFailing return PatchBackupStatusAndRetry(ctx, b.Client, b.Backup) } @@ -120,7 +121,7 @@ func (b 
*BackupCommand) Start(ctx context.Context) error { b.Backup.GetStatus().Phase = apiv1.BackupPhaseRunning err := PatchBackupStatusAndRetry(ctx, b.Client, b.Backup) if err != nil { - log.Error(err, "can't set backup as WAL archiving failing") + contextLogger.Error(err, "can't set backup as WAL archiving failing") } } diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 08bb4e1aad..e5c484c2c5 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -39,7 +39,9 @@ import ( // InstallPgDataFileContent installs a file in PgData, returning true/false if // the file has been changed and an error state -func InstallPgDataFileContent(pgdata, contents, destinationFile string) (bool, error) { +func InstallPgDataFileContent(ctx context.Context, pgdata, contents, destinationFile string) (bool, error) { + contextLogger := log.FromContext(ctx) + targetFile := path.Join(pgdata, destinationFile) result, err := fileutils.WriteStringToFile(targetFile, contents) if err != nil { @@ -47,7 +49,7 @@ func InstallPgDataFileContent(pgdata, contents, destinationFile string) (bool, e } if result { - log.Info( + contextLogger.Info( "Installed configuration file", "pgdata", pgdata, "filename", destinationFile) @@ -60,6 +62,7 @@ func InstallPgDataFileContent(pgdata, contents, destinationFile string) (bool, e // PostgreSQL configuration and rewrites the file in the PGDATA if needed. This // function will return "true" if the configuration has been really changed. func (instance *Instance) RefreshConfigurationFilesFromCluster( + ctx context.Context, cluster *apiv1.Cluster, preserveUserSettings bool, ) (bool, error) { @@ -69,6 +72,7 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster( } postgresConfigurationChanged, err := InstallPgDataFileContent( + ctx, instance.PgData, postgresConfiguration, constants.PostgresqlCustomConfigurationFile) @@ -111,7 +115,7 @@ func (instance *Instance) GeneratePostgresqlHBA(cluster *apiv1.Cluster, ldapBind } // RefreshPGHBA generates and writes down the pg_hba.conf file -func (instance *Instance) RefreshPGHBA(cluster *apiv1.Cluster, ldapBindPassword string) ( +func (instance *Instance) RefreshPGHBA(ctx context.Context, cluster *apiv1.Cluster, ldapBindPassword string) ( postgresHBAChanged bool, err error, ) { @@ -121,6 +125,7 @@ func (instance *Instance) RefreshPGHBA(cluster *apiv1.Cluster, ldapBindPassword return false, nil } postgresHBAChanged, err = InstallPgDataFileContent( + ctx, instance.PgData, pgHBAContent, constants.PostgresqlHBARulesFile) @@ -213,13 +218,17 @@ func (instance *Instance) generatePostgresqlIdent(additionalLines []string) (str // RefreshPGIdent generates and writes down the pg_ident.conf file given // a set of additional pg_ident lines that is usually taken from the // Cluster configuration -func (instance *Instance) RefreshPGIdent(additionalLines []string) (postgresIdentChanged bool, err error) { +func (instance *Instance) RefreshPGIdent( + ctx context.Context, + additionalLines []string, +) (postgresIdentChanged bool, err error) { // Generate pg_ident.conf file pgIdentContent, err := instance.generatePostgresqlIdent(additionalLines) if err != nil { return false, nil } postgresIdentChanged, err = InstallPgDataFileContent( + ctx, instance.PgData, pgIdentContent, constants.PostgresqlIdentFile) diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index f7d44e9d73..ee3b1d9ada 100644 --- a/pkg/management/postgres/initdb.go +++ 
b/pkg/management/postgres/initdb.go @@ -144,7 +144,7 @@ func (info InitInfo) CheckTargetDataDirectory(ctx context.Context) error { pgDataExists, err := fileutils.FileExists(info.PgData) if err != nil { - log.Error(err, "Error while checking for an existing PGData") + contextLogger.Error(err, "Error while checking for an existing PGData") return fmt.Errorf("while verifying is PGDATA exists: %w", err) } if !pgDataExists { @@ -429,7 +429,7 @@ func (info InitInfo) Bootstrap(ctx context.Context) error { cluster.Spec.Bootstrap.InitDB != nil && cluster.Spec.Bootstrap.InitDB.Import != nil - if applied, err := instance.RefreshConfigurationFilesFromCluster(cluster, true); err != nil { + if applied, err := instance.RefreshConfigurationFilesFromCluster(ctx, cluster, true); err != nil { return fmt.Errorf("while writing the config: %w", err) } else if !applied { return fmt.Errorf("could not apply the config") diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 54d1be07d2..42e538983d 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -331,7 +331,7 @@ func (instance *Instance) VerifyPgDataCoherence(ctx context.Context) error { } // creates a bare pg_ident.conf that only grants local access - _, err := instance.RefreshPGIdent(nil) + _, err := instance.RefreshPGIdent(ctx, nil) return err } @@ -477,7 +477,8 @@ func (instance *Instance) ShutdownConnections() { // with Startup. // This function will return an error whether PostgreSQL is still up // after the shutdown request. -func (instance *Instance) Shutdown(options shutdownOptions) error { +func (instance *Instance) Shutdown(ctx context.Context, options shutdownOptions) error { + contextLogger := log.FromContext(ctx) instance.ShutdownConnections() // check instance status @@ -503,7 +504,7 @@ func (instance *Instance) Shutdown(options shutdownOptions) error { pgCtlOptions = append(pgCtlOptions, "-t", fmt.Sprintf("%v", *options.Timeout)) } - log.Info("Shutting down instance", + contextLogger.Info("Shutting down instance", "pgdata", instance.PgData, "mode", options.Mode, "timeout", options.Timeout, @@ -537,11 +538,14 @@ func (instance *Instance) TryShuttingDownSmartFast(ctx context.Context) error { if smartTimeout > 0 { contextLogger.Info("Requesting smart shutdown of the PostgreSQL instance") - err = instance.Shutdown(shutdownOptions{ - Mode: shutdownModeSmart, - Wait: true, - Timeout: &smartTimeout, - }) + err = instance.Shutdown( + ctx, + shutdownOptions{ + Mode: shutdownModeSmart, + Wait: true, + Timeout: &smartTimeout, + }, + ) if err != nil { contextLogger.Warning("Error while handling the smart shutdown request", "err", err) } @@ -549,10 +553,12 @@ func (instance *Instance) TryShuttingDownSmartFast(ctx context.Context) error { if err != nil || smartTimeout == 0 { contextLogger.Info("Requesting fast shutdown of the PostgreSQL instance") - err = instance.Shutdown(shutdownOptions{ - Mode: shutdownModeFast, - Wait: true, - }) + err = instance.Shutdown(ctx, + shutdownOptions{ + Mode: shutdownModeFast, + Wait: true, + }, + ) } if err != nil { contextLogger.Error(err, "Error while shutting down the PostgreSQL instance") @@ -571,19 +577,24 @@ func (instance *Instance) TryShuttingDownFastImmediate(ctx context.Context) erro contextLogger := log.FromContext(ctx) contextLogger.Info("Requesting fast shutdown of the PostgreSQL instance") - err := instance.Shutdown(shutdownOptions{ - Mode: shutdownModeFast, - Wait: true, - Timeout: &instance.MaxSwitchoverDelay, - }) + err := 
instance.Shutdown( + ctx, + shutdownOptions{ + Mode: shutdownModeFast, + Wait: true, + Timeout: &instance.MaxSwitchoverDelay, + }, + ) var exitError *exec.ExitError if errors.As(err, &exitError) { contextLogger.Info("Graceful shutdown failed. Issuing immediate shutdown", "exitCode", exitError.ExitCode()) - err = instance.Shutdown(shutdownOptions{ - Mode: shutdownModeImmediate, - Wait: true, - }) + err = instance.Shutdown(ctx, + shutdownOptions{ + Mode: shutdownModeImmediate, + Wait: true, + }, + ) } return err } @@ -700,7 +711,7 @@ func (instance *Instance) WithActiveInstance(inner func() error) error { } defer func() { - if err := instance.Shutdown(defaultShutdownOptions); err != nil { + if err := instance.Shutdown(ctx, defaultShutdownOptions); err != nil { log.Info("Error while deactivating instance", "err", err) } }() @@ -847,18 +858,19 @@ func (instance *Instance) WaitForSuperuserConnectionAvailable(ctx context.Contex // waitForConnectionAvailable waits until we can connect to the passed // sql.DB connection -func waitForConnectionAvailable(context context.Context, db *sql.DB) error { +func waitForConnectionAvailable(ctx context.Context, db *sql.DB) error { + contextLogger := log.FromContext(ctx) errorIsRetryable := func(err error) bool { - if context.Err() != nil { + if ctx.Err() != nil { return false } return err != nil } return retry.OnError(RetryUntilServerAvailable, errorIsRetryable, func() error { - err := db.PingContext(context) + err := db.PingContext(ctx) if err != nil { - log.Info("DB not available, will retry", "err", err) + contextLogger.Info("DB not available, will retry", "err", err) } return err }) @@ -924,7 +936,9 @@ func (instance *Instance) WaitForConfigReload(ctx context.Context) (*postgres.Po // waitForStreamingConnectionAvailable waits until we can connect to the passed // sql.DB connection using streaming protocol -func waitForStreamingConnectionAvailable(db *sql.DB) error { +func waitForStreamingConnectionAvailable(ctx context.Context, db *sql.DB) error { + contextLogger := log.FromContext(ctx) + errorIsRetryable := func(err error) bool { return err != nil } @@ -932,7 +946,7 @@ func waitForStreamingConnectionAvailable(db *sql.DB) error { return retry.OnError(RetryUntilServerAvailable, errorIsRetryable, func() error { result, err := db.Query("IDENTIFY_SYSTEM") if err != nil || result.Err() != nil { - log.Info("DB not available, will retry", "err", err) + contextLogger.Info("DB not available, will retry", "err", err) return err } defer func() { diff --git a/pkg/management/postgres/join.go b/pkg/management/postgres/join.go index fb3cfc7914..73212522d6 100644 --- a/pkg/management/postgres/join.go +++ b/pkg/management/postgres/join.go @@ -17,6 +17,7 @@ limitations under the License. 
package postgres import ( + "context" "fmt" "os/exec" @@ -33,7 +34,7 @@ import ( // ClonePgData clones an existing server, given its connection string, // to a certain data directory -func ClonePgData(connectionString, targetPgData, walDir string) error { +func ClonePgData(ctx context.Context, connectionString, targetPgData, walDir string) error { log.Info("Waiting for server to be available", "connectionString", connectionString) db, err := pool.NewDBConnection(connectionString, pool.ConnectionProfilePostgresqlPhysicalReplication) @@ -44,7 +45,7 @@ func ClonePgData(connectionString, targetPgData, walDir string) error { _ = db.Close() }() - err = waitForStreamingConnectionAvailable(db) + err = waitForStreamingConnectionAvailable(ctx, db) if err != nil { return fmt.Errorf("source server not available: %v", connectionString) } @@ -70,7 +71,7 @@ func ClonePgData(connectionString, targetPgData, walDir string) error { } // Join creates a new instance joined to an existing PostgreSQL cluster -func (info InitInfo) Join(cluster *apiv1.Cluster) error { +func (info InitInfo) Join(ctx context.Context, cluster *apiv1.Cluster) error { primaryConnInfo := buildPrimaryConnInfo(info.ParentNode, info.PodName) + " dbname=postgres connect_timeout=5" pgVersion, err := cluster.GetPostgresqlVersion() @@ -91,7 +92,7 @@ func (info InitInfo) Join(cluster *apiv1.Cluster) error { return err } - if err = ClonePgData(primaryConnInfo, info.PgData, info.PgWal); err != nil { + if err = ClonePgData(ctx, primaryConnInfo, info.PgData, info.PgWal); err != nil { return err } diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 6067f0389c..c797fb050b 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -146,7 +146,7 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm return err } - if err := info.WriteInitialPostgresqlConf(cluster); err != nil { + if err := info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { return err } @@ -167,7 +167,7 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm return err } - if err := info.WriteRestoreHbaConf(); err != nil { + if err := info.WriteRestoreHbaConf(ctx); err != nil { return err } @@ -185,13 +185,14 @@ func (info InitInfo) createBackupObjectForSnapshotRestore( typedClient client.Client, cluster *apiv1.Cluster, ) (*apiv1.Backup, []string, error) { + contextLogger := log.FromContext(ctx) sourceName := cluster.Spec.Bootstrap.Recovery.Source if sourceName == "" { return nil, nil, fmt.Errorf("recovery source not specified") } - log.Info("Recovering from external cluster", "sourceName", sourceName) + contextLogger.Info("Recovering from external cluster", "sourceName", sourceName) server, found := cluster.ExternalCluster(sourceName) if !found { @@ -273,7 +274,7 @@ func (info InitInfo) Restore(ctx context.Context) error { return err } - if err := info.WriteInitialPostgresqlConf(cluster); err != nil { + if err := info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { return err } // we need a migration here, otherwise the server will not start up if @@ -300,7 +301,7 @@ func (info InitInfo) Restore(ctx context.Context) error { return err } - if err := info.WriteRestoreHbaConf(); err != nil { + if err := info.WriteRestoreHbaConf(ctx); err != nil { return err } @@ -392,6 +393,7 @@ func (info InitInfo) restoreCustomWalDir(ctx context.Context) (bool, error) { // restoreDataDir restores PGDATA from an existing backup func (info InitInfo) 
restoreDataDir(ctx context.Context, backup *apiv1.Backup, env []string) error { + contextLogger := log.FromContext(ctx) var options []string if backup.Status.EndpointURL != "" { @@ -408,7 +410,7 @@ func (info InitInfo) restoreDataDir(ctx context.Context, backup *apiv1.Backup, e options = append(options, info.PgData) - log.Info("Starting barman-cloud-restore", + contextLogger.Info("Starting barman-cloud-restore", "options", options) cmd := exec.Command(barmanCapabilities.BarmanCloudRestore, options...) // #nosec G204 @@ -420,10 +422,10 @@ func (info InitInfo) restoreDataDir(ctx context.Context, backup *apiv1.Backup, e err = barmanCommand.UnmarshalBarmanCloudRestoreExitCode(ctx, exitError.ExitCode()) } - log.Error(err, "Can't restore backup") + contextLogger.Error(err, "Can't restore backup") return err } - log.Info("Restore completed") + contextLogger.Info("Restore completed") return nil } @@ -460,13 +462,14 @@ func (info InitInfo) loadBackupObjectFromExternalCluster( typedClient client.Client, cluster *apiv1.Cluster, ) (*apiv1.Backup, []string, error) { + contextLogger := log.FromContext(ctx) sourceName := cluster.Spec.Bootstrap.Recovery.Source if sourceName == "" { return nil, nil, fmt.Errorf("recovery source not specified") } - log.Info("Recovering from external cluster", "sourceName", sourceName) + contextLogger.Info("Recovering from external cluster", "sourceName", sourceName) server, found := cluster.ExternalCluster(sourceName) if !found { @@ -506,7 +509,7 @@ func (info InitInfo) loadBackupObjectFromExternalCluster( return nil, nil, fmt.Errorf("no target backup found") } - log.Info("Target backup found", "backup", targetBackup) + contextLogger.Info("Target backup found", "backup", targetBackup) return &apiv1.Backup{ Spec: apiv1.BackupSpec{ @@ -541,6 +544,7 @@ func (info InitInfo) loadBackupFromReference( typedClient client.Client, cluster *apiv1.Cluster, ) (*apiv1.Backup, []string, error) { + contextLogger := log.FromContext(ctx) var backup apiv1.Backup err := typedClient.Get( ctx, @@ -566,7 +570,7 @@ func (info InitInfo) loadBackupFromReference( return nil, nil, err } - log.Info("Recovering existing backup", "backup", backup) + contextLogger.Info("Recovering existing backup", "backup", backup) return &backup, env, nil } @@ -763,7 +767,8 @@ func LoadEnforcedParametersFromCluster( // WriteInitialPostgresqlConf resets the postgresql.conf that there is in the instance using // a new bootstrapped instance as reference -func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { +func (info InitInfo) WriteInitialPostgresqlConf(ctx context.Context, cluster *apiv1.Cluster) error { + contextLogger := log.FromContext(ctx) if err := fileutils.EnsureDirectoryExists(postgresSpec.RecoveryTemporaryDirectory); err != nil { return err } @@ -775,7 +780,7 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { defer func() { err = os.RemoveAll(tempDataDir) if err != nil { - log.Error( + contextLogger.Error( err, "skipping error while deleting temporary data directory") } @@ -794,15 +799,15 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { WithNamespace(info.Namespace). 
WithClusterName(info.ClusterName) - _, err = temporaryInstance.RefreshPGHBA(cluster, "") + _, err = temporaryInstance.RefreshPGHBA(ctx, cluster, "") if err != nil { return fmt.Errorf("while generating pg_hba.conf: %w", err) } - _, err = temporaryInstance.RefreshPGIdent(cluster.Spec.PostgresConfiguration.PgIdent) + _, err = temporaryInstance.RefreshPGIdent(ctx, cluster.Spec.PostgresConfiguration.PgIdent) if err != nil { return fmt.Errorf("while generating pg_ident.conf: %w", err) } - _, err = temporaryInstance.RefreshConfigurationFilesFromCluster(cluster, false) + _, err = temporaryInstance.RefreshConfigurationFilesFromCluster(ctx, cluster, false) if err != nil { return fmt.Errorf("while generating Postgres configuration: %w", err) } @@ -841,7 +846,7 @@ func (info InitInfo) WriteInitialPostgresqlConf(cluster *apiv1.Cluster) error { // WriteRestoreHbaConf writes basic pg_hba.conf and pg_ident.conf allowing access without password from localhost. // This is needed to set the PostgreSQL password after the postgres server is started and active -func (info InitInfo) WriteRestoreHbaConf() error { +func (info InitInfo) WriteRestoreHbaConf(ctx context.Context) error { // We allow every access from localhost, and this is needed to correctly restore // the database _, err := fileutils.WriteStringToFile( @@ -852,7 +857,7 @@ func (info InitInfo) WriteRestoreHbaConf() error { } // Create only the local map referred in the HBA configuration - _, err = info.GetInstance().RefreshPGIdent(nil) + _, err = info.GetInstance().RefreshPGIdent(ctx, nil) return err } @@ -921,6 +926,7 @@ func (info *InitInfo) checkBackupDestination( client client.Client, cluster *apiv1.Cluster, ) error { + contextLogger := log.FromContext(ctx) if !cluster.Spec.Backup.IsBarmanBackupConfigured() { return nil } @@ -954,7 +960,7 @@ func (info *InitInfo) checkBackupDestination( checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions( ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) if err != nil { - log.Error(err, "while getting barman-cloud-wal-archive options") + contextLogger.Error(err, "while getting barman-cloud-wal-archive options") return err } diff --git a/pkg/management/postgres/webserver/webserver.go b/pkg/management/postgres/webserver/webserver.go index 1fcf09916b..9c6b0b90e3 100644 --- a/pkg/management/postgres/webserver/webserver.go +++ b/pkg/management/postgres/webserver/webserver.go @@ -79,9 +79,11 @@ func NewWebServer(server *http.Server) *Webserver { // Start starts a webserver listener, implementing the K8s runnable interface func (ws *Webserver) Start(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + errChan := make(chan error, 1) go func() { - log.Info("Starting webserver", "address", ws.server.Addr, "hasTLS", ws.server.TLSConfig != nil) + contextLogger.Info("Starting webserver", "address", ws.server.Addr, "hasTLS", ws.server.TLSConfig != nil) var err error if ws.server.TLSConfig != nil { @@ -99,19 +101,19 @@ func (ws *Webserver) Start(ctx context.Context) error { // on subsequent tries case err := <-errChan: if errors.Is(err, http.ErrServerClosed) { - log.Error(err, "Closing the web server", "address", ws.server.Addr) + contextLogger.Error(err, "Closing the web server", "address", ws.server.Addr) } else { - log.Error(err, "Error while running the web server", "address", ws.server.Addr) + contextLogger.Error(err, "Error while running the web server", "address", ws.server.Addr) } return err case <-ctx.Done(): if err := ws.server.Shutdown(context.Background()); err != nil { - 
log.Error(err, "Error while shutting down the web server", "address", ws.server.Addr) + contextLogger.Error(err, "Error while shutting down the web server", "address", ws.server.Addr) return err } } - log.Info("Webserver exited", "address", ws.server.Addr) + contextLogger.Info("Webserver exited", "address", ws.server.Addr) return nil } From 5d47d8cdc8a62ab349b905b20c27f06c750593aa Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 14 Oct 2024 13:26:54 +0200 Subject: [PATCH 064/836] feat(backup): add `.Status.PluginMetadata` field (#5776) Closes #5777 Signed-off-by: Armando Ruocco --- .wordlist-en-custom.txt | 1 + api/v1/backup_types.go | 3 +++ api/v1/zz_generated.deepcopy.go | 7 +++++++ config/crd/bases/postgresql.cnpg.io_backups.yaml | 5 +++++ docs/src/cloudnative-pg.v1.md | 7 +++++++ internal/cnpi/plugin/client/backup.go | 3 +++ pkg/management/postgres/webserver/plugin_backup.go | 1 + 7 files changed, 27 insertions(+) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index b4188b65aa..20bab75fc3 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1004,6 +1004,7 @@ pitr plpgsql pluggable pluginConfiguration +pluginMetadata pluginStatus png podAffinityTerm diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go index 739e9a731b..9c3e49d2ae 100644 --- a/api/v1/backup_types.go +++ b/api/v1/backup_types.go @@ -286,6 +286,9 @@ type BackupStatus struct { // Whether the backup was online/hot (`true`) or offline/cold (`false`) Online *bool `json:"online,omitempty"` + + // A map containing the plugin metadata + PluginMetadata map[string]string `json:"pluginMetadata,omitempty"` } // InstanceID contains the information to identify an instance diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 14c71da945..b9b4cf7690 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -325,6 +325,13 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { *out = new(bool) **out = **in } + if in.PluginMetadata != nil { + in, out := &in.PluginMetadata, &out.PluginMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index 9e1b5295a4..a6f002623f 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -318,6 +318,11 @@ spec: phase: description: The last backup status type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object s3Credentials: description: The credentials to use to upload data to S3 properties: diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index ba0d311131..f5678cb22e 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -816,6 +816,13 @@ parameter is omitted

Whether the backup was online/hot (true) or offline/cold (false)

+pluginMetadata [Required]
+map[string]string + + +

A map containing the plugin metadata

+ + diff --git a/internal/cnpi/plugin/client/backup.go b/internal/cnpi/plugin/client/backup.go index f2a7a211ba..131438247a 100644 --- a/internal/cnpi/plugin/client/backup.go +++ b/internal/cnpi/plugin/client/backup.go @@ -86,6 +86,8 @@ type BackupResponse struct { // This field is set to true for online/hot backups and to false otherwise. Online bool + + Metadata map[string]string } func (data *data) Backup( @@ -162,5 +164,6 @@ func (data *data) Backup( TablespaceMapFile: result.TablespaceMapFile, InstanceID: result.InstanceId, Online: result.Online, + Metadata: result.Metadata, }, nil } diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index d0cc5043eb..5d1ad1562b 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -137,6 +137,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { b.Backup.Status.BackupLabelFile = response.BackupLabelFile b.Backup.Status.TablespaceMapFile = response.TablespaceMapFile b.Backup.Status.Online = ptr.To(response.Online) + b.Backup.Status.PluginMetadata = response.Metadata if !response.StartedAt.IsZero() { b.Backup.Status.StartedAt = ptr.To(metav1.NewTime(response.StartedAt)) From ebe20200dee0bd8ed357ab459b6e5fdc30d12855 Mon Sep 17 00:00:00 2001 From: Jonas Kalderstam Date: Mon, 14 Oct 2024 17:12:41 +0200 Subject: [PATCH 065/836] fix(docs): typo in architecture.md (#5801) Signed-off-by: Jonas Kalderstam --- docs/src/architecture.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/architecture.md b/docs/src/architecture.md index 43f1244343..326f50fdd1 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -235,7 +235,7 @@ CloudNativePG recommends using the `node-role.kubernetes.io/postgres` taint. To assign the `postgres` taint to a node, use the following command: ```sh -kubectl taint node node-role.kubernetes.io/postgres=:noSchedule +kubectl taint node node-role.kubernetes.io/postgres=:NoSchedule ``` To ensure that a `Cluster` resource is scheduled on a node with a `postgres` taint, you must correctly configure the `.spec.affinity.tolerations` stanza in your manifests. From 36181640690ce23a56bf070d16f10f846e77f5f2 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 15 Oct 2024 09:42:07 +0200 Subject: [PATCH 066/836] fix(failover): prevent double failover in case of lost connectivity (#5788) This patch ensures the operator does not trigger two failovers when a primary Pod loses connectivity and fails to recognize its role change from primary to replica. Previously, the first failover occurred when the operator detected that the primary Pod was no longer ready or present. A second failover could be triggered if the old primary Pod recovered before the Kubelet timeout, with the operator potentially promoting it to primary again based on the Pod list. With this patch, the operator will wait for the recovered Pod to acknowledge its new role before taking further action, preventing unnecessary failovers. 
Closes: #2513 --------- Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- internal/controller/cluster_controller.go | 26 +++++++++++++++++++++++ pkg/postgres/status.go | 14 ++++++++++++ 2 files changed, 40 insertions(+) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 0b29964e4a..097ca20b56 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -70,6 +70,11 @@ const ( var apiGVString = apiv1.GroupVersion.String() +// errOldPrimaryDetected occurs when a primary Pod loses connectivity with the +// API server and, upon reconnection, attempts to retain its previous primary +// role. +var errOldPrimaryDetected = errors.New("old primary detected") + // ClusterReconciler reconciles a Cluster objects type ClusterReconciler struct { client.Client @@ -340,6 +345,27 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste return ctrl.Result{}, fmt.Errorf("cannot update the instances status on the cluster: %w", err) } + // If a Pod loses connectivity, the operator will fail over, but the faulty + // Pod would not receive a change of its role from primary to replica. + // + // When connectivity resumes, the operator will find two primaries: + // the previously faulting one and the new primary that has been + // promoted. The operator should just wait for the Pods to report their + // current role through auto-healing before proceeding. Without this safety + // measure, the operator would just fail back to the first primary of + // the list. + if primaryNames := instancesStatus.PrimaryNames(); len(primaryNames) > 1 { + contextLogger.Error( + errOldPrimaryDetected, + "An old primary pod has been detected. Awaiting its recognition of the new role", + "primaryNames", primaryNames, + ) + instancesStatus.LogStatus(ctx) + return ctrl.Result{ + RequeueAfter: 5 * time.Second, + }, nil + } + if err := persistentvolumeclaim.ReconcileMetadata( ctx, r.Client, diff --git a/pkg/postgres/status.go b/pkg/postgres/status.go index fc530c7593..d3d731168a 100644 --- a/pkg/postgres/status.go +++ b/pkg/postgres/status.go @@ -405,3 +405,17 @@ func (list PostgresqlStatusList) InstancesReportingStatus() int { return n } + +// PrimaryNames gets the names of each primary instance of this Cluster. Under +// normal conditions, this list is composed of one and only one name. +func (list PostgresqlStatusList) PrimaryNames() []string { + result := make([]string, 0, 1) + + for _, item := range list.Items { + if item.IsPrimary { + result = append(result, item.Pod.Name) + } + } + + return result +} From 47d61ed36936d4f135408e6b63461ad5cf562d62 Mon Sep 17 00:00:00 2001 From: Balthazar Rouberol Date: Tue, 15 Oct 2024 10:13:24 +0200 Subject: [PATCH 067/836] fix: set `TMPDIR` and `PSQL_HISTORY` environment variables (#5503) Ensure the `TMPDIR` environment variable is correctly set to `/controller/tmp` for temporary files. Additionally, define the `PSQL_HISTORY` variable as `/controller/tmp/.psql_history` to store PostgreSQL command history in a controlled location. This addresses issues with file management during execution and improves the isolation of temporary files and session history.
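As a quick illustration of the resulting environment, the following is a minimal sketch deriving both variables from the shared temporary directory. The constant mirrors the `postgres.TemporaryDirectory` value introduced by this patch; the `envVar` type and helper names are hypothetical simplifications of the `corev1.EnvVar` entries added to the Pod spec.

```go
package main

import (
	"fmt"
	"path"
)

// temporaryDirectory mirrors the postgres.TemporaryDirectory constant
// introduced by this patch.
const temporaryDirectory = "/controller/tmp"

// envVar is a simplified, hypothetical stand-in for corev1.EnvVar.
type envVar struct {
	name, value string
}

// temporaryEnv builds the two environment entries this patch adds to
// every instance Pod.
func temporaryEnv() []envVar {
	return []envVar{
		{name: "TMPDIR", value: temporaryDirectory},
		{name: "PSQL_HISTORY", value: path.Join(temporaryDirectory, ".psql_history")},
	}
}

func main() {
	for _, e := range temporaryEnv() {
		fmt.Printf("%s=%s\n", e.name, e.value)
	}
	// Output:
	// TMPDIR=/controller/tmp
	// PSQL_HISTORY=/controller/tmp/.psql_history
}
```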
Closes #5420 Closes #4137 Signed-off-by: Balthazar Rouberol Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- internal/cmd/manager/instance/cmd.go | 5 +++++ pkg/postgres/configuration.go | 4 ++++ pkg/specs/pods.go | 9 +++++++++ pkg/specs/pods_test.go | 12 ++++++++++++ 4 files changed, 30 insertions(+) diff --git a/internal/cmd/manager/instance/cmd.go b/internal/cmd/manager/instance/cmd.go index 7cf773b66f..dde99ee925 100644 --- a/internal/cmd/manager/instance/cmd.go +++ b/internal/cmd/manager/instance/cmd.go @@ -19,6 +19,7 @@ package instance import ( "fmt" + "os" "github.com/spf13/cobra" @@ -29,6 +30,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/restoresnapshot" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/run" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/status" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) // NewCmd creates the "instance" command @@ -39,6 +41,9 @@ func NewCmd() *cobra.Command { RunE: func(_ *cobra.Command, _ []string) error { return fmt.Errorf("missing subcommand") }, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + return os.MkdirAll(postgres.TemporaryDirectory, 0o1777) //nolint:gosec + }, } cmd.AddCommand(initdb.NewCmd()) diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 9bd377c26e..021ac045f5 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -146,6 +146,10 @@ local {{.Username}} postgres // ScratchDataDirectory is the directory to be used for scratch data ScratchDataDirectory = "/controller" + // TemporaryDirectory is the directory that is used to create + // temporary files, and configured as TMPDIR in PostgreSQL Pods + TemporaryDirectory = "/controller/tmp" + // SpoolDirectory is the directory where we spool the WAL files that // were pre-archived in parallel SpoolDirectory = ScratchDataDirectory + "/wal-archive-spool" diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index 91aa0b758f..8e3e5ed44e 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -22,6 +22,7 @@ import ( "encoding/json" "fmt" "math" + "path" "reflect" "slices" "strconv" @@ -131,6 +132,10 @@ func CreatePodEnvConfig(cluster apiv1.Cluster, podName string) EnvConfig { Name: "CLUSTER_NAME", Value: cluster.Name, }, + { + Name: "PSQL_HISTORY", + Value: path.Join(postgres.TemporaryDirectory, ".psql_history"), + }, { Name: "PGPORT", Value: strconv.Itoa(postgres.ServerPort), @@ -139,6 +144,10 @@ func CreatePodEnvConfig(cluster apiv1.Cluster, podName string) EnvConfig { Name: "PGHOST", Value: postgres.SocketDirectory, }, + { + Name: "TMPDIR", + Value: postgres.TemporaryDirectory, + }, }, EnvFrom: cluster.Spec.EnvFrom, } diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go index f9b8258ac6..17f9494d15 100644 --- a/pkg/specs/pods_test.go +++ b/pkg/specs/pods_test.go @@ -332,6 +332,10 @@ var _ = Describe("EnvConfig", func() { Name: "CLUSTER_NAME", Value: cluster.Name, }, + { + Name: "PSQL_HISTORY", + Value: postgres.TemporaryDirectory + "/.psql_history", + }, { Name: "PGPORT", Value: strconv.Itoa(postgres.ServerPort), @@ -340,6 +344,10 @@ var _ = Describe("EnvConfig", func() { Name: "PGHOST", Value: postgres.SocketDirectory, }, + { + Name: "TMPDIR", + Value: postgres.TemporaryDirectory, + }, { Name: "TEST_ENV", Value: "EXPECTED", @@ -385,6 +393,10 @@ var _ = Describe("EnvConfig", func() { Name: "PGHOST", Value: postgres.SocketDirectory, }, + { + Name: "TMPDIR", + Value: 
postgres.TemporaryDirectory, + }, { Name: "TEST_ENV", Value: "UNEXPECTED", From e227fc7d1b959230ca58c250a44a9e2baaf7aba0 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 15 Oct 2024 11:16:51 +0200 Subject: [PATCH 068/836] chore: remove code to support PostgreSQL 11 (#5794) Refactor code to remove any blocks specifically designed to work on a PostgreSQL version < 12. Closes #5797 --------- Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini --- api/v1/cluster_webhook.go | 31 +---------- api/v1/cluster_webhook_test.go | 42 ++++---------- docs/src/replication.md | 11 ---- docs/src/ssl_connections.md | 4 -- pkg/management/postgres/configuration.go | 70 ++---------------------- pkg/management/postgres/restore.go | 49 ++++++----------- 6 files changed, 38 insertions(+), 169 deletions(-) diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go index 197358d786..60add4a31a 100644 --- a/api/v1/cluster_webhook.go +++ b/api/v1/cluster_webhook.go @@ -1096,12 +1096,12 @@ func (r *Cluster) validateConfiguration() field.ErrorList { // validateImageName function return result } - if pgVersion.Major() < 11 { + if pgVersion.Major() < 12 { result = append(result, field.Invalid( field.NewPath("spec", "imageName"), r.Spec.ImageName, - "Unsupported PostgreSQL version. Versions 11 or newer are supported")) + "Unsupported PostgreSQL version. Versions 12 or newer are supported")) } info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, @@ -2189,33 +2189,6 @@ func (r *Cluster) validateReplicationSlots() field.ErrorList { return nil } - psqlVersion, err := r.GetPostgresqlVersion() - if err != nil { - // The validation error will be already raised by the - // validateImageName function - return nil - } - - if psqlVersion.Major() < 11 { - if replicationSlots.HighAvailability.GetEnabled() { - return field.ErrorList{ - field.Invalid( - field.NewPath("spec", "replicationSlots", "highAvailability", "enabled"), - replicationSlots.HighAvailability.GetEnabled(), - "Cannot enable HA replication slots synchronization. PostgreSQL 11 or above required"), - } - } - - if replicationSlots.SynchronizeReplicas.GetEnabled() { - return field.ErrorList{ - field.Invalid( - field.NewPath("spec", "replicationSlots", "synchronizeReplicas", "enabled"), - replicationSlots.SynchronizeReplicas.GetEnabled(), - "Cannot enable user defined replication slots synchronization. 
PostgreSQL 11 or above required"), - } - } - } - if errs := r.Spec.ReplicationSlots.SynchronizeReplicas.compileRegex(); len(errs) > 0 { return field.ErrorList{ field.Invalid( diff --git a/api/v1/cluster_webhook_test.go b/api/v1/cluster_webhook_test.go index 85a28e9044..f6bea56fca 100644 --- a/api/v1/cluster_webhook_test.go +++ b/api/v1/cluster_webhook_test.go @@ -1333,12 +1333,12 @@ var _ = Describe("validate image name change", func() { It("complains if it can't upgrade between mayor versions", func() { clusterOld := Cluster{ Spec: ClusterSpec{ - ImageName: "postgres:12.0", + ImageName: "postgres:17.0", }, } clusterNew := Cluster{ Spec: ClusterSpec{ - ImageName: "postgres:11.0", + ImageName: "postgres:16.0", }, } Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) @@ -1347,12 +1347,12 @@ var _ = Describe("validate image name change", func() { It("doesn't complain if image change is valid", func() { clusterOld := Cluster{ Spec: ClusterSpec{ - ImageName: "postgres:12.1", + ImageName: "postgres:17.1", }, } clusterNew := Cluster{ Spec: ClusterSpec{ - ImageName: "postgres:12.0", + ImageName: "postgres:17.0", }, } Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) @@ -1408,7 +1408,7 @@ var _ = Describe("validate image name change", func() { It("complains on major upgrades", func() { clusterOld := Cluster{ Spec: ClusterSpec{ - ImageName: "postgres:15.1", + ImageName: "postgres:16.1", }, } clusterNew := Cluster{ @@ -1418,7 +1418,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 16, + Major: 17, }, }, } @@ -1435,7 +1435,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 14, + Major: 16, }, }, } @@ -1472,13 +1472,13 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 16, + Major: 17, }, }, } clusterNew := Cluster{ Spec: ClusterSpec{ - ImageName: "postgres:16.1", + ImageName: "postgres:17.1", }, } Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) @@ -1491,13 +1491,13 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 15, + Major: 16, }, }, } clusterNew := Cluster{ Spec: ClusterSpec{ - ImageName: "postgres:16.1", + ImageName: "postgres:17.1", }, } Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) @@ -1510,7 +1510,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 14, + Major: 16, }, }, } @@ -3380,24 +3380,6 @@ var _ = Describe("validation of imports", func() { }) var _ = Describe("validation of replication slots configuration", func() { - It("prevents using replication slots on PostgreSQL 10 and older", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ImageName: "ghcr.io/cloudnative-pg/postgresql:10.5", - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ - Enabled: ptr.To(true), - }, - UpdateInterval: 0, - }, - }, - } - cluster.Default() - - result := cluster.validateReplicationSlots() - Expect(result).To(HaveLen(1)) - }) - It("can be enabled on the default PostgreSQL image", func() { cluster := &Cluster{ Spec: ClusterSpec{ diff --git a/docs/src/replication.md b/docs/src/replication.md index a4f1cd93c5..b0e61e380e 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -480,17 +480,6 @@ Here follows a brief description of the main options: replication slots with the position on the 
current primary, expressed in seconds (default: 30) -!!! Important - This capability requires PostgreSQL 11 or higher, as it relies on the - [`pg_replication_slot_advance()` administration function](https://www.postgresql.org/docs/current/functions-admin.html) - to directly manipulate the position of a replication slot. - -!!! Warning - In PostgreSQL 11, enabling replication slots if initially disabled, or conversely - disabling them if initially enabled, will require a rolling update of the - cluster (due to the presence of the `recovery.conf` file that is only read - at startup). - Although it is not recommended, if you desire a different behavior, you can customize the above options. diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index a7826eb3c7..b11ea1a620 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -184,10 +184,6 @@ By default, the operator sets both [`ssl_min_protocol_version`](https://www.post and [`ssl_max_protocol_version`](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-SSL-MAX-PROTOCOL-VERSION) to `TLSv1.3`. -!!! Important - In PostgreSQL 11, these two GUCs don't exist. Hence, in these specific versions - these values aren't set, and the default values are used. - This assumes that the PostgreSQL operand images include an OpenSSL library that supports the `TLSv1.3` version. If not, or if your client applications need a lower version number, you need to manually configure it in the PostgreSQL diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index e5c484c2c5..3dedfe9da5 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -31,7 +31,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" - postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres/replication" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -249,77 +248,20 @@ func UpdateReplicaConfiguration(pgData, primaryConnInfo, slotName string) (chang return changed, err } - major, err := postgresutils.GetMajorVersion(pgData) - if err != nil { - return false, err - } - - if major < 12 { - return configureRecoveryConfFile(pgData, primaryConnInfo, slotName) - } - return changed, createStandbySignal(pgData) } -// configureRecoveryConfFile configures replication in the recovery.conf file -// for PostgreSQL 11 and earlier -func configureRecoveryConfFile(pgData, primaryConnInfo, slotName string) (changed bool, err error) { - targetFile := path.Join(pgData, "recovery.conf") - +// configurePostgresOverrideConfFile writes the content of override.conf file, including +// replication information +func configurePostgresOverrideConfFile(pgData, primaryConnInfo, slotName string) (changed bool, err error) { + targetFile := path.Join(pgData, constants.PostgresqlOverrideConfigurationFile) options := map[string]string{ - "standby_mode": "on", "restore_command": fmt.Sprintf( "/controller/manager wal-restore --log-destination %s/%s.json %%f %%p", postgres.LogPath, postgres.LogFileName), "recovery_target_timeline": "latest", - } - - if slotName != "" { - options["primary_slot_name"] = slotName - } - - if primaryConnInfo != "" { - options["primary_conninfo"] = primaryConnInfo - 
} - - changed, err = configfile.UpdatePostgresConfigurationFile( - targetFile, - options, - "primary_slot_name", - "primary_conninfo", - ) - if err != nil { - return false, err - } - if changed { - log.Info("Updated replication settings", "filename", "recovery.conf") - } - - return changed, nil -} - -// configurePostgresOverrideConfFile writes the content of override.conf file, including -// replication information -func configurePostgresOverrideConfFile(pgData, primaryConnInfo, slotName string) (changed bool, err error) { - targetFile := path.Join(pgData, constants.PostgresqlOverrideConfigurationFile) - - major, err := postgresutils.GetMajorVersion(pgData) - if err != nil { - return false, err - } - - options := make(map[string]string) - - // Write replication control as GUCs (from PostgreSQL 12 or above) - if major >= 12 { - options = map[string]string{ - "restore_command": fmt.Sprintf( - "/controller/manager wal-restore --log-destination %s/%s.json %%f %%p", - postgres.LogPath, postgres.LogFileName), - "recovery_target_timeline": "latest", - "primary_slot_name": slotName, - "primary_conninfo": primaryConnInfo, - } + "primary_slot_name": slotName, + "primary_conninfo": primaryConnInfo, } // Ensure that override.conf file contains just the above options diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index c797fb050b..347c997786 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -50,7 +50,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" - postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/system" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -612,16 +611,12 @@ func (info InitInfo) writeRestoreWalConfig( func (info InitInfo) writeRecoveryConfiguration(cluster *apiv1.Cluster, recoveryFileContents string) error { // Ensure restore_command is used to correctly recover WALs // from the object storage - major, err := postgresutils.GetMajorVersion(info.PgData) - if err != nil { - return fmt.Errorf("cannot detect major version: %w", err) - } log.Info("Generated recovery configuration", "configuration", recoveryFileContents) // Temporarily suspend WAL archiving. We set it to `false` (which means failure // of the archiver) in order to defer the decision about archiving to PostgreSQL // itself once the recovery job is completed and the instance is regularly started. 
- err = fileutils.AppendStringToFile( + err := fileutils.AppendStringToFile( path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile), "archive_command = 'false'\n") if err != nil { @@ -671,35 +666,27 @@ func (info InitInfo) writeRecoveryConfiguration(cluster *apiv1.Cluster, recovery return fmt.Errorf("cannot write recovery config for enforced parameters: %w", err) } - if major >= 12 { - // Append restore_command to the end of the - // custom configs file - err = fileutils.AppendStringToFile( - path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile), - recoveryFileContents) - if err != nil { - return fmt.Errorf("cannot write recovery config: %w", err) - } - - err = os.WriteFile( - path.Join(info.PgData, constants.PostgresqlOverrideConfigurationFile), - []byte(""), - 0o600) - if err != nil { - return fmt.Errorf("cannot erase auto config: %w", err) - } + // Append restore_command to the end of the + // custom configs file + err = fileutils.AppendStringToFile( + path.Join(info.PgData, constants.PostgresqlCustomConfigurationFile), + recoveryFileContents) + if err != nil { + return fmt.Errorf("cannot write recovery config: %w", err) + } - // Create recovery signal file - return os.WriteFile( - path.Join(info.PgData, "recovery.signal"), - []byte(""), - 0o600) + err = os.WriteFile( + path.Join(info.PgData, constants.PostgresqlOverrideConfigurationFile), + []byte(""), + 0o600) + if err != nil { + return fmt.Errorf("cannot erase auto config: %w", err) } - // We need to generate a recovery.conf + // Create recovery signal file return os.WriteFile( - path.Join(info.PgData, "recovery.conf"), - []byte(recoveryFileContents), + path.Join(info.PgData, "recovery.signal"), + []byte(""), 0o600) } From 0f722541a873dc14f53a378d33910aa9afe56410 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 15 Oct 2024 16:27:23 +0200 Subject: [PATCH 069/836] docs: refactor logging page, mentioning `stern` (#5790) Closes #5780 Signed-off-by: Gabriele Bartolini Signed-off-by: Jaime Silvela Signed-off-by: Leonardo Cecchi Co-authored-by: Jaime Silvela Co-authored-by: Leonardo Cecchi --- docs/src/logging.md | 160 ++++++++++++++++++++---------------- docs/src/operator_conf.md | 6 +- docs/src/troubleshooting.md | 44 +++++----- 3 files changed, 111 insertions(+), 99 deletions(-) diff --git a/docs/src/logging.md b/docs/src/logging.md index 98ba55b467..8543446929 100644 --- a/docs/src/logging.md +++ b/docs/src/logging.md @@ -1,43 +1,60 @@ # Logging -The operator is designed to log in JSON format directly to standard output, -including PostgreSQL logs. +CloudNativePG outputs logs in JSON format directly to standard output, including +PostgreSQL logs, without persisting them to storage for security reasons. This +design facilitates seamless integration with most Kubernetes-compatible log +management tools, including command line ones like +[stern](https://github.com/stern/stern). -Each log entry has the following fields: - -- `level` – Log level (`info`, `notice`, ...). -- `ts` – The timestamp (epoch with microseconds). -- `logger` – The type of the record (for example, `postgres` or `pg_controldata`). -- `msg` – The actual message or the keyword `record` in case the message is parsed in JSON format. -- `record` – The actual record with structure that varies depending on the - `logger` type. -- `logging_podName` – The pod where the log was created. - -!!! 
Warning - Long-term storage and management of logs is outside the operator's purview, - and needs to be provided at the level of the Kubernetes installation. - See the +!!! Important + Long-term storage and management of logs are outside the scope of the + operator and should be handled at the Kubernetes infrastructure level. + For more information, see the [Kubernetes Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) documentation. +Each log entry includes the following fields: + +- `level` – The log level (e.g., `info`, `notice`). +- `ts` – The timestamp. +- `logger` – The type of log (e.g., `postgres`, `pg_controldata`). +- `msg` – The log message, or the keyword `record` if the message is in JSON + format. +- `record` – The actual record, with a structure that varies depending on the + `logger` type. +- `logging_pod` – The name of the pod where the log was generated. + !!! Info - If your log ingestion system requires it, you can rename the `level` and `ts` field names using the `log-field-level` and - `log-field-timestamp` flags of the operator controller. Edit the `Deployment` definition of the - `cloudnative-pg` operator. + If your log ingestion system requires custom field names, you can rename + the `level` and `ts` fields using the `log-field-level` and + `log-field-timestamp` flags in the operator controller. This can be configured + by editing the `Deployment` definition of the `cloudnative-pg` operator. + +## Cluster Logs + +You can configure the log level for the instance pods in the cluster +specification using the `logLevel` option. Available log levels are: `error`, +`warning`, `info` (default), `debug`, and `trace`. -## Operator log +!!! Important + Currently, the log level can only be set at the time the instance starts. + Changes to the log level in the cluster specification after the cluster has + started will only apply to new pods, not existing ones. + +## Operator Logs -You can specify a log level in the cluster spec with the option `logLevel`. -You can set it to `error`, `warning`, `info`(default), `debug`, or `trace`. +The logs produced by the operator pod can be configured with log +levels, same as instance pods: `error`, `warning`, `info` (default), `debug`, +and `trace`. -Currently, you can set the log level only when an instance starts. You can't -change it at runtime. If you change the value in the cluster spec after the cluster -was started, it takes effect only in the new pods and not the old ones. +The log level for the operator can be configured by editing the `Deployment` +definition of the operator and setting the `--log-level` command line argument +to the desired value. -## PostgreSQL log +## PostgreSQL Logs -Each entry in the PostgreSQL log is a JSON object having the `logger` key set -to `postgres` and the structure described in the following example: +Each PostgreSQL log entry is a JSON object with the `logger` key set to +`postgres`. The structure of the log entries is as follows: ```json { @@ -75,35 +92,33 @@ to `postgres` and the structure described in the following example: } ``` -Internally, the operator relies on the PostgreSQL CSV log format. See -the PostgreSQL documentation for more information about the [CSV log -format](https://www.postgresql.org/docs/current/runtime-config-logging.html). +!!! Info + Internally, the operator uses PostgreSQL's CSV log format. 
For more details, + refer to the [PostgreSQL documentation on CSV log format](https://www.postgresql.org/docs/current/runtime-config-logging.html). -## PGAudit logs +## PGAudit Logs -CloudNativePG has transparent and native support for +CloudNativePG offers seamless and native support for [PGAudit](https://www.pgaudit.org/) on PostgreSQL clusters. -To enable this support, add the required `pgaudit` parameters to the `postgresql` -section in the configuration of the cluster. +To enable PGAudit, add the necessary `pgaudit` parameters in the `postgresql` +section of the cluster configuration. !!! Important - You need to add the PGAudit library to `shared_preload_libraries`. - CloudNativePG adds the library based on the - presence of `pgaudit.*` parameters in the postgresql configuration. - The operator detects and manages the addition and removal of the - library from `shared_preload_libraries`. + The PGAudit library must be added to `shared_preload_libraries`. + CloudNativePG automatically manages this based on the presence of `pgaudit.*` + parameters in the PostgreSQL configuration. The operator handles both the + addition and removal of the library from `shared_preload_libraries`. -The operator also takes care of creating and removing the extension from all -the available databases in the cluster. +Additionally, the operator manages the creation and removal of the PGAudit +extension across all databases within the cluster. !!! Important - CloudNativePG runs the `CREATE EXTENSION` and - `DROP EXTENSION` commands in all databases in the cluster that accept - connections. + CloudNativePG executes the `CREATE EXTENSION` and `DROP EXTENSION` commands + in all databases within the cluster that accept connections. -This example shows a PostgreSQL 13 `Cluster` deployment that results in -`pgaudit` being enabled with the requested configuration: +The following example demonstrates a PostgreSQL `Cluster` deployment with +PGAudit enabled and configured: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -112,7 +127,6 @@ metadata: name: cluster-example spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:13 postgresql: parameters: @@ -125,14 +139,15 @@ spec: size: 1Gi ``` -The audit CSV logs entries returned by PGAudit are then parsed and routed to -stdout in JSON format, similarly to all the remaining logs: +The audit CSV log entries generated by PGAudit are parsed and routed to +standard output in JSON format, similar to all other logs: - `.logger` is set to `pgaudit`. - `.msg` is set to `record`. -- `.record` contains the whole parsed record as a JSON object. This is similar to - `logging_collector` logs, except for `.record.audit`, which contains the - PGAudit CSV message formatted as a JSON object. +- `.record` contains the entire parsed record as a JSON object. This structure + resembles that of `logging_collector` logs, with the exception of + `.record.audit`, which contains the PGAudit CSV message formatted as a JSON + object. This example shows sample log entries: @@ -175,25 +190,26 @@ See the [PGAudit documentation](https://github.com/pgaudit/pgaudit/blob/master/README.md#format) for more details about each field in a record. -## Other logs - -All logs that are produced by the operator and its instances are in JSON -format, with `logger` set according to the process that produced them. 
-Therefore, all the possible `logger` values are the following: - -- `barman-cloud-wal-archive`: from `barman-cloud-wal-archive` directly -- `barman-cloud-wal-restore`: from `barman-cloud-wal-restore` directly -- `initdb`: from running `initdb` -- `pg_basebackup`: from running `pg_basebackup` -- `pg_controldata`: from running `pg_controldata` -- `pg_ctl`: from running any `pg_ctl` subcommand -- `pg_rewind`: from running `pg_rewind` -- `pgaudit`: from PGAudit extension -- `postgres`: from the `postgres` instance (having `msg` different than `record`) -- `wal-archive`: from the `wal-archive` subcommand of the instance manager -- `wal-restore`: from the `wal-restore` subcommand of the instance manager +## Other Logs + +All logs generated by the operator and its instances are in JSON format, with +the `logger` field indicating the process that produced them. The possible +`logger` values are as follows: + +- `barman-cloud-wal-archive`: logs from `barman-cloud-wal-archive` +- `barman-cloud-wal-restore`: logs from `barman-cloud-wal-restore` +- `initdb`: logs from running `initdb` +- `pg_basebackup`: logs from running `pg_basebackup` +- `pg_controldata`: logs from running `pg_controldata` +- `pg_ctl`: logs from running any `pg_ctl` subcommand +- `pg_rewind`: logs from running `pg_rewind` +- `pgaudit`: logs from the PGAudit extension +- `postgres`: logs from the `postgres` instance (with `msg` distinct from + `record`) +- `wal-archive`: logs from the `wal-archive` subcommand of the instance manager +- `wal-restore`: logs from the `wal-restore` subcommand of the instance manager - `instance-manager`: from the [PostgreSQL instance manager](./instance_manager.md) -Except for `postgres`, which has the aforementioned structures, -all other possible values have `msg` set to the escaped message that's +With the exception of `postgres`, which follows a specific structure, all other +`logger` values contain the `msg` field with the escaped message that is logged. diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index 72033f6016..2e6dadb7fc 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -153,7 +153,8 @@ You can do this by executing these commands: kubectl edit deployment -n cnpg-system cnpg-controller-manager ``` -Then on the edit page scroll down the container args and add `--pprof-server=true`, example: +Then on the edit page scroll down the container args and add +`--pprof-server=true`, as in this example: ```yaml containers: @@ -168,7 +169,8 @@ Then on the edit page scroll down the container args and add `--pprof-server=tru - /manager ``` -Save the changes, the deployment now will execute a roll-out and the new pod will have the PPROF server enabled. +Save the changes; the deployment now will execute a roll-out, and the new pod +will have the PPROF server enabled. Once the pod is running you can exec inside the container by doing: diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index 2caded71b9..fc6f59c39f 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -132,25 +132,27 @@ The above steps might be integrated into the `cnpg` plugin at some stage in the ## Logs -Every resource created and controlled by CloudNativePG logs to -standard output, as expected by Kubernetes, and directly in [JSON -format](logging.md). As a result, you should rely on the `kubectl logs` -command to retrieve logs from a given resource. - -For more information, type: - -```shell -kubectl logs --help -``` - -!!! 
Hint - JSON logs are great for machine reading, but hard to read for human beings. - Our recommendation is to use the `jq` command to improve usability. For - example, you can *pipe* the `kubectl logs` command with `| jq -C`. +All resources created and managed by CloudNativePG log to standard output in +accordance with Kubernetes conventions, using [JSON format](logging.md). + +While logs are typically processed at the infrastructure level and include +those from CloudNativePG, accessing logs directly from the command line +interface is critical during troubleshooting. You have three primary options +for doing so: + +- Use the `kubectl logs` command to retrieve logs from a specific resource, and + apply `jq` for better readability. +- Use the [`kubectl cnpg logs` command](kubectl-plugin.md#logs) for + CloudNativePG-specific logging. +- Leverage specialized open-source tools like + [stern](https://github.com/stern/stern), which can aggregate logs from + multiple resources (e.g., all pods in a PostgreSQL cluster by selecting the + `cnpg.io/clusterName` label), filter log entries, customize output formats, + and more. !!! Note - In the sections below, we will show some examples on how to retrieve logs - about different resources when it comes to troubleshooting CloudNativePG. + The following sections provide examples of how to retrieve logs for various + resources when troubleshooting CloudNativePG. ## Operator information @@ -277,14 +279,6 @@ kubectl cnpg status -n !!! Tip You can print more information by adding the `--verbose` option. -!!! Note - Besides knowing cluster status, you can also do the following things with the cnpg plugin: - Promote a replica.
- Manage certificates.
- Make a rollout restart cluster to apply configuration changes.
- Make a reconciliation loop to reload and apply configuration changes.
- For more information, please see [`cnpg` plugin](kubectl-plugin.md) documentation. - Get PostgreSQL container image version: ```shell From 8cc2dc9fcd692d08e534615e701ac13585d76140 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 15 Oct 2024 16:45:13 +0200 Subject: [PATCH 070/836] chore(plugin): organise plugin commands into groups (#5796) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Organise `cnpg` plugin commands into logical groups using the following categories: - Operator-level administration - Troubleshooting - Cluster administration - Database administration - Miscellaneous Closes #5795 Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Gabriele Bartolini Signed-off-by: Niccolò Fei Signed-off-by: Leonardo Cecchi Signed-off-by: Francesco Canovai Co-authored-by: Gabriele Bartolini Co-authored-by: Niccolò Fei Co-authored-by: Leonardo Cecchi --- cmd/kubectl-cnpg/main.go | 27 +++++++++++++++++++ internal/cmd/plugin/backup/cmd.go | 7 ++--- internal/cmd/plugin/certificate/cmd.go | 3 ++- internal/cmd/plugin/color.go | 4 +-- internal/cmd/plugin/destroy/cmd.go | 7 ++--- internal/cmd/plugin/fence/cmd.go | 5 ++-- internal/cmd/plugin/fio/cmd.go | 3 +++ internal/cmd/plugin/hibernate/cmd.go | 5 ++-- internal/cmd/plugin/install/cmd.go | 7 +++-- .../cmd/plugin/logical/publication/cmd.go | 6 +++-- .../cmd/plugin/logical/subscription/cmd.go | 6 +++-- internal/cmd/plugin/logs/cmd.go | 7 +++-- internal/cmd/plugin/maintenance/cmd.go | 5 ++-- internal/cmd/plugin/pgadmin/cmd.go | 5 ++-- internal/cmd/plugin/pgbench/cmd.go | 3 +++ internal/cmd/plugin/plugin.go | 18 +++++++++++++ internal/cmd/plugin/promote/cmd.go | 7 ++--- internal/cmd/plugin/psql/cmd.go | 3 ++- internal/cmd/plugin/reload/cmd.go | 9 ++++--- internal/cmd/plugin/report/cmd.go | 7 +++-- internal/cmd/plugin/restart/cmd.go | 5 +++- internal/cmd/plugin/snapshot/cmd.go | 7 ++--- internal/cmd/plugin/status/cmd.go | 7 ++--- 23 files changed, 121 insertions(+), 42 deletions(-) diff --git a/cmd/kubectl-cnpg/main.go b/cmd/kubectl-cnpg/main.go index b19d0f746f..128f6ea232 100644 --- a/cmd/kubectl-cnpg/main.go +++ b/cmd/kubectl-cnpg/main.go @@ -82,6 +82,33 @@ func main() { logFlags.AddFlags(rootCmd.PersistentFlags()) configFlags.AddFlags(rootCmd.PersistentFlags()) + adminGroup := &cobra.Group{ + ID: plugin.GroupIDAdmin, + Title: "Operator-level administration", + } + + troubleshootingGroup := &cobra.Group{ + ID: plugin.GroupIDTroubleshooting, + Title: "Troubleshooting", + } + + pgClusterGroup := &cobra.Group{ + ID: plugin.GroupIDCluster, + Title: "Cluster administration", + } + + pgDatabaseGroup := &cobra.Group{ + ID: plugin.GroupIDDatabase, + Title: "Database administration", + } + + miscGroup := &cobra.Group{ + ID: plugin.GroupIDMiscellaneous, + Title: "Miscellaneous", + } + + rootCmd.AddGroup(adminGroup, troubleshootingGroup, pgClusterGroup, pgDatabaseGroup, miscGroup) + subcommands := []*cobra.Command{ backup.NewCmd(), certificate.NewCmd(), diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go index cf11010fcf..6c29147f7f 100644 --- a/internal/cmd/plugin/backup/cmd.go +++ b/internal/cmd/plugin/backup/cmd.go @@ -62,9 +62,10 @@ func NewCmd() *cobra.Command { var backupName, backupTarget, backupMethod, online, immediateCheckpoint, waitForArchive string backupSubcommand := &cobra.Command{ - Use: "backup [cluster]", - Short: "Request an on-demand backup for a PostgreSQL Cluster", - Args: plugin.RequiresArguments(1), + Use: "backup [cluster]", + Short: "Request an 
on-demand backup for a PostgreSQL Cluster", + GroupID: plugin.GroupIDDatabase, + Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, diff --git a/internal/cmd/plugin/certificate/cmd.go b/internal/cmd/plugin/certificate/cmd.go index e1359baf06..6247777aec 100644 --- a/internal/cmd/plugin/certificate/cmd.go +++ b/internal/cmd/plugin/certificate/cmd.go @@ -32,7 +32,8 @@ func NewCmd() *cobra.Command { Long: `This command creates a new Kubernetes secret containing the crypto-material. This is needed to configure TLS with Certificate authentication access for an application to connect to the PostgreSQL cluster.`, - Args: plugin.RequiresArguments(1), + GroupID: plugin.GroupIDDatabase, + Args: plugin.RequiresArguments(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() secretName := args[0] diff --git a/internal/cmd/plugin/color.go b/internal/cmd/plugin/color.go index 913b3d3545..9d2acdc423 100644 --- a/internal/cmd/plugin/color.go +++ b/internal/cmd/plugin/color.go @@ -32,9 +32,9 @@ type colorConfiguration string const ( // colorAlways configures the output to always be colorized colorAlways colorConfiguration = "always" - // colorAuto configures the the output to be colorized only when attached to a terminal + // colorAuto configures the output to be colorized only when attached to a terminal colorAuto colorConfiguration = "auto" - // colorNever configures the the output never to be colorized + // colorNever configures the output never to be colorized colorNever colorConfiguration = "never" ) diff --git a/internal/cmd/plugin/destroy/cmd.go b/internal/cmd/plugin/destroy/cmd.go index 168e50d548..c3f4e7c944 100644 --- a/internal/cmd/plugin/destroy/cmd.go +++ b/internal/cmd/plugin/destroy/cmd.go @@ -29,9 +29,10 @@ import ( // NewCmd create the new "destroy" subcommand func NewCmd() *cobra.Command { destroyCmd := &cobra.Command{ - Use: "destroy [cluster] [node]", - Short: "Destroy the instance named [cluster]-[node] or [node] with the associated PVC", - Args: plugin.RequiresArguments(2), + Use: "destroy [cluster] [node]", + Short: "Destroy the instance named [cluster]-[node] or [node] with the associated PVC", + GroupID: plugin.GroupIDCluster, + Args: plugin.RequiresArguments(2), RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() clusterName := args[0] diff --git a/internal/cmd/plugin/fence/cmd.go b/internal/cmd/plugin/fence/cmd.go index 16eb2b0321..8b3e719936 100644 --- a/internal/cmd/plugin/fence/cmd.go +++ b/internal/cmd/plugin/fence/cmd.go @@ -59,8 +59,9 @@ var ( // NewCmd creates the new "fencing" command func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "fencing", - Short: `Fencing related commands`, + Use: "fencing", + Short: `Fencing related commands`, + GroupID: plugin.GroupIDCluster, } cmd.AddCommand(fenceOnCmd) cmd.AddCommand(fenceOffCmd) diff --git a/internal/cmd/plugin/fio/cmd.go b/internal/cmd/plugin/fio/cmd.go index 7c53e97aad..50d574fcba 100644 --- a/internal/cmd/plugin/fio/cmd.go +++ b/internal/cmd/plugin/fio/cmd.go @@ -22,6 +22,8 @@ import ( "os" "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd initializes the fio command @@ -35,6 +37,7 @@ func NewCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), Long: `Creates a fio deployment that will execute a fio 
job on the specified pvc.`, Example: jobExample, + GroupID: plugin.GroupIDMiscellaneous, RunE: func(_ *cobra.Command, args []string) error { ctx := context.Background() fioArgs := args[1:] diff --git a/internal/cmd/plugin/hibernate/cmd.go b/internal/cmd/plugin/hibernate/cmd.go index c13a8028cd..44f6c32a4c 100644 --- a/internal/cmd/plugin/hibernate/cmd.go +++ b/internal/cmd/plugin/hibernate/cmd.go @@ -92,8 +92,9 @@ var ( // NewCmd initializes the hibernate command func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "hibernate", - Short: `Hibernation related commands`, + Use: "hibernate", + Short: `Hibernation related commands`, + GroupID: plugin.GroupIDCluster, } cmd.AddCommand(hibernateOnCmd) diff --git a/internal/cmd/plugin/install/cmd.go b/internal/cmd/plugin/install/cmd.go index 33ee9ee9db..ac1ab16996 100644 --- a/internal/cmd/plugin/install/cmd.go +++ b/internal/cmd/plugin/install/cmd.go @@ -18,13 +18,16 @@ package install import ( "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd returns the installation root cmd func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "install", - Short: "CNPG installation commands", + Use: "install", + Short: "CloudNativePG installation-related commands", + GroupID: plugin.GroupIDAdmin, } cmd.AddCommand(newGenerateCmd()) diff --git a/internal/cmd/plugin/logical/publication/cmd.go b/internal/cmd/plugin/logical/publication/cmd.go index 0409a2b04e..8da69947da 100644 --- a/internal/cmd/plugin/logical/publication/cmd.go +++ b/internal/cmd/plugin/logical/publication/cmd.go @@ -19,6 +19,7 @@ package publication import ( "github.com/spf13/cobra" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/publication/create" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/publication/drop" ) @@ -26,8 +27,9 @@ import ( // NewCmd initializes the publication command func NewCmd() *cobra.Command { publicationCmd := &cobra.Command{ - Use: "publication", - Short: "Logical publication management commands", + Use: "publication", + Short: "Logical publication management commands", + GroupID: plugin.GroupIDDatabase, } publicationCmd.AddCommand(create.NewCmd()) publicationCmd.AddCommand(drop.NewCmd()) diff --git a/internal/cmd/plugin/logical/subscription/cmd.go b/internal/cmd/plugin/logical/subscription/cmd.go index 7c46b96fc7..706d3ff4dc 100644 --- a/internal/cmd/plugin/logical/subscription/cmd.go +++ b/internal/cmd/plugin/logical/subscription/cmd.go @@ -19,6 +19,7 @@ package subscription import ( "github.com/spf13/cobra" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/create" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/drop" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/syncsequences" @@ -27,8 +28,9 @@ import ( // NewCmd initializes the subscription command func NewCmd() *cobra.Command { subscriptionCmd := &cobra.Command{ - Use: "subscription", - Short: "Logical subscription management commands", + Use: "subscription", + Short: "Logical subscription management commands", + GroupID: plugin.GroupIDDatabase, } subscriptionCmd.AddCommand(create.NewCmd()) subscriptionCmd.AddCommand(drop.NewCmd()) diff --git a/internal/cmd/plugin/logs/cmd.go b/internal/cmd/plugin/logs/cmd.go index 155cc313bb..dc9499935a 100644 --- 
a/internal/cmd/plugin/logs/cmd.go
+++ b/internal/cmd/plugin/logs/cmd.go
@@ -18,13 +18,16 @@ package logs

 import (
 	"github.com/spf13/cobra"
+
+	"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
 )

 // NewCmd creates the new "report" command
 func NewCmd() *cobra.Command {
 	logsCmd := &cobra.Command{
-		Use:   "logs cluster",
-		Short: "Collect cluster logs",
+		Use:     "logs cluster",
+		Short:   "Collect cluster logs",
+		GroupID: plugin.GroupIDTroubleshooting,
 	}

 	logsCmd.AddCommand(clusterCmd())
diff --git a/internal/cmd/plugin/maintenance/cmd.go b/internal/cmd/plugin/maintenance/cmd.go
index e0e215fbfc..39064884d6 100644
--- a/internal/cmd/plugin/maintenance/cmd.go
+++ b/internal/cmd/plugin/maintenance/cmd.go
@@ -31,8 +31,9 @@ func NewCmd() *cobra.Command {
 	confirmationRequired bool

 	maintenanceCmd := &cobra.Command{
-		Use:   "maintenance [set/unset]",
-		Short: "Sets or removes maintenance mode from clusters",
+		Use:     "maintenance [set/unset]",
+		Short:   "Sets or removes maintenance mode from clusters",
+		GroupID: plugin.GroupIDCluster,
 	}

 	maintenanceCmd.AddCommand(&cobra.Command{
diff --git a/internal/cmd/plugin/pgadmin/cmd.go b/internal/cmd/plugin/pgadmin/cmd.go
index 920d56203d..eb357c7a11 100644
--- a/internal/cmd/plugin/pgadmin/cmd.go
+++ b/internal/cmd/plugin/pgadmin/cmd.go
@@ -76,9 +76,10 @@ func NewCmd() *cobra.Command {

 	pgadminCmd := &cobra.Command{
 		Use:   "pgadmin4 [name]",
-		Short: "Creates a pgadmin deployment",
+		Short: "Creates a pgAdmin deployment",
 		Args:  cobra.MinimumNArgs(1),
-		Long:  `Creates a pgadmin deployment configured to work with a CNPG Cluster.`,
+		Long:    `Creates a pgAdmin deployment configured to work with a CNPG Cluster.`,
+		GroupID: plugin.GroupIDMiscellaneous,
 		Example: pgadminExample,
 		RunE: func(_ *cobra.Command, args []string) error {
 			ctx := context.Background()
diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go
index 3c601d48ba..9d10449d98 100644
--- a/internal/cmd/plugin/pgbench/cmd.go
+++ b/internal/cmd/plugin/pgbench/cmd.go
@@ -20,6 +20,8 @@ import (
 	"fmt"

 	"github.com/spf13/cobra"
+
+	"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
 )

 // NewCmd initializes the pgBench command
@@ -31,6 +33,7 @@
 		Short: "Creates a pgbench job",
 		Args:  validateCommandArgs,
 		Long:  "Creates a pgbench job to run against the specified Postgres Cluster.",
+		GroupID: plugin.GroupIDMiscellaneous,
 		Example: jobExample,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			run.clusterName = args[0]
diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go
index ba593533d7..afaa602e1a 100644
--- a/internal/cmd/plugin/plugin.go
+++ b/internal/cmd/plugin/plugin.go
@@ -56,6 +56,24 @@ var (
 	ClientInterface kubernetes.Interface
 )

+const (
+	// GroupIDAdmin represents an ID to group up CNPG commands
+	GroupIDAdmin = "admin"
+
+	// GroupIDTroubleshooting represents an ID to group up troubleshooting
+	// commands
+	GroupIDTroubleshooting = "troubleshooting"
+
+	// GroupIDCluster represents an ID to group up Postgres Cluster commands
+	GroupIDCluster = "cluster"
+
+	// GroupIDDatabase represents an ID to group up Postgres Database commands
+	GroupIDDatabase = "db"
+
+	// GroupIDMiscellaneous represents an ID to group up miscellaneous commands
+	GroupIDMiscellaneous = "misc"
+)
+
 // SetupKubernetesClient creates a k8s client to be used inside the kubectl-cnpg
 // utility
 func SetupKubernetesClient(configFlags *genericclioptions.ConfigFlags) error {
diff --git a/internal/cmd/plugin/promote/cmd.go
b/internal/cmd/plugin/promote/cmd.go index b2bd7e134d..f4f3c95d88 100644 --- a/internal/cmd/plugin/promote/cmd.go +++ b/internal/cmd/plugin/promote/cmd.go @@ -29,9 +29,10 @@ import ( // NewCmd create the new "promote" subcommand func NewCmd() *cobra.Command { promoteCmd := &cobra.Command{ - Use: "promote [cluster] [node]", - Short: "Promote the pod named [cluster]-[node] or [node] to primary", - Args: plugin.RequiresArguments(2), + Use: "promote [cluster] [node]", + Short: "Promote the pod named [cluster]-[node] or [node] to primary", + GroupID: plugin.GroupIDCluster, + Args: plugin.RequiresArguments(2), RunE: func(_ *cobra.Command, args []string) error { ctx := context.Background() clusterName := args[0] diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go index 3910021464..85e4df4a96 100644 --- a/internal/cmd/plugin/psql/cmd.go +++ b/internal/cmd/plugin/psql/cmd.go @@ -37,7 +37,8 @@ func NewCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, - Long: "This command will start an interactive psql session inside a PostgreSQL Pod created by CloudNativePG.", + Long: "This command will start an interactive psql session inside a PostgreSQL Pod created by CloudNativePG.", + GroupID: plugin.GroupIDMiscellaneous, RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] psqlArgs := args[1:] diff --git a/internal/cmd/plugin/reload/cmd.go b/internal/cmd/plugin/reload/cmd.go index 1f9a6bb1ab..f31aa3ddef 100644 --- a/internal/cmd/plugin/reload/cmd.go +++ b/internal/cmd/plugin/reload/cmd.go @@ -27,10 +27,11 @@ import ( // NewCmd creates the new "reset" command func NewCmd() *cobra.Command { restartCmd := &cobra.Command{ - Use: "reload [clusterName]", - Short: `Reload the cluster`, - Long: `Triggers a reconciliation loop for all the cluster's instances, rolling out new configurations if present.`, - Args: plugin.RequiresArguments(1), + Use: "reload [clusterName]", + Short: `Reload a cluster`, + Long: `Triggers a reconciliation loop for all the cluster's instances, rolling out new configurations if present.`, + GroupID: plugin.GroupIDCluster, + Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, diff --git a/internal/cmd/plugin/report/cmd.go b/internal/cmd/plugin/report/cmd.go index c56c7d6417..0b746264ba 100644 --- a/internal/cmd/plugin/report/cmd.go +++ b/internal/cmd/plugin/report/cmd.go @@ -18,13 +18,16 @@ package report import ( "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd creates the new "report" command func NewCmd() *cobra.Command { reportCmd := &cobra.Command{ - Use: "report operator/cluster", - Short: "Report on the operator", + Use: "report operator/cluster", + Short: "Report on the operator or a cluster for troubleshooting", + GroupID: plugin.GroupIDTroubleshooting, } reportCmd.AddCommand(operatorCmd()) diff --git a/internal/cmd/plugin/restart/cmd.go b/internal/cmd/plugin/restart/cmd.go index 2b686f062c..c27b66a989 100644 --- a/internal/cmd/plugin/restart/cmd.go +++ b/internal/cmd/plugin/restart/cmd.go @@ -21,6 +21,8 @@ import ( "strconv" "github.com/spf13/cobra" + + 
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" ) // NewCmd creates the new "reset" command @@ -32,7 +34,8 @@ func NewCmd() *cobra.Command { rolling out new configurations if present. If a specific instance is specified, only that instance will be restarted, in-place if it is a primary, deleting the pod if it is a replica.`, - Args: cobra.RangeArgs(1, 2), + Args: cobra.RangeArgs(1, 2), + GroupID: plugin.GroupIDCluster, RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() clusterName := args[0] diff --git a/internal/cmd/plugin/snapshot/cmd.go b/internal/cmd/plugin/snapshot/cmd.go index 8abd4a0752..ed20dc669f 100644 --- a/internal/cmd/plugin/snapshot/cmd.go +++ b/internal/cmd/plugin/snapshot/cmd.go @@ -28,9 +28,10 @@ import ( // NewCmd implements the `snapshot` subcommand func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "snapshot ", - Short: "command removed", - Long: "Replaced by `kubectl cnpg backup -m volumeSnapshot`", + Use: "snapshot ", + Short: "DEPRECATED (use `backup -m volumeSnapshot` instead)", + Long: "Replaced by `kubectl cnpg backup -m volumeSnapshot`", + GroupID: plugin.GroupIDDatabase, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp }, diff --git a/internal/cmd/plugin/status/cmd.go b/internal/cmd/plugin/status/cmd.go index a22594523d..4ddbc5db02 100644 --- a/internal/cmd/plugin/status/cmd.go +++ b/internal/cmd/plugin/status/cmd.go @@ -28,9 +28,10 @@ import ( // NewCmd create the new "status" subcommand func NewCmd() *cobra.Command { statusCmd := &cobra.Command{ - Use: "status [cluster]", - Short: "Get the status of a PostgreSQL cluster", - Args: plugin.RequiresArguments(1), + Use: "status [cluster]", + Short: "Get the status of a PostgreSQL cluster", + Args: plugin.RequiresArguments(1), + GroupID: plugin.GroupIDDatabase, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if strings.HasPrefix(toComplete, "-") { fmt.Printf("%+v\n", toComplete) From 8ba09c7c6fd286237e204d5f13c28e2654db182d Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 15 Oct 2024 17:06:26 +0200 Subject: [PATCH 071/836] chore: avoid uselessly patching the ContinuousArchivingSuccess condition (#5833) The `archive_command` callback is keeping updated the `ContinuousArchivingSuccess` condition using the result of `barman-cloud-wal-archive` or the corresponding CNPG-i plugin. This change makes the instance manager deterministically hit the API server when the condition changes instead of using `reflect.DeepEqual`, avoiding false positives. 
See also: #5366 Signed-off-by: Leonardo Cecchi --- pkg/conditions/conditions.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/conditions/conditions.go b/pkg/conditions/conditions.go index 80b6d5af7d..768ac02d49 100644 --- a/pkg/conditions/conditions.go +++ b/pkg/conditions/conditions.go @@ -18,7 +18,6 @@ package conditions import ( "context" - "reflect" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,10 +36,9 @@ func Patch( if cluster == nil || condition == nil { return nil } - existingCluster := cluster.DeepCopy() - meta.SetStatusCondition(&cluster.Status.Conditions, *condition) - if !reflect.DeepEqual(existingCluster.Status.Conditions, cluster.Status.Conditions) { + existingCluster := cluster.DeepCopy() + if changed := meta.SetStatusCondition(&cluster.Status.Conditions, *condition); changed { // To avoid conflict using patch instead of update if err := c.Status().Patch(ctx, cluster, client.MergeFrom(existingCluster)); err != nil { return err From 756fbcb6cfe4713282ac04f8eb924e5aa695e1dd Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 15 Oct 2024 17:19:28 +0200 Subject: [PATCH 072/836] chore: create renovate groups for cnpg dependencies (#5802) To streamline the update process for all dependencies at once, we create a single group that includes all dependencies such as `machinery`, `barman-cloud`, and `cnpg-i`. Signed-off-by: Jonathan Gonzalez V. --- .github/renovate.json5 | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index d03e3a76ae..1c5fe0970b 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -361,7 +361,8 @@ "excludePackagePrefixes": [ "k8s.io", "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" + "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring", + "github.com/cloudnative-pg/" ], "matchUpdateTypes": [ "minor", @@ -424,6 +425,15 @@ ], "separateMajorMinor": "false", "pinDigests": false, - } + }, + { +// PR group for CNPG dependencies + "groupName": "cnpg", + "matchPackageNames": [ + "github.com/cloudnative-pg/", + ], + "separateMajorMinor": "false", + "pinDigests": false, + }, ] } From 8c419fa43f8f84ac926c2f1db5ba4d84c030c536 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 17:41:04 +0200 Subject: [PATCH 073/836] chore(deps): update agilepathway/pull-request-label-checker docker tag to v1.6.56 (main) (#5842) --- .github/workflows/require-labels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index edebf5b995..e0da0f8728 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Require labels - uses: docker://agilepathway/pull-request-label-checker:v1.6.55 + uses: docker://agilepathway/pull-request-label-checker:v1.6.56 with: any_of: "ok to merge :ok_hand:" none_of: "do not merge" From 2b389270b3f5958cc8b5adbf9bcbf277e8e780cd Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 19:11:12 +0200 Subject: [PATCH 074/836] chore(deps): update github.com/cloudnative-pg/machinery digest to c27747f (main) (#5839) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod 
b/go.mod index c6b261104a..b4bafc29f9 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 - github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72 + github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 @@ -108,7 +108,7 @@ require ( golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect + golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.25.0 // indirect diff --git a/go.sum b/go.sum index dcf0977370..7ee1b22611 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8= -github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72 h1:3pgtSYhv3RDd+51bnlqICNrcVpWQQvriCOvkxtbZpaE= -github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= +github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b h1:4Q2VQsPlLHliJdi87zodQ0FHLd1cJINMm4N70eu8rRg= +github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -236,8 +236,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 4f272b415b63f06e781076631d937019665e73a7 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 15 Oct 2024 19:15:42 +0200 Subject: [PATCH 075/836] fix(plugin): collect logs from sidecars with `logs cluster` (#5826) `kubectl cnpg logs cluster` failed if an instance Pod had a sidecar. This patch allows it to continue working and getting the sidecar's log too. 
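In essence, the command now opens one log request per running container rather than one per Pod. The following is a minimal sketch of the idea, not the actual implementation (the real change, in `pkg/utils/logs/cluster_logs.go` below, additionally tracks streams in a set keyed by pod and container name); it assumes a `kubernetes.Interface` client, a concurrency-safe `writer`, and the usual `corev1`, `io`, and `context` imports:

```go
// For every instance Pod, stream the logs of each running container,
// including sidecars, selecting the container via PodLogOptions.
for _, pod := range podList.Items {
	for _, container := range pod.Status.ContainerStatuses {
		if container.State.Running == nil {
			continue // skip containers that are not running
		}
		go func(podName, containerName string) {
			req := client.CoreV1().Pods(namespace).GetLogs(
				podName, &corev1.PodLogOptions{Container: containerName})
			stream, err := req.Stream(ctx)
			if err != nil {
				return
			}
			defer func() { _ = stream.Close() }()
			// Copy each container's log stream to the shared writer.
			_, _ = io.Copy(writer, stream)
		}(pod.Name, container.Name)
	}
}
```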
Closes: #5825 # Release notes ``` `kubectl cnpg logs cluster` does not error out if a PG Pod has a sidecar and collects also its logs ``` Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- pkg/utils/logs/cluster_logs.go | 35 ++++++++++++----- pkg/utils/logs/cluster_logs_test.go | 58 +++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 10 deletions(-) diff --git a/pkg/utils/logs/cluster_logs.go b/pkg/utils/logs/cluster_logs.go index 30c42f6e9b..c0afc0a314 100644 --- a/pkg/utils/logs/cluster_logs.go +++ b/pkg/utils/logs/cluster_logs.go @@ -19,6 +19,7 @@ package logs import ( "bufio" "context" + "fmt" "io" "log" "sync" @@ -61,9 +62,11 @@ func (csr *ClusterStreamingRequest) getClusterNamespace() string { return csr.Cluster.Namespace } -func (csr *ClusterStreamingRequest) getLogOptions() *v1.PodLogOptions { +func (csr *ClusterStreamingRequest) getLogOptions(containerName string) *v1.PodLogOptions { if csr.Options == nil { - csr.Options = &v1.PodLogOptions{} + csr.Options = &v1.PodLogOptions{ + Container: containerName, + } } csr.Options.Previous = csr.Previous return csr.Options @@ -187,13 +190,24 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. } for _, pod := range podList.Items { - if streamSet.has(pod.Name) { - continue + for _, container := range pod.Status.ContainerStatuses { + if container.State.Running != nil { + streamName := fmt.Sprintf("%s-%s", pod.Name, container.Name) + if streamSet.has(streamName) { + continue + } + + streamSet.add(streamName) + go csr.streamInGoroutine( + ctx, + pod.Name, + container.Name, + client, + streamSet, + safeWriterFrom(writer), + ) + } } - - streamSet.add(pod.Name) - go csr.streamInGoroutine(ctx, pod.Name, client, streamSet, - safeWriterFrom(writer)) } if streamSet.isZero() { return nil @@ -211,18 +225,19 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. 
func (csr *ClusterStreamingRequest) streamInGoroutine( ctx context.Context, podName string, + containerName string, client kubernetes.Interface, streamSet *activeSet, output io.Writer, ) { defer func() { - streamSet.drop(podName) + streamSet.drop(fmt.Sprintf("%s-%s", podName, containerName)) }() pods := client.CoreV1().Pods(csr.getClusterNamespace()) logsRequest := pods.GetLogs( podName, - csr.getLogOptions()) + csr.getLogOptions(containerName)) logStream, err := logsRequest.Stream(ctx) if err != nil { diff --git a/pkg/utils/logs/cluster_logs_test.go b/pkg/utils/logs/cluster_logs_test.go index ed057c71b3..4fa7b6f6bc 100644 --- a/pkg/utils/logs/cluster_logs_test.go +++ b/pkg/utils/logs/cluster_logs_test.go @@ -50,6 +50,41 @@ var _ = Describe("Cluster logging tests", func() { utils.ClusterLabelName: clusterName, }, }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "postgresql", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, + } + podWithSidecars := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterNamespace, + Name: clusterName + "-1", + Labels: map[string]string{ + utils.ClusterLabelName: clusterName, + }, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "postgresql", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + { + Name: "sidecar", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{}, + }, + }, + }, + }, } It("should exit on ended pod logs with the non-follow option", func(ctx context.Context) { client := fake.NewSimpleClientset(pod) @@ -74,6 +109,29 @@ var _ = Describe("Cluster logging tests", func() { Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) }) + It("should catch the logs of the sidecar too", func(ctx context.Context) { + client := fake.NewClientset(podWithSidecars) + var logBuffer bytes.Buffer + var wait sync.WaitGroup + wait.Add(1) + go func() { + defer GinkgoRecover() + defer wait.Done() + streamClusterLogs := ClusterStreamingRequest{ + Cluster: cluster, + Options: &v1.PodLogOptions{ + Follow: false, + }, + Client: client, + } + err := streamClusterLogs.SingleStream(ctx, &logBuffer) + Expect(err).NotTo(HaveOccurred()) + }() + ctx.Done() + wait.Wait() + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\nfake logs\n")) + }) + It("should catch extra logs if given the follow option", func(ctx context.Context) { client := fake.NewSimpleClientset(pod) var logBuffer bytes.Buffer From 2176e771320939137d23c5359f733f6596c17845 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 15 Oct 2024 19:42:42 +0200 Subject: [PATCH 076/836] feat(plugin): add `logs pretty` command (#5779) This commit introduces the `logs pretty` command to the `cnpg` plugin. The command reads a log stream from standard input and outputs a human-readable format to standard output. Key features: - Allows filtering logs by specific pods or loggers. - Supports excluding logs below a specified severity level. This enhancement provides users with better log readability and more granular control over the logs they wish to view. Closes: #5770 # Release notes ``` Add the `logs pretty` command to the `cnpg` plugin to read a log stream from standard input and output a human-readable format, with options to filter log entries. 
``` Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Signed-off-by: Jaime Silvela Co-authored-by: Gabriele Bartolini Co-authored-by: Jaime Silvela --- docs/src/kubectl-plugin.md | 72 +++++ go.mod | 2 +- internal/cmd/plugin/logs/cmd.go | 8 +- internal/cmd/plugin/logs/cmd_test.go | 4 +- internal/cmd/plugin/logs/pretty/doc.go | 18 ++ internal/cmd/plugin/logs/pretty/log_level.go | 83 ++++++ internal/cmd/plugin/logs/pretty/log_record.go | 170 ++++++++++++ internal/cmd/plugin/logs/pretty/pretty.go | 251 ++++++++++++++++++ 8 files changed, 602 insertions(+), 6 deletions(-) create mode 100644 internal/cmd/plugin/logs/pretty/doc.go create mode 100644 internal/cmd/plugin/logs/pretty/log_level.go create mode 100644 internal/cmd/plugin/logs/pretty/log_record.go create mode 100644 internal/cmd/plugin/logs/pretty/pretty.go diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 1a2da0c607..a9564d545d 100755 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -175,6 +175,7 @@ kubectl cnpg The plugin automatically detects if the standard output channel is connected to a terminal. In such cases, it may add ANSI colors to the command output. To disable colors, use the `--color=never` option with the command. + ### Generation of installation manifests The `cnpg` plugin can be used to generate the YAML manifest for the @@ -780,6 +781,77 @@ kubectl cnpg logs cluster cluster-example --output my-cluster.log Successfully written logs to "my-cluster.log" ``` +#### Pretty + +The `pretty` sub-command reads a log stream from standard input, formats it +into a human-readable output, and attempts to sort the entries by timestamp. + +It can be used in combination with `kubectl cnpg logs cluster`, as +shown in the following example: + +``` sh +$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty +2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting CloudNativePG Instance Manager +2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL +2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting tablespace manager +2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting external server manager +[...] +``` + +Alternatively, it can be used in combination with other commands that produce +CNPG logs in JSON format, such as `stern`, or `kubectl logs`, as in the +following example: + +``` sh +$ kubectl logs cluster-example-1 | kubectl cnpg logs pretty +2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting CloudNativePG Instance Manager +2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL +2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting tablespace manager +2024-10-15T17:35:00.347 INFO cluster-example-1 instance-manager starting external server manager +[...] +``` + +The `pretty` sub-command also supports advanced log filtering, allowing users +to display logs for specific pods or loggers, or to filter logs by severity +level. 
+Here's an example: + +``` sh +$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty --pods cluster-example-1 --loggers postgres --log-level info +2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: redirecting log output to logging collector process +2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] HINT: Future log output will appear in directory "/controller/log"... +2024-10-15T17:35:00.510 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: ending log output to stderr +2024-10-15T17:35:00.510 INFO cluster-example-1 postgres ending log output to stderr +[...] +``` + +The `pretty` sub-command will try to sort the log stream, +to make logs easier to reason about. In order to achieve this, it gathers the +logs into groups, and within groups it sorts by timestamp. This is the only +way to sort interactively, as `pretty` may be piped from a command in "follow" +mode. The sub-command will add a group separator line, `---`, at the end of +each sorted group. The size of the grouping can be configured via the +`--sorting-group-size` flag (default: 1000), as illustrated in the following example: + +``` sh +$ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty --sorting-group-size=3 +2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Starting CloudNativePG Instance Manager +2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Checking for free disk space for WALs before starting PostgreSQL +2024-10-15T17:35:20.438 INFO cluster-example-2 instance-manager starting tablespace manager +--- +2024-10-15T17:35:20.438 INFO cluster-example-2 instance-manager starting external server manager +2024-10-15T17:35:20.438 INFO cluster-example-2 instance-manager starting controller-runtime manager +2024-10-15T17:35:20.439 INFO cluster-example-2 instance-manager Starting EventSource +--- +[...] +``` + +To explore all available options, use the `-h` flag for detailed explanations +of the supported flags and their usage. + +!!! Info + You can also increase the verbosity of the log by adding more `-v` options. 
+ ### Destroy The `kubectl cnpg destroy` command helps remove an instance and all the diff --git a/go.mod b/go.mod index b4bafc29f9..8b083ab9e9 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( github.com/thoas/go-funk v0.9.3 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.27.0 golang.org/x/term v0.24.0 google.golang.org/grpc v1.67.1 gopkg.in/yaml.v3 v3.0.1 @@ -102,7 +103,6 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.starlark.net v0.0.0-20240411212711-9b43f0afd521 // indirect - go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.27.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.29.0 // indirect diff --git a/internal/cmd/plugin/logs/cmd.go b/internal/cmd/plugin/logs/cmd.go index dc9499935a..c898ac085b 100644 --- a/internal/cmd/plugin/logs/cmd.go +++ b/internal/cmd/plugin/logs/cmd.go @@ -20,17 +20,19 @@ import ( "github.com/spf13/cobra" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logs/pretty" ) -// NewCmd creates the new "report" command +// NewCmd creates the new "logs" command func NewCmd() *cobra.Command { logsCmd := &cobra.Command{ - Use: "logs cluster", - Short: "Collect cluster logs", + Use: "logs", + Short: "Logging utilities", GroupID: plugin.GroupIDTroubleshooting, } logsCmd.AddCommand(clusterCmd()) + logsCmd.AddCommand(pretty.NewCmd()) return logsCmd } diff --git a/internal/cmd/plugin/logs/cmd_test.go b/internal/cmd/plugin/logs/cmd_test.go index 24fbdecd04..2ac6dc75f2 100644 --- a/internal/cmd/plugin/logs/cmd_test.go +++ b/internal/cmd/plugin/logs/cmd_test.go @@ -24,7 +24,7 @@ import ( var _ = Describe("Get the proper command", func() { It("get the proper command", func() { logsCmd := NewCmd() - Expect(logsCmd.Use).To(BeEquivalentTo("logs cluster")) - Expect(logsCmd.Short).To(BeEquivalentTo("Collect cluster logs")) + Expect(logsCmd.Use).To(BeEquivalentTo("logs")) + Expect(logsCmd.Short).To(BeEquivalentTo("Logging utilities")) }) }) diff --git a/internal/cmd/plugin/logs/pretty/doc.go b/internal/cmd/plugin/logs/pretty/doc.go new file mode 100644 index 0000000000..31c5069913 --- /dev/null +++ b/internal/cmd/plugin/logs/pretty/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package pretty contains the implementation of `kubectl cnpg logs pretty` +package pretty diff --git a/internal/cmd/plugin/logs/pretty/log_level.go b/internal/cmd/plugin/logs/pretty/log_level.go new file mode 100644 index 0000000000..5fe3b6711b --- /dev/null +++ b/internal/cmd/plugin/logs/pretty/log_level.go @@ -0,0 +1,83 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pretty + +import ( + "errors" + + "github.com/cloudnative-pg/machinery/pkg/log" + "go.uber.org/zap/zapcore" +) + +// ErrUnknownLogLevel is returned when an unknown string representation +// of a log level is used +var ErrUnknownLogLevel = errors.New("unknown log level") + +// LogLevel represents a log level such as error, warning, info, debug, or trace. +type LogLevel string + +// Less returns true when the received event is less than +// the passed one +func (l LogLevel) Less(o LogLevel) bool { + return l.toInt() < o.toInt() +} + +// String is the string representation of this level +func (l LogLevel) String() string { + return string(l) +} + +// Type is the data type to be used for this type +// when used as a flag +func (l LogLevel) Type() string { + return "string" +} + +// Set sets a log level given its string representation +func (l *LogLevel) Set(val string) error { + switch val { + case log.ErrorLevelString, log.WarningLevelString, log.InfoLevelString, log.DebugLevelString, log.TraceLevelString: + *l = LogLevel(val) + return nil + + default: + return ErrUnknownLogLevel + } +} + +// toInt returns the corresponding zapcore level +func (l LogLevel) toInt() zapcore.Level { + switch l { + case log.ErrorLevelString: + return log.ErrorLevel + + case log.WarningLevelString: + return log.WarningLevel + + case log.InfoLevelString: + return log.InfoLevel + + case log.DebugLevelString: + return log.DebugLevel + + case log.TraceLevelString: + return log.TraceLevel + + default: + return log.ErrorLevel + } +} diff --git a/internal/cmd/plugin/logs/pretty/log_record.go b/internal/cmd/plugin/logs/pretty/log_record.go new file mode 100644 index 0000000000..bc784e4de3 --- /dev/null +++ b/internal/cmd/plugin/logs/pretty/log_record.go @@ -0,0 +1,170 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pretty + +import ( + "encoding/json" + "fmt" + "hash/fnv" + "io" + "strings" + + "github.com/logrusorgru/aurora/v4" +) + +// colorizers is a list of functions that can be used to decorate +// pod names +var colorizers = []func(any) aurora.Value{ + aurora.Red, + aurora.Green, + aurora.Magenta, + aurora.Cyan, + aurora.Yellow, +} + +// logRecord is the portion of the structure of a CNPG log +// that is handled by the beautifier +type logRecord struct { + Level LogLevel `json:"level"` + Msg string `json:"msg"` + Logger string `json:"logger"` + TS string `json:"ts"` + LoggingPod string `json:"logging_pod"` + Record struct { + ErrorSeverity string `json:"error_severity"` + Message string `json:"message"` + } `json:"record,omitempty"` + + AdditionalFields map[string]any +} + +func newLogRecordFromBytes(bytes []byte) (*logRecord, error) { + var record logRecord + + if err := json.Unmarshal(bytes, &record); err != nil { + return nil, fmt.Errorf("decoding log record: %w", err) + } + + extraFields := make(map[string]any) + if err := json.Unmarshal(bytes, &extraFields); err != nil { + return nil, fmt.Errorf("decoding extra fields: %w", err) + } + + delete(extraFields, "level") + delete(extraFields, "pipe") + delete(extraFields, "msg") + delete(extraFields, "logger") + delete(extraFields, "ts") + delete(extraFields, "logging_pod") + delete(extraFields, "record") + delete(extraFields, "controllerGroup") + delete(extraFields, "controllerKind") + delete(extraFields, "Cluster") + + record.AdditionalFields = extraFields + return &record, nil +} + +// normalize converts the error_severity into one of the acceptable +// LogLevel values +func (record *logRecord) normalize() { + message := record.Msg + level := string(record.Level) + + if record.Msg == "record" { + switch record.Record.ErrorSeverity { + case "DEBUG1", "DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5": + level = "trace" + + case "INFO", "NOTICE", "LOG": + level = "info" + + case "WARNING": + level = "warning" + + case "ERROR", "FATAL", "PANIC": + level = "error" + + default: + level = "info" + } + + message = record.Record.Message + } + + record.Msg = message + record.Level = LogLevel(level) +} + +// print dumps the formatted record to the specified writer +func (record *logRecord) print(writer io.Writer, verbosity int) error { + const jsonPrefix = " " + const jsonIndent = " " + const maxRowLen = 100 + + message := record.Msg + level := string(record.Level) + + if record.Msg == "record" { + level = record.Record.ErrorSeverity + message = record.Record.Message + } + + additionalFields := "" + if len(record.AdditionalFields) > 0 { + v, _ := json.MarshalIndent(record.AdditionalFields, jsonPrefix, jsonIndent) + additionalFields = string(v) + } + + hasher := fnv.New32a() + _, _ = hasher.Write([]byte(record.LoggingPod)) + colorIdx := int(hasher.Sum32()) % len(colorizers) + + ts := record.TS + if verbosity == 0 && len(ts) > 23 { + ts = record.TS[:23] + } + if verbosity > 0 { + ts = fmt.Sprintf("%-30s", ts) + } + + if verbosity == 0 { + firstLine, suffix, _ := strings.Cut(message, "\n") + if len(firstLine) > maxRowLen || len(suffix) > 0 { + if len(firstLine) > maxRowLen { + firstLine = firstLine[:maxRowLen] + } + firstLine += "..." 
+ } + message = firstLine + } + + _, err := fmt.Fprintln( + writer, + ts, + fmt.Sprintf("%-8s", aurora.Blue(strings.ToUpper(level))), + colorizers[colorIdx](record.LoggingPod), + fmt.Sprintf("%-16s", aurora.Blue(record.Logger)), + message) + if len(additionalFields) > 0 && verbosity > 1 { + _, err = fmt.Fprintln( + writer, + jsonPrefix+additionalFields, + ) + } + return err +} diff --git a/internal/cmd/plugin/logs/pretty/pretty.go b/internal/cmd/plugin/logs/pretty/pretty.go new file mode 100644 index 0000000000..8950a016db --- /dev/null +++ b/internal/cmd/plugin/logs/pretty/pretty.go @@ -0,0 +1,251 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pretty + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "slices" + "sync" + "time" + + "github.com/cloudnative-pg/machinery/pkg/stringset" + "github.com/logrusorgru/aurora/v4" + "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" +) + +type prettyCmd struct { + loggers *stringset.Data + pods *stringset.Data + groupSize int + verbosity int + minLevel LogLevel +} + +// NewCmd creates a new `kubectl cnpg logs pretty` command +func NewCmd() *cobra.Command { + var loggers, pods []string + var sortingGroupSize, verbosity int + bf := prettyCmd{} + + cmd := &cobra.Command{ + Use: "pretty", + Short: "Prettify CNPG logs", + Long: "Reads CNPG logs from standard input and pretty-prints them for human consumption", + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp + }, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + bf.loggers = stringset.From(loggers) + bf.pods = stringset.From(pods) + bf.groupSize = sortingGroupSize + bf.verbosity = verbosity + + recordChannel := make(chan logRecord) + recordGroupsChannel := make(chan []logRecord) + + var wait sync.WaitGroup + + wait.Add(1) + go func() { + bf.decode(cmd.Context(), os.Stdin, recordChannel) + wait.Done() + }() + + wait.Add(1) + go func() { + bf.group(cmd.Context(), recordChannel, recordGroupsChannel) + wait.Done() + }() + + wait.Add(1) + go func() { + bf.write(cmd.Context(), recordGroupsChannel, os.Stdout) + wait.Done() + }() + + wait.Wait() + return nil + }, + } + + cmd.Flags().IntVar(&sortingGroupSize, "sorting-group-size", 1000, + "The maximum size of the window where logs are collected for sorting") + cmd.Flags().StringSliceVar(&loggers, "loggers", nil, + "The list of loggers to receive. Defaults to all.") + cmd.Flags().StringSliceVar(&pods, "pods", nil, + "The list of pods to receive from. Defaults to all.") + cmd.Flags().Var(&bf.minLevel, "min-level", + `Hides the messages whose log level is less important than the specified one. +Should be empty or one of error, warning, info, debug, or trace.`) + cmd.Flags().CountVarP(&verbosity, "verbosity", "v", + "The logs verbosity level. 
More verbose means more information will be printed") + + return cmd +} + +// decode progressively decodes the logs +func (bf *prettyCmd) decode(ctx context.Context, reader io.Reader, recordChannel chan<- logRecord) { + scanner := bufio.NewScanner(reader) + + for scanner.Scan() { + select { + case <-ctx.Done(): + return + default: + } + + record, err := newLogRecordFromBytes(scanner.Bytes()) + if err != nil { + _, _ = fmt.Fprintln( + os.Stderr, + aurora.Red(fmt.Sprintf("JSON syntax error (%s)", err.Error())), + scanner.Text()) + continue + } + + record.normalize() + + if !bf.isRecordRelevant(record) { + continue + } + + recordChannel <- *record + } + + close(recordChannel) +} + +// group transforms a stream of logs into a stream of log groups, so that the groups +// can then be sorted +func (bf *prettyCmd) group(ctx context.Context, logChannel <-chan logRecord, groupChannel chan<- []logRecord) { + bufferArray := make([]logRecord, bf.groupSize) + + buffer := bufferArray[0:0] + + pushLogGroup := func() { + if len(buffer) == 0 { + return + } + + bufferCopy := make([]logRecord, len(buffer)) + copy(bufferCopy, buffer) + groupChannel <- bufferCopy + + buffer = bufferArray[0:0] + } + +logLoop: + for { + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + + select { + case <-ctx.Done(): + break logLoop + + case <-timer.C: + pushLogGroup() + + case logRecord, ok := <-logChannel: + if !ok { + break logLoop + } + + buffer = append(buffer, logRecord) + if len(buffer) == bf.groupSize { + pushLogGroup() + } + } + } + + pushLogGroup() + close(groupChannel) +} + +// write writes the logs on the output +func (bf *prettyCmd) write(ctx context.Context, recordGroupChannel <-chan []logRecord, writer io.Writer) { + logRecordComparison := func(l1, l2 logRecord) int { + if l1.TS < l2.TS { + return -1 + } else if l1.TS > l2.TS { + return 1 + } + + if l1.LoggingPod < l2.LoggingPod { + return -1 + } else if l1.LoggingPod == l2.LoggingPod { + return 0 + } + + return 1 + } + firstGroup := true + +logLoop: + for { + select { + case <-ctx.Done(): + break logLoop + + case logGroupRecord, ok := <-recordGroupChannel: + if !ok { + break logLoop + } + + slices.SortFunc(logGroupRecord, logRecordComparison) + + if !firstGroup { + _, _ = writer.Write([]byte("---\n")) + } + for _, record := range logGroupRecord { + if err := record.print(writer, bf.verbosity); err != nil { + bf.emergencyLog(err, "Dumping a log entry") + } + } + firstGroup = false + } + } +} + +// isRecordRelevant is true when the passed log record is matched +// by the filters set by the user +func (bf *prettyCmd) isRecordRelevant(record *logRecord) bool { + if bf.loggers.Len() > 0 && !bf.loggers.Has(record.Logger) { + return false + } + + if bf.pods.Len() > 0 && !bf.pods.Has(record.LoggingPod) { + return false + } + + if bf.minLevel != "" && record.Level.Less(bf.minLevel) { + return false + } + + return true +} + +func (bf *prettyCmd) emergencyLog(err error, msg string) { + fmt.Println(aurora.Red("ERROR"), err.Error(), msg) +} From 305c079a1d526d6dbbb27a1ff06b73ace1b2eb3c Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 15 Oct 2024 22:03:18 +0200 Subject: [PATCH 077/836] test(e2e): workaround for pod rollout (#5838) Signed-off-by: Leonardo Cecchi --- tests/e2e/upgrade_test.go | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index f7182daecd..b345936d64 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -447,7 +447,7 @@ 
var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
 })
 	}
 
-	assertClustersWorkAfterOperatorUpgrade := func(upgradeNamespace, operatorManifest string) {
+	assertClustersWorkAfterOperatorUpgrade := func(upgradeNamespace, operatorManifest string, online bool) {
 		// generate random serverNames for the clusters each time
 		serverName1 := fmt.Sprintf("%s-%d", clusterName1, funk.RandomInt(0, 9999))
 		serverName2 := fmt.Sprintf("%s-%d", clusterName2, funk.RandomInt(0, 9999))
@@ -470,6 +470,24 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
 			err := os.Setenv("SERVER_NAME", serverName1)
 			Expect(err).ToNot(HaveOccurred())
 			CreateResourceFromFile(upgradeNamespace, sampleFile)
+
+			if online {
+				// Upgrading to the new release will trigger a
+				// rollout of Pods even if online upgrade is
+				// enabled. This happens because of the
+				// following PR:
+				// https://github.com/cloudnative-pg/cloudnative-pg/pull/5503
+				//
+				// This E2E test would correctly detect that and trigger a
+				// failure. To avoid this, just for this release, we disable
+				// the pod spec reconciliation.
+				// By doing that, we no longer verify that the online upgrade
+				// avoids any Pod restart. We still verify that the operator
+				// is upgraded in this case too.
+				_, stderr, err := testsUtils.Run(
+					fmt.Sprintf("kubectl annotate -n %s cluster/%s cnpg.io/reconcilePodSpec=disabled", upgradeNamespace, clusterName1))
+				Expect(err).NotTo(HaveOccurred(), "stderr: "+stderr)
+			}
 		})
 
 		// Cluster ready happens after minio is ready
@@ -708,7 +726,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
 			DeferCleanup(cleanupOperatorAndMinio)
 
 			upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix)
-			assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, currentOperatorManifest)
+			assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, currentOperatorManifest, false)
 		})
 
 		It("keeps clusters working after an online upgrade", func() {
@@ -725,7 +743,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
 			DeferCleanup(cleanupOperatorAndMinio)
 
 			upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix)
-			assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, currentOperatorManifest)
+			assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, currentOperatorManifest, true)
 		})
 	})
 
@@ -746,7 +764,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
 			DeferCleanup(cleanupOperatorAndMinio)
 
 			upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix)
-			assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, primeOperatorManifest)
+			assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, primeOperatorManifest, true)
 		})
 
 		It("keeps clusters working after a rolling upgrade", func() {
@@ -759,7 +777,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
 			DeferCleanup(cleanupOperatorAndMinio)
 
 			upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix)
-			assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, primeOperatorManifest)
+			assertClustersWorkAfterOperatorUpgrade(upgradeNamespace, primeOperatorManifest, false)
 		})
 	})
 })
From b12e56c5cf63d31cb993f01b2b02a2f4ba855b18 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Tue, 15 Oct 2024 22:08:41 +0200
Subject: [PATCH 078/836] fix: preserve `synchronous_standby_names` when no
 replica is reachable (#5831)

In clusters using the new synchronous replication configuration, the
primary should block commits if all standby pods
are unhealthy. However, if no replicas are available, the code was
incorrectly dropping the `synchronous_standby_names` setting, allowing
commits to proceed without synchronous replication.

This patch ensures that `synchronous_standby_names` is maintained by
considering all pods in the cluster as potential synchronous replicas,
regardless of their health status.

Closes: #5784

# Release notes

```
When using `.spec.postgresql.synchronous`, ensure the
`synchronous_standby_names` parameter is correctly set, even when no
replicas are reachable (#5831).
```

Signed-off-by: Leonardo Cecchi
Signed-off-by: Francesco Canovai
Signed-off-by: Armando Ruocco
Co-authored-by: Francesco Canovai
Co-authored-by: Armando Ruocco
---
 ...cluster-example-syncreplicas-explicit.yaml | 14 ++++
 ... cluster-example-syncreplicas-legacy.yaml} |  0
 pkg/postgres/replication/explicit.go          | 66 ++++++++++++++++++-
 pkg/postgres/replication/explicit_test.go     | 30 +++++++--
 pkg/postgres/replication/suite_test.go        |  1 +
 5 files changed, 104 insertions(+), 7 deletions(-)
 create mode 100644 docs/src/samples/cluster-example-syncreplicas-explicit.yaml
 rename docs/src/samples/{cluster-example-syncreplicas.yaml => cluster-example-syncreplicas-legacy.yaml} (100%)

diff --git a/docs/src/samples/cluster-example-syncreplicas-explicit.yaml b/docs/src/samples/cluster-example-syncreplicas-explicit.yaml
new file mode 100644
index 0000000000..14ec8f2199
--- /dev/null
+++ b/docs/src/samples/cluster-example-syncreplicas-explicit.yaml
@@ -0,0 +1,14 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example-syncreplicas
+spec:
+  instances: 5
+
+  postgresql:
+    synchronous:
+      method: first
+      number: 2
+
+  storage:
+    size: 1G
diff --git a/docs/src/samples/cluster-example-syncreplicas.yaml b/docs/src/samples/cluster-example-syncreplicas-legacy.yaml
similarity index 100%
rename from docs/src/samples/cluster-example-syncreplicas.yaml
rename to docs/src/samples/cluster-example-syncreplicas-legacy.yaml
diff --git a/pkg/postgres/replication/explicit.go b/pkg/postgres/replication/explicit.go
index f935a16cb7..dfeb7a2857 100644
--- a/pkg/postgres/replication/explicit.go
+++ b/pkg/postgres/replication/explicit.go
@@ -18,16 +18,23 @@ package replication

 import (
 	"fmt"
+	"slices"
+	"sort"
 	"strings"

 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 )

+// placeholderInstanceNameSuffix is the suffix appended to the cluster name
+// to create a fake instance name to be used in `synchronous_standby_names`
+// when the replica list would be empty.
+const placeholderInstanceNameSuffix = "-placeholder"
+
 func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) string {
 	config := cluster.Spec.PostgresConfiguration.Synchronous

 	// Create the list of pod names
-	clusterInstancesList := getSortedNonPrimaryInstanceNames(cluster)
+	clusterInstancesList := getSortedInstanceNames(cluster)
 	if config.MaxStandbyNamesFromCluster != nil && len(clusterInstancesList) > *config.MaxStandbyNamesFromCluster {
 		clusterInstancesList = clusterInstancesList[:*config.MaxStandbyNamesFromCluster]
 	}
@@ -36,8 +43,11 @@ func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) string {
 	instancesList := config.StandbyNamesPre
 	instancesList = append(instancesList, clusterInstancesList...)
 	instancesList = append(instancesList, config.StandbyNamesPost...)
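+
+	// If the resulting list is empty, fall back to a placeholder
+	// instance name built from the cluster name: this keeps
+	// `synchronous_standby_names` non-empty, so the primary keeps
+	// waiting for a synchronous standby instead of silently dropping
+	// synchronous replication (see #5784).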
 	if len(instancesList) == 0 {
-		return ""
+		instancesList = []string{
+			cluster.Name + placeholderInstanceNameSuffix,
+		}
 	}

 	// Escape the pod list
@@ -52,3 +62,55 @@ func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) string {
 		config.Number,
 		strings.Join(escapedReplicas, ","))
 }
+
+// getSortedInstanceNames gets a list of all the known PostgreSQL instances in
+// an order that is meaningful when used by `synchronous_standby_names`.
+//
+// The result is composed of:
+//
+// - the list of non-primary ready instances - these are the most likely
+// candidates to be used as potential synchronous replicas
+// - the list of non-primary non-ready instances
+// - the name of the primary instance
+//
+// This algorithm has been designed to produce an order that is meaningful
+// when used with priority-based synchronous replication (the `first`
+// method), together with the `maxStandbyNamesFromCluster` parameter.
+func getSortedInstanceNames(cluster *apiv1.Cluster) []string {
+	nonPrimaryReadyInstances := make([]string, 0, cluster.Spec.Instances)
+	otherInstances := make([]string, 0, cluster.Spec.Instances)
+	primaryInstance := ""
+
+	for state, instanceList := range cluster.Status.InstancesStatus {
+		for _, instance := range instanceList {
+			switch {
+			case cluster.Status.CurrentPrimary == instance:
+				primaryInstance = instance
+
+			case state == apiv1.PodHealthy:
+				nonPrimaryReadyInstances = append(nonPrimaryReadyInstances, instance)
+			}
+		}
+	}
+
+	for _, instance := range cluster.Status.InstanceNames {
+		if instance == primaryInstance {
+			continue
+		}
+
+		if !slices.Contains(nonPrimaryReadyInstances, instance) {
+			otherInstances = append(otherInstances, instance)
+		}
+	}
+
+	sort.Strings(nonPrimaryReadyInstances)
+	sort.Strings(otherInstances)
+	result := make([]string, 0, cluster.Spec.Instances)
+	result = append(result, nonPrimaryReadyInstances...)
+	result = append(result, otherInstances...)
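+
+	// The current primary goes last: while it holds the primary role
+	// its name never matches a connected standby, so listing it is
+	// harmless; it becomes a meaningful candidate as soon as it is
+	// demoted to a replica, e.g. after a switchover.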
+	if len(primaryInstance) > 0 {
+		result = append(result, primaryInstance)
+	}
+
+	return result
+}
diff --git a/pkg/postgres/replication/explicit_test.go b/pkg/postgres/replication/explicit_test.go
index 80de4739d3..fa474b531f 100644
--- a/pkg/postgres/replication/explicit_test.go
+++ b/pkg/postgres/replication/explicit_test.go
@@ -42,7 +42,7 @@ var _ = Describe("synchronous replica configuration with the new API", func() {
 			},
 		}

-		Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("ANY 2 (\"three\",\"two\")"))
+		Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("ANY 2 (\"three\",\"two\",\"one\")"))
 	})

 	It("creates configuration with the FIRST clause", func() {
@@ -61,10 +61,10 @@ var _ = Describe("synchronous replica configuration with the new API", func() {
 			},
 		}

-		Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\")"))
+		Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\",\"one\")"))
 	})

-	It("consider the maximum number of standby names", func() {
+	It("considers the maximum number of standby names", func() {
 		cluster := createFakeCluster("example")
 		cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{
 			Method:                     apiv1.SynchronousReplicaConfigurationMethodFirst,
@@ -83,7 +83,7 @@ var _ = Describe("synchronous replica configuration with the new API", func() {
 		Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\")"))
 	})

-	It("prepend the prefix and append the suffix", func() {
+	It("prepends the prefix and appends the suffix", func() {
 		cluster := createFakeCluster("example")
 		cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{
 			Method:                     apiv1.SynchronousReplicaConfigurationMethodFirst,
@@ -112,6 +112,26 @@ var _ = Describe("synchronous replica configuration with the new API", func() {
 		}
 		cluster.Status = apiv1.ClusterStatus{}

-		Expect(explicitSynchronousStandbyNames(cluster)).To(BeEmpty())
+		Expect(explicitSynchronousStandbyNames(cluster)).To(
+			Equal("FIRST 2 (\"example-placeholder\")"))
+	})
+
+	It("includes pods that do not report the status", func() {
+		cluster := createFakeCluster("example")
+		cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{
+			Method:                     apiv1.SynchronousReplicaConfigurationMethodFirst,
+			Number:                     2,
+			MaxStandbyNamesFromCluster: nil,
+			StandbyNamesPre:            []string{},
+			StandbyNamesPost:           []string{},
+		}
+		cluster.Status = apiv1.ClusterStatus{
+			CurrentPrimary: "one",
+			InstancesStatus: map[apiv1.PodStatus][]string{
+				apiv1.PodHealthy: {"one", "three"},
+			},
+			InstanceNames: []string{"one", "two", "three"},
+		}
+		Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\",\"one\")"))
 	})
 })
diff --git a/pkg/postgres/replication/suite_test.go b/pkg/postgres/replication/suite_test.go
index baad4001eb..6b249bedcd 100644
--- a/pkg/postgres/replication/suite_test.go
+++ b/pkg/postgres/replication/suite_test.go
@@ -34,6 +34,7 @@ func TestReplication(t *testing.T) {
 func createFakeCluster(name string) *apiv1.Cluster {
 	primaryPod := fmt.Sprintf("%s-1", name)
 	cluster := &apiv1.Cluster{}
+	cluster.Name = name
 	cluster.Default()
 	cluster.Spec.Instances = 3
 	cluster.Spec.MaxSyncReplicas = 2
From d913d7a4d91f619e22ec8e660752184a85b853ca Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Wed, 16 Oct 2024 06:35:17 +0200
Subject: [PATCH 079/836] test: Updated Postgres versions used in E2E tests
 (#5695)
Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index cf76550465..73e9d1a8d0 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "17": [ "17.0", - "17.0-3" + "17.0-15" ], "16": [ "16.4", From b8d79f7a3895fddef446386d6c1f72a7bb1867ec Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Wed, 16 Oct 2024 13:56:11 +0200 Subject: [PATCH 080/836] docs: release notes for 1.24.1 and 1.23.5 (#5768) Closes #5764 Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 1 + docs/src/container_images.md | 1 + docs/src/index.md | 3 +- docs/src/release_notes/v1.23.md | 46 +++++++++++++++++++++++++++++++ docs/src/release_notes/v1.24.md | 49 +++++++++++++++++++++++++++++++++ docs/src/supported_releases.md | 8 ++++-- 6 files changed, 104 insertions(+), 4 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 20bab75fc3..e564fc34f0 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -817,6 +817,7 @@ initDB initdb initialise initializingPVC +inplace instanceID instanceName instanceNames diff --git a/docs/src/container_images.md b/docs/src/container_images.md index d198695945..b51344553b 100644 --- a/docs/src/container_images.md +++ b/docs/src/container_images.md @@ -21,6 +21,7 @@ with the following requirements: - PGAudit extension installed (optional - only if PGAudit is required in the deployed clusters) - Appropriate locale settings +- `du` (optional, for `kubectl cnpg status`) !!! Important Only [PostgreSQL versions supported by the PGDG](https://postgresql.org/) are allowed. diff --git a/docs/src/index.md b/docs/src/index.md index 52b51824be..641c074d2f 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -26,11 +26,12 @@ native connection pooler based on PgBouncer. CloudNativePG was originally built by [EDB](https://www.enterprisedb.com), then released open source under Apache License 2.0. +It has been submitted for the [CNCF Sandbox in September 2024](https://github.com/cncf/sandbox/issues/128). The [source code repository is in Github](https://github.com/cloudnative-pg/cloudnative-pg). !!! Note Based on the [Operator Capability Levels model](operator_capability_levels.md), - users can expect a **"Level V - Auto Pilot"** set of capabilities from the + users can expect a **"Level V - Auto Pilot"** subset of capabilities from the CloudNativePG Operator. ## Supported Kubernetes distributions diff --git a/docs/src/release_notes/v1.23.md b/docs/src/release_notes/v1.23.md index 197a81a93e..8aaf2f773f 100644 --- a/docs/src/release_notes/v1.23.md +++ b/docs/src/release_notes/v1.23.md @@ -6,6 +6,52 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.23) on the release branch in GitHub. +## Version 1.23.5 + +**Release date:** Oct 16, 2024 + +### Enhancements: + +- Remove the use of `pg_database_size` from the status probe, as it caused + high resource utilization by scanning the entire `PGDATA` directory to + compute database sizes. The `kubectl status` plugin will now rely on `du` + to provide detailed size information retrieval (#5689). 
+- Add the ability to configure the `full_page_writes` parameter in + PostgreSQL. This setting defaults to `on`, in line with PostgreSQL's + recommendations (#5516). +- Plugin: + - Add the `logs pretty` command in the `cnpg` plugin to read a log stream + from standard input and output a human-readable format, with options to + filter log entries (#5770) + - Enhance the `status` command by allowing multiple `-v` options to + increase verbosity for more detailed output (#5765). + - Add support for specifying a custom Docker image using the `--image` + flag in the `pgadmin4` plugin command, giving users control over the + Docker image used for pgAdmin4 deployments (#5515). + +### Fixes: + +- Resolve an issue with concurrent status updates when demoting a primary to a + designated primary, ensuring smoother transitions during cluster role changes + (#5755). +- Ensure that replica PodDisruptionBudgets (PDB) are removed when scaling down + to two instances, enabling easier maintenance on the node hosting the replica + (#5487). +- Prioritize full rollout over inplace restarts (#5407). +- Fix an issue that could lead to double failover in cases of lost + connectivity (#5788). +- Correctly set the `TMPDIR` and `PSQL_HISTORY` environment variables for pods + and jobs, improving temporary file and history management (#5503). +- Plugin: + - Resolve a race condition in the `logs cluster` command (#5775). + - Display the `potential` sync status in the `status` plugin (#5533). + - Fix the issue where pods deployed by the `pgadmin4` command didn’t have + a writable home directory (#5800). + +### Supported versions + +- PostgreSQL 17 (PostgreSQL 17.0 is the default image) + ## Version 1.23.4 **Release date:** Aug 22, 2024 diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md index 4e5c2ae8e0..78182f8180 100644 --- a/docs/src/release_notes/v1.24.md +++ b/docs/src/release_notes/v1.24.md @@ -6,6 +6,55 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.24) on the release branch in GitHub. +## Version 1.24.1 + +**Release date:** Oct 16, 2024 + +### Enhancements: + +- Remove the use of `pg_database_size` from the status probe, as it caused + high resource utilization by scanning the entire `PGDATA` directory to + compute database sizes. The `kubectl status` plugin will now rely on `du` + to provide detailed size information retrieval (#5689). +- Add the ability to configure the `full_page_writes` parameter in + PostgreSQL. This setting defaults to `on`, in line with PostgreSQL's + recommendations (#5516). +- Plugin: + - Add the `logs pretty` command in the `cnpg` plugin to read a log stream + from standard input and output a human-readable format, with options to + filter log entries (#5770) + - Enhance the `status` command by allowing multiple `-v` options to + increase verbosity for more detailed output (#5765). + - Add support for specifying a custom Docker image using the `--image` + flag in the `pgadmin4` plugin command, giving users control over the + Docker image used for pgAdmin4 deployments (#5515). + +### Fixes: + +- Resolve an issue with concurrent status updates when demoting a primary to a + designated primary, ensuring smoother transitions during cluster role changes + (#5755). +- Ensure that replica PodDisruptionBudgets (PDB) are removed when scaling down + to two instances, enabling easier maintenance on the node hosting the replica + (#5487). 
+- Prioritize full rollout over inplace restarts (#5407).
+- When using `.spec.postgresql.synchronous`, ensure that the
+  `synchronous_standby_names` parameter is correctly set, even when no replicas
+  are reachable (#5831).
+- Fix an issue that could lead to double failover in cases of lost
+  connectivity (#5788).
+- Correctly set the `TMPDIR` and `PSQL_HISTORY` environment variables for pods
+  and jobs, improving temporary file and history management (#5503).
+- Plugin:
+    - Resolve a race condition in the `logs cluster` command (#5775).
+    - Display the `potential` sync status in the `status` plugin (#5533).
+    - Fix the issue where pods deployed by the `pgadmin4` command didn’t have
+      a writable home directory (#5800).
+
+### Supported versions
+
+- PostgreSQL 17 (PostgreSQL 17.0 is the default image)
+
 ## Version 1.24.0

 **Release date:** Aug 22, 2024
diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md
index c8d9434545..c1bfbd9128 100644
--- a/docs/src/supported_releases.md
+++ b/docs/src/supported_releases.md
@@ -81,9 +81,11 @@ Git tags for versions are prefixed with `v`.

 | Version         | Currently supported  | Release date      | End of life         | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
 |-----------------|----------------------|-------------------|---------------------|-------------------------------|---------------------------|-----------------------------|
-| 1.24.x          | Yes                  | August 22, 2024   | ~ February, 2025    | 1.28, 1.29, 1.30, 1.31        | 1.27                      | 12 - 17                     |
-| 1.23.x          | Yes                  | April 24, 2024    | ~ November, 2024    | 1.27, 1.28, 1.29              | 1.30, 1.31                | 12 - 16                     |
-| main            | No, development only |                   |                     |                               |                           | 12 - 16                     |
+| 1.24.x          | Yes                  | August 22, 2024   | ~ February, 2025    | 1.28, 1.29, 1.30, 1.31        | 1.27                      | 12¹ - 17                    |
+| 1.23.x          | Yes                  | April 24, 2024    | November 24, 2024   | 1.27, 1.28, 1.29              | 1.30, 1.31                | 12¹ - 17                    |
+| main            | No, development only |                   |                     |                               |                           | 12¹ - 17                    |
+
+¹ _PostgreSQL 12 will be supported until November 14, 2024._

 The list of supported Kubernetes versions in the table depends on what
 the CloudNativePG maintainers think is reasonable to support and to test.
From 4c8327013c31fb99229cb67f4f3dfbc558d33221 Mon Sep 17 00:00:00 2001
From: Peggie
Date: Wed, 16 Oct 2024 14:02:41 +0200
Subject: [PATCH 081/836] feat: Public Cloud K8S versions update (#5766)

Update the versions used to test the operator on public cloud providers

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: public-cloud-k8s-versions-check
---
 .github/gke_versions.json  | 3 +--
 .github/kind_versions.json | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/.github/gke_versions.json b/.github/gke_versions.json
index 9fbe6428a3..f4408312dc 100644
--- a/.github/gke_versions.json
+++ b/.github/gke_versions.json
@@ -1,6 +1,5 @@
 [
     "1.30",
     "1.29",
-    "1.28",
-    "1.27"
+    "1.28"
 ]
diff --git a/.github/kind_versions.json b/.github/kind_versions.json
index e70b300c06..d9bd1a1215 100644
--- a/.github/kind_versions.json
+++ b/.github/kind_versions.json
@@ -1,5 +1,5 @@
 [
-    "v1.31.0",
+    "v1.31.1",
     "v1.30.4",
     "v1.29.8",
     "v1.28.13",
From 37245fb4f81c0fe732638d371a4e70955379f7c4 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Wed, 16 Oct 2024 14:03:16 +0200
Subject: [PATCH 082/836] chore(olm): update OLM maintainers lists (#5852)

Added new maintainers and removed old ones

Closes #5852

Signed-off-by: Jonathan Gonzalez V.
--- .../bases/cloudnative-pg.clusterserviceversion.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index ad6f561a6d..6f7a2108f5 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -107,8 +107,10 @@ spec: maintainers: - email: jonathan.gonzalez@enterprisedb.com name: Jonathan Gonzalez V. - - email: john.long@enterprisedb.com - name: John Long + - email: jonathan.battiato@enterprisedb.com + name: Jonathan Battiato + - email: niccolo.fei@enterprisedb.com + name: Niccolo Fei - email: gabriele.bartolini@enterprisedb.com name: Gabriele Bartolini maturity: stable From 93441fa18dace497f4b8ce1efa2cd3e3b66b5698 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 16 Oct 2024 17:52:29 +0200 Subject: [PATCH 083/836] Version tag to 1.24.1 (#5860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- docs/src/installation_upgrade.md | 4 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.24.1.yaml | 16625 +++++++++++++++++++++++++++++ 3 files changed, 16630 insertions(+), 5 deletions(-) create mode 100644 releases/cnpg-1.24.1.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index c2fb1a3ac7..e85711cfff 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -7,12 +7,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. 
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.24.0.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.24.0.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml ``` You can verify that with: diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 91112799a3..9493ffe2a3 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -20,13 +20,13 @@ package versions const ( // Version is the version of the operator - Version = "1.24.0" + Version = "1.24.1" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.0" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.24.0" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1" ) // BuildInfo is a struct containing all the info about the build @@ -36,7 +36,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.24.0" + buildVersion = "1.24.1" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.24.1.yaml b/releases/cnpg-1.24.1.yaml new file mode 100644 index 0000000000..6f1fb1b4de --- /dev/null +++ b/releases/cnpg-1.24.1.yaml @@ -0,0 +1,16625 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: Backup is the Schema for the backups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is tho role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. 
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. 
+ type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. 
+ (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. 
Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
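# --- illustrative example, not part of the generated CRD ---
# A hedged sketch of supplying user-provided TLS material through the
# `certificates` stanza described earlier in this schema; every secret
# name below is a placeholder assumed to exist in the same namespace.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  certificates:
    serverCASecret: my-postgres-server-ca
    serverTLSSecret: my-postgres-server-tls
    clientCASecret: my-postgres-client-ca
    replicationTLSSecret: my-postgres-replication-tls
    serverAltDNSNames:
      - db.example.com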
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
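# --- illustrative example, not part of the generated CRD ---
# One possible use of the `env` and `envFrom` fields documented above to
# pass environment variables to the instance pods. The Secret and
# ConfigMap names are assumptions, not resources shipped by the operator.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  env:
    - name: HTTP_PROXY
      value: "http://proxy.internal:3128"
    - name: AWS_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: backup-credentials
          key: ACCESS_KEY_ID
  envFrom:
    - configMapRef:
        name: common-postgres-env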
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
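# --- illustrative example, not part of the generated CRD ---
# Two hedged ways to shape the ephemeral storage fields above: size limits
# for the default volumes via `ephemeralVolumesSizeLimit`, or a templated
# PVC via `ephemeralVolumeSource`. Sizes and the storage class name are
# illustrative assumptions.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  ephemeralVolumesSizeLimit:
    shm: 256Mi
    temporaryData: 1Gi
  # Alternatively, provision the ephemeral volume from a PVC template:
  # ephemeralVolumeSource:
  #   volumeClaimTemplate:
  #     spec:
  #       accessModes: ["ReadWriteOnce"]
  #       storageClassName: standard
  #       resources:
  #         requests:
  #           storage: 512Mi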
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
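# --- illustrative example, not part of the generated CRD ---
# A sketch of an `externalClusters` entry pointing at a Barman object
# store, combining the S3 credentials and WAL options documented above.
# The bucket path, endpoint, and secret names are hypothetical.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  externalClusters:
    - name: origin
      barmanObjectStore:
        destinationPath: s3://backups/origin
        endpointURL: https://s3.example.com
        s3Credentials:
          accessKeyId:
            name: aws-credentials
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: aws-credentials
            key: ACCESS_SECRET_KEY
        wal:
          compression: gzip
          maxParallel: 4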
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + x-kubernetes-validations: + - message: Major is immutable + rule: self == oldSelf + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). 
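# --- illustrative example, not part of the generated CRD ---
# The `imageCatalogRef` field above selects a PostgreSQL major version from
# an image catalog; per the validation rules, `kind` must be ImageCatalog
# or ClusterImageCatalog and `apiGroup` must be postgresql.cnpg.io. The
# catalog name below is a placeholder.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  imageCatalogRef:
    apiGroup: postgresql.cnpg.io
    kind: ImageCatalog
    name: postgresql
    major: 16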
+ format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. + type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. 
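# --- illustrative example, not part of the generated CRD ---
# A hedged example of declarative role management with `managed.roles`,
# using only fields documented above. The role `app_reader` and its
# password Secret are assumptions; `pg_read_all_data` is a PostgreSQL
# predefined role.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  managed:
    roles:
      - name: app_reader
        ensure: present
        login: true
        connectionLimit: 10
        inRoles:
          - pg_read_all_data
        passwordSecret:
          name: app-reader-password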
+ type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + allOf: + - enum: + - rw + - r + - ro + - enum: + - rw + - r + - ro + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. 
This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). 
If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. 
Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). 
+ + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. 
If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. 
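# --- illustrative example, not part of the generated CRD ---
# A sketch of the managed-services fields above: the default read-only
# service is disabled, and an extra LoadBalancer service is templated for
# the read-write endpoint. The service name is hypothetical.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  managed:
    services:
      disabledDefaultServices:
        - ro
      additional:
        - selectorType: rw
          updateStrategy: patch
          serviceTemplate:
            metadata:
              name: cluster-example-rw-lb
            spec:
              type: LoadBalancer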
+ It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. 
Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? 
+ type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 
'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. + type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). 
+ items: + type: string + type: array + required: + - method + - number + type: object + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. 
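+            # Editor's note: an illustrative sketch, not part of the generated
+            # schema. A projected volume built from this template is mounted
+            # under `/projected`; the ConfigMap name and key are hypothetical.
+            #
+            #   spec:
+            #     projectedVolumeTemplate:
+            #       sources:
+            #         - configMap:
+            #             name: app-settings        # hypothetical ConfigMap
+            #             items:
+            #               - key: settings.ini
+            #                 path: settings.ini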
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. 
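+            # Editor's note: an illustrative sketch, not part of the generated
+            # schema. A downwardAPI source in the same projected volume, exposing
+            # the pod labels as a file; the path is an example only.
+            #
+            #   sources:
+            #     - downwardAPI:
+            #         items:
+            #           - path: labels
+            #             fieldRef:
+            #               fieldPath: metadata.labels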
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. 
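+            # Editor's note: an illustrative sketch, not part of the generated
+            # schema. A replica cluster in a distributed topology, using the
+            # fields above; all cluster names are hypothetical.
+            #
+            #   spec:
+            #     replica:
+            #       primary: cluster-eu    # hypothetical
+            #       self: cluster-us       # hypothetical
+            #       source: cluster-eu     # the external cluster acting as replication origin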
+ type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. + pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
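+            # Editor's note: an illustrative sketch, not part of the generated
+            # schema. Typical compute requests and limits for the generated pods;
+            # the quantities are examples only.
+            #
+            #   spec:
+            #     resources:
+            #       requests:
+            #         cpu: "1"
+            #         memory: 2Gi
+            #       limits:
+            #         cpu: "2"
+            #         memory: 2Gi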
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. 
+ Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
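+            # Editor's note: an illustrative sketch, not part of the generated
+            # schema. The common storage shape, relying on `size` and
+            # `storageClass` instead of a full pvcTemplate; the storage class
+            # name is hypothetical.
+            #
+            #   spec:
+            #     storage:
+            #       size: 10Gi
+            #       storageClass: fast-ssd    # hypothetical storage class
+            #       resizeInUseVolumes: true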
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). 
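+            # Editor's note: an illustrative sketch, not part of the generated
+            # schema. The shutdown and switchover timing knobs described above;
+            # per the smartShutdownTimeout note, the window reserved for a fast
+            # shutdown is stopDelay - smartShutdownTimeout (1800 - 180 = 1620s here).
+            #
+            #   spec:
+            #     stopDelay: 1800
+            #     smartShutdownTimeout: 180
+            #     switchoverDelay: 3600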
+ format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. 
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
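+            # Editor's note: an illustrative sketch, not part of the generated
+            # schema. Declaring a tablespace with dedicated storage; the name,
+            # owner and size are examples only.
+            #
+            #   spec:
+            #     tablespaces:
+            #       - name: analytics       # hypothetical tablespace
+            #         owner:
+            #           name: app           # hypothetical PostgreSQL role
+            #         temporary: false
+            #         storage:
+            #           size: 5Gi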
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. 
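+            # Editor's note: an illustrative sketch, not part of the generated
+            # schema. Spreading instances across zones with the constraint fields
+            # defined below; the label selector is hypothetical (instance pods
+            # normally carry operator-managed labels).
+            #
+            #   spec:
+            #     topologySpreadConstraints:
+            #       - maxSkew: 1
+            #         topologyKey: topology.kubernetes.io/zone
+            #         whenUnsatisfiable: DoNotSchedule
+            #         labelSelector:
+            #           matchLabels:
+            #             cnpg.io/cluster: cluster-example   # hypothetical label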
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
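+            # Editor's note: a softer variant of the sketch above. With
+            # ScheduleAnyway the skew bound becomes a scheduling preference
+            # rather than a hard requirement, here at the node level.
+            #
+            #       - maxSkew: 1
+            #         topologyKey: kubernetes.io/hostname
+            #         whenUnsatisfiable: ScheduleAnyway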
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. 
+ A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ required:
+ - instances
+ type: object
+ x-kubernetes-validations:
+ - message: imageName and imageCatalogRef are mutually exclusive
+ rule: '!(has(self.imageCatalogRef) && has(self.imageName))'
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + azurePVCUpdateEnabled: + description: AzurePVCUpdateEnabled shows if the PVC online upgrade + is enabled for this cluster + type: boolean + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
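+ # The `ca.crt`/`ca.key` layout described above corresponds to an ordinary
+ # Secret such as the sketch below; the name "my-server-ca" and the payloads
+ # are placeholders.
+ #
+ #   apiVersion: v1
+ #   kind: Secret
+ #   metadata:
+ #     name: my-server-ca
+ #   data:
+ #     ca.crt: <base64-encoded PEM certificate>
+ #     ca.key: <base64-encoded PEM private key>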
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+ description: The commit hash of the operator that is running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the operator binary
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions
+ type: object
+ type: object
+ currentPrimary:
+ description: Current primary instance
+ type: string
+ currentPrimaryFailingSinceTimestamp:
+ description: |-
+ The timestamp when the primary was detected to be unhealthy.
+ This field is reported when `.spec.failoverDelay` is populated or during online upgrades
+ type: string
+ currentPrimaryTimestamp:
+ description: The timestamp when the last actual promotion to primary
+ has occurred
+ type: string
+ danglingPVC:
+ description: |-
+ List of all the PVCs created by this cluster and still available
+ which are not attached to a Pod
+ items:
+ type: string
+ type: array
+ demotionToken:
+ description: |-
+ DemotionToken is a JSON token containing the information
+ from pg_controldata such as Database system identifier, Latest checkpoint's
+ TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO
+ WAL file, and Time of latest checkpoint
+ type: string
+ firstRecoverabilityPoint:
+ description: |-
+ The first recoverability point, stored as a date in RFC3339 format.
+ This field is calculated from the content of FirstRecoverabilityPointByMethod
+ type: string
+ firstRecoverabilityPointByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: The first recoverability point, stored as a date in RFC3339
+ format, per backup method type
+ type: object
+ healthyPVC:
+ description: List of all the PVCs that are neither dangling nor initializing
+ items:
+ type: string
+ type: array
+ image:
+ description: Image contains the image name used by the pods
+ type: string
+ initializingPVC:
+ description: List of all the PVCs that are being initialized by this
+ cluster
+ items:
+ type: string
+ type: array
+ instanceNames:
+ description: List of instance names in the cluster
+ items:
+ type: string
+ type: array
+ instances:
+ description: The total number of PVC Groups detected in the cluster.
+ It may differ from the number of existing instance pods.
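+ # For orientation, a populated status on a healthy three-instance cluster
+ # might look like this sketch (all values are illustrative):
+ #
+ #   status:
+ #     currentPrimary: cluster-example-1
+ #     instances: 3
+ #     readyInstances: 3
+ #     instanceNames:
+ #       - cluster-example-1
+ #       - cluster-example-2
+ #       - cluster-example-3
+ #     firstRecoverabilityPoint: "2024-09-01T10:00:00Z"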
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler 
+ items:
+ type: string
+ type: array
+ status:
+ description: Status contains the status reported by the plugin
+ through the SetStatusInCluster interface
+ type: string
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance, this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+ description: Current write pod
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.4
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. + minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. 
+ Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. 
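+ # Illustrative sketch of how the relabeling fields above combine in a Pooler
+ # manifest; the metric name pattern is a placeholder, not a real metric.
+ #
+ #   monitoring:
+ #     enablePodMonitor: true
+ #     podMonitorMetricRelabelings:
+ #       - action: drop
+ #         sourceLabels: ["__name__"]
+ #         regex: "pgbouncer_internal_.*"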
+ items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. 
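+ # A sketch tying `authQuery` and `authQuerySecret` together in a Pooler
+ # spec; the secret name "pooler-auth" is a placeholder. The query shown is
+ # the documented default.
+ #
+ #   pgbouncer:
+ #     poolMode: transaction
+ #     authQuerySecret:
+ #       name: pooler-auth
+ #     authQuery: "SELECT usename, passwd FROM public.user_search($1)"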
+ type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. + type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. 
Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) 
The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. 
This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names.
When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. 
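Tying the Service-level fields together before the embedded pod template begins, here is an illustrative LoadBalancer template; how such a template is attached to the enclosing resource is not shown here, and every value is invented.

serviceTemplate:
  metadata:
    labels:
      app: example-db
  spec:
    type: LoadBalancer
    loadBalancerClass: example.com/internal-vip   # optional; immutable once set
    loadBalancerSourceRanges:
      - 10.0.0.0/8                                # honoured only where the provider supports it
    ports:
      - name: postgres
        port: 5432
        targetPort: 5432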
+                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+              properties:
+                activeDeadlineSeconds:
+                  description: |-
+                    Optional duration in seconds the pod may be active on the node relative to
+                    StartTime before the system will actively try to mark it failed and kill associated containers.
+                    Value must be a positive integer.
+                  format: int64
+                  type: integer
+                affinity:
+                  description: If specified, the pod's scheduling constraints
+                  properties:
+                    nodeAffinity:
+                      description: Describes node affinity scheduling rules
+                        for the pod.
+                      properties:
+                        preferredDuringSchedulingIgnoredDuringExecution:
+                          description: |-
+                            The scheduler will prefer to schedule pods to nodes that satisfy
+                            the affinity expressions specified by this field, but it may choose
+                            a node that violates one or more of the expressions. The node that is
+                            most preferred is the one with the greatest sum of weights, i.e.
+                            for each node that meets all of the scheduling requirements (resource
+                            request, requiredDuringScheduling affinity expressions, etc.),
+                            compute a sum by iterating through the elements of this field and adding
+                            "weight" to the sum if the node matches the corresponding matchExpressions; the
+                            node(s) with the highest sum are the most preferred.
+                          items:
+                            description: |-
+                              An empty preferred scheduling term matches all objects with implicit weight 0
+                              (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                            properties:
+                              preference:
+                                description: A node selector term, associated
+                                  with the corresponding weight.
+                                properties:
+                                  matchExpressions:
+                                    description: A list of node selector requirements
+                                      by node's labels.
+                                    items:
+                                      description: |-
+                                        A node selector requirement is a selector that contains values, a key, and an operator
+                                        that relates the key and values.
+                                      properties:
+                                        key:
+                                          description: The label key that the
+                                            selector applies to.
+                                          type: string
+                                        operator:
+                                          description: |-
+                                            Represents a key's relationship to a set of values.
+                                            Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                          type: string
+                                        values:
+                                          description: |-
+                                            An array of string values. If the operator is In or NotIn,
+                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                            the values array must be empty. If the operator is Gt or Lt, the values
+                                            array must have a single element, which will be interpreted as an integer.
+                                            This array is replaced during a strategic merge patch.
+                                          items:
+                                            type: string
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                      required:
+                                      - key
+                                      - operator
+                                      type: object
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  matchFields:
+                                    description: A list of node selector requirements
+                                      by node's fields.
+                                    items:
+                                      description: |-
+                                        A node selector requirement is a selector that contains values, a key, and an operator
+                                        that relates the key and values.
+                                      properties:
+                                        key:
+                                          description: The label key that the
+                                            selector applies to.
+                                          type: string
+                                        operator:
+                                          description: |-
+                                            Represents a key's relationship to a set of values.
+                                            Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                          type: string
+                                        values:
+                                          description: |-
+                                            An array of string values. If the operator is In or NotIn,
+                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                            the values array must be empty. If the operator is Gt or Lt, the values
+                                            array must have a single element, which will be interpreted as an integer.
+                                            This array is replaced during a strategic merge patch.
+                                          items:
+                                            type: string
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                      required:
+                                      - key
+                                      - operator
+                                      type: object
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                type: object
+                                x-kubernetes-map-type: atomic
+                              weight:
+                                description: Weight associated with matching
+                                  the corresponding nodeSelectorTerm, in the
+                                  range 1-100.
+                                format: int32
+                                type: integer
+                            required:
+                            - preference
+                            - weight
+                            type: object
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        requiredDuringSchedulingIgnoredDuringExecution:
+                          description: |-
+                            If the affinity requirements specified by this field are not met at
+                            scheduling time, the pod will not be scheduled onto the node.
+                            If the affinity requirements specified by this field cease to be met
+                            at some point during pod execution (e.g. due to an update), the system
+                            may or may not try to eventually evict the pod from its node.
+                          properties:
+                            nodeSelectorTerms:
+                              description: Required. A list of node selector
+                                terms. The terms are ORed.
+                              items:
+                                description: |-
+                                  A null or empty node selector term matches no objects. The requirements of
+                                  them are ANDed.
+                                  The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                                properties:
+                                  matchExpressions:
+                                    description: A list of node selector requirements
+                                      by node's labels.
+                                    items:
+                                      description: |-
+                                        A node selector requirement is a selector that contains values, a key, and an operator
+                                        that relates the key and values.
+                                      properties:
+                                        key:
+                                          description: The label key that the
+                                            selector applies to.
+                                          type: string
+                                        operator:
+                                          description: |-
+                                            Represents a key's relationship to a set of values.
+                                            Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                          type: string
+                                        values:
+                                          description: |-
+                                            An array of string values. If the operator is In or NotIn,
+                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                            the values array must be empty. If the operator is Gt or Lt, the values
+                                            array must have a single element, which will be interpreted as an integer.
+                                            This array is replaced during a strategic merge patch.
+                                          items:
+                                            type: string
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                      required:
+                                      - key
+                                      - operator
+                                      type: object
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  matchFields:
+                                    description: A list of node selector requirements
+                                      by node's fields.
+                                    items:
+                                      description: |-
+                                        A node selector requirement is a selector that contains values, a key, and an operator
+                                        that relates the key and values.
+                                      properties:
+                                        key:
+                                          description: The label key that the
+                                            selector applies to.
+                                          type: string
+                                        operator:
+                                          description: |-
+                                            Represents a key's relationship to a set of values.
+                                            Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                          type: string
+                                        values:
+                                          description: |-
+                                            An array of string values. If the operator is In or NotIn,
+                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                            the values array must be empty. If the operator is Gt or Lt, the values
+                                            array must have a single element, which will be interpreted as an integer.
+                                            This array is replaced during a strategic merge patch.
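As a sketch of the node-affinity stanza this schema validates (labels invented): required terms are ORed, expressions within a term are ANDed, and Gt/Lt take a single value interpreted as an integer.

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:                    # terms are ORed
        - matchExpressions:                 # requirements are ANDed
            - key: kubernetes.io/arch
              operator: In
              values: ["amd64"]
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 80                          # range 1-100
        preference:
          matchExpressions:
            - key: example.com/cpu-count    # hypothetical node label
              operator: Gt
              values: ["15"]                # single value, read as an integer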
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. 
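A minimal sketch of the preferred pod-affinity shape described above, co-locating Pods with a hypothetical cache workload in the same zone:

affinity:
  podAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50                          # range 1-100
        podAffinityTerm:
          topologyKey: topology.kubernetes.io/zone
          labelSelector:
            matchLabels:
              app: example-cache            # hypothetical label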
The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
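For the required (hard) variant documented above, every term must be satisfied at scheduling time; a sketch with invented names:

affinity:
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname # must be non-empty
        labelSelector:
          matchExpressions:
            - key: app
              operator: In
              values: ["example-db"]
        namespaces: ["example-ns"]          # empty list means the pod's own namespace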
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
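Anti-affinity uses the same term shape with inverted semantics; a common pattern, sketched here with a hypothetical label, spreads replicas across nodes:

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              app: example-db               # hypothetical label shared by the replicas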
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. 
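The $(VAR_NAME) expansion rules above in a short sketch (image and names invented): defined variables expand, $$ escapes to a literal $, and unresolved references pass through unchanged.

containers:
  - name: app
    image: example.org/app:1.0              # hypothetical image
    command: ["/bin/app"]                   # not run inside a shell
    args:
      - "--listen=$(LISTEN_ADDR)"           # expanded from the env entry below
      - "--prefix=$$(HOME)"                 # becomes the literal string "$(HOME)"
    env:
      - name: LISTEN_ADDR
        value: "0.0.0.0:8080"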
+ Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
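The main valueFrom sources documented above, combined in one sketch; the ConfigMap and Secret names are invented.

env:
  - name: POD_IP
    valueFrom:
      fieldRef:
        fieldPath: status.podIP             # downward API field
  - name: DB_PASSWORD
    valueFrom:
      secretKeyRef:
        name: example-secret                # hypothetical Secret
        key: password
  - name: LOG_LEVEL
    valueFrom:
      configMapKeyRef:
        name: example-config                # hypothetical ConfigMap
        key: log-level
        optional: true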
+ items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
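Combining envFrom with a postStart hook as documented above; because lifecycle exec commands are not run in a shell, one is invoked explicitly (names invented).

envFrom:
  - prefix: CFG_                            # optional C_IDENTIFIER prefix for each key
    configMapRef:
      name: example-config                  # hypothetical ConfigMap
lifecycle:
  postStart:
    exec:
      command: ["/bin/sh", "-c", "echo started > /tmp/started"]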
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
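Putting the liveness-probe fields above together, a minimal sketch with an invented endpoint; the commented values are the documented defaults.

livenessProbe:
  httpGet:
    path: /healthz                          # hypothetical endpoint
    port: 8080
    scheme: HTTP                            # default
  initialDelaySeconds: 10
  periodSeconds: 10                         # default
  failureThreshold: 3                       # default; minimum 1
  timeoutSeconds: 1                         # default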
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. 
Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. 
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. 
This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. 
+ Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. 
Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. 
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. 
Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. 
+ type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. 
The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. 
The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ description: |-
+ DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ Deprecated: Use serviceAccountName instead.
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g.
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. 
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: flocker represents a Flocker volume attached
+ to a kubelet's host machine.
This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description: photonPersistentDisk represents a PhotonController
+ persistent disk attached and mounted on kubelets host
+ machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon
+ Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: portworxVolume represents a portworx volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
+ items:
+ description: |-
+ Projection that may be projected along with other supported volume types.
+ Exactly one of these fields must be set.
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+ + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
+ items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. 
+ Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.4 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
- self: sets the ScheduledBackup object as owner of the backup
- cluster: sets the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: If the first backup has to start immediately
+ after creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`).
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: If this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
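+ # Illustrative example, not part of the generated schema: a minimal
+ # ScheduledBackup exercising the spec fields defined above. Names such
+ # as "nightly-backup" and "cluster-example" are hypothetical. Note the
+ # six-field cron schedule referenced above, with a leading seconds field.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: ScheduledBackup
+ # metadata:
+ #   name: nightly-backup
+ # spec:
+ #   schedule: "0 0 0 * * *"      # sec min hour day-of-month month day-of-week
+ #   cluster:
+ #     name: cluster-example
+ #   backupOwnerReference: self   # one of: none, self, cluster
+ #   method: barmanObjectStore    # the default backup method
+ #   immediate: true              # also take a first backup right after creation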
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: Information about the last time a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: Next time we will run a backup
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ - pods
+ - pods/exec
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/status
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - patch
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - update
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - podmonitors
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups
+ - clusters
+ - poolers
+ - scheduledbackups
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups/status
+ - scheduledbackups/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusterimagecatalogs
+ - imagecatalogs
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/finalizers
+ - poolers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/status
+ - poolers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - rolebindings
+ - roles
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cnpg-manager-rolebinding
+roleRef:
+ apiGroup:
rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() 
= pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , 
maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files 
are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + 
usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + 
allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From 976b5efe8fd1f01d074abe82d2ed7e167de856a8 Mon Sep 17 
00:00:00 2001 From: Gabriele Bartolini Date: Wed, 16 Oct 2024 18:51:23 +0200 Subject: [PATCH 084/836] fix(docs): update header for 1.24 upgrade procedure (#5862) Closes #5861 Signed-off-by: Gabriele Bartolini --- docs/src/installation_upgrade.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index e85711cfff..ec9b2019a6 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -248,12 +248,16 @@ When versions are not directly upgradable, the old version needs to be removed before installing the new one. This won't affect user data but only the operator itself. -### Upgrading to 1.24.0 or 1.23.4 + + +### Upgrading to 1.24 from a previous minor version !!! Warning Every time you are upgrading to a higher minor release, make sure you From 2c621025e4f8b4639d76bb6008e772566c83649f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 10:12:42 +0200 Subject: [PATCH 085/836] fix(deps): update all non-major go dependencies (main) (#5737) https://github.com/prometheus/client_golang `v1.20.4` -> `v1.20.5` https://github.com/stern/stern `v1.30.0` -> `v1.31.0` golang.org/x/term `v0.24.0` -> `v0.25.0` github.com/fatih/color `v1.16.0` -> `v1.17.0` github.com/google/btree `v1.1.2` -> `v1.1.3` go.starlark.net `v0.0.0-20240411212711-9b43f0afd521` -> `v0.0.0-20240925182052-1207426daebd` golang.org/x/crypto `v0.27.0` -> `v0.28.0` golang.org/x/net `v0.29.0` -> `v0.30.0` golang.org/x/text `v0.18.0` -> `v0.19.0` golang.org/x/time `v0.6.0` -> `v0.7.0` sigs.k8s.io/kustomize/api `v0.17.2` -> `v0.17.3` sigs.k8s.io/kustomize/kyaml `v0.17.1` -> `v0.17.2` --- go.mod | 24 ++++++++++++------------ go.sum | 52 ++++++++++++++++++++++++++-------------------------- 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/go.mod b/go.mod index 8b083ab9e9..caa2fb1033 100644 --- a/go.mod +++ b/go.mod @@ -28,16 +28,16 @@ require ( github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 - github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 github.com/spf13/cobra v1.8.1 - github.com/stern/stern v1.30.0 + github.com/stern/stern v1.31.0 github.com/thoas/go-funk v0.9.3 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/term v0.24.0 + golang.org/x/term v0.25.0 google.golang.org/grpc v1.67.1 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.1 @@ -56,7 +56,7 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect @@ -68,7 +68,7 @@ github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -102,15 +102,15 @@ github.com/spf13/pflag
v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.starlark.net v0.0.0-20240411212711-9b43f0afd521 // indirect - golang.org/x/crypto v0.27.0 // indirect + go.starlark.net v0.0.0-20240925182052-1207426daebd // indirect + golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect - golang.org/x/net v0.29.0 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.18.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect @@ -121,7 +121,7 @@ require ( k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.17.2 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect + sigs.k8s.io/kustomize/api v0.17.3 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 7ee1b22611..04f20745c9 100644 --- a/go.sum +++ b/go.sum @@ -33,12 +33,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= -github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -63,8 +63,8 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -159,8 +159,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 h1:XGoEXT6WTTihO+MD8MAao+YaQIH905HbK0WK2lyo28k= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= @@ -180,8 +180,8 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stern/stern v1.30.0 h1:4drczNgYqiVZlZ1rTMWgskokq2Owj+Wb1oYOrgM2TQI= -github.com/stern/stern v1.30.0/go.mod h1:l4c94jBK8YEyroFNTCEwFimLK55bdRKPaeyAFWZPvNQ= +github.com/stern/stern v1.31.0 h1:kKHVgEmIgqbC6/sFZahUeU9TbxDH+0l3l5/ornLlQLs= +github.com/stern/stern v1.31.0/go.mod h1:BfAeaPQhkMhQPTaFV81pS8YWCBmxg6IBL8fPGalt0qY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -198,8 +198,8 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.starlark.net v0.0.0-20240411212711-9b43f0afd521 h1:1Ufp2S2fPpj0RHIQ4rbzpCdPLCPkzdK7BaVFH3nkYBQ= -go.starlark.net v0.0.0-20240411212711-9b43f0afd521/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= +go.starlark.net v0.0.0-20240925182052-1207426daebd h1:S+EMisJOHklQxnS3kqsY8jl2y5aF0FDEdcLnOw3q22E= +go.starlark.net v0.0.0-20240925182052-1207426daebd/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -211,8 +211,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -221,8 +221,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -238,14 +238,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -298,10 +298,10 @@ sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= -sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= -sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= -sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= +sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= +sigs.k8s.io/kustomize/api v0.17.3/go.mod h1:TuDH4mdx7jTfK61SQ/j1QZM/QWR+5rmEiNjvYlhzFhc= +sigs.k8s.io/kustomize/kyaml v0.17.2 h1:+AzvoJUY0kq4QAhH/ydPHHMRLijtUKiyVyh7fOSshr0= +sigs.k8s.io/kustomize/kyaml v0.17.2/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From 88eaf002d3884747cdca37e5d31ca8248e1bdeb8 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 10:39:08 +0200 Subject: [PATCH 086/836] chore(deps): update dependency redhat-openshift-ecosystem/openshift-preflight to v1.10.2 (main) (#5866) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3e363b0fd7..24dae85780 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ SPELLCHECK_VERSION ?= 0.43.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 OPM_VERSION ?= v1.47.0 -PREFLIGHT_VERSION ?= 1.10.1 +PREFLIGHT_VERSION ?= 1.10.2 OPENSHIFT_VERSIONS ?= v4.12-v4.17 ARCH ?= amd64 From e561bda9d9c341a0d899e75f2f47b9059a45f09d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 13:33:12 +0200 Subject: [PATCH 087/836] chore(deps): update module sigs.k8s.io/kustomize/kustomize/v5 to v5.5.0 (main) (#5820) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 24dae85780..f16b5da862 100644 --- a/Makefile +++ b/Makefile @@ -41,7 +41,7 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") -KUSTOMIZE_VERSION ?= v5.4.3 +KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.4 GORELEASER_VERSION ?= v2.3.2 SPELLCHECK_VERSION ?= 0.43.0 From d5ecd9494f4eea709929f451bc90e35dd32e71a6 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 17 Oct 2024 17:00:00 +0200 Subject: [PATCH 088/836] test(e2e): prevent namespace deletion timeout (#5854) This commit resolves potential timeouts during namespace deletion when a PostgreSQL cluster still runs inside the namespace. 
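In outline, the change in the diff below replaces the previous `kubectl delete namespace --wait` shell-out with direct API calls. Since the flattened diff is dense, here is a hand-condensed sketch of the new flow, using the helper names and client options exactly as they appear in the patch (error handling abbreviated):

    // 1. Request a non-blocking namespace deletion (background propagation).
    if err := env.DeleteNamespace(name, client.PropagationPolicy("Background")); err != nil {
        return err
    }
    // 2. Force-delete every pod with a 1-second grace period, so a slow
    //    PostgreSQL smart shutdown cannot stall the namespace teardown.
    pods, err := env.GetPodList(name)
    if err != nil {
        return err
    }
    for _, pod := range pods.Items {
        err := env.DeletePod(name, pod.Name, client.GracePeriodSeconds(1), client.PropagationPolicy("Background"))
        if err != nil && !apierrs.IsNotFound(err) { // the pod may already be gone
            return err
        }
    }
    // 3. Poll once per second until the Namespace object has disappeared.
    return wait.PollUntilContextCancel(ctx, time.Second, true,
        func(ctx context.Context) (bool, error) {
            err := env.Client.Get(ctx, client.ObjectKey{Name: name}, &corev1.Namespace{})
            if apierrs.IsNotFound(err) {
                return true, nil
            }
            return false, err
        },
    )

The 1-second grace period deliberately trades a clean PostgreSQL shutdown for test-suite speed, which is acceptable here because the namespace and its data are being discarded anyway.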
It ensures that all pods within the namespace are deleted with a 1-second grace period immediately after the namespace deletion request, improving cleanup speed and reliability. Signed-off-by: Marco Nenciarini Signed-off-by: Jaime Silvela Co-authored-by: Jaime Silvela --- tests/utils/namespace.go | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/tests/utils/namespace.go b/tests/utils/namespace.go index b7b0b73d6c..65a9513278 100644 --- a/tests/utils/namespace.go +++ b/tests/utils/namespace.go @@ -18,16 +18,20 @@ package utils import ( "bytes" + "context" "errors" "fmt" "path" "strings" + "time" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/onsi/ginkgo/v2" corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" @@ -181,7 +185,33 @@ func (env TestingEnvironment) DeleteNamespaceAndWait(name string, timeoutSeconds } } - _, _, err := Run(fmt.Sprintf("kubectl delete namespace %v --wait=true --timeout %vs", name, timeoutSeconds)) + ctx, cancel := context.WithTimeout(env.Ctx, time.Duration(timeoutSeconds)*time.Second) + defer cancel() - return err + err := env.DeleteNamespace(name, client.PropagationPolicy("Background")) + if err != nil { + return err + } + + pods, err := env.GetPodList(name) + if err != nil { + return err + } + + for _, pod := range pods.Items { + err = env.DeletePod(name, pod.Name, client.GracePeriodSeconds(1), client.PropagationPolicy("Background")) + if err != nil && !apierrs.IsNotFound(err) { + return err + } + } + + return wait.PollUntilContextCancel(ctx, time.Second, true, + func(ctx context.Context) (bool, error) { + err := env.Client.Get(ctx, client.ObjectKey{Name: name}, &corev1.Namespace{}) + if apierrs.IsNotFound(err) { + return true, nil + } + return false, err + }, + ) } From ad1a56080baa8efbd89a707d37cfc817950b54ca Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Fri, 18 Oct 2024 09:31:16 +0200 Subject: [PATCH 089/836] ci: collect apiserver audit logs (#5879) Add the ENABLE_APISERVER_AUDIT env variable to the kind e2e scripts. If set to true, kind will configure auditing for postgresql.cnpg.io resources and mount a local directory to write the audit logs on. 
Closes #5873 Signed-off-by: Francesco Canovai --- .github/workflows/continuous-delivery.yml | 2 ++ hack/e2e/audit-policy.yaml | 6 ++++ hack/e2e/run-e2e-kind.sh | 1 + hack/setup-cluster.sh | 36 +++++++++++++++++++++++ 4 files changed, 45 insertions(+) create mode 100644 hack/e2e/audit-policy.yaml diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index d7a95ec095..eabe9dff31 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -716,6 +716,8 @@ jobs: cat config/manager/env_override.yaml - name: Run Kind End-to-End tests + env: + ENABLE_APISERVER_AUDIT: true run: make e2e-test-kind - diff --git a/hack/e2e/audit-policy.yaml b/hack/e2e/audit-policy.yaml new file mode 100644 index 0000000000..780380f59c --- /dev/null +++ b/hack/e2e/audit-policy.yaml @@ -0,0 +1,6 @@ +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +- level: RequestResponse + resources: + - group: "postgresql.cnpg.io" diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index f37c274a8d..3a977f6628 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -34,6 +34,7 @@ export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} export LOG_DIR=${LOG_DIR:-$ROOT_DIR/_logs/} +export ENABLE_APISERVER_AUDIT=${ENABLE_APISERVER_AUDIT:-false} export POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")} export E2E_PRE_ROLLING_UPDATE_IMG=${E2E_PRE_ROLLING_UPDATE_IMG:-${POSTGRES_IMG%.*}} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index d8ba973fe7..9b24a33022 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -38,6 +38,7 @@ ENGINE=${CLUSTER_ENGINE:-kind} ENABLE_REGISTRY=${ENABLE_REGISTRY:-} ENABLE_PYROSCOPE=${ENABLE_PYROSCOPE:-} ENABLE_CSI_DRIVER=${ENABLE_CSI_DRIVER:-} +ENABLE_APISERVER_AUDIT=${ENABLE_APISERVER_AUDIT:-} NODES=${NODES:-3} # This option is telling the docker to use node image with certain arch, i.e kindest/node in kind. # In M1/M2, if enable amd64 emulation then we keep it as linux/amd64. 
@@ -146,6 +147,41 @@ kubeadmConfigPatchesJSON6902: nodes: - role: control-plane EOF + if [ "${ENABLE_APISERVER_AUDIT}" = "true" ]; then + # Create the apiserver audit log directory beforehand, otherwise it will be + # generated within docker with root permissions + mkdir -p "${LOG_DIR}/apiserver" + touch "${LOG_DIR}/apiserver/kube-apiserver-audit.log" + cat >>"${config_file}" <<-EOF + kubeadmConfigPatches: + - | + kind: ClusterConfiguration + apiServer: + # enable auditing flags on the API server + extraArgs: + audit-log-path: /var/log/kubernetes/kube-apiserver-audit.log + audit-policy-file: /etc/kubernetes/policies/audit-policy.yaml + # mount new files / directories on the control plane + extraVolumes: + - name: audit-policies + hostPath: /etc/kubernetes/policies + mountPath: /etc/kubernetes/policies + readOnly: true + pathType: "DirectoryOrCreate" + - name: "audit-logs" + hostPath: "/var/log/kubernetes" + mountPath: "/var/log/kubernetes" + readOnly: false + pathType: DirectoryOrCreate + # mount the local file on the control plane + extraMounts: + - hostPath: ${E2E_DIR}/audit-policy.yaml + containerPath: /etc/kubernetes/policies/audit-policy.yaml + readOnly: true + - hostPath: ${LOG_DIR}/apiserver/ + containerPath: /var/log/kubernetes/ +EOF + fi if [ "$NODES" -gt 1 ]; then for ((i = 0; i < NODES; i++)); do From 4b001818e51c6b07b444ff20b12101608499fb94 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 18 Oct 2024 11:38:50 +0200 Subject: [PATCH 090/836] chore(deps): update rojopolis/spellcheck-github-actions action to v0.43.1 (main) (#5886) --- .github/workflows/spellcheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 847ca8891b..6dc47bd04b 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -28,4 +28,4 @@ jobs: uses: actions/checkout@v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@0.43.0 + uses: rojopolis/spellcheck-github-actions@0.43.1 From 7ea31611e8a9f0d360482d8d843d110029808943 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 18 Oct 2024 13:43:03 +0200 Subject: [PATCH 091/836] chore(deps): update dependency rook/rook to v1.15.4 (main) (#5893) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index eabe9dff31..f5bc575720 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ env: GOLANG_VERSION: "1.23.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.24.0" - ROOK_VERSION: "v1.15.3" + ROOK_VERSION: "v1.15.4" EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" From a2c8ff4dc2668be3a75663166d73e5a8270b63ed Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 18 Oct 2024 15:08:17 +0200 Subject: [PATCH 092/836] ci: properly quote options in bug issue templates (#5897) Since the versions can and may end in `0` we should quote the version numbers to avoid YAML ignoring the `0` at the end like in `1.30` where the `0` is ignored and not showed because of the missing quotes Closes #5892 Signed-off-by: Marco Nenciarini --- .github/ISSUE_TEMPLATE/bug.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git 
a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 0506b9a579..d02633e40a 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -48,11 +48,11 @@ body: label: Version description: What is the version of CloudNativePG you are running? options: - - 1.24.0 - - 1.23.4 - - trunk (main) - - older in 1.23.x - - older minor (unsupported) + - "1.24.0" + - "1.23.4" + - "trunk (main)" + - "older in 1.23.x" + - "older minor (unsupported)" validations: required: true - type: dropdown @@ -60,12 +60,12 @@ body: attributes: label: What version of Kubernetes are you using? options: - - 1.31 - - 1.30 - - 1.29 - - 1.28 - - 1.27 (unsupported) - - other (unsupported) + - "1.31" + - "1.30" + - "1.29" + - "1.28" + - "1.27 (unsupported)" + - "other (unsupported)" validations: required: true - type: dropdown From 40b218f4cedb413556befd5bee0b4b70c5e4a94c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Fri, 18 Oct 2024 16:08:06 +0200 Subject: [PATCH 093/836] test(e2e): refactor assertions to create, insert and validate data (#5759) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes: #5713 Refactors: - Unify AssertCreateTestData, AssertCreateTestDataWithDatabaseName, AssertCreateTestDataInTablespace - Unify insertRecordIntoTable,insertRecordIntoTableWithDatabaseName - Unify AssertDataExpectedCount,AssertDataExpectedCountWithDatabaseName - Remove ExecCommandWithPsqlClient given that we previously removed the psqlCLient - Remove RunQueryFromPod, which is not needed anymore since now we can connect directly using port-forwarding - Convert ExecCommand usages that were connecting locally to run a query to instead use ExecQueryInInstancePod - Convert EventuallyExecCommand usages that were connecting locally to run a query to instead use EventuallyExecQueryInInstancePod - Add RunExecOverForward which allows running queries that don't return any rows via port-forwarding Fixes: - Client should close the connection when it is done. This improves timings on switchover/failover because we rely on smartShutdown by default E2E fixes: - Drain with PDB disabled: actually wait for the Cluster to be back - Replica Mode: synchronize secret value in the Replica Cluster before connecting - Configuration update: simplify pg_ident assertion Signed-off-by: Niccolò Fei Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. 
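Because the diff below is long, it helps to see the end state first: the three separate data-creation helpers collapse into a single AssertCreateTestData keyed by the extended TableLocator. A usage sketch against the new signatures (the table name and the namespace/clusterName variables are illustrative test scaffolding, not taken from the patch):

    // Empty DatabaseName and Tablespace fall back to the "app" database and
    // the default tablespace inside AssertCreateTestData.
    tl := TableLocator{
        Namespace:   namespace,
        ClusterName: clusterName,
        TableName:   "sample_data", // hypothetical table name
    }
    AssertCreateTestData(env, tl)       // creates the table AS VALUES (1),(2)
    AssertDataExpectedCount(env, tl, 2) // verifies both rows over port-forward

Both helpers now reach the instances through the port-forwarding utilities (ForwardPSQLConnection, RunQueryRowOverForward) instead of exec-ing psql inside the pods.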
--- tests/e2e/asserts_test.go | 541 +++++++++++----------- tests/e2e/backup_restore_test.go | 81 +++- tests/e2e/cluster_microservice_test.go | 116 +++-- tests/e2e/cluster_monolithic_test.go | 1 + tests/e2e/cluster_setup_test.go | 1 + tests/e2e/configuration_update_test.go | 214 +++++---- tests/e2e/declarative_hibernation_test.go | 17 +- tests/e2e/disk_space_test.go | 18 +- tests/e2e/drain_node_test.go | 52 ++- tests/e2e/failover_test.go | 66 ++- tests/e2e/fastswitchover_test.go | 30 +- tests/e2e/fencing_test.go | 11 +- tests/e2e/hibernation_test.go | 20 +- tests/e2e/managed_roles_test.go | 22 +- tests/e2e/metrics_test.go | 1 + tests/e2e/operator_unavailable_test.go | 27 +- tests/e2e/pg_basebackup_test.go | 42 +- tests/e2e/pg_data_corruption_test.go | 10 +- tests/e2e/replica_mode_cluster_test.go | 55 ++- tests/e2e/replication_slot_test.go | 12 +- tests/e2e/tablespaces_test.go | 76 ++- tests/e2e/update_user_test.go | 19 +- tests/e2e/upgrade_test.go | 62 ++- tests/e2e/volume_snapshot_test.go | 60 ++- tests/utils/environment.go | 26 -- tests/utils/pod.go | 28 ++ tests/utils/postgres.go | 35 +- tests/utils/psql_connection.go | 30 +- tests/utils/replication_slots.go | 35 +- 29 files changed, 1051 insertions(+), 657 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index badcfe420c..9fc3c9dba6 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -406,95 +406,34 @@ func AssertOperatorIsReady() { }, testTimeouts[testsUtils.OperatorIsReady]).Should(BeTrue(), "Operator pod is not ready") } -// AssertDatabaseIsReady checks the database on the primary is ready to run queries -// -// NOTE: even if we checked AssertClusterIsReady, a temporary DB connectivity issue would take -// failureThreshold x periodSeconds to be detected -func AssertDatabaseIsReady(namespace, clusterName, dbName string) { - By(fmt.Sprintf("checking the database on %s is ready", clusterName), func() { - Eventually(func(g Gomega) { - primary, err := env.GetClusterPrimary(namespace, clusterName) - g.Expect(err).ToNot(HaveOccurred()) - - stdout, stderr, err := env.ExecCommandInInstancePod(testsUtils.PodLocator{ - Namespace: namespace, - PodName: primary.GetName(), - }, nil, "pg_isready") - g.Expect(err).ShouldNot(HaveOccurred()) - g.Expect(stderr).To(BeEmpty(), "while checking pg_isready") - g.Expect(stdout).To(ContainSubstring("accepting"), "while checking pg_isready: Not accepting connections") - _, _, err = env.ExecQueryInInstancePod(testsUtils.PodLocator{ - Namespace: namespace, - PodName: primary.GetName(), - }, testsUtils.DatabaseName(dbName), "select 1") - g.Expect(err).ShouldNot(HaveOccurred()) - }, RetryTimeout, PollingTime).Should(Succeed()) - }) -} - -// AssertCreateTestData create test on the "app" database -func AssertCreateTestData(env *testsUtils.TestingEnvironment, namespace, clusterName, tableName string) { - AssertDatabaseIsReady(namespace, clusterName, testsUtils.AppDBName) - By(fmt.Sprintf("creating test data in cluster %v", clusterName), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testsUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - Expect(err).ToNot(HaveOccurred()) - - query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName) - _, err = conn.Exec(query) - Expect(err).ToNot(HaveOccurred()) - forward.Close() - }) -} - -// AssertCreateTestDataWithDatabaseName create test data in a given database. 
-func AssertCreateTestDataWithDatabaseName( - env *testsUtils.TestingEnvironment, - namespace, - clusterName, - databaseName, - tableName string, -) { - By(fmt.Sprintf("creating test data in cluster %v", clusterName), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - databaseName, - apiv1.ApplicationUserSecretSuffix, - ) - Expect(err).ToNot(HaveOccurred()) - query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v AS VALUES (1),(2);", tableName) - _, err = conn.Exec(query) - Expect(err).ToNot(HaveOccurred()) - forward.Close() - }) -} - type TableLocator struct { - Namespace string - ClusterName string - TableName string - Tablespace string + Namespace string + ClusterName string + DatabaseName string + TableName string + Tablespace string } -// AssertCreateTestDataInTablespace create test data. -func AssertCreateTestDataInTablespace(env *testsUtils.TestingEnvironment, tl TableLocator) { - AssertDatabaseIsReady(tl.Namespace, tl.ClusterName, testsUtils.AppDBName) - By(fmt.Sprintf("creating test data in tablespace %q", tl.Tablespace), func() { +// AssertCreateTestData create test data on a given TableLocator +func AssertCreateTestData(env *testsUtils.TestingEnvironment, tl TableLocator) { + if tl.DatabaseName == "" { + tl.DatabaseName = testsUtils.AppDBName + } + if tl.Tablespace == "" { + tl.Tablespace = testsUtils.TablespaceDefaultName + } + + By(fmt.Sprintf("creating test data in table %v (cluster %v, database %v, tablespace %v)", + tl.TableName, tl.ClusterName, tl.DatabaseName, tl.Tablespace), func() { forward, conn, err := testsUtils.ForwardPSQLConnection( env, tl.Namespace, tl.ClusterName, - testsUtils.AppDBName, + tl.DatabaseName, apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -508,51 +447,17 @@ func AssertCreateTestDataInTablespace(env *testsUtils.TestingEnvironment, tl Tab } // AssertCreateTestDataLargeObject create large objects with oid and data -func AssertCreateTestDataLargeObject(namespace, clusterName string, oid int, data string, pod *corev1.Pod) { +func AssertCreateTestDataLargeObject(namespace, clusterName string, oid int, data string) { By("creating large object", func() { query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS image (name text,raster oid); "+ "INSERT INTO image (name, raster) VALUES ('beautiful image', lo_from_bytea(%d, '%s'));", oid, data) - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.RunQueryFromPod( - pod, - host, - testsUtils.AppDBName, - appUser, - appUserPass, - query, - env) + + _, err := testsUtils.RunExecOverForward(env, namespace, clusterName, testsUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) } -// insertRecordIntoTableWithDatabaseName insert an entry into a table -func insertRecordIntoTableWithDatabaseName( - env *testsUtils.TestingEnvironment, - namespace, - clusterName, - databaseName, - tableName string, - value int, -) { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - databaseName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - - _, err = conn.Exec(fmt.Sprintf("INSERT INTO %s VALUES (%d);", tableName, value)) - 
Expect(err).ToNot(HaveOccurred()) -} - // insertRecordIntoTable insert an entry into a table func insertRecordIntoTable(tableName string, value int, conn *sql.DB) { _, err := conn.Exec(fmt.Sprintf("INSERT INTO %s VALUES (%d)", tableName, value)) @@ -560,16 +465,21 @@ func insertRecordIntoTable(tableName string, value int, conn *sql.DB) { } // AssertDatabaseExists assert if database exists -func AssertDatabaseExists(namespace, podName, databaseName string, expectedValue bool) { +func AssertDatabaseExists(pod *corev1.Pod, databaseName string, expectedValue bool) { By(fmt.Sprintf("verifying if database %v exists", databaseName), func() { - pod := &corev1.Pod{} - commandTimeout := time.Second * 10 query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_database WHERE lower(datname) = lower('%v'));", databaseName) - err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, pod) - Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "postgres", "-tAc", query) + stdout, stderr, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + query) + if err != nil { + GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr) + } Expect(err).ToNot(HaveOccurred()) + if expectedValue { Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t")) } else { @@ -579,19 +489,21 @@ func AssertDatabaseExists(namespace, podName, databaseName string, expectedValue } // AssertUserExists assert if user exists -func AssertUserExists(namespace, podName, userName string, expectedValue bool) { +func AssertUserExists(pod *corev1.Pod, userName string, expectedValue bool) { By(fmt.Sprintf("verifying if user %v exists", userName), func() { - pod := &corev1.Pod{} - commandTimeout := time.Second * 10 query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_user WHERE lower(usename) = lower('%v'));", userName) - err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, pod) - Expect(err).ToNot(HaveOccurred()) - stdout, stderr, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "postgres", "-tAc", query) + stdout, stderr, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + query) if err != nil { GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr) } Expect(err).ToNot(HaveOccurred()) + if expectedValue { Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t")) } else { @@ -600,48 +512,21 @@ func AssertUserExists(namespace, podName, userName string, expectedValue bool) { }) } -// AssertDataExpectedCountWithDatabaseName verifies that an expected amount of rows exists on the table -func AssertDataExpectedCountWithDatabaseName(namespace, podName, databaseName string, - tableName string, expectedValue int, -) { - By(fmt.Sprintf("verifying test data on pod %v", podName), func() { - query := fmt.Sprintf("select count(*) from %v", tableName) - commandTimeout := time.Second * 10 - - Eventually(func() (int, error) { - // We keep getting the pod, since there could be a new pod with the same name - pod := &corev1.Pod{} - err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, pod) - if err != nil { - return 0, err - } - stdout, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - 
&commandTimeout, "psql", "-U", "postgres", databaseName, "-tAc", query) - if err != nil { - return 0, err - } - nRows, err := strconv.Atoi(strings.Trim(stdout, "\n")) - return nRows, err - }, 300).Should(BeEquivalentTo(expectedValue)) - }) -} - // AssertDataExpectedCount verifies that an expected amount of rows exists on the table func AssertDataExpectedCount( env *testsUtils.TestingEnvironment, - namespace, - clusterName, - tableName string, + tl TableLocator, expectedValue int, ) { - By(fmt.Sprintf("verifying test data in table %v", tableName), func() { + By(fmt.Sprintf("verifying test data in table %v (cluster %v, database %v, tablespace %v)", + tl.TableName, tl.ClusterName, tl.DatabaseName, tl.Tablespace), func() { row, err := testsUtils.RunQueryRowOverForward( env, - namespace, - clusterName, - testsUtils.AppDBName, + tl.Namespace, + tl.ClusterName, + tl.DatabaseName, apiv1.ApplicationUserSecretSuffix, - fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName), + fmt.Sprintf("SELECT COUNT(*) FROM %s", tl.TableName), ) Expect(err).ToNot(HaveOccurred()) @@ -653,24 +538,22 @@ func AssertDataExpectedCount( } // AssertLargeObjectValue verifies the presence of a Large Object given by its OID and data -func AssertLargeObjectValue(namespace, clusterName string, oid int, data string, pod *corev1.Pod) { +func AssertLargeObjectValue(namespace, clusterName string, oid int, data string) { By("verifying large object", func() { query := fmt.Sprintf("SELECT encode(lo_get(%v), 'escape');", oid) Eventually(func() (string, error) { // We keep getting the pod, since there could be a new pod with the same name - appUser, appUserPass, err := testsUtils.GetCredentials( - clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) - Expect(err).ToNot(HaveOccurred()) - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) - stdout, _, err := testsUtils.RunQueryFromPod( - pod, - host, + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + if err != nil { + return "", err + } + stdout, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, testsUtils.AppDBName, - appUser, - appUserPass, - query, - env) + query) if err != nil { return "", err } @@ -681,6 +564,7 @@ func AssertLargeObjectValue(namespace, clusterName string, oid int, data string, // AssertClusterStandbysAreStreaming verifies that all the standbys of a cluster have a wal-receiver running. func AssertClusterStandbysAreStreaming(namespace string, clusterName string, timeout int32) { + query := "SELECT count(*) FROM pg_stat_wal_receiver" Eventually(func() error { standbyPods, err := env.GetClusterReplicas(namespace, clusterName) if err != nil { @@ -688,9 +572,13 @@ func AssertClusterStandbysAreStreaming(namespace string, clusterName string, tim } for _, pod := range standbyPods.Items { - timeout := time.Second * 10 - out, _, err := env.EventuallyExecCommand(env.Ctx, pod, specs.PostgresContainerName, &timeout, - "psql", "-U", "postgres", "-tAc", "SELECT count(*) FROM pg_stat_wal_receiver") + out, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + query) if err != nil { return err } @@ -721,22 +609,25 @@ func AssertStandbysFollowPromotion(namespace string, clusterName string, timeout // and are following the promotion, we should find those // records on each of them. 
- commandTimeout := time.Second * 10 for i := 1; i < 4; i++ { podName := fmt.Sprintf("%v-%v", clusterName, i) podNamespacedName := types.NamespacedName{ Namespace: namespace, Name: podName, } + query := "SELECT count(*) > 0 FROM tps.tl WHERE timeline = '00000002'" Eventually(func() (string, error) { pod := &corev1.Pod{} if err := env.Client.Get(env.Ctx, podNamespacedName, pod); err != nil { return "", err } - out, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", - "SELECT count(*) > 0 FROM tps.tl "+ - "WHERE timeline = '00000002'") + out, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.AppDBName, + query) return strings.TrimSpace(out), err }, timeout).Should(BeEquivalentTo("t"), "Pod %v should have moved to timeline 2", podName) @@ -781,12 +672,18 @@ func AssertWritesResumedBeforeTimeout(namespace string, clusterName string, time Name: podName, } var switchTime float64 - commandTimeout := time.Second * 10 pod := &corev1.Pod{} err := env.Client.Get(env.Ctx, namespacedName, pod) Expect(err).ToNot(HaveOccurred()) - out, _, err := env.EventuallyExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", query) + out, _, err := env.EventuallyExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, testsUtils.AppDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) switchTime, err = strconv.ParseFloat(strings.TrimSpace(out), 64) if err != nil { @@ -827,7 +724,6 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { newPrimaryPod = newPrimary }) By(fmt.Sprintf("verifying write operation on the new primary pod: %s", newPrimaryPod), func() { - commandTimeout := time.Second * 10 namespacedName := types.NamespacedName{ Namespace: namespace, Name: newPrimaryPod, @@ -837,8 +733,15 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { Expect(err).ToNot(HaveOccurred()) // Expect write operation to succeed query := "CREATE TABLE IF NOT EXISTS assert_new_primary(var1 text);" - _, _, err = env.EventuallyExecCommand(env.Ctx, pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", query) + _, _, err = env.EventuallyExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, testsUtils.AppDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) }) } @@ -971,9 +874,13 @@ func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) { } Eventually(func() (string, error) { - commandTimeout := time.Second * 10 - stdOut, stdErr, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "postgres", "-tAc", "select pg_is_in_recovery();") + stdOut, stdErr, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + "select pg_is_in_recovery();") if err != nil { GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", stdOut, stdErr) } @@ -993,12 +900,15 @@ func AssertReplicaModeCluster( testTableName string, ) { var primaryReplicaCluster *corev1.Pod - commandTimeout := time.Second * 10 checkQuery := fmt.Sprintf("SELECT count(*) FROM %v", testTableName) - AssertDatabaseIsReady(namespace, srcClusterName, srcClusterDBName) - -
AssertCreateTestDataWithDatabaseName(env, namespace, srcClusterName, srcClusterDBName, testTableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: srcClusterDBName, + TableName: testTableName, + } + AssertCreateTestData(env, tableLocator) By("creating replica cluster", func() { replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample) @@ -1014,20 +924,42 @@ By("checking data have been copied correctly in replica cluster", func() { Eventually(func() (string, error) { - stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", srcClusterDBName, "-tAc", checkQuery) + stdOut, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, + testsUtils.DatabaseName(srcClusterDBName), + checkQuery) return strings.Trim(stdOut, "\n"), err }, 180, 10).Should(BeEquivalentTo("2")) }) By("writing some new data to the source cluster", func() { - insertRecordIntoTableWithDatabaseName(env, namespace, srcClusterName, srcClusterDBName, testTableName, 3) + forwardSource, connSource, err := testsUtils.ForwardPSQLConnection( + env, + namespace, + srcClusterName, + srcClusterDBName, + apiv1.ApplicationUserSecretSuffix, + ) + Expect(err).ToNot(HaveOccurred()) + defer func() { + _ = connSource.Close() + forwardSource.Close() + }() + insertRecordIntoTable(testTableName, 3, connSource) }) By("checking new data have been copied correctly in replica cluster", func() { Eventually(func() (string, error) { - stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", srcClusterDBName, "-tAc", checkQuery) + stdOut, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, + testsUtils.DatabaseName(srcClusterDBName), + checkQuery) return strings.Trim(stdOut, "\n"), err }, 180, 15).Should(BeEquivalentTo("3")) }) @@ -1036,8 +968,8 @@ func AssertReplicaModeCluster( // verify the replica database created followed the source database, rather than // default to the "app" db and user By("checking that in replica cluster there is no database app and user app", func() { - AssertDatabaseExists(namespace, primaryReplicaCluster.Name, "app", false) - AssertUserExists(namespace, primaryReplicaCluster.Name, "app", false) + AssertDatabaseExists(primaryReplicaCluster, "app", false) + AssertUserExists(primaryReplicaCluster, "app", false) }) } } @@ -1057,7 +989,6 @@ func AssertDetachReplicaModeCluster( testTableName string, ) { var primaryReplicaCluster *corev1.Pod - replicaCommandTimeout := time.Second * 10 var referenceTime time.Time By("taking the reference time before the detaching", func() { @@ -1104,8 +1035,13 @@ // Get primary from replica cluster primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName) g.Expect(err).ToNot(HaveOccurred()) - _, _, err = env.EventuallyExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &replicaCommandTimeout, "psql", "-U", "postgres", srcDatabaseName, "-tAc", query) + _, _, err = env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, testsUtils.DatabaseName(srcDatabaseName), + query,
+ ) g.Expect(err).ToNot(HaveOccurred()) }, 300, 15).Should(Succeed()) }) @@ -1113,17 +1049,30 @@ func AssertDetachReplicaModeCluster( By("verifying the replica database doesn't exist in the replica cluster", func() { // Application database configuration is skipped for replica clusters, // so we expect these to not be present - AssertDatabaseExists(namespace, primaryReplicaCluster.Name, replicaDatabaseName, false) - AssertUserExists(namespace, primaryReplicaCluster.Name, replicaUserName, false) + AssertDatabaseExists(primaryReplicaCluster, replicaDatabaseName, false) + AssertUserExists(primaryReplicaCluster, replicaUserName, false) }) By("writing some new data to the source cluster", func() { - AssertCreateTestDataWithDatabaseName(env, namespace, srcClusterName, srcDatabaseName, testTableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: srcDatabaseName, + TableName: testTableName, + } + AssertCreateTestData(env, tableLocator) }) By("verifying that replica cluster was not modified", func() { - outTables, stdErr, err := env.EventuallyExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &replicaCommandTimeout, "psql", "-U", "postgres", srcDatabaseName, "-tAc", "\\dt") + outTables, stdErr, err := env.EventuallyExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, testsUtils.DatabaseName(srcDatabaseName), + "\\dt", + RetryTimeout, + PollingTime, + ) if err != nil { GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", outTables, stdErr) } @@ -1253,16 +1202,8 @@ func AssertFastFailOver( ", PRIMARY KEY (id)" + ")" - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - _, _, err = env.ExecCommandWithPsqlClient( - namespace, - clusterName, - primaryPod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - query, - ) + _, err = testsUtils.RunExecOverForward(env, namespace, clusterName, testsUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) @@ -1280,25 +1221,27 @@ func AssertFastFailOver( " -f " + webTestJob) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 - timeout := 60 primaryPodName := clusterName + "-1" primaryPodNamespacedName := types.NamespacedName{ Namespace: namespace, Name: primaryPodName, } + query := "SELECT count(*) > 0 FROM tps.tl" Eventually(func() (string, error) { primaryPod := &corev1.Pod{} - err = env.Client.Get(env.Ctx, primaryPodNamespacedName, primaryPod) - if err != nil { + if err = env.Client.Get(env.Ctx, primaryPodNamespacedName, primaryPod); err != nil { return "", err } - out, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", - "SELECT count(*) > 0 FROM tps.tl") + out, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + testsUtils.AppDBName, + query) return strings.TrimSpace(out), err - }, timeout).Should(BeEquivalentTo("t")) + }, RetryTimeout).Should(BeEquivalentTo("t")) }) By("deleting the primary", func() { @@ -1361,17 +1304,17 @@ func AssertCreationOfTestDataForTargetDB( Expect(err).ToNot(HaveOccurred()) // Create database - commandTimeout := time.Second * 10 createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER %v", targetDBName, appUser) - _, _, err = env.ExecCommand( - env.Ctx, - *currentPrimary, - 
specs.PostgresContainerName, - &commandTimeout, - "psql", "-U", "postgres", "-tAc", createDBQuery, - ) + _, _, err = env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: currentPrimary.Namespace, + PodName: currentPrimary.Name, + }, + testsUtils.PostgresDBName, + createDBQuery) Expect(err).ToNot(HaveOccurred()) + + // Open a connection to the newly created database forward, conn, err := testsUtils.ForwardPSQLConnection( env, namespace, clusterName, targetDBName, apiv1.ApplicationUserSecretSuffix, ) Expect(err).ToNot(HaveOccurred()) + defer func() { + _ = conn.Close() + forward.Close() + }() // Create table on target database @@ -1390,9 +1337,6 @@ func AssertCreationOfTestDataForTargetDB( grantRoleQuery := "GRANT SELECT ON all tables in schema public to pg_monitor;" _, err = conn.Exec(grantRoleQuery) Expect(err).ToNot(HaveOccurred()) - - // Close the connection and forward - forward.Close() }) } @@ -1618,7 +1562,13 @@ func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, // Insert new data in the source cluster insertRecordIntoTable(tableName, 3, connSource) AssertArchiveWalOnMinio(namespace, sourceClusterName, sourceClusterName) - AssertDataExpectedCount(env, namespace, sourceClusterName, tableName, 3) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: sourceClusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 3) cluster, err := env.GetCluster(namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) @@ -1640,7 +1590,13 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) // Test data should be present on restored primary - AssertDataExpectedCount(env, namespace, restoredClusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: restoredClusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) By("Ensuring the restored cluster is on timeline 2", func() { @@ -1712,7 +1668,13 @@ func AssertClusterRestore(namespace, restoreClusterFile, tableName string) { // Test data should be present on restored primary primary := restoredClusterName + "-1" - AssertDataExpectedCount(env, namespace, restoredClusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: restoredClusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) // Restored primary should be on timeline 2 out, _, err := env.ExecQueryInInstancePod( testsUtils.PodLocator{ Namespace: namespace, PodName: primary, }, - testsUtils.DatabaseName("app"), + testsUtils.AppDBName, "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)") Expect(strings.Trim(out, "\n"), err).To(Equal("00000002")) @@ -1892,7 +1854,13 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta By(fmt.Sprintf("after restore, the 3rd entry should not exist in table '%v'", tableName), func() { // Only 2 entries should be present - AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName:
tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) // Gather credentials @@ -1956,7 +1924,13 @@ func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn str By(fmt.Sprintf("after restore, the 3rd entry should not exist in table '%v'", tableName), func() { // Only 2 entries should be present - AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) } @@ -2010,7 +1984,7 @@ func switchWalAndGetLatestArchive(namespace, podName string) string { Namespace: namespace, PodName: podName, }, - testsUtils.DatabaseName("postgres"), + testsUtils.PostgresDBName, "CHECKPOINT;") Expect(err).ToNot(HaveOccurred()) @@ -2019,7 +1993,7 @@ Namespace: namespace, PodName: podName, }, - testsUtils.DatabaseName("postgres"), + testsUtils.PostgresDBName, "SELECT pg_walfile_name(pg_switch_wal());") Expect(err).ToNot(HaveOccurred()) @@ -2051,7 +2025,13 @@ func prepareClusterForPITROnMinio( }) // Write a table and insert 2 entries on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableNamePitr) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableNamePitr, + } + AssertCreateTestData(env, tableLocator) By("getting currentTimestamp", func() { ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env) @@ -2068,6 +2048,7 @@ func prepareClusterForPITROnMinio( apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -2102,7 +2083,13 @@ func prepareClusterForPITROnAzureBlob( }) // Write a table and insert 2 entries on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableNamePitr) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableNamePitr, + } + AssertCreateTestData(env, tableLocator) By("getting currentTimestamp", func() { ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env) @@ -2119,6 +2106,7 @@ apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -2164,7 +2152,13 @@ func prepareClusterBackupOnAzurite( // Setting up Azurite and az cli along with Postgresql cluster prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile) // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnAzurite(namespace, clusterName) By("backing up a cluster and verifying it exists on azurite", func() { @@ -2204,7 +2198,13 @@ func prepareClusterForPITROnAzurite( }) // Write a table and insert 2 entries on the "app" database - AssertCreateTestData(env, namespace, clusterName, "for_restore") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: "for_restore", + } + AssertCreateTestData(env, tableLocator) By("getting currentTimestamp", func() { ts, err :=
testsUtils.GetCurrentTimestamp(namespace, clusterName, env) @@ -2221,6 +2221,7 @@ func prepareClusterForPITROnAzurite( apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -2660,10 +2661,11 @@ func DeleteTableUsingPgBouncerService( Expect(err).ToNot(HaveOccurred()) AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, pod, 180, env) - _, _, err = testsUtils.RunQueryFromPod( - pod, poolerService, "app", appUser, generatedAppUserPassword, - "DROP TABLE table1", - env) + connectionTimeout := time.Second * 10 + dsn := testsUtils.CreateDSN(poolerService, appUser, testsUtils.AppDBName, generatedAppUserPassword, + testsUtils.Require, 5432) + _, _, err = env.EventuallyExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &connectionTimeout, + "psql", dsn, "-tAc", "DROP TABLE table1") Expect(err).ToNot(HaveOccurred()) } @@ -2873,16 +2875,22 @@ func DeleteResourcesFromFile(namespace, sampleFilePath string) error { } // Assert in the giving cluster, all the postgres db has no pending restart -func AssertPostgresNoPendingRestart(namespace, clusterName string, cmdTimeout time.Duration, timeout int) { +func AssertPostgresNoPendingRestart(namespace, clusterName string, timeout int) { By("waiting for all pods have no pending restart", func() { podList, err := env.GetClusterPodList(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) + query := "SELECT EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)" // Check that the new parameter has been modified in every pod Eventually(func() (bool, error) { noPendingRestart := true for _, pod := range podList.Items { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &cmdTimeout, - "psql", "-U", "postgres", "-tAc", "SELECT EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)") + stdout, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + query) if err != nil { return false, nil } @@ -3029,8 +3037,13 @@ func AssertReplicationSlotsOnPod( "AND temporary = 'f' AND slot_type = 'physical')", slot, isActiveOnPrimary) } Eventually(func() (string, error) { - stdout, _, err := testsUtils.RunQueryFromPod(&pod, testsUtils.PGLocalSocketDir, - "app", "postgres", "''", query, env) + stdout, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + query) return strings.TrimSpace(stdout), err }, 300).Should(BeEquivalentTo("t"), func() string { diff --git a/tests/e2e/backup_restore_test.go b/tests/e2e/backup_restore_test.go index 8ba08452e4..a452eaee94 100644 --- a/tests/e2e/backup_restore_test.go +++ b/tests/e2e/backup_restore_test.go @@ -123,7 +123,13 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBSecret, testTableName) // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnMinio(namespace, clusterName, clusterName) latestTar := minioPath(clusterName, "data.tar") @@ -269,7 +275,13 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { 
AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, targetClusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: targetClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) latestTar := minioPath(targetClusterName, "data.tar") @@ -313,7 +325,13 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, targetClusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: targetClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) latestTar := minioPath(targetClusterName, "data.tar") @@ -366,7 +384,13 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBSecret, testTableName) // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, customClusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: customClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnMinio(namespace, customClusterName, clusterServerName) @@ -539,7 +563,13 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { // be there It("backs up and restore a cluster", func() { // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) By("uploading a backup", func() { // We create a backup @@ -821,7 +851,13 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label Expect(err).ToNot(HaveOccurred()) // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnMinio(namespace, clusterName, clusterName) @@ -854,7 +890,13 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label AssertClusterRestore(namespace, externalClusterFileMinio, tableName) // verify test data on restored external cluster - AssertDataExpectedCount(env, namespace, externalClusterName, tableName, 2) + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: externalClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) By("deleting the restored cluster", func() { err = DeleteResourcesFromFile(namespace, externalClusterFileMinio) @@ -882,6 +924,7 @@ var _ = 
Describe("Clusters Recovery From Barman Object Store", Label(tests.Label apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -919,7 +962,13 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label It("restore cluster from barman object using replica option in spec", func() { // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, "for_restore_repl") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: "for_restore_repl", + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnMinio(namespace, clusterName, clusterName) @@ -974,7 +1023,13 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) By("backing up a cluster and verifying it exists on azure blob storage", func() { @@ -1060,7 +1115,13 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) // Create a WAL on the primary and check if it arrives in the // Azure Blob Storage within a short time diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index 57704d213a..6019086aa3 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -20,7 +20,6 @@ import ( "fmt" "os" "strings" - "time" "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/postgres/version" @@ -28,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/types" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" "github.com/cloudnative-pg/cloudnative-pg/tests" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" @@ -71,15 +69,25 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env) - AssertCreateTestData(env, namespace, sourceClusterName, tableName) - primaryPod, err := env.GetClusterPrimary(namespace, sourceClusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestDataLargeObject(namespace, sourceClusterName, oid, data, primaryPod) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: sourceClusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + AssertCreateTestDataLargeObject(namespace, sourceClusterName, oid, 
data) importedClusterName = "cluster-pgdump-large-object" cluster := AssertClusterImport(namespace, importedClusterName, sourceClusterName, "app") - AssertDataExpectedCount(env, namespace, importedClusterName, tableName, 2) - AssertLargeObjectValue(namespace, importedClusterName, oid, data, primaryPod) + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: importedClusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) + AssertLargeObjectValue(namespace, importedClusterName, oid, data) By("deleting the imported database", func() { Expect(testsUtils.DeleteObject(env, cluster)).To(Succeed()) }) @@ -98,7 +106,13 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin importedClusterName = "cluster-pgdump" AssertClusterImport(namespace, importedClusterName, sourceClusterName, "app") - AssertDataExpectedCount(env, namespace, importedClusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: importedClusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) assertTableAndDataOnImportedCluster(namespace, tableName, importedClusterName) }) @@ -199,7 +213,6 @@ func assertCreateTableWithDataOnSourceCluster( "and grant read only to app user", func() { pod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 query := fmt.Sprintf( "DROP USER IF EXISTS micro; "+ @@ -209,12 +222,13 @@ func assertCreateTableWithDataOnSourceCluster( "GRANT SELECT ON %[1]v TO app;", tableName) - _, _, err = env.ExecCommand( - env.Ctx, - *pod, - specs.PostgresContainerName, - &commandTimeout, - "psql", "-U", "postgres", "app", "-tAc", query) + _, _, err = env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.AppDBName, + query) Expect(err).ToNot(HaveOccurred()) }) } @@ -235,29 +249,19 @@ func assertTableAndDataOnImportedCluster( tableName, testsUtils.AppUser, ) - out, _, err := env.ExecCommandWithPsqlClient( - namespace, - importedClusterName, - pod, - apiv1.ApplicationUserSecretSuffix, + out, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, testsUtils.AppDBName, - queryImported, - ) + queryImported) Expect(err).ToNot(HaveOccurred()) Expect(strings.Contains(out, tableName), err).Should(BeTrue()) }) By("verifying the user named 'micro' on source is not in imported database", func() { - outUser, _, err := env.ExecCommandWithPsqlClient( - namespace, - importedClusterName, - pod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - "\\du", - ) - Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(outUser, "micro"), err).Should(BeFalse()) + AssertUserExists(pod, "micro", false) }) }) } @@ -280,18 +284,18 @@ func assertImportRenamesSelectedDatabase( AssertCreateCluster(namespace, clusterName, sampleFile, env) primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 By("creating multiple dbs on source and set ownership to app", func() { for _, db := range dbList { // Create database createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER app", db) - _, _, err = env.ExecCommand( - env.Ctx, - *primaryPod, - specs.PostgresContainerName, - &commandTimeout, - "psql", "-U", "postgres", "-tAc", createDBQuery) + _, _, err = 
env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + testsUtils.PostgresDBName, + createDBQuery) Expect(err).ToNot(HaveOccurred()) } }) @@ -299,14 +303,8 @@ func assertImportRenamesSelectedDatabase( By(fmt.Sprintf("creating table '%s' and inserting records on selected db %v", tableName, dbToImport), func() { // create a table with two records query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", tableName) - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - primaryPod, - apiv1.ApplicationUserSecretSuffix, - dbToImport, - query, - ) + _, err = testsUtils.RunExecOverForward(env, namespace, clusterName, dbToImport, + apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) @@ -320,22 +318,20 @@ AssertClusterStandbysAreStreaming(namespace, importedClusterName, 120) }) - AssertDataExpectedCount(env, namespace, importedClusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: importedClusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) By("verifying that only 'app' DB exists in the imported cluster", func() { importedPrimaryPod, err := env.GetClusterPrimary(namespace, importedClusterName) Expect(err).ToNot(HaveOccurred()) - out, _, err := env.ExecCommandWithPsqlClient( - namespace, - importedClusterName, - importedPrimaryPod, - apiv1.ApplicationUserSecretSuffix, - testsUtils.AppDBName, - "\l", - ) - Expect(err).ToNot(HaveOccurred(), err) - Expect(strings.Contains(out, "db2"), err).Should(BeFalse()) - Expect(strings.Contains(out, "app"), err).Should(BeTrue()) + + AssertDatabaseExists(importedPrimaryPod, "db2", false) + AssertDatabaseExists(importedPrimaryPod, "app", true) }) By("cleaning up the clusters", func() { diff --git a/tests/e2e/cluster_monolithic_test.go b/tests/e2e/cluster_monolithic_test.go index 89281f09b6..34d1f3de9e 100644 --- a/tests/e2e/cluster_monolithic_test.go +++ b/tests/e2e/cluster_monolithic_test.go @@ -84,6 +84,7 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD apiv1.SuperUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) diff --git a/tests/e2e/cluster_setup_test.go b/tests/e2e/cluster_setup_test.go index 3515d9c3d0..9f2d124712 100644 --- a/tests/e2e/cluster_setup_test.go +++ b/tests/e2e/cluster_setup_test.go @@ -131,6 +131,7 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).NotTo(HaveOccurred()) diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go index 9c0346ea49..74800a23ab 100644 --- a/tests/e2e/configuration_update_test.go +++ b/tests/e2e/configuration_update_test.go @@ -31,7 +31,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - devUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" @@ -117,8 +116,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada for idx := range podList.Items { pod := podList.Items[idx] Eventually(func(g Gomega) int { - stdout, _, err := env.ExecCommand(env.Ctx, pod,
specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show autovacuum_max_workers") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show autovacuum_max_workers") g.Expect(err).ToNot(HaveOccurred()) value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) @@ -150,15 +154,20 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("apply configuration update", func() { // Update the configuration updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, 300) + AssertPostgresNoPendingRestart(namespace, clusterName, 300) }) By("verify that work_mem result as expected", func() { // Check that the parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show work_mem") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(8)) @@ -173,14 +182,9 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada podList, err := env.GetClusterPodList(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - By("verify that connection should failed by default", func() { - _, _, err := devUtils.ExecCommand( - env.Ctx, - env.Interface, - env.RestClientConfig, - podList.Items[0], - specs.PostgresContainerName, - &commandTimeout, + By("verify that connections fail by default", func() { + _, _, err := env.ExecCommand(env.Ctx, podList.Items[0], + specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "-h", endpointName, "-tAc", "select 1", ) Expect(err).To(HaveOccurred()) @@ -189,16 +193,21 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("apply configuration update", func() { // Update the configuration updateClusterPostgresPgHBA(namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, 300) + AssertPostgresNoPendingRestart(namespace, clusterName, 300) }) - By("verify that connection should success after pg_hba_reload", func() { + By("verify that connections succeed after pg_hba_reload", func() { // The new pg_hba rule should be present in every pod + query := "select count(*) from pg_hba_file_rules where type = 'host' and auth_method = 'trust'" for _, pod := range podList.Items { Eventually(func() (string, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select count(*) from pg_hba_file_rules where type = 'host' and auth_method = 'trust'") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + query) return strings.Trim(stdout, "\n"), err }, timeout).Should(BeEquivalentTo("1")) } @@ -227,15 +236,20 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada // Update the configuration postgresParams["shared_buffers"] = "256MB" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, 
timeout) + AssertPostgresNoPendingRestart(namespace, clusterName, timeout) }) By("verify that shared_buffers setting changed", func() { // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show shared_buffers") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show shared_buffers") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(256), @@ -265,22 +279,32 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada postgresParams["max_replication_slots"] = "16" postgresParams["maintenance_work_mem"] = "128MB" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, timeout) + AssertPostgresNoPendingRestart(namespace, clusterName, timeout) }) By("verify that both parameters have been modified in each pod", func() { // Check that both parameters have been modified in each pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_replication_slots") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show max_replication_slots") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(16)) Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show maintenance_work_mem") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show maintenance_work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(128)) @@ -325,15 +349,20 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada delete(postgresParams, "port") postgresParams["max_connections"] = "105" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, timeout) + AssertPostgresNoPendingRestart(namespace, clusterName, timeout) }) By("verify that max_connections has been decreased in every pod", func() { // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_connections") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(105), @@ -366,15 +395,20 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada // Update the configuration delete(postgresParams, "max_connections") 
updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, timeout) + AssertPostgresNoPendingRestart(namespace, clusterName, timeout) }) By("verify that the max_connections has been set to default in every pod", func() { // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_connections") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(100), @@ -390,53 +424,45 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada }) }) + // pg_ident_file_mappings is available from v15 only It("09. reloading Pg when pg_ident rules are modified", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - stdout, _, err := env.ExecCommand(env.Ctx, podList.Items[0], specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select count(1) from pg_views where viewname = 'pg_ident_file_mappings';") - psqlHasIdentView := err == nil && strings.Trim(stdout, "\n") == "1" + if env.PostgresVersion > 14 { + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + query := "select count(1) from pg_ident_file_mappings;" - By("check that there is only one entry in pg_ident_file_mappings", func() { - for _, pod := range podList.Items { - if psqlHasIdentView { - Eventually(func() (string, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select count(1) from pg_ident_file_mappings;") - return strings.Trim(stdout, "\n"), err - }, timeout).Should(BeEquivalentTo("1")) - } - } - }) + By("check that there is only one entry in pg_ident_file_mappings", func() { + Eventually(func() (string, error) { + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + utils.PostgresDBName, + query) + return strings.Trim(stdout, "\n"), err + }, timeout).Should(BeEquivalentTo("1")) + }) - By("apply configuration update", func() { - // Update the configuration - updateClusterPostgresPgIdent(namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, commandTimeout, 300) - }) + By("apply configuration update", func() { + // Update the configuration + updateClusterPostgresPgIdent(namespace) + AssertPostgresNoPendingRestart(namespace, clusterName, 300) + }) - By("verify that there are now two entries in pg_ident_file_mappings", func() { - for _, pod := range podList.Items { - if psqlHasIdentView { - Eventually(func() (string, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select count(1) from pg_ident_file_mappings;") - return strings.Trim(stdout, "\n"), err - }, timeout).Should(BeEquivalentTo("2")) - } else { - // Can't check for the actual content of the file, but let's check that we can reload the config - Eventually(func() (string, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, 
specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "select count(1) where pg_reload_conf();") - return strings.Trim(stdout, "\n"), err - }, timeout).Should(BeEquivalentTo("1")) - } - } - }) + By("verify that there are now two entries in pg_ident_file_mappings", func() { + Eventually(func() (string, error) { + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + utils.PostgresDBName, + query) + return strings.Trim(stdout, "\n"), err + }, timeout).Should(BeEquivalentTo("2")) + }) + } }) }) @@ -532,11 +558,15 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La podList, err := env.GetClusterPodList(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_connections") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, 180).Should(BeEquivalentTo(newMaxConnectionsValue), @@ -552,7 +582,6 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La }) By("verifying that old primary was actually restarted", func() { - commandTimeout := time.Second * 10 pod := corev1.Pod{} err := env.Client.Get(env.Ctx, types.NamespacedName{ Namespace: namespace, @@ -561,9 +590,16 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La Expect(err).ToNot(HaveOccurred()) // take pg postmaster start time - stdout, _, cmdErr := env.EventuallyExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", - "select to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');") + query := "select to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');" + stdout, _, cmdErr := env.EventuallyExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, utils.PostgresDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(cmdErr).ToNot(HaveOccurred()) newStartTime, err := cnpgTypes.ParseTargetTime(nil, strings.Trim(stdout, "\n")) @@ -576,7 +612,6 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La It("work_mem config change should not require a restart", func() { const expectedNewValueForWorkMem = "10MB" - commandTimeout := time.Second * 10 By("updating work mem ", func() { cluster, err := env.GetCluster(namespace, clusterName) @@ -595,14 +630,19 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La // Check that the parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show work_mem") + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + utils.PostgresDBName, + "show work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, 160).Should(BeEquivalentTo(10)) } }) - AssertPostgresNoPendingRestart(namespace, clusterName, 
commandTimeout, 120) + AssertPostgresNoPendingRestart(namespace, clusterName, 120) }) }) }) diff --git a/tests/e2e/declarative_hibernation_test.go b/tests/e2e/declarative_hibernation_test.go index 3b88a044a9..7f30bf1070 100644 --- a/tests/e2e/declarative_hibernation_test.go +++ b/tests/e2e/declarative_hibernation_test.go @@ -24,6 +24,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -55,7 +56,13 @@ var _ = Describe("Cluster declarative hibernation", func() { By("creating a new cluster", func() { AssertCreateCluster(namespace, clusterName, sampleFileCluster, env) // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) }) By("hibernating the new cluster", func() { @@ -114,7 +121,13 @@ var _ = Describe("Cluster declarative hibernation", func() { }) By("verifying the data has been preserved", func() { - AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) diff --git a/tests/e2e/disk_space_test.go b/tests/e2e/disk_space_test.go index 84986cd3c1..d838034d9d 100644 --- a/tests/e2e/disk_space_test.go +++ b/tests/e2e/disk_space_test.go @@ -73,22 +73,22 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { By("writing something when no space is available", func() { // Create the table used by the scenario query := "CREATE TABLE diskspace AS SELECT generate_series(1, 1000000);" - _, _, err := env.ExecCommandWithPsqlClient( - namespace, - clusterName, - primaryPod, - apiv1.ApplicationUserSecretSuffix, + _, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, testsUtils.AppDBName, - query, - ) + query) Expect(err).To(HaveOccurred()) + query = "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT" _, _, err = env.ExecQueryInInstancePod( testsUtils.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.DatabaseName("postgres"), + testsUtils.PostgresDBName, query) Expect(err).To(HaveOccurred()) }) @@ -171,7 +171,7 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.DatabaseName("postgres"), + testsUtils.PostgresDBName, query) Expect(err).NotTo(HaveOccurred()) }) diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go index 19e183db2d..b018065c1b 100644 --- a/tests/e2e/drain_node_test.go +++ b/tests/e2e/drain_node_test.go @@ -118,7 +118,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Load test data oldPrimary := clusterName + "-1" - AssertCreateTestData(env, namespace, clusterName, "test") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) // We create a 
mapping between the pod names and the UIDs of // their volumes. We do not expect the UIDs to change. @@ -178,10 +184,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La } }) - // Expect the (previously created) test data to be available - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertDataExpectedCount(env, tableLocator, 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 120) }) @@ -230,7 +233,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Load test data oldPrimary := clusterName + "-1" - AssertCreateTestData(env, namespace, clusterName, "test") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) // We create a mapping between the pod names and the UIDs of // their volumes. We do not expect the UIDs to change. @@ -294,10 +303,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La } }) - // Expect the (previously created) test data to be available - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertDataExpectedCount(env, tableLocator, 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 120) }) }) @@ -360,7 +366,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) // Load test data - AssertCreateTestData(env, namespace, clusterName, "test") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) // We uncordon a cordoned node. New pods can go there. 
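Note: the TableLocator threaded through AssertCreateTestData and AssertDataExpectedCount in these hunks bundles the coordinates of a test table. Its definition lives in the e2e suite and is not shown in this diff; a plausible sketch, with field names taken from the call sites, is:

	// TableLocator identifies a table inside one database of a Cluster.
	type TableLocator struct {
		Namespace    string       // namespace hosting the Cluster
		ClusterName  string       // name of the Cluster resource
		DatabaseName DatabaseName // e.g. AppDBName ("app")
		TableName    string
		Tablespace   string // optional; here it only surfaces in log messages
	}
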
By("uncordon node for pod failover", func() { @@ -396,10 +408,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }, timeout).Should(Succeed()) }) - // Expect the (previously created) test data to be available - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertDataExpectedCount(env, tableLocator, 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 120) err = nodes.UncordonAllNodes(env) Expect(err).ToNot(HaveOccurred()) @@ -433,9 +442,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) // Load test data - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertCreateTestData(env, namespace, clusterName, "test") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) // Drain the node containing the primary pod and store the list of running pods _ = nodes.DrainPrimaryNode(namespace, clusterName, @@ -458,7 +471,8 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La Expect(err).ToNot(HaveOccurred()) }) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertDataExpectedCount(env, tableLocator, 2) }) }) diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go index b3f6731aa1..a59695bad6 100644 --- a/tests/e2e/failover_test.go +++ b/tests/e2e/failover_test.go @@ -79,9 +79,15 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // Get the walreceiver pid query := "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walreceiver'" - out, _, err := env.EventuallyExecCommand( - env.Ctx, *pausedPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", query) + out, _, err := env.EventuallyExecQueryInInstancePod( + utils.PodLocator{ + Namespace: pausedPod.Namespace, + PodName: pausedPod.Name, + }, utils.PostgresDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) pid = strings.Trim(out, "\n") @@ -94,9 +100,15 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // We don't want to wait for the replication timeout. 
query = fmt.Sprintf("SELECT pg_terminate_backend(pid) FROM pg_stat_replication "+ "WHERE application_name = '%v'", pausedReplica) - _, _, err = env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", query) + _, _, err = env.EventuallyExecQueryInInstancePod( + utils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, utils.PostgresDBName, + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) // Expect the primary to have lost connection with the stopped standby @@ -114,28 +126,46 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { Expect(err).ToNot(HaveOccurred()) // Gather the current WAL LSN - initialLSN, _, err := env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "SELECT pg_current_wal_lsn()") + initialLSN, _, err := env.EventuallyExecQueryInInstancePod( + utils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, utils.PostgresDBName, + "SELECT pg_current_wal_lsn()", + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) // Execute a checkpoint - _, _, err = env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "CHECKPOINT") + _, _, err = env.EventuallyExecQueryInInstancePod( + utils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, utils.PostgresDBName, + "CHECKPOINT", + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) + query := fmt.Sprintf("SELECT true FROM pg_stat_replication "+ + "WHERE application_name = '%v' AND replay_lsn > '%v'", + targetPrimary, strings.Trim(initialLSN, "\n")) // The replay_lsn of the targetPrimary should be ahead // of the one before the checkpoint Eventually(func() (string, error) { primaryPod, err = env.GetPod(namespace, currentPrimary) Expect(err).ToNot(HaveOccurred()) - query := fmt.Sprintf("SELECT true FROM pg_stat_replication "+ - "WHERE application_name = '%v' AND replay_lsn > '%v'", - targetPrimary, strings.Trim(initialLSN, "\n")) - out, _, err := env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", query) + out, _, err := env.EventuallyExecQueryInInstancePod( + utils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, utils.PostgresDBName, + query, + RetryTimeout, + PollingTime, + ) return strings.TrimSpace(out), err }, RetryTimeout).Should(BeEquivalentTo("t")) }) diff --git a/tests/e2e/fastswitchover_test.go b/tests/e2e/fastswitchover_test.go index 86aac345ca..d7a45efd47 100644 --- a/tests/e2e/fastswitchover_test.go +++ b/tests/e2e/fastswitchover_test.go @@ -19,14 +19,12 @@ package e2e import ( "fmt" "strings" - "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" @@ -137,17 +135,8 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe ", PRIMARY KEY (id)" + ")" - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - _, _, err = env.ExecCommandWithPsqlClient( - namespace, - clusterName, - primaryPod, - 
apiv1.ApplicationUserSecretSuffix, - utils.AppDBName, - query, - ) + _, err := utils.RunExecOverForward(env, namespace, clusterName, utils.AppDBName, + apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) @@ -164,23 +153,26 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe " -f " + webTestJob) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 - timeout := 60 primaryPodNamespacedName := types.NamespacedName{ Namespace: namespace, Name: oldPrimary, } + query := "SELECT count(*) > 0 FROM tps.tl" Eventually(func() (string, error) { primaryPod := &corev1.Pod{} err := env.Client.Get(env.Ctx, primaryPodNamespacedName, primaryPod) if err != nil { return "", err } - out, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", - "SELECT count(*) > 0 FROM tps.tl") + out, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + utils.AppDBName, + query) return strings.TrimSpace(out), err - }, timeout).Should(BeEquivalentTo("t")) + }, RetryTimeout).Should(BeEquivalentTo("t")) }) By("setting the TargetPrimary to node2 to trigger a switchover", func() { diff --git a/tests/e2e/fencing_test.go b/tests/e2e/fencing_test.go index d160cbb583..4644a4b3ab 100644 --- a/tests/e2e/fencing_test.go +++ b/tests/e2e/fencing_test.go @@ -70,7 +70,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { } checkInstanceIsStreaming := func(instanceName, namespace string) { - timeout := time.Second * 10 + query := "SELECT count(*) FROM pg_stat_wal_receiver" Eventually(func() (int, error) { err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: instanceName}, @@ -78,8 +78,13 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { if err != nil { return 0, err } - out, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &timeout, - "psql", "-U", "postgres", "-tAc", "SELECT count(*) FROM pg_stat_wal_receiver") + out, _, err := env.ExecQueryInInstancePod( + testUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testUtils.PostgresDBName, + query) if err != nil { return 0, err } diff --git a/tests/e2e/hibernation_test.go b/tests/e2e/hibernation_test.go index 58f68418ee..ce4f38ea64 100644 --- a/tests/e2e/hibernation_test.go +++ b/tests/e2e/hibernation_test.go @@ -225,7 +225,13 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu var beforeHibernationPgDataPvcUID types.UID // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) clusterManifest, currentPrimary := getPrimaryAndClusterManifest(namespace, clusterName) By("collecting pgWal pvc details of current primary", func() { @@ -289,7 +295,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) // Test data should be present after hibernation off - AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) + AssertDataExpectedCount(env, tableLocator, 2) } When("cluster setup with PG-WAL volume", func() { @@ -316,7 +322,13 @@ var _ = Describe("Cluster Hibernation 
with plugin", Label(tests.LabelPlugin), fu Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileClusterWithOutPGWalVolume, env) // Write a table and some data on the "app" database - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) clusterManifest, currentPrimary := getPrimaryAndClusterManifest(namespace, clusterName) @@ -363,7 +375,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) // Test data should be present after hibernation off - AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) + AssertDataExpectedCount(env, tableLocator, 2) }) }) When("cluster hibernation after switchover", func() { diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index 0f77768921..534867203d 100644 --- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -94,7 +94,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPod, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, "\\du") g.Expect(err).ToNot(HaveOccurred()) if shouldExists { @@ -121,7 +121,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPod, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) if err != nil { return []string{ERROR} @@ -163,7 +163,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPodInfo.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, q) Expect(err).ToNot(HaveOccurred()) Expect(stdout).To(Equal("t\n")) @@ -195,7 +195,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPodInfo.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) Expect(err).ToNot(HaveOccurred()) Expect(stdout).To(Equal("t\n")) @@ -274,7 +274,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPod.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) if err != nil { return "" @@ -348,7 +348,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPodInfo.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) if err != nil { return "" @@ -390,7 +390,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPodInfo.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) if err != nil { return ERROR @@ -410,7 +410,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPodInfo.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) if err != nil { return ERROR @@ -556,7 +556,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPod.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) Expect(err).ToNot(HaveOccurred()) }) @@ -603,7 +603,7 @@ var _ = Describe("Managed roles 
tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPodInfo.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) if err != nil { return ERROR @@ -623,7 +623,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Namespace: namespace, PodName: primaryPodInfo.Name, }, - utils.DatabaseName("postgres"), + utils.PostgresDBName, query) if err != nil { return ERROR diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go index 9907f59266..e057220f35 100644 --- a/tests/e2e/metrics_test.go +++ b/tests/e2e/metrics_test.go @@ -298,6 +298,7 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index d2088c7a68..5f23913135 100644 --- a/tests/e2e/operator_unavailable_test.go +++ b/tests/e2e/operator_unavailable_test.go @@ -26,6 +26,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -59,7 +60,13 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te // Load test data currentPrimary := clusterName + "-1" - AssertCreateTestData(env, namespace, clusterName, "test") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) By("scaling down operator replicas to zero", func() { err := env.ScaleOperatorDeployment(0) @@ -120,10 +127,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te return specs.IsPodStandby(pod), err }, timeout).Should(BeTrue()) }) - // Expect the test data previously created to be available - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertDataExpectedCount(env, tableLocator, 2) }) }) @@ -140,7 +144,13 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te // Load test data currentPrimary := clusterName + "-1" - AssertCreateTestData(env, namespace, clusterName, "test") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: "test", + } + AssertCreateTestData(env, tableLocator) operatorNamespace, err := env.GetOperatorNamespaceName() Expect(err).ToNot(HaveOccurred()) @@ -211,10 +221,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te return specs.IsPodStandby(pod), err }, timeout).Should(BeTrue()) }) - // Expect the test data previously created to be available - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2) + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) diff --git a/tests/e2e/pg_basebackup_test.go b/tests/e2e/pg_basebackup_test.go index 59cb5542ad..6ec697fd4a 100644 --- a/tests/e2e/pg_basebackup_test.go +++ b/tests/e2e/pg_basebackup_test.go @@ -52,7 +52,13 @@ var _ = Describe("Bootstrap with pg_basebackup", 
Label(tests.LabelRecovery), fun srcClusterName, err = env.GetResourceNameFromYAML(srcCluster) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, srcClusterName, srcCluster, env) - AssertCreateTestData(env, namespace, srcClusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: utils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) }) It("using basic authentication", func() { @@ -87,7 +93,13 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun }) By("checking data have been copied correctly", func() { - AssertDataExpectedCount(env, namespace, dstClusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: dstClusterName, + DatabaseName: utils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) By("writing some new data to the dst cluster", func() { @@ -99,6 +111,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -106,7 +119,13 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun }) By("checking the src cluster was not modified", func() { - AssertDataExpectedCount(env, namespace, srcClusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: utils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) @@ -119,7 +138,13 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun AssertClusterIsReady(namespace, dstClusterName, testTimeouts[utils.ClusterIsReadySlow], env) By("checking data have been copied correctly", func() { - AssertDataExpectedCount(env, namespace, dstClusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: dstClusterName, + DatabaseName: utils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) By("writing some new data to the dst cluster", func() { @@ -131,6 +156,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -138,7 +164,13 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun }) By("checking the src cluster was not modified", func() { - AssertDataExpectedCount(env, namespace, srcClusterName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: srcClusterName, + DatabaseName: utils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) diff --git a/tests/e2e/pg_data_corruption_test.go b/tests/e2e/pg_data_corruption_test.go index 996e886f12..c8c6fbe321 100644 --- a/tests/e2e/pg_data_corruption_test.go +++ b/tests/e2e/pg_data_corruption_test.go @@ -58,7 +58,13 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( clusterName, err := env.GetResourceNameFromYAML(sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - AssertCreateTestData(env, namespace, clusterName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testsUtils.AppDBName, + TableName: 
tableName, + } + AssertCreateTestData(env, tableLocator) By("gathering current primary pod and pvc", func() { oldPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) @@ -187,7 +193,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( }, 300).Should(BeTrue()) }) AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) - AssertDataExpectedCount(env, namespace, clusterName, tableName, 2) + AssertDataExpectedCount(env, tableLocator, 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 120) } diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index 977b0a7daf..45998be4ae 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -33,7 +33,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" @@ -211,17 +210,35 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { }) By("creating a new data in the new source cluster", func() { - AssertCreateTestDataWithDatabaseName(env, namespace, clusterTwoName, sourceDBName, "new_test_table") + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterTwoName, + DatabaseName: sourceDBName, + TableName: "new_test_table", + } + AssertCreateTestData(env, tableLocator) + }) + + // The dst Cluster gets promoted to primary, hence the new appUser password will + // be updated to reflect its "-app" secret. + // We need to copy the password changes over to the src Cluster, which is now a Replica + // Cluster, in order to connect using the "-app" secret. 
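// Note: apiv1.ApplicationUserSecretSuffix resolves to "-app", so the
// secret updated below is named "<clusterOneName>-app".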
+ By("updating the appUser secret of the src cluster", func() { + _, appSecretPassword, err := testUtils.GetCredentials(clusterTwoName, namespace, + apiv1.ApplicationUserSecretSuffix, env) + Expect(err).ToNot(HaveOccurred()) + AssertUpdateSecret("password", appSecretPassword, clusterOneName+apiv1.ApplicationUserSecretSuffix, + namespace, clusterOneName, 30, env) }) By("checking that the data is present in the old src cluster", func() { - AssertDataExpectedCountWithDatabaseName( - namespace, - clusterOnePrimary.Name, - sourceDBName, - "new_test_table", - 2, - ) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterOneName, + DatabaseName: sourceDBName, + TableName: "new_test_table", + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) @@ -261,13 +278,16 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { primaryReplicaCluster, err := env.GetClusterPrimary(replicaNamespace, replicaClusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 - By("verify archive mode is set to 'always on' designated primary", func() { query := "show archive_mode;" Eventually(func() (string, error) { - stdOut, _, err := env.ExecCommand(env.Ctx, *primaryReplicaCluster, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", sourceDBName, "-tAc", query) + stdOut, _, err := env.ExecQueryInInstancePod( + testUtils.PodLocator{ + Namespace: primaryReplicaCluster.Namespace, + PodName: primaryReplicaCluster.Name, + }, + sourceDBName, + query) return strings.Trim(stdOut, "\n"), err }, 30).Should(BeEquivalentTo("always")) }) @@ -645,8 +665,13 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f Consistently(func(g Gomega) { pod, err := env.GetClusterPrimary(namespace, clusterBName) g.Expect(err).ToNot(HaveOccurred()) - stdOut, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, ptr.To(time.Second*10), - "psql", "-U", "postgres", "postgres", "-tAc", "select pg_is_in_recovery();") + stdOut, _, err := env.ExecQueryInInstancePod( + testUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testUtils.PostgresDBName, + "select pg_is_in_recovery();") g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.Trim(stdOut, "\n")).To(Equal("t")) }, 60, 10).Should(Succeed()) diff --git a/tests/e2e/replication_slot_test.go b/tests/e2e/replication_slot_test.go index 4a576de7bb..c57404f93f 100644 --- a/tests/e2e/replication_slot_test.go +++ b/tests/e2e/replication_slot_test.go @@ -109,10 +109,14 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.RunQueryFromPod(primaryPod, testsUtils.PGLocalSocketDir, - "app", "postgres", "''", - fmt.Sprintf("SELECT pg_create_physical_replication_slot('%s');", userPhysicalSlot), - env) + query := fmt.Sprintf("SELECT pg_create_physical_replication_slot('%s');", userPhysicalSlot) + _, _, err = env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + testsUtils.PostgresDBName, + query) Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index b3093e5ab0..72987be873 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -361,7 +361,6 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, tablespace2 = "tbs2" table2 = "test_tbs2" ) - 
checkPointTimeout := time.Second * 10 BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test @@ -418,26 +417,34 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, By("inserting test data and creating WALs on the cluster to be snapshotted", func() { // Create a table and insert data 1,2 in each tablespace tl1 := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - TableName: table1, - Tablespace: tablespace1, + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: table1, + Tablespace: tablespace1, } - AssertCreateTestDataInTablespace(env, tl1) + AssertCreateTestData(env, tl1) tl2 := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - TableName: table2, - Tablespace: tablespace2, + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: table2, + Tablespace: tablespace2, } - AssertCreateTestDataInTablespace(env, tl2) + AssertCreateTestData(env, tl2) primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Execute a checkpoint - _, _, err = env.EventuallyExecCommand( - env.Ctx, *primaryPod, specs.PostgresContainerName, &checkPointTimeout, - "psql", "-U", "postgres", "-tAc", "CHECKPOINT") + _, _, err = env.EventuallyExecQueryInInstancePod( + testUtils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, testUtils.PostgresDBName, + "CHECKPOINT", + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) }) @@ -510,8 +517,20 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("verifying the correct data exists in the restored cluster", func() { - AssertDataExpectedCount(env, namespace, clusterToRestoreName, table1, 2) - AssertDataExpectedCount(env, namespace, clusterToRestoreName, table2, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: testUtils.AppDBName, + TableName: table1, + } + AssertDataExpectedCount(env, tableLocator, 2) + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: testUtils.AppDBName, + TableName: table2, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) @@ -526,6 +545,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -590,8 +610,20 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("verifying the correct data exists in the restored cluster", func() { - AssertDataExpectedCount(env, namespace, clusterToPITRName, table1, 4) - AssertDataExpectedCount(env, namespace, clusterToPITRName, table2, 4) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToPITRName, + DatabaseName: testUtils.AppDBName, + TableName: table1, + } + AssertDataExpectedCount(env, tableLocator, 4) + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: clusterToPITRName, + DatabaseName: testUtils.AppDBName, + TableName: table2, + } + AssertDataExpectedCount(env, tableLocator, 4) }) }) }) @@ -1004,7 +1036,7 @@ func AssertDatabaseContainsTablespaces(cluster *apiv1.Cluster, timeout int) { testUtils.PodLocator{ Namespace: namespace, PodName: instance.Name, - }, testUtils.DatabaseName("app"), + }, testUtils.AppDBName, "SELECT oid, spcname, pg_get_userbyid(spcowner) FROM pg_tablespace;", ) 
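// pg_get_userbyid() turns the spcowner OID into the owning role name,
// which is what the checks below compare against.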
g.Expect(stdErr).To(BeEmpty()) @@ -1032,7 +1064,7 @@ func AssertTempTablespaceContent(cluster *apiv1.Cluster, timeout int, content st testUtils.PodLocator{ Namespace: namespace, PodName: primary.Name, - }, testUtils.DatabaseName("app"), + }, testUtils.AppDBName, "SHOW temp_tablespaces", ) g.Expect(stdErr).To(BeEmpty()) @@ -1057,7 +1089,7 @@ func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespace testUtils.PodLocator{ Namespace: namespace, PodName: primary.Name, - }, testUtils.DatabaseName("app"), + }, testUtils.AppDBName, "CREATE TEMPORARY TABLE cnp_e2e_test_table (i INTEGER); "+ "SELECT spcname FROM pg_tablespace WHERE OID="+ "(SELECT reltablespace FROM pg_class WHERE oid = 'cnp_e2e_test_table'::regclass)", @@ -1079,7 +1111,7 @@ func AssertTablespaceAndOwnerExist(cluster *apiv1.Cluster, tablespace, owner str testUtils.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, - }, testUtils.DatabaseName("app"), + }, testUtils.AppDBName, fmt.Sprintf("SELECT 1 FROM pg_tablespace WHERE spcname = '%s' AND pg_get_userbyid(spcowner) = '%s';", tablespace, owner), diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go index 5d8455be05..d614e8ad75 100644 --- a/tests/e2e/update_user_test.go +++ b/tests/e2e/update_user_test.go @@ -126,6 +126,12 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi ) var namespace string + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + It("enable and disable superuser access", func() { var err error // Create a cluster in a namespace we'll delete after the test @@ -152,13 +158,16 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) }, 200).Should(Succeed()) - timeout := time.Second * 10 - + query := "SELECT rolpassword IS NULL FROM pg_authid WHERE rolname='postgres'" // We should have the `postgres` user with a null password Eventually(func() string { - stdout, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, &timeout, - "psql", "-U", "postgres", "-tAc", - "SELECT rolpassword IS NULL FROM pg_authid WHERE rolname='postgres'") + stdout, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, + }, + testsUtils.PostgresDBName, + query) if err != nil { return "" } diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index b345936d64..98b3235796 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -35,7 +35,6 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" @@ -167,6 +166,8 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O } AssertConfUpgrade := func(clusterName, upgradeNamespace string) { + databaseName := "appdb" + By("checking basic functionality performing a configuration upgrade on the cluster", func() { podList, err := env.GetClusterPodList(upgradeNamespace, clusterName) Expect(err).ToNot(HaveOccurred()) @@ -186,12 +187,16 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O }, 60).ShouldNot(HaveOccurred()) timeout := 300 - commandTimeout := time.Second * 10 // Check that both parameters 
have been modified in each pod for _, pod := range podList.Items { Eventually(func() (int, error) { - stdout, stderr, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show max_replication_slots") + stdout, stderr, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + "show max_replication_slots") if err != nil { return 0, err } @@ -204,8 +209,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O "Pod %v should have updated its config", pod.Name) Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, pod, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show maintenance_work_mem") + stdout, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + "show maintenance_work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(256), @@ -235,10 +245,16 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O primary, err := env.GetClusterPrimary(upgradeNamespace, clusterName) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 query := "CREATE TABLE IF NOT EXISTS postswitch(i int);" - _, _, err = env.EventuallyExecCommand(env.Ctx, *primary, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "appdb", "-tAc", query) + _, _, err = env.EventuallyExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primary.Namespace, + PodName: primary.Name, + }, testsUtils.DatabaseName(databaseName), + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) for i := 1; i < 4; i++ { @@ -252,8 +268,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O if err := env.Client.Get(env.Ctx, podNamespacedName, pod); err != nil { return "", err } - out, _, err := env.ExecCommand(env.Ctx, *pod, specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "appdb", "-tAc", + + out, _, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.DatabaseName(databaseName), "SELECT count(*) = 0 FROM postswitch") return strings.TrimSpace(out), err }, 240).Should(BeEquivalentTo("t"), @@ -448,6 +469,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O } assertClustersWorkAfterOperatorUpgrade := func(upgradeNamespace, operatorManifest string, online bool) { + databaseName := "appdb" // generate random serverNames for the clusters each time serverName1 := fmt.Sprintf("%s-%d", clusterName1, funk.RandomInt(0, 9999)) serverName2 := fmt.Sprintf("%s-%d", clusterName2, funk.RandomInt(0, 9999)) @@ -505,10 +527,16 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O primary, err := env.GetClusterPrimary(upgradeNamespace, clusterName1) Expect(err).ToNot(HaveOccurred()) - commandTimeout := time.Second * 10 query := "CREATE TABLE IF NOT EXISTS to_restore AS VALUES (1),(2);" - _, _, err = env.EventuallyExecCommand(env.Ctx, *primary, specs.PostgresContainerName, &commandTimeout, - "psql", "-U", "postgres", "appdb", "-tAc", query) + _, _, err = env.EventuallyExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: primary.Namespace, + PodName: primary.Name, + 
}, testsUtils.DatabaseName(databaseName), + query, + RetryTimeout, + PollingTime, + ) Expect(err).ToNot(HaveOccurred()) }) @@ -636,7 +664,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName("appdb"), + testsUtils.DatabaseName(databaseName), "SELECT count(*) FROM to_restore") Expect(strings.Trim(out, "\n"), err).To(BeEquivalentTo("2")) @@ -649,7 +677,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName("appdb"), + testsUtils.DatabaseName(databaseName), "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)") Expect(err).NotTo(HaveOccurred()) Expect(strconv.Atoi(strings.Trim(out, "\n"))).To( @@ -662,7 +690,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName("appdb"), + testsUtils.DatabaseName(databaseName), "SELECT count(*) FROM pg_stat_replication") return strings.Trim(out, "\n"), err }, 180).Should(BeEquivalentTo("2")) diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index 55718ee38c..ff0b016ec0 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -242,7 +242,13 @@ var _ = Describe("Verify Volume Snapshot", By("inserting test data and creating WALs on the cluster to be snapshotted", func() { // Create a "test" table with values 1,2 - AssertCreateTestData(env, namespace, clusterToSnapshotName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToSnapshotName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) // Because GetCurrentTimestamp() rounds down to the second and is executed // right after the creation of the test data, we wait for 1s to avoid not @@ -262,6 +268,7 @@ var _ = Describe("Verify Volume Snapshot", apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -282,7 +289,13 @@ var _ = Describe("Verify Volume Snapshot", }) By("verifying the correct data exists in the restored cluster", func() { - AssertDataExpectedCount(env, namespace, clusterToRestoreName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) }) @@ -366,7 +379,13 @@ var _ = Describe("Verify Volume Snapshot", It("can create a declarative cold backup and restoring using it", func() { By("inserting test data", func() { - AssertCreateTestData(env, namespace, clusterToBackupName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToBackupName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) }) backupName, err := env.GetResourceNameFromYAML(backupFileFilePath) @@ -426,7 +445,13 @@ var _ = Describe("Verify Volume Snapshot", }) By("checking that the data is present on the restored cluster", func() { - AssertDataExpectedCount(env, namespace, clusterToRestoreName, tableName, 2) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) }) }) It("can 
take a snapshot targeting the primary", func() { @@ -615,11 +640,18 @@ var _ = Describe("Verify Volume Snapshot", apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) // Create a "test" table with values 1,2 - AssertCreateTestData(env, namespace, clusterToSnapshotName, tableName) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToSnapshotName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) // Insert 2 more rows which we expect not to be present at the end of the recovery insertRecordIntoTable(tableName, 3, conn) @@ -684,7 +716,13 @@ var _ = Describe("Verify Volume Snapshot", }) By("verifying the correct data exists in the restored cluster", func() { - AssertDataExpectedCount(env, namespace, clusterToRestoreName, tableName, 4) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToRestoreName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 4) }) }) @@ -700,6 +738,7 @@ var _ = Describe("Verify Volume Snapshot", apiv1.ApplicationUserSecretSuffix, ) defer func() { + _ = conn.Close() forward.Close() }() Expect(err).ToNot(HaveOccurred()) @@ -740,8 +779,13 @@ var _ = Describe("Verify Volume Snapshot", podList, err := env.GetClusterReplicas(namespace, clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Expect(podList.Items).To(HaveLen(2)) - AssertDataExpectedCount(env, namespace, clusterToSnapshotName, tableName, 6) - AssertDataExpectedCount(env, namespace, clusterToSnapshotName, tableName, 6) + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterToSnapshotName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 6) }) }) }) diff --git a/tests/utils/environment.go b/tests/utils/environment.go index 90e019327e..2596f87186 100644 --- a/tests/utils/environment.go +++ b/tests/utils/environment.go @@ -48,7 +48,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" @@ -204,31 +203,6 @@ func (env TestingEnvironment) ExecCommand( pod, containerName, timeout, command...) } -// ExecCommandWithPsqlClient wraps the utils.ExecCommand pre-setting values and -// run query on psql client pod with rw service as host. 
-func (env TestingEnvironment) ExecCommandWithPsqlClient( - namespace, - clusterName string, - pod *corev1.Pod, - secretSuffix string, - dbname string, - query string, -) (string, string, error) { - timeout := time.Second * 10 - username, password, err := GetCredentials(clusterName, namespace, secretSuffix, &env) - if err != nil { - return "", "", err - } - rwService, err := GetRwServiceObject(namespace, clusterName, &env) - if err != nil { - return "", "", err - } - host := CreateServiceFQDN(namespace, rwService.GetName()) - dsn := CreateDSN(host, username, dbname, password, Prefer, 5432) - return utils.ExecCommand(env.Ctx, env.Interface, env.RestClientConfig, - *pod, specs.PostgresContainerName, &timeout, "psql", dsn, "-tAc", query) -} - // GetPVCList gathers the current list of PVCs in a namespace func (env TestingEnvironment) GetPVCList(namespace string) (*corev1.PersistentVolumeClaimList, error) { pvcList := &corev1.PersistentVolumeClaimList{} diff --git a/tests/utils/pod.go b/tests/utils/pod.go index f62b4fcba1..25841da6a1 100644 --- a/tests/utils/pod.go +++ b/tests/utils/pod.go @@ -33,6 +33,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/gomega" // nolint ) // PodCreateAndWaitForReady creates a given pod object and wait for it to be ready @@ -246,3 +248,29 @@ func (env TestingEnvironment) ExecQueryInInstancePod( PodName: podLocator.PodName, }, &timeout, "psql", "-U", "postgres", string(dbname), "-tAc", query) } + +// EventuallyExecQueryInInstancePod wraps ExecQueryInInstancePod with an Eventually clause +func (env TestingEnvironment) EventuallyExecQueryInInstancePod( + podLocator PodLocator, + dbname DatabaseName, + query string, + retryTimeout int, + pollingTime int, +) (string, string, error) { + var stdOut, stdErr string + var err error + + Eventually(func() error { + stdOut, stdErr, err = env.ExecQueryInInstancePod( + PodLocator{ + Namespace: podLocator.Namespace, + PodName: podLocator.PodName, + }, dbname, query) + if err != nil { + return err + } + return nil + }, retryTimeout, pollingTime).Should(BeNil()) + + return stdOut, stdErr, err +} diff --git a/tests/utils/postgres.go b/tests/utils/postgres.go index e85f1bf573..9c4011c9f1 100644 --- a/tests/utils/postgres.go +++ b/tests/utils/postgres.go @@ -19,11 +19,8 @@ package utils import ( "strconv" "strings" - "time" corev1 "k8s.io/api/core/v1" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) const ( @@ -37,32 +34,22 @@ const ( AppDBName = "app" // PostgresDBName database name postgres PostgresDBName = "postgres" + // TablespaceDefaultName is the default tablespace location + TablespaceDefaultName = "pg_default" ) -// RunQueryFromPod executes a query from a pod to a host -func RunQueryFromPod( - connectingPod *corev1.Pod, - host string, - dbname string, - user string, - password string, - query string, - env *TestingEnvironment, -) (string, string, error) { - timeout := time.Second * 10 - dsn := CreateDSN(host, user, dbname, password, Prefer, 5432) - - stdout, stderr, err := env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", query) - return stdout, stderr, err -} - // CountReplicas counts the number of replicas attached to an instance func CountReplicas(env *TestingEnvironment, pod *corev1.Pod) (int, error) { query := "SELECT count(*) FROM pg_stat_replication" - commandTimeout := time.Second * 10 - stdOut, _, err := env.EventuallyExecCommand(env.Ctx, *pod, 
specs.PostgresContainerName, - &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", query) + stdOut, _, err := env.EventuallyExecQueryInInstancePod( + PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, AppDBName, + query, + RetryTimeout, + PollingTime, + ) if err != nil { return 0, nil } diff --git a/tests/utils/psql_connection.go b/tests/utils/psql_connection.go index b88e07c50b..a0d8a7a1fb 100644 --- a/tests/utils/psql_connection.go +++ b/tests/utils/psql_connection.go @@ -200,7 +200,7 @@ func ForwardPSQLConnectionWithCreds( return forward, conn, err } -// RunQueryRowOverForward runs QueryRow with a given query, returning the result Row +// RunQueryRowOverForward runs QueryRow with a given query, returning the Row of the SQL command func RunQueryRowOverForward( env *TestingEnvironment, namespace, @@ -220,8 +220,36 @@ func RunQueryRowOverForward( return nil, err } defer func() { + _ = conn.Close() forward.Close() }() return conn.QueryRow(query), nil } + +// RunExecOverForward runs Exec with a given query, returning the Result of the SQL command +func RunExecOverForward( + env *TestingEnvironment, + namespace, + clusterName, + dbname, + secretSuffix, + query string, +) (sql.Result, error) { + forward, conn, err := ForwardPSQLConnection( + env, + namespace, + clusterName, + dbname, + secretSuffix, + ) + if err != nil { + return nil, err + } + defer func() { + _ = conn.Close() + forward.Close() + }() + + return conn.Exec(query) +} diff --git a/tests/utils/replication_slots.go b/tests/utils/replication_slots.go index 9279a01ce1..dab55b9e9e 100644 --- a/tests/utils/replication_slots.go +++ b/tests/utils/replication_slots.go @@ -54,13 +54,14 @@ func PrintReplicationSlots( } m := make(map[string]string) for _, slot := range slots { - restartLsn, _, err := RunQueryFromPod( - &podList.Items[i], PGLocalSocketDir, - "app", - "postgres", - "''", - fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", slot), - env) + query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", slot) + restartLsn, _, err := env.ExecQueryInInstancePod( + PodLocator{ + Namespace: podList.Items[i].Namespace, + PodName: podList.Items[i].Name, + }, + AppDBName, + query) if err != nil { output.WriteString(fmt.Sprintf("Couldn't retrieve restart_lsn for slot %v: %v\n", slot, err)) } @@ -125,9 +126,14 @@ func GetReplicationSlotsOnPod(namespace, podName string, env *TestingEnvironment return nil, err } - stdout, _, err := RunQueryFromPod(targetPod, PGLocalSocketDir, - "app", "postgres", "''", - "SELECT slot_name FROM pg_replication_slots WHERE temporary = 'f' AND slot_type = 'physical'", env) + query := "SELECT slot_name FROM pg_replication_slots WHERE temporary = 'f' AND slot_type = 'physical'" + stdout, _, err := env.ExecQueryInInstancePod( + PodLocator{ + Namespace: targetPod.Namespace, + PodName: targetPod.Name, + }, + AppDBName, + query) if err != nil { return nil, err } @@ -157,8 +163,13 @@ func GetReplicationSlotLsnsOnPod( for _, slot := range slots { query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", slot) - restartLsn, _, err := RunQueryFromPod(&pod, PGLocalSocketDir, - "app", "postgres", "''", query, env) + restartLsn, _, err := env.ExecQueryInInstancePod( + PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + AppDBName, + query) if err != nil { return nil, err } From 1228dada11b0749caacfe599926d12a8dfcaedc4 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 18 Oct 2024 
16:56:23 +0200 Subject: [PATCH 094/836] test: improve handling of release directory in unit tests (#5901) Closes #5900 Signed-off-by: Marco Nenciarini Signed-off-by: Jaime Silvela Co-authored-by: Jaime Silvela --- tests/utils/release.go | 24 +++++++++++++----- tests/utils/release_test.go | 49 ++++++++++++++++++++++++++++++++++++- 2 files changed, 66 insertions(+), 7 deletions(-) diff --git a/tests/utils/release.go b/tests/utils/release.go index e92dc961a4..6b480e1957 100644 --- a/tests/utils/release.go +++ b/tests/utils/release.go @@ -19,9 +19,12 @@ package utils import ( "errors" + "fmt" "io/fs" "os" "os/exec" + "regexp" + "slices" "sort" "strings" @@ -73,14 +76,19 @@ func GetAvailableReleases(releasesPath string) ([]*semver.Version, error) { // build the array that contains the versions // found in the releasePath directory for i, file := range validFiles { - tag := extractTag(file.Name()) + tag, err := extractTag(file.Name()) + if err != nil { + continue + } versions[i] = semver.MustParse(tag) } // Sorting version as descending order ([v1.10.0, v1.9.0...]) sort.Sort(sort.Reverse(semver.Collection(versions))) - return versions, nil + return slices.CompactFunc(versions, func(a, b *semver.Version) bool { + return a.Equal(b) + }), nil } func isReleasePullRequestBranch() bool { @@ -95,9 +103,13 @@ func isReleasePullRequestBranch() bool { return strings.HasPrefix(branchName, "release/v") } -func extractTag(releaseFile string) string { - releaseFile = strings.TrimPrefix(releaseFile, "cnpg-") - tag := strings.TrimSuffix(releaseFile, ".yaml") +var extractTagRegex = regexp.MustCompile(`-(\d+\.\d+\.\d+).yaml$`) - return tag +func extractTag(releaseFile string) (string, error) { + matches := extractTagRegex.FindStringSubmatch(releaseFile) + if len(matches) == 0 { + return "", fmt.Errorf("could not extract tag from filename %s", releaseFile) + } + // since the regex is matched, the second fragment contains the submatch + return matches[1], nil } diff --git a/tests/utils/release_test.go b/tests/utils/release_test.go index 7126d586e8..a65ea5b58f 100644 --- a/tests/utils/release_test.go +++ b/tests/utils/release_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package utils import ( + "os" "path/filepath" "strings" @@ -28,7 +29,13 @@ import ( var _ = Describe("Release tag extraction", func() { It("properly works with expected filename", func() { - tag := extractTag("cnpg-0.5.0.yaml") + tag, err := extractTag("cnpg-0.5.0.yaml") + Expect(err).ToNot(HaveOccurred()) + Expect(tag).To(Equal("0.5.0")) + }) + It("properly works with a different prefix", func() { + tag, err := extractTag("modified-manifest-0.5.0.yaml") + Expect(err).ToNot(HaveOccurred()) Expect(tag).To(Equal("0.5.0")) }) }) @@ -85,4 +92,44 @@ var _ = Describe("GetAvailableReleases fails on wrong release directory", func() _, err := GetMostRecentReleaseTag(tmpDir) Expect(err).To(HaveOccurred()) }) + + It("properly deduplicate releases", func() { + tmpDir := GinkgoT().TempDir() + + for _, file := range []string{ + "cnpg-0.5.0.yaml", + "cnpg-0.5.1.yaml", + "cnpg-0.6.0.yaml", + "mangled-cnpg-0.5.1.yaml", + } { + f, err := os.Create(filepath.Clean(filepath.Join(tmpDir, file))) + Expect(err).ToNot(HaveOccurred()) + Expect(f.Close()).ToNot(HaveOccurred()) + } + + versions, err := GetAvailableReleases(tmpDir) + Expect(err).ToNot(HaveOccurred()) + Expect(versions).To(HaveLen(3)) + Expect(versions[0].String()).To(Equal("0.6.0")) + Expect(versions[1].String()).To(Equal("0.5.1")) + Expect(versions[2].String()).To(Equal("0.5.0")) + }) + + It("properly ignore rc versions", func() { + tmpDir := GinkgoT().TempDir() + + for _, file := range []string{ + "cnpg-0.5.0.yaml", + "cnpg-0.5.1.yaml", + "cnpg-0.6.0-rc1.yaml", + } { + f, err := os.Create(filepath.Clean(filepath.Join(tmpDir, file))) + Expect(err).ToNot(HaveOccurred()) + Expect(f.Close()).ToNot(HaveOccurred()) + } + + latest, err := GetMostRecentReleaseTag(tmpDir) + Expect(err).ToNot(HaveOccurred()) + Expect(latest).To(Equal("0.5.1")) + }) }) From 7c063563406f3f96f02565500bc376b8dff80afc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 20 Oct 2024 12:48:36 +0200 Subject: [PATCH 095/836] chore(deps): update jonasbn/github-action-spellcheck docker tag to v0.43.1 (main) (#5908) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f16b5da862..192d152fd0 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.4 GORELEASER_VERSION ?= v2.3.2 -SPELLCHECK_VERSION ?= 0.43.0 +SPELLCHECK_VERSION ?= 0.43.1 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 OPM_VERSION ?= v1.47.0 From 088f6f70f34ddad9c132049f0acdb15fab353864 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 21 Oct 2024 12:10:21 +0200 Subject: [PATCH 096/836] test: replace temp directory handling with GinkgoT().TempDir() (#5914) This change updates the test suite to use `GinkgoT().TempDir()` for managing temporary directories. 
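For illustration, this is the shape of the change, distilled from the
diffs below (a sketch, not an exact hunk):

    // before: manual temporary-directory lifecycle
    tempDir, err := os.MkdirTemp("", "test")
    Expect(err).NotTo(HaveOccurred())
    DeferCleanup(func() {
        Expect(os.RemoveAll(tempDir)).To(Succeed())
    })

    // after: Ginkgo creates the directory and removes it automatically
    // once the spec finishes
    tempDir := GinkgoT().TempDir()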
Signed-off-by: Marco Nenciarini --- pkg/executablehash/executablehash_test.go | 8 +------- pkg/utils/discovery_test.go | 6 ++---- tests/e2e/upgrade_test.go | 3 ++- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/pkg/executablehash/executablehash_test.go b/pkg/executablehash/executablehash_test.go index b23d67c6de..7a5bdaee16 100644 --- a/pkg/executablehash/executablehash_test.go +++ b/pkg/executablehash/executablehash_test.go @@ -34,14 +34,8 @@ var _ = Describe("Executable hash detection", func() { It("retrieves a hash from a given filename", func() { const expectedHash = "d6672ee3a93d0d6e3c30bdef89f310799c2f3ab781098a9792040d5541ce3ed3" const fileName = "test-hash" - var tempDir string - DeferCleanup(func() { - Expect(os.RemoveAll(tempDir)).To(Succeed()) - }) - - tempDir, err := os.MkdirTemp("", "test") - Expect(err).NotTo(HaveOccurred()) + tempDir := GinkgoT().TempDir() Expect(os.WriteFile(filepath.Join(tempDir, fileName), []byte(fileName), 0o600)).To(Succeed()) result, err := GetByName(filepath.Join(tempDir, fileName)) diff --git a/pkg/utils/discovery_test.go b/pkg/utils/discovery_test.go index 6df7bfc3dd..6407a9e423 100644 --- a/pkg/utils/discovery_test.go +++ b/pkg/utils/discovery_test.go @@ -256,16 +256,14 @@ var _ = Describe("AvailableArchitecture", func() { }) It("should retrieve an existing available architecture", func() { - tempDir, err := os.MkdirTemp("", "test") - Expect(err).NotTo(HaveOccurred()) + tempDir := GinkgoT().TempDir() DeferCleanup(func() { - Expect(os.RemoveAll(tempDir)).To(Succeed()) availableArchitectures = nil }) // Create a sample file Expect(os.WriteFile(filepath.Join(tempDir, "manager_amd64"), []byte("amd64"), 0o600)).To(Succeed()) - err = detectAvailableArchitectures(filepath.Join(tempDir, "manager_*")) + err := detectAvailableArchitectures(filepath.Join(tempDir, "manager_*")) Expect(err).ToNot(HaveOccurred()) Expect(availableArchitectures).To(HaveLen(1)) diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index 98b3235796..c389bf7a29 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -507,7 +507,8 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // trigger any Pod restart. We still test that the operator // is upgraded in this case too. 
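// (The cnpg.io/reconcilePodSpec=disabled annotation tells the operator to
// skip reconciling the generated Pod spec, so no instance is restarted.)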
_, stderr, err := testsUtils.Run( - fmt.Sprintf("kubectl annotate -n %s cluster/%s cnpg.io/reconcilePodSpec=disabled", upgradeNamespace, clusterName1)) + fmt.Sprintf("kubectl annotate -n %s cluster/%s cnpg.io/reconcilePodSpec=disabled", + upgradeNamespace, clusterName1)) Expect(err).NotTo(HaveOccurred(), "stderr: "+stderr) } }) From 8a68e8c6c02ca8ef3f5946000235c53d099846da Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 10:03:30 +0200 Subject: [PATCH 097/836] fix(deps): update github.com/cloudnative-pg/barman-cloud digest to 44f56f7 (main) (#5864) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index caa2fb1033..d0ca1763c6 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/avast/retry-go/v4 v4.6.0 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a + github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc diff --git a/go.sum b/go.sum index 04f20745c9..19a21dfdaa 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1ML9Eibfq3helbT9GtU0EstqFtG91k/MPO9azY5ME= -github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= +github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c h1:JQK5GOXSukWTInG5GzgmlTwY/rs5yO446+xy09NqbLg= +github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8= github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b h1:4Q2VQsPlLHliJdi87zodQ0FHLd1cJINMm4N70eu8rRg= From e5ab6432403685de99becc6c315ea7f6d6b2e85a Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 22 Oct 2024 11:28:55 +0200 Subject: [PATCH 098/836] chore(logging): add logs to `pre` and `post` reconcile hooks (#5850) Closes #5851 Signed-off-by: Armando Ruocco --- internal/cnpi/plugin/client/contracts.go | 7 ++--- internal/cnpi/plugin/client/reconciler.go | 31 +++++++++++++++-------- internal/controller/cluster_controller.go | 4 +++ 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go index 1489181fb6..c1b141f0e9 100644 --- a/internal/cnpi/plugin/client/contracts.go +++ b/internal/cnpi/plugin/client/contracts.go @@ -77,9 +77,10 @@ type ClusterCapabilities interface { // ReconcilerHookResult is the result of a reconciliation loop type ReconcilerHookResult struct { - Result ctrl.Result - Err error - StopReconciliation 
bool + Result ctrl.Result `json:"result"` + Err error `json:"err"` + StopReconciliation bool `json:"stopReconciliation"` + Identifier string `json:"identifier"` } // ClusterReconcilerHooks decsribes a set of behavior needed to enhance diff --git a/internal/cnpi/plugin/client/reconciler.go b/internal/cnpi/plugin/client/reconciler.go index 8f1909cfd3..c7459c158f 100644 --- a/internal/cnpi/plugin/client/reconciler.go +++ b/internal/cnpi/plugin/client/reconciler.go @@ -31,27 +31,34 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" ) +const cnpgOperatorKey = "cnpg-operator" + // newContinueResult returns a result instructing the reconciliation loop // to continue its operation -func newContinueResult() ReconcilerHookResult { return ReconcilerHookResult{} } +func newContinueResult(identifier string) ReconcilerHookResult { + return ReconcilerHookResult{Identifier: identifier} +} // newTerminateResult returns a result instructing the reconciliation loop to stop // reconciliation -func newTerminateResult() ReconcilerHookResult { return ReconcilerHookResult{StopReconciliation: true} } +func newTerminateResult(identifier string) ReconcilerHookResult { + return ReconcilerHookResult{StopReconciliation: true, Identifier: identifier} +} // newReconcilerRequeueResult creates a new result instructing // a reconciler to schedule a loop in the passed time frame -func newReconcilerRequeueResult(after int64) ReconcilerHookResult { +func newReconcilerRequeueResult(identifier string, after int64) ReconcilerHookResult { return ReconcilerHookResult{ Err: nil, StopReconciliation: true, Result: ctrl.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, + Identifier: identifier, } } // newReconcilerErrorResult creates a new result from an error -func newReconcilerErrorResult(err error) ReconcilerHookResult { - return ReconcilerHookResult{Err: err, StopReconciliation: true} +func newReconcilerErrorResult(identifier string, err error) ReconcilerHookResult { + return ReconcilerHookResult{Err: err, StopReconciliation: true, Identifier: identifier} } func (data *data) PreReconcile(ctx context.Context, cluster client.Object, object client.Object) ReconcilerHookResult { @@ -104,6 +111,7 @@ func reconcilerHook( serializedCluster, err := json.Marshal(cluster) if err != nil { return newReconcilerErrorResult( + cnpgOperatorKey, fmt.Errorf("while serializing %s %s/%s to JSON: %w", cluster.GetObjectKind().GroupVersionKind().Kind, cluster.GetNamespace(), cluster.GetName(), @@ -115,6 +123,7 @@ func reconcilerHook( serializedObject, err := json.Marshal(object) if err != nil { return newReconcilerErrorResult( + cnpgOperatorKey, fmt.Errorf( "while serializing %s %s/%s to JSON: %w", cluster.GetObjectKind().GroupVersionKind().Kind, @@ -139,7 +148,7 @@ func reconcilerHook( contextLogger.Info( "Skipping reconciler hooks for unknown group", "objectGvk", object.GetObjectKind()) - return newContinueResult() + return newContinueResult(cnpgOperatorKey) } for idx := range plugins { @@ -151,20 +160,20 @@ func reconcilerHook( result, err := executeRequest(ctx, plugin.ReconcilerHooksClient(), request) if err != nil { - return newReconcilerErrorResult(err) + return newReconcilerErrorResult(plugin.Name(), err) } switch result.Behavior { case reconciler.ReconcilerHooksResult_BEHAVIOR_TERMINATE: - return newTerminateResult() + return newTerminateResult(plugin.Name()) case reconciler.ReconcilerHooksResult_BEHAVIOR_REQUEUE: - return newReconcilerRequeueResult(result.GetRequeueAfter()) + return 
newReconcilerRequeueResult(plugin.Name(), result.GetRequeueAfter()) case reconciler.ReconcilerHooksResult_BEHAVIOR_CONTINUE: - return newContinueResult() + return newContinueResult(plugin.Name()) } } - return newContinueResult() + return newContinueResult(cnpgOperatorKey) } diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 097ca20b56..1c5974cb7d 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -298,6 +298,8 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste // Calls pre-reconcile hooks if hookResult := preReconcilePluginHooks(ctx, cluster, cluster); hookResult.StopReconciliation { + contextLogger.Info("Pre-reconcile hook stopped the reconciliation loop", + "hookResult", hookResult) return hookResult.Result, hookResult.Err } @@ -502,6 +504,8 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste // Calls post-reconcile hooks if hookResult := postReconcilePluginHooks(ctx, cluster, cluster); hookResult.Err != nil || !hookResult.Result.IsZero() { + contextLogger.Info("Post-reconcile hook stopped the reconciliation loop", + "hookResult", hookResult) return hookResult.Result, hookResult.Err } From cdc28e0aaacb436e5d8f9f0aeeca28b326e6a03f Mon Sep 17 00:00:00 2001 From: Gabriele Quaresima Date: Tue, 22 Oct 2024 13:19:43 +0200 Subject: [PATCH 099/836] test(database): add e2e case of declarative database with delete reclaim policy (#5774) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #5680 Signed-off-by: Gabriele Quaresima Signed-off-by: Jaime Silvela Signed-off-by: Niccolò Fei Signed-off-by: Marco Nenciarini Co-authored-by: Jaime Silvela Co-authored-by: Niccolò Fei Co-authored-by: Marco Nenciarini --- contribute/e2e_testing_environment/README.md | 1 + docs/src/e2e.md | 4 + .../declarative_database_management_test.go | 192 +++++++++++------- ...e-with-delete-reclaim-policy.yaml.template | 13 ++ .../database.yaml.template | 1 + tests/labels.go | 35 ++-- 6 files changed, 151 insertions(+), 95 deletions(-) create mode 100644 tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md index bbe60be4a0..30a41ddaf4 100644 --- a/contribute/e2e_testing_environment/README.md +++ b/contribute/e2e_testing_environment/README.md @@ -206,6 +206,7 @@ exported, it will select all medium test cases from the feature type provided. 
| `security` | | `maintenance` | | `tablespaces` | +| `declarative-databases` | ex: ```shell diff --git a/docs/src/e2e.md b/docs/src/e2e.md index 416df25e0d..e796db13b6 100644 --- a/docs/src/e2e.md +++ b/docs/src/e2e.md @@ -127,3 +127,7 @@ and the following suite of E2E tests are performed on that cluster: * Declarative creation of temporary tablespaces * Backup / recovery from object storage * Backup / recovery from volume snapshots + +* **Declarative databases** + * Declarative creation of databases with default (retain) reclaim policy + * Declarative creation of databases with delete reclaim policy diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index 6861d17a33..ba2fb47c2f 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -33,11 +33,11 @@ import ( // - spinning up a cluster, apply a declarative database on it // Set of tests in which we use the declarative database CRD to add new databases on an existing cluster -var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke, tests.LabelBasic), func() { +var _ = Describe("Declarative database management", Label(tests.LabelSmoke, tests.LabelBasic, + tests.LabelDeclarativeDatabases), func() { const ( - clusterManifest = fixturesDir + "/declarative_databases/cluster.yaml.template" - databaseManifest = fixturesDir + "/declarative_databases/database.yaml.template" - level = tests.Medium + clusterManifest = fixturesDir + "/declarative_databases/cluster.yaml.template" + level = tests.Medium ) BeforeEach(func() { @@ -46,15 +46,14 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke } }) - Context("plain vanilla cluster", Ordered, func() { + Context("in a plain vanilla cluster", Ordered, func() { const ( namespacePrefix = "declarative-db" dbname = "declarative" ) var ( - clusterName, namespace, databaseObjectName string - database *apiv1.Database - err error + clusterName, namespace string + err error ) BeforeAll(func() { @@ -70,27 +69,9 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke }) }) - assertDatabaseExists := func(namespace, primaryPod, dbname string, shouldContain bool) { - Eventually(func(g Gomega) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPod, - }, - "postgres", - "\\l") - g.Expect(err).ToNot(HaveOccurred()) - if shouldContain { - g.Expect(stdout).Should(ContainSubstring(dbname)) - } else { - g.Expect(stdout).ShouldNot(ContainSubstring(dbname)) - } - }, 300).Should(Succeed()) - } - assertDatabaseHasExpectedFields := func(namespace, primaryPod string, db apiv1.Database) { query := fmt.Sprintf("select count(*) from pg_database where datname = '%s' "+ - "and encoding = %s and datctype = '%s' and datcollate = '%s'", + "and encoding = pg_char_to_encoding('%s') and datctype = '%s' and datcollate = '%s'", db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate) Eventually(func(g Gomega) { stdout, _, err := env.ExecQueryInInstancePod( @@ -101,65 +82,118 @@ var _ = Describe("Declarative databases management test", Label(tests.LabelSmoke "postgres", query) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(stdout).Should(ContainSubstring("1")) + g.Expect(stdout).Should(ContainSubstring("1"), "expected database not found") }, 30).Should(Succeed()) } - When("Database CRD reclaim policy is set to retain (default) inside spec", func() { - It("can add a 
declarative database", func() { - By("applying Database CRD manifest", func() { - CreateResourceFromFile(namespace, databaseManifest) - databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest) - Expect(err).NotTo(HaveOccurred()) - }) - By("ensuring the Database CRD succeeded reconciliation", func() { - // get database object - database = &apiv1.Database{} - databaseNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: databaseObjectName, - } - - Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, databaseNamespacedName, database) - Expect(err).ToNot(HaveOccurred()) - g.Expect(database.Status.Ready).Should(BeTrue()) - }, 300).WithPolling(10 * time.Second).Should(Succeed()) - }) - - By("verifying new database has been created with the expected fields", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - assertDatabaseExists(namespace, primaryPodInfo.Name, dbname, true) - - // NOTE: the `pg_database` table in Postgres does not contain fields - // for the owner nor the template. - // Its fields are dependent on the version of Postgres, so we pick - // a subset that is available to check even on PG v12 - expectedDatabaseFields := apiv1.Database{ - Spec: apiv1.DatabaseSpec{ - Name: "declarative", - LcCtype: "en_US.utf8", - LcCollate: "C", // this is the default value - Encoding: "0", // corresponds to SQL_ASCII - }, - } - assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, expectedDatabaseFields) - }) + assertTestDeclarativeDatabase := func( + databaseManifest string, + retainOnDeletion bool, + ) { + var ( + database apiv1.Database + databaseObjectName string + ) + By("applying Database CRD manifest", func() { + CreateResourceFromFile(namespace, databaseManifest) + databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest) + Expect(err).NotTo(HaveOccurred()) + }) + By("ensuring the Database CRD succeeded reconciliation", func() { + // get database object + database = apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseObjectName, + } + + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, databaseNamespacedName, &database) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(database.Status.Ready).Should(BeTrue()) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By("verifying new database has been created with the expected fields", func() { + primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + AssertDatabaseExists(primaryPodInfo, dbname, true) + + assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, database) }) - It("keeps the db when Database CRD is removed", func() { - By("remove Database CRD", func() { - Expect(utils.DeleteObject(env, database)).To(Succeed()) - }) + By("removing the Database object", func() { + Expect(utils.DeleteObject(env, &database)).To(Succeed()) + }) - By("verifying database is still existing", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + By("verifying the retention policy in the postgres database", func() { + primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) - assertDatabaseExists(namespace, primaryPodInfo.Name, dbname, true) - }) + AssertDatabaseExists(primaryPodInfo, dbname, retainOnDeletion) + }) + } + + When("Database CRD reclaim policy is set to delete", func() { 
+ It("can manage a declarative database and delete it in Postgres", func() { + databaseManifest := fixturesDir + + "/declarative_databases/database-with-delete-reclaim-policy.yaml.template" + assertTestDeclarativeDatabase(databaseManifest, + false) + }) + }) + + When("Database CRD reclaim policy is set to retain", func() { + It("can manage a declarative database and release it", func() { + databaseManifest := fixturesDir + "/declarative_databases/database.yaml.template" + assertTestDeclarativeDatabase(databaseManifest, true) + }) + }) + }) + + Context("in a Namespace to be deleted manually", func() { + const ( + namespace = "declarative-db-finalizers" + ) + var ( + err error + clusterName string + databaseObjectName string + ) + It("will not prevent the deletion of the namespace with lagging finalizers", func() { + By("setting up the new namespace and cluster", func() { + err = env.CreateNamespace(namespace) + Expect(err).ToNot(HaveOccurred()) + + clusterName, err = env.GetResourceNameFromYAML(clusterManifest) + Expect(err).ToNot(HaveOccurred()) + + AssertCreateCluster(namespace, clusterName, clusterManifest, env) + }) + By("creating the database", func() { + databaseManifest := fixturesDir + + "/declarative_databases/database-with-delete-reclaim-policy.yaml.template" + databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest) + Expect(err).NotTo(HaveOccurred()) + CreateResourceFromFile(namespace, databaseManifest) + }) + By("ensuring the database is reconciled successfully", func() { + // get database object + dbObj := &apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseObjectName, + } + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, databaseNamespacedName, dbObj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(dbObj.Status.Ready).Should(BeTrue()) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + By("deleting the namespace and making sure it succeeds before timeout", func() { + err := env.DeleteNamespaceAndWait(namespace, 60) + Expect(err).ToNot(HaveOccurred()) }) }) }) diff --git a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template new file mode 100644 index 0000000000..0ce2071609 --- /dev/null +++ b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template @@ -0,0 +1,13 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: db-declarative-delete +spec: + name: declarative + owner: app + lc_ctype: C + lc_collate: C + encoding: UTF8 + databaseReclaimPolicy: delete + cluster: + name: cluster-with-declarative-databases diff --git a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template index 3ded03c50a..75f2107bcc 100644 --- a/tests/e2e/fixtures/declarative_databases/database.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template @@ -6,6 +6,7 @@ spec: name: declarative owner: app lc_ctype: "en_US.utf8" + lc_collate: C encoding: SQL_ASCII template: template0 cluster: diff --git a/tests/labels.go b/tests/labels.go index 81b1329281..25b2b858b5 100644 --- a/tests/labels.go +++ b/tests/labels.go @@ -23,25 +23,28 @@ const ( // LabelBackupRestore is a label for only selecting backup and restore tests LabelBackupRestore = "backup-restore" - // LabelBasic is a label for selecting basic test + // LabelBasic is a label 
for selecting basic tests LabelBasic = "basic" - // LabelClusterMetadata is a label for selecting cluster-metadata test + // LabelClusterMetadata is a label for selecting cluster-metadata tests LabelClusterMetadata = "cluster-metadata" + // LabelDeclarativeDatabases is a label for selecting the declarative databases test + LabelDeclarativeDatabases = "declarative-databases" + // LabelDisruptive is the string for labelling disruptive tests LabelDisruptive = "disruptive" - // LabelImportingDatabases is a label for selecting importing-databases test + // LabelImportingDatabases is a label for selecting the importing-databases test LabelImportingDatabases = "importing-databases" - // LabelMaintenance is a label for selecting maintenance test + // LabelMaintenance is a label for selecting maintenance tests LabelMaintenance = "maintenance" - // LabelNoOpenshift is the string for labelling tests that don't run on Openshift + // LabelNoOpenshift is the string for selecting tests that don't run on Openshift LabelNoOpenshift = "no-openshift" - // LabelObservability is a label for selecting observability test + // LabelObservability is a label for selecting observability tests LabelObservability = "observability" // LabelOperator is a label for only selecting operator tests @@ -50,7 +53,7 @@ const ( // LabelPerformance is the string for labelling performance tests LabelPerformance = "performance" - // LabelPlugin is a label for selecting plugin test + // LabelPlugin is a label for selecting plugin tests LabelPlugin = "plugin" // LabelPodScheduling is a label for selecting pod-scheduling test @@ -59,33 +62,33 @@ const ( // LabelPostgresConfiguration is a label for selecting postgres-configuration test LabelPostgresConfiguration = "postgres-configuration" - // LabelRecovery is a label for selecting recovery test + // LabelRecovery is a label for selecting recovery tests LabelRecovery = "recovery" - // LabelReplication is a label for selecting replication test + // LabelReplication is a label for selecting replication tests LabelReplication = "replication" - // LabelSecurity is a label for selecting security test + // LabelSecurity is a label for selecting security tests LabelSecurity = "security" - // LabelSelfHealing is a label for selecting self-healing test + // LabelSelfHealing is a label for selecting self-healing tests LabelSelfHealing = "self-healing" - // LabelServiceConnectivity is a label for selecting service connections test + // LabelServiceConnectivity is a label for selecting service connections tests LabelServiceConnectivity = "service-connectivity" - // LabelSmoke is a label for selecting smoke test + // LabelSmoke is a label for selecting smoke tests LabelSmoke = "smoke" // LabelSnapshot is a label for selecting snapshot tests LabelSnapshot = "snapshot" - // LabelStorage is a label for selecting storage test + // LabelStorage is a label for selecting storage tests LabelStorage = "storage" - // LabelTablespaces is a lable for selectin the tablespaces tests + // LabelTablespaces is a label for selecting the tablespaces test LabelTablespaces = "tablespaces" - // LabelUpgrade is the string for labelling upgrade tests + // LabelUpgrade is a label for upgrade tests LabelUpgrade = "upgrade" ) From 46657933f4894588191b62076ca1d3f408a9711f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 15:58:02 +0200 Subject: [PATCH 100/836] fix(deps): update github.com/cloudnative-pg/cnpg-i digest to 8d61352 (main) (#5865) 
https://redirect.github.com/cloudnative-pg/cnpg-i `7e24b2e` -> `8d61352` google.golang.org/protobuf `v1.34.2` -> `v1.35.1` --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index d0ca1763c6..b519ff235d 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c - github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 + github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6 github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 @@ -114,7 +114,7 @@ require ( golang.org/x/tools v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 19a21dfdaa..9be832577f 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c h1:JQK5GOXSukWTInG5GzgmlTwY/rs5yO446+xy09NqbLg= github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6 h1:QokKbYfQ0sRWMHDB0sVUL1H/kGQki+AXBfBRp7J+9Og= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b h1:4Q2VQsPlLHliJdi87zodQ0FHLd1cJINMm4N70eu8rRg= github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -262,8 +262,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From ccfa3be896dd5ab24a0c00a703ec73fd953dde4d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 23:04:19 +0200 Subject: [PATCH 101/836] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.77.2 (main) (#5919) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b519ff235d..f6aa11ec03 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 diff --git a/go.sum b/go.sum index 9be832577f..1c1d7bd228 100644 --- a/go.sum +++ b/go.sum @@ -157,8 +157,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1 h1:XGoEXT6WTTihO+MD8MAao+YaQIH905HbK0WK2lyo28k= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.1/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2 h1:F/MALZ518KfI1zEg+Kg8/uTzoXKDyqw+LNC/5irJlJE= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= From 17a84eda078811f556ec119c035b3cc4f6592534 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 23 Oct 2024 10:26:51 +0200 Subject: [PATCH 102/836] chore(tests): Refactored backup and restore tests by separating backend-specific logic (#5735) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, all backup and restore tests for different backends (MinIO, Azurite, Azure) were combined in a single file, making it difficult to manage and maintain. This change separates the tests, creating individual files for each backend, with dedicated functions tailored to each backend's requirements. This improves readability and organization of the code. Closes #5632 Signed-off-by: Jonathan Gonzalez V. 
Signed-off-by: Francesco Canovai Signed-off-by: Niccolò Fei Co-authored-by: Francesco Canovai Co-authored-by: Niccolò Fei --- hack/setup-cluster.sh | 2 +- tests/e2e/asserts_test.go | 389 +------ tests/e2e/backup_restore_azure_test.go | 486 +++++++++ tests/e2e/backup_restore_azurite_test.go | 353 ++++++ tests/e2e/backup_restore_minio_test.go | 800 ++++++++++++++ tests/e2e/backup_restore_test.go | 1250 ---------------------- tests/e2e/replica_mode_cluster_test.go | 33 +- tests/e2e/suite_test.go | 5 +- tests/e2e/tablespaces_test.go | 38 +- tests/e2e/upgrade_test.go | 27 +- tests/e2e/volume_snapshot_test.go | 30 +- tests/e2e/wal_restore_parallel_test.go | 57 +- tests/utils/azurite.go | 299 +++++- tests/utils/backup.go | 243 +---- tests/utils/{ => minio}/minio.go | 145 +-- tests/utils/secrets.go | 28 + 16 files changed, 2241 insertions(+), 1944 deletions(-) create mode 100644 tests/e2e/backup_restore_azure_test.go create mode 100644 tests/e2e/backup_restore_azurite_test.go create mode 100644 tests/e2e/backup_restore_minio_test.go delete mode 100644 tests/e2e/backup_restore_test.go rename tests/utils/{ => minio}/minio.go (76%) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 9b24a33022..a5444c392f 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -83,7 +83,7 @@ registry_name=registry.dev POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")} E2E_PRE_ROLLING_UPDATE_IMG=${E2E_PRE_ROLLING_UPDATE_IMG:-${POSTGRES_IMG%.*}} PGBOUNCER_IMG=${PGBOUNCER_IMG:-$(grep 'DefaultPgbouncerImage.*=' "${ROOT_DIR}/pkg/specs/pgbouncer/deployments.go" | cut -f 2 -d \")} -MINIO_IMG=${MINIO_IMG:-$(grep 'minioImage.*=' "${ROOT_DIR}/tests/utils/minio.go" | cut -f 2 -d \")} +MINIO_IMG=${MINIO_IMG:-$(grep 'minioImage.*=' "${ROOT_DIR}/tests/utils/minio/minio.go" | cut -f 2 -d \")} APACHE_IMG=${APACHE_IMG:-"httpd"} HELPER_IMGS=("$POSTGRES_IMG" "$E2E_PRE_ROLLING_UPDATE_IMG" "$PGBOUNCER_IMG" "$MINIO_IMG" "$APACHE_IMG") diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 9fc3c9dba6..685630691f 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -43,6 +43,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -173,7 +174,8 @@ func AssertSwitchoverWithHistory( } numHistory := len(strings.Split(strings.TrimSpace(out), "\n")) - GinkgoWriter.Printf("count %d: pod: %s, the number of history file in pg_wal: %d\n", count, pod, numHistory) + GinkgoWriter.Printf("count %d: pod: %s, the number of history file in pg_wal: %d\n", count, pod, + numHistory) count++ if numHistory > 0 { continue @@ -291,8 +293,11 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env }) } -func AssertClusterDefault(namespace string, clusterName string, - isExpectedToDefault bool, env *testsUtils.TestingEnvironment, +func AssertClusterDefault( + namespace string, + clusterName string, + isExpectedToDefault bool, + env *testsUtils.TestingEnvironment, ) { By("having a Cluster object populated with default values", func() { // Eventually the number of ready instances should be equal to the @@ -334,8 +339,14 @@ func AssertWebhookEnabled(env *testsUtils.TestingEnvironment, mutating, validati } // Update the secrets and verify cluster reference the updated resource version of secrets -func AssertUpdateSecret(field string, value string, secretName string, namespace string, - clusterName string, timeout int, env *testsUtils.TestingEnvironment, +func AssertUpdateSecret( + field string, + value string, + secretName string, + namespace string, + clusterName string, + timeout int, + env *testsUtils.TestingEnvironment, ) { var secret corev1.Secret Eventually(func(g Gomega) { @@ -376,8 +387,14 @@ func AssertUpdateSecret(field string, value string, secretName string, namespace // AssertConnection is used if a connection from a pod to a postgresql // database works -func AssertConnection(host string, user string, dbname string, - password string, queryingPod *corev1.Pod, timeout int, env *testsUtils.TestingEnvironment, +func AssertConnection( + host string, + user string, + dbname string, + password string, + queryingPod *corev1.Pod, + timeout int, + env *testsUtils.TestingEnvironment, ) { By(fmt.Sprintf("connecting to the %v service as %v", host, user), func() { Eventually(func() string { @@ -746,25 +763,6 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { }) } -func AssertStorageCredentialsAreCreated(namespace string, name string, id string, key string) { - Eventually(func() error { - _, _, err := testsUtils.Run(fmt.Sprintf("kubectl create secret generic %v -n %v "+ - "--from-literal='ID=%v' "+ - "--from-literal='KEY=%v'", - name, namespace, id, key)) - return err - }, 60, 5).Should(BeNil()) -} - -// minioPath gets the MinIO file string for WAL/backup objects in a configured bucket -func minioPath(serverName, fileName string) string { - // the * regexes enable matching these typical paths: - // minio/backups/serverName/base/20220618T140300/data.tar - // minio/backups/serverName/wals/0000000100000000/000000010000000000000002.gz - // minio/backups/serverName/wals/00000002.history.gz - return filepath.Join("*", serverName, "*", fileName) -} - // CheckPointAndSwitchWalOnPrimary trigger a checkpoint and switch wal on primary pod and returns the latest WAL file func CheckPointAndSwitchWalOnPrimary(namespace, clusterName string) string { var latestWAL string @@ -786,13 +784,13 @@ func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) { Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL := switchWalAndGetLatestArchive(namespace, primary) - latestWALPath = minioPath(serverName, latestWAL+".gz") + latestWALPath = 
minio.GetFilePath(serverName, latestWAL+".gz") }) By(fmt.Sprintf("verify the existence of WAL %v in minio", latestWALPath), func() { Eventually(func() (int, error) { // WALs are compressed with gzip in the fixture - return testsUtils.CountFilesOnMinio(minioEnv, latestWALPath) + return minio.CountFiles(minioEnv, latestWALPath) }, testTimeouts[testsUtils.WalsInMinio]).Should(BeEquivalentTo(1)) }) } @@ -1380,9 +1378,11 @@ func AssertMetricsData(namespace, targetOne, targetTwo, targetSecret string, clu podName := pod.GetName() out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetOne))).Should(BeTrue(), + Expect(strings.Contains(out, + fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetOne))).Should(BeTrue(), "Metric collection issues on %v.\nCollected metrics:\n%v", podName, out) - Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetTwo))).Should(BeTrue(), + Expect(strings.Contains(out, + fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetTwo))).Should(BeTrue(), "Metric collection issues on %v.\nCollected metrics:\n%v", podName, out) Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_test_rows{datname="%v"} 1`, targetSecret))).Should(BeTrue(), @@ -1453,54 +1453,6 @@ func AssertSSLVerifyFullDBConnectionFromAppPod(namespace string, clusterName str }) } -func AssertCreateSASTokenCredentials(namespace string, id string, key string) { - // Adding 24 hours to the current time - date := time.Now().UTC().Add(time.Hour * 24) - // Creating date time format for az command - expiringDate := fmt.Sprintf("%v"+"-"+"%d"+"-"+"%v"+"T"+"%v"+":"+"%v"+"Z", - date.Year(), - date.Month(), - date.Day(), - date.Hour(), - date.Minute()) - - out, _, err := testsUtils.Run(fmt.Sprintf( - // SAS Token at Blob Container level does not currently work in Barman Cloud - // https://github.com/EnterpriseDB/barman/issues/388 - // we will use SAS Token at Storage Account level - // ( "az storage container generate-sas --account-name %v "+ - // "--name %v "+ - // "--https-only --permissions racwdl --auth-mode key --only-show-errors "+ - // "--expiry \"$(date -u -d \"+4 hours\" '+%%Y-%%m-%%dT%%H:%%MZ')\"", - // id, blobContainerName ) - "az storage account generate-sas --account-name %v "+ - "--https-only --permissions cdlruwap --account-key %v "+ - "--resource-types co --services b --expiry %v -o tsv", - id, key, expiringDate)) - Expect(err).ToNot(HaveOccurred()) - SASTokenRW := strings.TrimRight(out, "\n") - - out, _, err = testsUtils.Run(fmt.Sprintf( - "az storage account generate-sas --account-name %v "+ - "--https-only --permissions lr --account-key %v "+ - "--resource-types co --services b --expiry %v -o tsv", - id, key, expiringDate)) - Expect(err).ToNot(HaveOccurred()) - SASTokenRO := strings.TrimRight(out, "\n") - - AssertROSASTokenUnableToWrite("restore-cluster-sas", id, SASTokenRO) - - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds-sas", id, SASTokenRW) - AssertStorageCredentialsAreCreated(namespace, "restore-storage-creds-sas", id, SASTokenRO) -} - -func AssertROSASTokenUnableToWrite(containerName string, id string, key string) { - _, _, err := testsUtils.RunUnchecked(fmt.Sprintf("az storage container create "+ - "--name %v --account-name %v "+ - "--sas-token %v", containerName, id, key)) - Expect(err).To(HaveOccurred()) -} - func AssertClusterAsyncReplica(namespace, sourceClusterFile, 
restoreClusterFile, tableName string) { By("Async Replication into external cluster", func() { restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) @@ -1864,7 +1816,8 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta }) // Gather credentials - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, + env) Expect(err).ToNot(HaveOccurred()) primaryPod, err := env.GetClusterPrimary(namespace, clusterName) @@ -1945,38 +1898,6 @@ func AssertArchiveConditionMet(namespace, clusterName, timeout string) { }) } -func AssertArchiveWalOnAzurite(namespace, clusterName string) { - // Create a WAL on the primary and check if it arrives at the Azure Blob Storage within a short time - By("archiving WALs and verifying they exist", func() { - primary := clusterName + "-1" - latestWAL := switchWalAndGetLatestArchive(namespace, primary) - // verifying on blob storage using az - // Define what file we are looking for in Azurite. - // Escapes are required since az expects forward slashes to be escaped - path := fmt.Sprintf("%v\\/wals\\/0000000100000000\\/%v.gz", clusterName, latestWAL) - // verifying on blob storage using az - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path) - }, 60).Should(BeEquivalentTo(1)) - }) -} - -func AssertArchiveWalOnAzureBlob(namespace, clusterName string, configuration testsUtils.AzureConfiguration) { - // Create a WAL on the primary and check if it arrives at the Azure Blob Storage, within a short time - By("archiving WALs and verifying they exist", func() { - primary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - latestWAL := switchWalAndGetLatestArchive(primary.Namespace, primary.Name) - // Define what file we are looking for in Azure. 
- // Escapes are required since az expects forward slashes to be escaped - path := fmt.Sprintf("wals\\/0000000100000000\\/%v.gz", latestWAL) - // Verifying on blob storage using az - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzureBlobStorage(configuration, clusterName, path) - }, 60).Should(BeEquivalentTo(1)) - }) -} - // switchWalAndGetLatestArchive trigger a new wal and get the name of latest wal file func switchWalAndGetLatestArchive(namespace, podName string) string { _, _, err := env.ExecQueryInInstancePod( @@ -2000,236 +1921,6 @@ func switchWalAndGetLatestArchive(namespace, podName string) string { return strings.TrimSpace(out) } -func prepareClusterForPITROnMinio( - namespace, - clusterName, - backupSampleFile string, - expectedVal int, - currentTimestamp *string, -) { - const tableNamePitr = "for_restore" - - By("backing up a cluster and verifying it exists on minio", func() { - testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testsUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeNumerically(">=", expectedVal), - fmt.Sprintf("verify the number of backups %v is greater than or equal to %v", latestTar, - expectedVal)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, - TableName: tableNamePitr, - } - AssertCreateTestData(env, tableLocator) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testsUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - _ = conn.Close() - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - - insertRecordIntoTable(tableNamePitr, 3, conn) - }) - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - AssertArchiveConditionMet(namespace, clusterName, "5m") - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterForPITROnAzureBlob( - namespace string, - clusterName string, - backupSampleFile string, - azureConfig testsUtils.AzureConfiguration, - expectedVal int, - currentTimestamp *string, -) { - const tableNamePitr = "for_restore" - By("backing up a cluster and verifying it exists on Azure Blob", func() { - testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env) - - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") - }, 30).Should(BeEquivalentTo(expectedVal)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - 
DatabaseName: testsUtils.AppDBName, - TableName: tableNamePitr, - } - AssertCreateTestData(env, tableLocator) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testsUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - _ = conn.Close() - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - insertRecordIntoTable(tableNamePitr, 3, conn) - }) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - AssertArchiveConditionMet(namespace, clusterName, "5m") - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile string) { - By("creating the Azurite storage credentials", func() { - err := testsUtils.CreateStorageCredentialsOnAzurite(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - By("setting up Azurite to hold the backups", func() { - // Deploying azurite for blob storage - err := testsUtils.InstallAzurite(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - By("setting up az-cli", func() { - // This is required as we have a service of Azurite running locally. - // In order to connect, we need az cli inside the namespace - err := testsUtils.InstallAzCli(namespace, env) - Expect(err).ToNot(HaveOccurred()) - }) - - // Creating cluster - AssertCreateCluster(namespace, clusterName, clusterSampleFile, env) - - AssertArchiveConditionMet(namespace, clusterName, "5m") -} - -func prepareClusterBackupOnAzurite( - namespace, - clusterName, - clusterSampleFile, - backupFile, - tableName string, -) { - // Setting up Azurite and az cli along with Postgresql cluster - prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile) - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - AssertArchiveWalOnAzurite(namespace, clusterName) - - By("backing up a cluster and verifying it exists on azurite", func() { - // We create a Backup - testsUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testsUtils.BackupIsReady], env) - // Verifying file called data.tar should be available on Azurite blob storage - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - AssertBackupConditionInClusterStatus(namespace, clusterName) -} - -func prepareClusterForPITROnAzurite( - namespace, - clusterName, - backupSampleFile string, - currentTimestamp *string, -) { - By("backing up a cluster and verifying it exists on azurite", func() { - // We create a Backup - testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env) - // Verifying file called data.tar should be available on Azurite blob storage - Eventually(func() (int, error) { - return testsUtils.CountFilesOnAzuriteBlobStorage(namespace, 
clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Write a table and insert 2 entries on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, - TableName: "for_restore", - } - AssertCreateTestData(env, tableLocator) - - By("getting currentTimestamp", func() { - ts, err := testsUtils.GetCurrentTimestamp(namespace, clusterName, env) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - - By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testsUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - _ = conn.Close() - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - insertRecordIntoTable("for_restore", 3, conn) - }) - AssertArchiveWalOnAzurite(namespace, clusterName) -} - func createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerYamlFilePath string, expectedInstanceCount int) { CreateResourceFromFile(namespace, poolerYamlFilePath) Eventually(func() (int32, error) { @@ -2925,19 +2616,6 @@ func AssertBackupConditionTimestampChangedInClusterStatus( }) } -func AssertBackupConditionInClusterStatus(namespace, clusterName string) { - By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { - Eventually(func() (string, error) { - getBackupCondition, err := testsUtils.GetConditionsInClusterStatus( - namespace, clusterName, env, apiv1.ConditionBackup) - if err != nil { - return "", err - } - return string(getBackupCondition.Status), nil - }, 300, 5).Should(BeEquivalentTo("True")) - }) -} - func AssertClusterReadinessStatusIsReached( namespace, clusterName string, @@ -3081,7 +2759,8 @@ func AssertClusterHAReplicationSlots(namespace, clusterName string) { podList, err := env.GetClusterPodList(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { - expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env) + expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), + env) Expect(err).ToNot(HaveOccurred()) AssertReplicationSlotsOnPod(namespace, clusterName, pod, expectedSlots, true, false) } diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go new file mode 100644 index 0000000000..65c3f8a2ed --- /dev/null +++ b/tests/e2e/backup_restore_azure_test.go @@ -0,0 +1,486 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "fmt" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), func() { + const ( + tableName = "to_restore" + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsAKS() { + Skip("This test is only run on AKS clusters") + } + }) + + Context("using azure blobs as object storage with storage account access authentication", Ordered, func() { + // We must be careful here. All the clusters use the same remote storage + // and that means that we must use different cluster names otherwise + // we risk mixing WALs and backups + const azureBlobSampleFile = fixturesDir + "/backup/azure_blob/cluster-with-backup-azure-blob.yaml.template" + const clusterRestoreSampleFile = fixturesDir + "/backup/azure_blob/cluster-from-restore.yaml.template" + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azure-blob.yaml" + backupFile := fixturesDir + "/backup/azure_blob/backup-azure-blob.yaml" + var namespace, clusterName string + + BeforeAll(func() { + const namespacePrefix = "cluster-backup-azure-blob" + var err error + clusterName, err = env.GetResourceNameFromYAML(azureBlobSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests. 
+ // The credentials are retrieved from the environment variables, as we can't create + // a fixture for them + By("creating the Azure Blob Storage credentials", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.StorageKey, + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, azureBlobSampleFile, env) + }) + + // We back up and restore a cluster, and verify some expected data to + // be there + It("backs up and restore a cluster", func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + By("uploading a backup", func() { + // We create a backup + testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Restore backup in a new cluster + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) + + By("deleting the restored cluster", func() { + err := DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // Create a scheduled backup with the 'immediate' option enabled. 
We expect the backup to be available + It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { + scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) + + // Only one data.tar files should be present + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, + clusterName, "data.tar") + }, 30).Should(BeNumerically("==", 2)) + }) + + It("backs up and restore a cluster with PITR", func() { + restoredClusterName := "restore-cluster-azure-pitr" + currentTimestamp := new(string) + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + backupFile, + env.AzureConfiguration, + 2, + currentTimestamp, + ) + + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + + cluster, err := testUtils.CreateClusterFromBackupUsingPITR( + namespace, + restoredClusterName, + backupFile, + *currentTimestamp, + env, + ) + Expect(err).ToNot(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") + By("deleting the restored cluster", func() { + Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) + }) + }) + + // We create a cluster, create a scheduled backup, patch it to suspend its + // execution. We verify that the number of backups does not increase. + // We then patch it again back to its initial state and verify that + // the amount of backups keeps increasing again + It("verifies that scheduled backups can be suspended", func() { + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azure-blob.yaml" + scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + By("scheduling backups", func() { + AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 480) + + // AssertScheduledBackupsImmediate creates at least two backups, we should find + // their base backups + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, + clusterName, "data.tar") + }, 60).Should(BeNumerically(">=", 2)) + }) + AssertSuspendScheduleBackups(namespace, scheduledBackupName) + }) + }) +}) + +var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() { + const ( + fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" + sourceBackupFileAzure = fixturesBackupDir + "backup-azure-blob-02.yaml" + clusterSourceFileAzure = fixturesBackupDir + "source-cluster-azure-blob-01.yaml.template" + externalClusterFileAzure = fixturesBackupDir + "external-clusters-azure-blob-03.yaml.template" + sourceBackupFileAzurePITR = fixturesBackupDir + "backup-azure-blob-pitr.yaml" + tableName = "to_restore" + clusterSourceFileAzureSAS = fixturesBackupDir + "cluster-with-backup-azure-blob-sas.yaml.template" + clusterRestoreFileAzureSAS = fixturesBackupDir + "cluster-from-restore-sas.yaml.template" + sourceBackupFileAzureSAS = fixturesBackupDir + "backup-azure-blob-sas.yaml" + sourceBackupFileAzurePITRSAS = fixturesBackupDir + "backup-azure-blob-pitr-sas.yaml" + level = tests.High + ) + + currentTimestamp 
:= new(string) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsAKS() { + Skip("This test is only executed on AKS clusters") + } + }) + + // Restore cluster using a recovery object store, that is a backup of another cluster, + // created by Barman Cloud, and defined via the barmanObjectStore option in the externalClusters section + + Context("using azure blobs as object storage", func() { + Context("storage account access authentication", Ordered, func() { + var namespace, clusterName string + BeforeAll(func() { + const namespacePrefix = "recovery-barman-object-azure" + var err error + clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzure) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests. + // The credentials are retrieved from the environment variables, as we can't create + // a fixture for them + By("creating the Azure Blob Storage credentials", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.StorageKey, + env) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, clusterSourceFileAzure, env) + }) + + It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + + By("backing up a cluster and verifying it exists on azure blob storage", func() { + // Create the backup + testUtils.ExecuteBackup(namespace, sourceBackupFileAzure, false, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + }) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterRestore(namespace, externalClusterFileAzure, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using "+ + "'barmanObjectStore' option in 'externalClusters' section", func() { + externalClusterName := "external-cluster-azure-pitr" + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + sourceBackupFileAzurePITR, + env.AzureConfiguration, + 1, + currentTimestamp, + ) + + restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( + namespace, + externalClusterName, + clusterName, + *currentTimestamp, + "backup-storage-creds", + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.BlobContainer, + env) + Expect(err).ToNot(HaveOccurred()) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterWasRestoredWithPITRAndApplicationDB( + 
namespace, + externalClusterName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) + }) + }) + }) + + Context("storage account SAS Token authentication", Ordered, func() { + var namespace, clusterName string + BeforeAll(func() { + if !IsAKS() { + Skip("This test is only executed on AKS clusters") + } + const namespacePrefix = "cluster-backup-azure-blob-sas" + var err error + clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzureSAS) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // The Azure Blob Storage should have been created ad-hoc for the tests, + // we get the credentials from the environment variables as we can't create + // a fixture for them + By("creating the Azure Blob Container SAS Token credentials", func() { + err = testUtils.CreateSASTokenCredentials( + namespace, + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.StorageKey, + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the Cluster + AssertCreateCluster(namespace, clusterName, clusterSourceFileAzureSAS, env) + }) + + It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + // Create a WAL on the primary and check if it arrives in the + // Azure Blob Storage within a short time + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + + By("backing up a cluster and verifying it exists on azure blob storage", func() { + // We create a Backup + testUtils.ExecuteBackup(namespace, sourceBackupFileAzureSAS, false, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + }) + + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using "+ + "'barmanObjectStore' option in 'externalClusters' section", func() { + externalClusterName := "external-cluster-azure-pitr" + + prepareClusterForPITROnAzureBlob( + namespace, + clusterName, + sourceBackupFileAzurePITRSAS, + env.AzureConfiguration, + 1, + currentTimestamp, + ) + + restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( + namespace, + externalClusterName, + clusterName, + *currentTimestamp, + "backup-storage-creds-sas", + env.AzureConfiguration.StorageAccount, + env.AzureConfiguration.BlobContainer, + env) + Expect(err).ToNot(HaveOccurred()) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(testUtils.DeleteObject(env, 
restoredCluster)).To(Succeed()) + }) + }) + }) + }) +}) + +func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration testUtils.AzureConfiguration) { + // Create a WAL on the primary and check if it arrives at the Azure Blob Storage, within a short time + By("archiving WALs and verifying they exist", func() { + primary, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + latestWAL := switchWalAndGetLatestArchive(primary.Namespace, primary.Name) + // Define what file we are looking for in Azure. + // Escapes are required since az expects forward slashes to be escaped + path := fmt.Sprintf("wals\\/0000000100000000\\/%v.gz", latestWAL) + // Verifying on blob storage using az + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(configuration, clusterName, path) + }, 60).Should(BeEquivalentTo(1)) + }) +} + +func prepareClusterForPITROnAzureBlob( + namespace string, + clusterName string, + backupSampleFile string, + azureConfig testUtils.AzureConfiguration, + expectedVal int, + currentTimestamp *string, +) { + const tableNamePitr = "for_restore" + By("backing up a cluster and verifying it exists on Azure Blob", func() { + testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env) + + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") + }, 30).Should(BeEquivalentTo(expectedVal)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableNamePitr, + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + _ = conn.Close() + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable(tableNamePitr, 3, conn) + }) + assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + AssertArchiveConditionMet(namespace, clusterName, "5m") + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) +} diff --git a/tests/e2e/backup_restore_azurite_test.go b/tests/e2e/backup_restore_azurite_test.go new file mode 100644 index 0000000000..cb3254c5a3 --- /dev/null +++ b/tests/e2e/backup_restore_azurite_test.go @@ -0,0 +1,353 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore), func() { + const ( + tableName = "to_restore" + azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" + ) + BeforeEach(func() { + if testLevelEnv.Depth < int(tests.High) { + Skip("Test depth is lower than the amount requested for this test") + } + + if !(IsLocal() || IsGKE() || IsOpenshift()) { + Skip("This test is only executed on gke, openshift and local") + } + }) + + Context("using Azurite blobs as object storage", Ordered, func() { + // This is a set of tests using an Azurite server deployed in the same + // namespace as the cluster. Since each cluster is installed in its + // own namespace, they can share the configuration file + const ( + clusterRestoreSampleFile = fixturesDir + "/backup/azurite/cluster-from-restore.yaml.template" + scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azurite.yaml" + scheduledBackupImmediateSampleFile = fixturesDir + + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azurite.yaml" + backupFile = fixturesDir + "/backup/azurite/backup.yaml" + azuriteCaSecName = "azurite-ca-secret" + azuriteTLSSecName = "azurite-tls-secret" + ) + var namespace, clusterName string + + BeforeAll(func() { + const namespacePrefix = "cluster-backup-azurite" + var err error + clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + // Create and assert ca and tls certificate secrets on Azurite + By("creating ca and tls certificate secrets", func() { + err := testUtils.CreateCertificateSecretsOnAzurite(namespace, clusterName, + azuriteCaSecName, azuriteTLSSecName, env) + Expect(err).ToNot(HaveOccurred()) + }) + // Setup Azurite and az cli along with Postgresql cluster + prepareClusterBackupOnAzurite(namespace, clusterName, azuriteBlobSampleFile, backupFile, tableName) + }) + + It("restores a backed up cluster", func() { + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreSampleFile, tableName) + }) + + // Create a scheduled backup with the 'immediate' option enabled. 
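+		// (with the 'immediate' option, a first backup is triggered as soon as
+		// the ScheduledBackup resource is created)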
+		// We expect the backup to be available
+		It("immediately starts a backup using ScheduledBackups 'immediate' option", func() {
+			scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupImmediateSampleFile)
+			Expect(err).ToNot(HaveOccurred())
+
+			AssertScheduledBackupsImmediate(namespace, scheduledBackupImmediateSampleFile, scheduledBackupName)
+
+			// AssertScheduledBackupsImmediate creates two backups; we should find
+			// their base backups
+			Eventually(func() (int, error) {
+				return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
+			}, 30).Should(BeNumerically("==", 2))
+		})
+
+		It("backs up and restores a cluster with PITR on Azurite", func() {
+			const (
+				restoredClusterName = "restore-cluster-pitr-azurite"
+				backupFilePITR      = fixturesDir + "/backup/azurite/backup-pitr.yaml"
+			)
+			currentTimestamp := new(string)
+
+			prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp)
+
+			cluster, err := testUtils.CreateClusterFromBackupUsingPITR(
+				namespace,
+				restoredClusterName,
+				backupFilePITR,
+				*currentTimestamp,
+				env,
+			)
+			Expect(err).NotTo(HaveOccurred())
+			AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env)
+
+			// Restore the backup in a new cluster; this also covers the case where
+			// no application database is configured
+			AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002")
+
+			By("deleting the restored cluster", func() {
+				Expect(testUtils.DeleteObject(env, cluster)).To(Succeed())
+			})
+		})
+
+		// We create a cluster and a scheduled backup, then patch the scheduled
+		// backup to suspend its execution. We verify that the number of backups
+		// does not increase. We then patch it back to its initial state and
+		// verify that the number of backups starts increasing again
+		It("verifies that scheduled backups can be suspended", func() {
+			scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("scheduling backups", func() {
+				AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300)
+				Eventually(func() (int, error) {
+					return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar")
+				}, 60).Should(BeNumerically(">=", 3))
+			})
+
+			AssertSuspendScheduleBackups(namespace, scheduledBackupName)
+		})
+	})
+})
+
+var _ = Describe("Azurite - Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() {
+	const (
+		fixturesBackupDir          = fixturesDir + "/backup/recovery_external_clusters/"
+		azuriteBlobSampleFile      = fixturesDir + "/backup/azurite/cluster-backup.yaml.template"
+		backupFileAzurite          = fixturesBackupDir + "backup-azurite-02.yaml"
+		externalClusterFileAzurite = fixturesBackupDir + "external-clusters-azurite-03.yaml.template"
+
+		azuriteCaSecName  = "azurite-ca-secret"
+		azuriteTLSSecName = "azurite-tls-secret"
+		tableName         = "to_restore"
+	)
+	Context("using Azurite blobs as object storage", Ordered, func() {
+		var namespace, clusterName string
+		BeforeAll(func() {
+			if IsAKS() {
+				Skip("This test is not run on AKS")
+			}
+			const namespacePrefix = "recovery-barman-object-azurite"
+			var err error
+			clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile)
+			Expect(err).ToNot(HaveOccurred())
+
+			// Create a cluster in a namespace we'll delete after the test
+			namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+			Expect(err).ToNot(HaveOccurred())
+
+			// Create and assert ca and tls certificate secrets on Azurite
+			By("creating ca 
and tls certificate secrets", func() { + err := testUtils.CreateCertificateSecretsOnAzurite( + namespace, + clusterName, + azuriteCaSecName, + azuriteTLSSecName, + env) + Expect(err).ToNot(HaveOccurred()) + }) + // Setup Azurite and az cli along with PostgreSQL cluster + prepareClusterBackupOnAzurite( + namespace, + clusterName, + azuriteBlobSampleFile, + backupFileAzurite, + tableName, + ) + }) + + It("restore cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, externalClusterFileAzurite, tableName) + }) + + It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ + " option in 'externalClusters' section", func() { + const ( + externalClusterRestoreName = "restore-external-cluster-pitr" + backupFileAzuritePITR = fixturesBackupDir + "backup-azurite-pitr.yaml" + ) + currentTimestamp := new(string) + prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp) + + // Create a cluster from a particular time using external backup. + restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzurite( + namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) + Expect(err).NotTo(HaveOccurred()) + + AssertClusterWasRestoredWithPITRAndApplicationDB( + namespace, + externalClusterRestoreName, + tableName, + "00000002", + ) + + By("delete restored cluster", func() { + Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) + }) + }) + }) +}) + +func prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile string) { + By("creating the Azurite storage credentials", func() { + err := testUtils.CreateStorageCredentialsOnAzurite(namespace, env) + Expect(err).ToNot(HaveOccurred()) + }) + + By("setting up Azurite to hold the backups", func() { + // Deploying azurite for blob storage + err := testUtils.InstallAzurite(namespace, env) + Expect(err).ToNot(HaveOccurred()) + }) + + By("setting up az-cli", func() { + // This is required as we have a service of Azurite running locally. 
+ // In order to connect, we need az cli inside the namespace + err := testUtils.InstallAzCli(namespace, env) + Expect(err).ToNot(HaveOccurred()) + }) + + // Creating cluster + AssertCreateCluster(namespace, clusterName, clusterSampleFile, env) + + AssertArchiveConditionMet(namespace, clusterName, "5m") +} + +func prepareClusterBackupOnAzurite( + namespace, + clusterName, + clusterSampleFile, + backupFile, + tableName string, +) { + // Setting up Azurite and az cli along with Postgresql cluster + prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile) + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzurite(namespace, clusterName) + + By("backing up a cluster and verifying it exists on azurite", func() { + // We create a Backup + testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) + // Verifying file called data.tar should be available on Azurite blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) +} + +func prepareClusterForPITROnAzurite( + namespace, + clusterName, + backupSampleFile string, + currentTimestamp *string, +) { + By("backing up a cluster and verifying it exists on azurite", func() { + // We create a Backup + testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env) + // Verifying file called data.tar should be available on Azurite blob storage + Eventually(func() (int, error) { + return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: "for_restore", + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + insertRecordIntoTable("for_restore", 3, conn) + }) + assertArchiveWalOnAzurite(namespace, clusterName) +} + +func assertArchiveWalOnAzurite(namespace, clusterName string) { + // Create a WAL on the primary and check if it arrives at the Azure Blob Storage within a short time + By("archiving WALs and verifying they exist", func() { + primary := clusterName + "-1" + latestWAL := 
switchWalAndGetLatestArchive(namespace, primary)
+		// Define what file we are looking for in Azurite.
+		// Escapes are required since az expects forward slashes to be escaped
+		path := fmt.Sprintf("%v\\/wals\\/0000000100000000\\/%v.gz", clusterName, latestWAL)
+		// Verifying on blob storage using az
+		Eventually(func() (int, error) {
+			return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path)
+		}, 60).Should(BeEquivalentTo(1))
+	})
+}
diff --git a/tests/e2e/backup_restore_minio_test.go b/tests/e2e/backup_restore_minio_test.go
new file mode 100644
index 0000000000..41ada349f0
--- /dev/null
+++ b/tests/e2e/backup_restore_minio_test.go
@@ -0,0 +1,800 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"fmt"
+	"path/filepath"
+
+	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/tests"
+	testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), func() {
+	const (
+		tableName                 = "to_restore"
+		barmanCloudBackupLogEntry = "Starting barman-cloud-backup"
+	)
+	BeforeEach(func() {
+		if testLevelEnv.Depth < int(tests.High) {
+			Skip("Test depth is lower than the amount requested for this test")
+		}
+	})
+
+	Context("using minio as object storage for backup", Ordered, func() {
+		// This is a set of tests using a minio server deployed in the same
+		// namespace as the cluster. 
Since each cluster is installed in its + // own namespace, they can share the configuration file + var namespace, clusterName string + const ( + backupFile = fixturesDir + "/backup/minio/backup-minio.yaml" + customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml" + ) + + clusterWithMinioSampleFile := fixturesDir + "/backup/minio/cluster-with-backup-minio.yaml.template" + + BeforeAll(func() { + if !IsLocal() { + Skip("This test is only run on local clusters") + } + const namespacePrefix = "cluster-backup-minio" + var err error + clusterName, err = env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + By("create the certificates for MinIO", func() { + err := minioEnv.CreateCaSecret(env, namespace) + Expect(err).ToNot(HaveOccurred()) + }) + + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create ConfigMap and secrets to verify metrics for target database after backup restore + AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 1, 1) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) + + By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() (bool, error) { + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( + namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + if err != nil { + return false, err + } + return connectionStatus, nil + }, 60).Should(BeTrue()) + }) + }) + + // We back up and restore a cluster, and verify some expected data to + // be there + It("backs up and restores a cluster using minio", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore.yaml.template" + ) + var backup *apiv1.Backup + restoredClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + backupName, err := env.GetResourceNameFromYAML(backupFile) + Expect(err).ToNot(HaveOccurred()) + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + latestTar := minio.GetFilePath(clusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster and verifying it exists on minio, backup path is %v", latestTar), + func() { + backup = testUtils.ExecuteBackup(namespace, backupFile, false, + testTimeouts[testUtils.BackupIsReady], env) + 
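// The Eventually blocks that follow first check the base backup file on
+				// MinIO, then the backup-related Cluster status fields:
+				// FirstRecoverabilityPoint and LastSuccessfulBackup must eventually
+				// be set, while LastFailedBackup must stay empty.
+				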
testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.LastSuccessfulBackup, err + }, 30).ShouldNot(BeEmpty()) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.LastFailedBackup, err + }, 30).Should(BeEmpty()) + }) + + By("verifying the backup is using the expected barman-cloud-backup options", func() { + Expect(backup).ToNot(BeNil()) + Expect(backup.Status.InstanceID).ToNot(BeNil()) + logEntries, err := testUtils.ParseJSONLogs(namespace, backup.Status.InstanceID.PodName, env) + Expect(err).ToNot(HaveOccurred()) + expectedBaseBackupOptions := []string{ + "--immediate-checkpoint", + "--min-chunk-size=5MB", + "--read-timeout=59", + } + result, err := testUtils.CheckOptionsForBarmanCommand( + logEntries, + barmanCloudBackupLogEntry, + backup.Name, + backup.Status.InstanceID.PodName, + expectedBaseBackupOptions, + ) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(BeTrue()) + }) + + By("executing a second backup and verifying the number of backups on minio", func() { + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + + // delete the first backup and create a second backup + backup := &apiv1.Backup{} + err := env.Client.Get(env.Ctx, + ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, + backup) + Expect(err).ToNot(HaveOccurred()) + err = env.Client.Delete(env.Ctx, backup) + Expect(err).ToNot(HaveOccurred()) + // create a second backup + testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) + latestTar = minio.GetFilePath(clusterName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(2)) + }) + + By("verifying the backupName is properly set in the status of the backup", func() { + backup := &apiv1.Backup{} + err := env.Client.Get(env.Ctx, + ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, + backup) + Expect(err).ToNot(HaveOccurred()) + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + // We know that our current images always contain the latest barman version + if cluster.ShouldForceLegacyBackup() { + Expect(backup.Status.BackupName).To(BeEmpty()) + } else { + Expect(backup.Status.BackupName).To(HavePrefix("backup-")) + } + }) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) + + cluster, err := env.GetCluster(namespace, restoredClusterName) + Expect(err).ToNot(HaveOccurred()) + AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) + + previous := 0 + latestGZ := filepath.Join("*", clusterName, "*", "*.history.gz") + By(fmt.Sprintf("checking the previous number of .history files in minio, history file name is %v", + latestGZ), func() { + previous, err = minio.CountFiles(minioEnv, latestGZ) + 
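// A switchover promotes a replica onto a new PostgreSQL timeline, and
+				// the archiver then uploads a new .history file; the count taken here
+				// is compared with a fresh count after AssertSwitchover below.
+				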
Expect(err).ToNot(HaveOccurred()) + }) + + AssertSwitchover(namespace, clusterName, env) + + By("checking the number of .history after switchover", func() { + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestGZ) + }, 60).Should(BeNumerically(">", previous)) + }) + + By("deleting the restored cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // We backup and restore a cluster from a standby, and verify some expected data to + // be there + It("backs up and restore a cluster from standby", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterWithMinioStandbySampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-standby.yaml.template" + backupStandbyFile = fixturesDir + "/backup/minio/backup-minio-standby.yaml" + ) + + targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioStandbySampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, targetClusterName, clusterWithMinioStandbySampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: targetClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) + latestTar := minio.GetFilePath(targetClusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster from standby and verifying it exists on minio, backup path is %v", + latestTar), func() { + testUtils.ExecuteBackup(namespace, backupStandbyFile, true, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, targetClusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, targetClusterName) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + }) + + // We backup and restore a cluster from a standby, and verify some expected data to + // be there + It("backs up a cluster from standby with backup target defined in backup", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterWithMinioSampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-primary.yaml.template" + backupWithTargetFile = fixturesDir + "/backup/minio/backup-minio-override-target.yaml" + ) + + targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, targetClusterName, clusterWithMinioSampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) 
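+			// A base backup is physical and captures the whole instance, so the
+			// extra databases seeded above and below end up in the backup along
+			// with the default "app" database.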
+ AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: targetClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) + latestTar := minio.GetFilePath(targetClusterName, "data.tar") + + // There should be a backup resource and + By(fmt.Sprintf("backing up a cluster from standby (defined in backup file) and verifying it exists on minio,"+ + " backup path is %v", latestTar), func() { + testUtils.ExecuteBackup(namespace, backupWithTargetFile, true, testTimeouts[testUtils.BackupIsReady], + env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, targetClusterName) + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, targetClusterName) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + By("deleting the cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterWithMinioSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // Test that the restore works if the source cluster has a custom + // backup.barmanObjectStore.serverName that is different from the cluster name + It("backs up and restores a cluster with custom backup serverName", func() { + const ( + targetDBOne = "test" + targetDBTwo = "test1" + targetDBSecret = "secret_test" + testTableName = "test_table" + clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore-custom.yaml.template" + // clusterWithMinioCustomSampleFile has metadata.name != backup.barmanObjectStore.serverName + clusterWithMinioCustomSampleFile = fixturesDir + + "/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template" + backupFileCustom = fixturesDir + "/backup/minio/backup-minio-custom-servername.yaml" + clusterServerName = "pg-backup-minio-Custom-Name" + ) + + customClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioCustomSampleFile) + Expect(err).ToNot(HaveOccurred()) + + // Create the cluster with custom serverName in the backup spec + AssertCreateCluster(namespace, customClusterName, clusterWithMinioCustomSampleFile, env) + + // Create required test data + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBOne, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBTwo, testTableName) + AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBSecret, testTableName) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: customClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, customClusterName, clusterServerName) + + // There should be a backup resource and + By("backing up a cluster and verifying it exists on minio", func() { + testUtils.ExecuteBackup(namespace, backupFileCustom, false, testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, customClusterName) + latestBaseTar := 
minio.GetFilePath(clusterServerName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestBaseTar) + }, 60).Should(BeEquivalentTo(1), + fmt.Sprintf("verify the number of backup %v is equals to 1", latestBaseTar)) + // this is the second backup we take on the bucket + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, customClusterName) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Restore backup in a new cluster + AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) + + By("deleting the primary cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterWithMinioCustomSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + + By("deleting the restored cluster", func() { + err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + // Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available + It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { + const scheduledBackupSampleFile = fixturesDir + + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-minio.yaml" + scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + Expect(err).ToNot(HaveOccurred()) + + AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) + latestBaseTar := minio.GetFilePath(clusterName, "data.tar") + // AssertScheduledBackupsImmediate creates at least two backups, we should find + // their base backups + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestBaseTar) + }, 60).Should(BeNumerically(">=", 2), + fmt.Sprintf("verify the number of backup %v is >= 2", latestBaseTar)) + }) + + It("backs up and restore a cluster with PITR MinIO", func() { + const ( + restoredClusterName = "restore-cluster-pitr-minio" + backupFilePITR = fixturesDir + "/backup/minio/backup-minio-pitr.yaml" + ) + currentTimestamp := new(string) + prepareClusterForPITROnMinio( + namespace, + clusterName, + backupFilePITR, + 3, + currentTimestamp, + ) + + cluster, err := testUtils.CreateClusterFromBackupUsingPITR( + namespace, + restoredClusterName, + backupFilePITR, + *currentTimestamp, + env, + ) + Expect(err).NotTo(HaveOccurred()) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) + + // Restore backup in a new cluster, also cover if no application database is configured + AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003") + + By("deleting the restored cluster", func() { + Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) + }) + }) + + // We create a cluster and a scheduled backup, then it is patched to suspend its + // execution. We verify that the number of backups does not increase. 
+	// We then patch it back to its initial state and verify that
+	// the number of backups starts increasing again
+		It("verifies that scheduled backups can be suspended", func() {
+			const scheduledBackupSampleFile = fixturesDir +
+				"/backup/scheduled_backup_suspend/scheduled-backup-suspend-minio.yaml"
+			scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("scheduling backups", func() {
+				AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300)
+				latestTar := minio.GetFilePath(clusterName, "data.tar")
+				Eventually(func() (int, error) {
+					return minio.CountFiles(minioEnv, latestTar)
+				}, 60).Should(BeNumerically(">=", 2),
+					fmt.Sprintf("verify the number of backups %v is greater than or equal to 2", latestTar))
+			})
+
+			AssertSuspendScheduleBackups(namespace, scheduledBackupName)
+		})
+
+		It("verifies tags in backed-up files", func() {
+			AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
+			tags, err := minio.GetFileTags(minioEnv, minio.GetFilePath(clusterName, "*1.gz"))
+			Expect(err).ToNot(HaveOccurred())
+			Expect(tags.Tags).ToNot(BeEmpty())
+
+			currentPrimary, err := env.GetClusterPrimary(namespace, clusterName)
+			Expect(err).ToNot(HaveOccurred())
+			oldPrimary := currentPrimary.GetName()
+			// Force-delete the primary
+			quickDelete := &ctrlclient.DeleteOptions{
+				GracePeriodSeconds: &quickDeletionPeriod,
+			}
+			err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete)
+			Expect(err).ToNot(HaveOccurred())
+
+			AssertNewPrimary(namespace, clusterName, oldPrimary)
+
+			tags, err = minio.GetFileTags(minioEnv, minio.GetFilePath(clusterName, "*.history.gz"))
+			Expect(err).ToNot(HaveOccurred())
+			Expect(tags.Tags).ToNot(BeEmpty())
+		})
+	})
+})
+
+var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tests.LabelBackupRestore), func() {
+	const (
+		fixturesBackupDir               = fixturesDir + "/backup/recovery_external_clusters/"
+		externalClusterFileMinioReplica = fixturesBackupDir + "external-clusters-minio-replica-04.yaml.template"
+		clusterSourceFileMinio          = fixturesBackupDir + "source-cluster-minio-01.yaml.template"
+		externalClusterFileMinio        = fixturesBackupDir + "external-clusters-minio-03.yaml.template"
+		sourceTakeFirstBackupFileMinio  = fixturesBackupDir + "backup-minio-02.yaml"
+		sourceTakeSecondBackupFileMinio = fixturesBackupDir + "backup-minio-03.yaml"
+		sourceTakeThirdBackupFileMinio  = fixturesBackupDir + "backup-minio-04.yaml"
+		tableName                       = "to_restore"
+	)
+	BeforeEach(func() {
+		if testLevelEnv.Depth < int(tests.High) {
+			Skip("Test depth is lower than the amount requested for this test")
+		}
+	})
+
+	// Restore a cluster using a recovery object store, that is, a backup of
+	// another cluster created by Barman Cloud and defined via the
+	// barmanObjectStore option in the externalClusters section
+	Context("using minio as object storage", Ordered, func() {
+		var namespace, clusterName string
+
+		BeforeAll(func() {
+			if !IsLocal() {
+				Skip("This test is only executed on local clusters")
+			}
+			const namespacePrefix = "recovery-barman-object-minio"
+			var err error
+			clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileMinio)
+			Expect(err).ToNot(HaveOccurred())
+			// Create a cluster in a namespace we'll delete after the test
+			namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+			Expect(err).ToNot(HaveOccurred())
+
+			By("creating the credentials for minio", func() {
+				_, err = testUtils.CreateObjectStorageSecret(
+					namespace,
+					"backup-storage-creds",
+					"minio",
+					
"minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) + + By("create the certificates for MinIO", func() { + err := minioEnv.CreateCaSecret(env, namespace) + Expect(err).ToNot(HaveOccurred()) + }) + + // Create the cluster + AssertCreateCluster(namespace, clusterName, clusterSourceFileMinio, env) + + By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() (bool, error) { + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( + namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + if err != nil { + return false, err + } + return connectionStatus, nil + }, 60).Should(BeTrue()) + }) + }) + + It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", + func() { + externalClusterName, err := env.GetResourceNameFromYAML(externalClusterFileMinio) + Expect(err).ToNot(HaveOccurred()) + + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + + // There should be a backup resource and + By("backing up a cluster and verifying it exists on minio", func() { + testUtils.ExecuteBackup(namespace, sourceTakeFirstBackupFileMinio, false, + testTimeouts[testUtils.BackupIsReady], env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + + // TODO: this is to force a CHECKPOINT when we run the backup on standby. + // This should be better handled inside ExecuteBackup + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + + latestTar := minio.GetFilePath(clusterName, "data.tar") + Eventually(func() (int, error) { + return minio.CountFiles(minioEnv, latestTar) + }, 60).Should(BeEquivalentTo(1), + fmt.Sprintf("verify the number of backup %v is equals to 1", latestTar)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + if err != nil { + return "", err + } + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterRestore(namespace, externalClusterFileMinio, tableName) + + // verify test data on restored external cluster + tableLocator = TableLocator{ + Namespace: namespace, + ClusterName: externalClusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableName, + } + AssertDataExpectedCount(env, tableLocator, 2) + + By("deleting the restored cluster", func() { + err = DeleteResourcesFromFile(namespace, externalClusterFileMinio) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ + " option in 'externalClusters' section", func() { + externalClusterRestoreName := "restore-external-cluster-pitr" + + currentTimestamp := new(string) + // We have already written 2 rows in test table 'to_restore' in above test now we will take current + // timestamp. 
It will be used to restore the cluster from the source using PITR
+			By("getting currentTimestamp", func() {
+				ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env)
+				*currentTimestamp = ts
+				Expect(err).ToNot(HaveOccurred())
+			})
+			By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() {
+				forward, conn, err := testUtils.ForwardPSQLConnection(
+					env,
+					namespace,
+					clusterName,
+					testUtils.AppDBName,
+					apiv1.ApplicationUserSecretSuffix,
+				)
+				Expect(err).ToNot(HaveOccurred())
+				defer func() {
+					_ = conn.Close()
+					forward.Close()
+				}()
+				// insert 2 more rows (entries 3 and 4) into the "app" database
+				insertRecordIntoTable(tableName, 3, conn)
+				insertRecordIntoTable(tableName, 4, conn)
+			})
+			By("creating a second backup and verifying it exists on minio", func() {
+				testUtils.ExecuteBackup(namespace, sourceTakeSecondBackupFileMinio, false,
+					testTimeouts[testUtils.BackupIsReady], env)
+				testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+				latestTar := minio.GetFilePath(clusterName, "data.tar")
+				Eventually(func() (int, error) {
+					return minio.CountFiles(minioEnv, latestTar)
+				}, 60).Should(BeEquivalentTo(2),
+					fmt.Sprintf("verify the number of backups %v is equal to 2", latestTar))
+			})
+			var restoredCluster *apiv1.Cluster
+			By("creating a cluster from the backup with PITR", func() {
+				var err error
+				restoredCluster, err = testUtils.CreateClusterFromExternalClusterBackupWithPITROnMinio(
+					namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env)
+				Expect(err).NotTo(HaveOccurred())
+			})
+			AssertClusterWasRestoredWithPITRAndApplicationDB(
+				namespace,
+				externalClusterRestoreName,
+				tableName,
+				"00000002",
+			)
+			By("deleting the restored cluster", func() {
+				Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed())
+			})
+		})
+
+		It("restores a cluster from the barman object store using the replica option in the spec", func() {
+			// Write a table and some data on the "app" database
+			tableLocator := TableLocator{
+				Namespace:    namespace,
+				ClusterName:  clusterName,
+				DatabaseName: testUtils.AppDBName,
+				TableName:    "for_restore_repl",
+			}
+			AssertCreateTestData(env, tableLocator)
+
+			AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
+
+			By("backing up a cluster and verifying it exists on minio", func() {
+				testUtils.ExecuteBackup(namespace, sourceTakeThirdBackupFileMinio, false,
+					testTimeouts[testUtils.BackupIsReady], env)
+				testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName)
+				latestTar := minio.GetFilePath(clusterName, "data.tar")
+				Eventually(func() (int, error) {
+					return minio.CountFiles(minioEnv, latestTar)
+				}, 60).Should(BeEquivalentTo(3),
+					fmt.Sprintf("verify the number of backups %v is equal to 3", latestTar))
+			})
+
+			// Replicating a cluster with asynchronous replication
+			AssertClusterAsyncReplica(
+				namespace,
+				clusterSourceFileMinio,
+				externalClusterFileMinioReplica,
+				"for_restore_repl",
+			)
+		})
+	})
+})
+
+func prepareClusterForPITROnMinio(
+	namespace,
+	clusterName,
+	backupSampleFile string,
+	expectedVal int,
+	currentTimestamp *string,
+) {
+	const tableNamePitr = "for_restore"
+
+	By("backing up a cluster and verifying it exists on minio", func() {
+		testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env)
+		latestTar := minio.GetFilePath(clusterName, "data.tar")
+		Eventually(func() (int, error) {
+			return minio.CountFiles(minioEnv, latestTar)
+		}, 60).Should(BeNumerically(">=", expectedVal),
+			fmt.Sprintf("verify the number of backups %v is greater 
than or equal to %v", latestTar, + expectedVal)) + Eventually(func() (string, error) { + cluster, err := env.GetCluster(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + return cluster.Status.FirstRecoverabilityPoint, err + }, 30).ShouldNot(BeEmpty()) + }) + + // Write a table and insert 2 entries on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: testUtils.AppDBName, + TableName: tableNamePitr, + } + AssertCreateTestData(env, tableLocator) + + By("getting currentTimestamp", func() { + ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + *currentTimestamp = ts + Expect(err).ToNot(HaveOccurred()) + }) + + By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { + forward, conn, err := testUtils.ForwardPSQLConnection( + env, + namespace, + clusterName, + testUtils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + ) + defer func() { + forward.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + insertRecordIntoTable(tableNamePitr, 3, conn) + }) + AssertArchiveWalOnMinio(namespace, clusterName, clusterName) + AssertArchiveConditionMet(namespace, clusterName, "5m") + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) +} diff --git a/tests/e2e/backup_restore_test.go b/tests/e2e/backup_restore_test.go deleted file mode 100644 index a452eaee94..0000000000 --- a/tests/e2e/backup_restore_test.go +++ /dev/null @@ -1,1250 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "fmt" - "path/filepath" - - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() { - const ( - level = tests.High - - azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" - - tableName = "to_restore" - - barmanCloudBackupLogEntry = "Starting barman-cloud-backup" - ) - - currentTimestamp := new(string) - - BeforeEach(func() { - if testLevelEnv.Depth < int(level) { - Skip("Test depth is lower than the amount requested for this test") - } - }) - - Context("using minio as object storage for backup", Ordered, func() { - // This is a set of tests using a minio server deployed in the same - // namespace as the cluster. 
Since each cluster is installed in its - // own namespace, they can share the configuration file - var namespace, clusterName string - const ( - backupFile = fixturesDir + "/backup/minio/backup-minio.yaml" - customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml" - ) - - clusterWithMinioSampleFile := fixturesDir + "/backup/minio/cluster-with-backup-minio.yaml.template" - - BeforeAll(func() { - if !IsLocal() { - Skip("This test is only run on local clusters") - } - const namespacePrefix = "cluster-backup-minio" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - By("create the certificates for MinIO", func() { - err := minioEnv.CreateCaSecret(env, namespace) - Expect(err).ToNot(HaveOccurred()) - }) - - By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") - }) - - // Create ConfigMap and secrets to verify metrics for target database after backup restore - AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 1, 1) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) - - By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) - if err != nil { - return false, err - } - return connectionStatus, nil - }, 60).Should(BeTrue()) - }) - }) - - // We backup and restore a cluster, and verify some expected data to - // be there - It("backs up and restores a cluster using minio", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore.yaml.template" - ) - var backup *apiv1.Backup - restoredClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - backupName, err := env.GetResourceNameFromYAML(backupFile) - Expect(err).ToNot(HaveOccurred()) - // Create required test data - AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBOne, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBTwo, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBSecret, testTableName) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - latestTar := minioPath(clusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster and verifying it exists on minio, backup path is %v", latestTar), func() { - backup = testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - Eventually(func() (int, error) { - return 
testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastSuccessfulBackup, err - }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastFailedBackup, err - }, 30).Should(BeEmpty()) - }) - - By("verifying the backup is using the expected barman-cloud-backup options", func() { - Expect(backup).ToNot(BeNil()) - Expect(backup.Status.InstanceID).ToNot(BeNil()) - logEntries, err := testUtils.ParseJSONLogs(namespace, backup.Status.InstanceID.PodName, env) - Expect(err).ToNot(HaveOccurred()) - expectedBaseBackupOptions := []string{ - "--immediate-checkpoint", - "--min-chunk-size=5MB", - "--read-timeout=59", - } - result, err := testUtils.CheckOptionsForBarmanCommand( - logEntries, - barmanCloudBackupLogEntry, - backup.Name, - backup.Status.InstanceID.PodName, - expectedBaseBackupOptions, - ) - Expect(err).ToNot(HaveOccurred()) - Expect(result).To(BeTrue()) - }) - - By("executing a second backup and verifying the number of backups on minio", func() { - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - - // delete the first backup and create a second backup - backup := &apiv1.Backup{} - err := env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, - backup) - Expect(err).ToNot(HaveOccurred()) - err = env.Client.Delete(env.Ctx, backup) - Expect(err).ToNot(HaveOccurred()) - // create a second backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - latestTar = minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(2)) - }) - - By("verifying the backupName is properly set in the status of the backup", func() { - backup := &apiv1.Backup{} - err := env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, - backup) - Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - // We know that our current images always contain the latest barman version - if cluster.ShouldForceLegacyBackup() { - Expect(backup.Status.BackupName).To(BeEmpty()) - } else { - Expect(backup.Status.BackupName).To(HavePrefix("backup-")) - } - }) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) - - cluster, err := env.GetCluster(namespace, restoredClusterName) - Expect(err).ToNot(HaveOccurred()) - AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) - - previous := 0 - latestGZ := filepath.Join("*", clusterName, "*", "*.history.gz") - By(fmt.Sprintf("checking the previous number of .history files in minio, history file name is %v", - latestGZ), func() { - previous, err = testUtils.CountFilesOnMinio(minioEnv, latestGZ) - Expect(err).ToNot(HaveOccurred()) - }) - - AssertSwitchover(namespace, clusterName, 
env) - - By("checking the number of .history after switchover", func() { - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestGZ) - }, 60).Should(BeNumerically(">", previous)) - }) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // We backup and restore a cluster from a standby, and verify some expected data to - // be there - It("backs up and restore a cluster from standby", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterWithMinioStandbySampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-standby.yaml.template" - backupStandbyFile = fixturesDir + "/backup/minio/backup-minio-standby.yaml" - ) - - targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioStandbySampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, targetClusterName, clusterWithMinioStandbySampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: targetClusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) - latestTar := minioPath(targetClusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster from standby and verifying it exists on minio, backup path is %v", - latestTar), func() { - testUtils.ExecuteBackup(namespace, backupStandbyFile, true, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, targetClusterName) - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, targetClusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - }) - - // We backup and restore a cluster from a standby, and verify some expected data to - // be there - It("backs up a cluster from standby with backup target defined in backup", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterWithMinioSampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-primary.yaml.template" - backupWithTargetFile = fixturesDir + "/backup/minio/backup-minio-override-target.yaml" - ) - - targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, targetClusterName, clusterWithMinioSampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBOne, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBTwo, 
testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, targetClusterName, targetDBSecret, testTableName) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: targetClusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, targetClusterName, targetClusterName) - latestTar := minioPath(targetClusterName, "data.tar") - - // There should be a backup resource and - By(fmt.Sprintf("backing up a cluster from standby (defined in backup file) and verifying it exists on minio,"+ - " backup path is %v", latestTar), func() { - testUtils.ExecuteBackup(namespace, backupWithTargetFile, true, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, targetClusterName) - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, targetClusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - By("deleting the cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterWithMinioSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Test that the restore works if the source cluster has a custom - // backup.barmanObjectStore.serverName that is different than the cluster name - It("backs up and restores a cluster with custom backup serverName", func() { - const ( - targetDBOne = "test" - targetDBTwo = "test1" - targetDBSecret = "secret_test" - testTableName = "test_table" - clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore-custom.yaml.template" - // clusterWithMinioCustomSampleFile has metadata.name != backup.barmanObjectStore.serverName - clusterWithMinioCustomSampleFile = fixturesDir + - "/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template" - backupFileCustom = fixturesDir + "/backup/minio/backup-minio-custom-servername.yaml" - clusterServerName = "pg-backup-minio-Custom-Name" - ) - - customClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioCustomSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create the cluster with custom serverName in the backup spec - AssertCreateCluster(namespace, customClusterName, clusterWithMinioCustomSampleFile, env) - - // Create required test data - AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBOne, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBTwo, testTableName) - AssertCreationOfTestDataForTargetDB(env, namespace, customClusterName, targetDBSecret, testTableName) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: customClusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, customClusterName, clusterServerName) - - // There should be a backup resource and - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, backupFileCustom, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, customClusterName) - latestBaseTar := minioPath(clusterServerName, "data.tar") - Eventually(func() (int, error) { - return 
testUtils.CountFilesOnMinio(minioEnv, latestBaseTar) - }, 60).Should(BeEquivalentTo(1), - fmt.Sprintf("verify the number of backup %v is equals to 1", latestBaseTar)) - // this is the second backup we take on the bucket - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, customClusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restore backup in a new cluster - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) - - By("deleting the primary cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterWithMinioCustomSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available - It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-minio.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) - latestBaseTar := minioPath(clusterName, "data.tar") - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestBaseTar) - }, 60).Should(BeNumerically(">=", 2), - fmt.Sprintf("verify the number of backup %v is >= 2", latestBaseTar)) - }) - - It("backs up and restore a cluster with PITR MinIO", func() { - const ( - restoredClusterName = "restore-cluster-pitr-minio" - backupFilePITR = fixturesDir + "/backup/minio/backup-minio-pitr.yaml" - ) - - prepareClusterForPITROnMinio( - namespace, - clusterName, - backupFilePITR, - 3, - currentTimestamp, - ) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( - namespace, - restoredClusterName, - backupFilePITR, - *currentTimestamp, - env, - ) - Expect(err).NotTo(HaveOccurred()) - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003") - - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster and a scheduled backup, then it is patched to suspend its - // execution. We verify that the number of backups does not increase. 
- // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-minio.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeNumerically(">=", 2), - fmt.Sprintf("verify the number of backup %v is great than 2", latestTar)) - }) - - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - - It("verify tags in backed files", func() { - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - tags, err := testUtils.GetFileTagsOnMinio(minioEnv, minioPath(clusterName, "*1.gz")) - Expect(err).ToNot(HaveOccurred()) - Expect(tags.Tags).ToNot(BeEmpty()) - - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - oldPrimary := currentPrimary.GetName() - // Force-delete the primary - quickDelete := &ctrlclient.DeleteOptions{ - GracePeriodSeconds: &quickDeletionPeriod, - } - err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) - Expect(err).ToNot(HaveOccurred()) - - AssertNewPrimary(namespace, clusterName, oldPrimary) - - tags, err = testUtils.GetFileTagsOnMinio(minioEnv, minioPath(clusterName, "*.history.gz")) - Expect(err).ToNot(HaveOccurred()) - Expect(tags.Tags).ToNot(BeEmpty()) - }) - }) - - Context("using azure blobs as object storage with storage account access authentication", Ordered, func() { - // We must be careful here. All the clusters use the same remote storage - // and that means that we must use different cluster names otherwise - // we risk mixing WALs and backups - const azureBlobSampleFile = fixturesDir + "/backup/azure_blob/cluster-with-backup-azure-blob.yaml.template" - const clusterRestoreSampleFile = fixturesDir + "/backup/azure_blob/cluster-from-restore.yaml.template" - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azure-blob.yaml" - backupFile := fixturesDir + "/backup/azure_blob/backup-azure-blob.yaml" - var namespace, clusterName string - - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only run on AKS clusters") - } - const namespacePrefix = "cluster-backup-azure-blob" - var err error - clusterName, err = env.GetResourceNameFromYAML(azureBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test. 
- // The credentials are retrieved from the environment variables, as we can't create - // a fixture for them - By("creating the Azure Blob Storage credentials", func() { - AssertStorageCredentialsAreCreated( - namespace, - "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey, - ) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, azureBlobSampleFile, env) - }) - - // We backup and restore a cluster, and verify some expected data to - // be there - It("backs up and restore a cluster", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - By("uploading a backup", func() { - // We create a backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restore backup in a new cluster - AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) - - By("deleting the restored cluster", func() { - err := DeleteResourcesFromFile(namespace, clusterRestoreSampleFile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - // Create a scheduled backup with the 'immediate' option enabled. We expect the backup to be available - It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) - - // Only one data.tar files should be present - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, - clusterName, "data.tar") - }, 30).Should(BeNumerically("==", 2)) - }) - - It("backs up and restore a cluster with PITR", func() { - restoredClusterName := "restore-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - backupFile, - env.AzureConfiguration, - 2, - currentTimestamp, - ) - - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( - namespace, - restoredClusterName, - backupFile, - *currentTimestamp, - env, - ) - Expect(err).ToNot(HaveOccurred()) - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster, create a scheduled backup, patch it to suspend its - // execution. We verify that the number of backups does not increase. 
- // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - const scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azure-blob.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 480) - - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, - clusterName, "data.tar") - }, 60).Should(BeNumerically(">=", 2)) - }) - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - }) - - Context("using Azurite blobs as object storage", Ordered, func() { - // This is a set of tests using an Azurite server deployed in the same - // namespace as the cluster. Since each cluster is installed in its - // own namespace, they can share the configuration file - const ( - clusterRestoreSampleFile = fixturesDir + "/backup/azurite/cluster-from-restore.yaml.template" - scheduledBackupSampleFile = fixturesDir + - "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azurite.yaml" - scheduledBackupImmediateSampleFile = fixturesDir + - "/backup/scheduled_backup_immediate/scheduled-backup-immediate-azurite.yaml" - backupFile = fixturesDir + "/backup/azurite/backup.yaml" - azuriteCaSecName = "azurite-ca-secret" - azuriteTLSSecName = "azurite-tls-secret" - ) - var namespace, clusterName string - - BeforeAll(func() { - if !(IsLocal() || IsGKE() || IsOpenshift()) { - Skip("This test is only executed on gke, openshift and local") - } - const namespacePrefix = "cluster-backup-azurite" - var err error - clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // Create and assert ca and tls certificate secrets on Azurite - By("creating ca and tls certificate secrets", func() { - err := testUtils.CreateCertificateSecretsOnAzurite(namespace, clusterName, - azuriteCaSecName, azuriteTLSSecName, env) - Expect(err).ToNot(HaveOccurred()) - }) - // Setup Azurite and az cli along with Postgresql cluster - prepareClusterBackupOnAzurite(namespace, clusterName, azuriteBlobSampleFile, backupFile, tableName) - }) - - It("restores a backed up cluster", func() { - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreSampleFile, tableName) - }) - - // Create a scheduled backup with the 'immediate' option enabled. 
- // We expect the backup to be available - It("immediately starts a backup using ScheduledBackups immediate option", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupImmediateSampleFile) - Expect(err).ToNot(HaveOccurred()) - - AssertScheduledBackupsImmediate(namespace, scheduledBackupImmediateSampleFile, scheduledBackupName) - - // AssertScheduledBackupsImmediate creates at least two backups, we should find - // their base backups - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 30).Should(BeNumerically("==", 2)) - }) - - It("backs up and restore a cluster with PITR Azurite", func() { - const ( - restoredClusterName = "restore-cluster-pitr-azurite" - backupFilePITR = fixturesDir + "/backup/azurite/backup-pitr.yaml" - ) - - prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp) - - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( - namespace, - restoredClusterName, - backupFilePITR, - *currentTimestamp, - env, - ) - Expect(err).NotTo(HaveOccurred()) - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) - - // Restore backup in a new cluster, also cover if no application database is configured - AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") - - By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) - }) - }) - - // We create a cluster, create a scheduled backup, patch it to suspend its - // execution. We verify that the number of backups does not increase. - // We then patch it again back to its initial state and verify that - // the amount of backups keeps increasing again - It("verifies that scheduled backups can be suspended", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) - Expect(err).ToNot(HaveOccurred()) - - By("scheduling backups", func() { - AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") - }, 60).Should(BeNumerically(">=", 3)) - }) - - AssertSuspendScheduleBackups(namespace, scheduledBackupName) - }) - }) -}) - -var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.LabelBackupRestore), func() { - const ( - fixturesBackupDir = fixturesDir + "/backup/recovery_external_clusters/" - azuriteBlobSampleFile = fixturesDir + "/backup/azurite/cluster-backup.yaml.template" - externalClusterFileMinio = fixturesBackupDir + "external-clusters-minio-03.yaml.template" - externalClusterFileMinioReplica = fixturesBackupDir + "external-clusters-minio-replica-04.yaml.template" - sourceTakeFirstBackupFileMinio = fixturesBackupDir + "backup-minio-02.yaml" - sourceTakeSecondBackupFileMinio = fixturesBackupDir + "backup-minio-03.yaml" - sourceTakeThirdBackupFileMinio = fixturesBackupDir + "backup-minio-04.yaml" - clusterSourceFileMinio = fixturesBackupDir + "source-cluster-minio-01.yaml.template" - sourceBackupFileAzure = fixturesBackupDir + "backup-azure-blob-02.yaml" - clusterSourceFileAzure = fixturesBackupDir + "source-cluster-azure-blob-01.yaml.template" - externalClusterFileAzure = fixturesBackupDir + "external-clusters-azure-blob-03.yaml.template" - sourceBackupFileAzurePITR = fixturesBackupDir + "backup-azure-blob-pitr.yaml" - externalClusterFileAzurite = fixturesBackupDir + 
"external-clusters-azurite-03.yaml.template" - backupFileAzurite = fixturesBackupDir + "backup-azurite-02.yaml" - tableName = "to_restore" - clusterSourceFileAzureSAS = fixturesBackupDir + "cluster-with-backup-azure-blob-sas.yaml.template" - clusterRestoreFileAzureSAS = fixturesBackupDir + "cluster-from-restore-sas.yaml.template" - sourceBackupFileAzureSAS = fixturesBackupDir + "backup-azure-blob-sas.yaml" - sourceBackupFileAzurePITRSAS = fixturesBackupDir + "backup-azure-blob-pitr-sas.yaml" - level = tests.High - minioCaSecName = "minio-server-ca-secret" - minioTLSSecName = "minio-server-tls-secret" - azuriteCaSecName = "azurite-ca-secret" - azuriteTLSSecName = "azurite-tls-secret" - ) - - currentTimestamp := new(string) - - BeforeEach(func() { - if testLevelEnv.Depth < int(level) { - Skip("Test depth is lower than the amount requested for this test") - } - }) - - // Restore cluster using a recovery object store, that is a backup of another cluster, - // created by Barman Cloud, and defined via the barmanObjectStore option in the externalClusters section - Context("using minio as object storage", Ordered, func() { - var namespace, clusterName string - - BeforeAll(func() { - if !IsLocal() { - Skip("This test is only executed on local") - } - const namespacePrefix = "recovery-barman-object-minio" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileMinio) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") - - By("create the certificates for MinIO", func() { - err := minioEnv.CreateCaSecret(env, namespace) - Expect(err).ToNot(HaveOccurred()) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileMinio, env) - - By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) - if err != nil { - return false, err - } - return connectionStatus, nil - }, 60).Should(BeTrue()) - }) - }) - - It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName, err := env.GetResourceNameFromYAML(externalClusterFileMinio) - Expect(err).ToNot(HaveOccurred()) - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - // There should be a backup resource and - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeFirstBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - - // TODO: this is to force a CHECKPOINT when we run the backup on standby. 
- // This should be better handled inside ExecuteBackup - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(1), - fmt.Sprintf("verify the number of backup %v is equals to 1", latestTar)) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) - }) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterRestore(namespace, externalClusterFileMinio, tableName) - - // verify test data on restored external cluster - tableLocator = TableLocator{ - Namespace: namespace, - ClusterName: externalClusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertDataExpectedCount(env, tableLocator, 2) - - By("deleting the restored cluster", func() { - err = DeleteResourcesFromFile(namespace, externalClusterFileMinio) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ - " option in 'externalClusters' section", func() { - externalClusterRestoreName := "restore-external-cluster-pitr" - // We have already written 2 rows in test table 'to_restore' in above test now we will take current - // timestamp. It will use to restore cluster from source using PITR - - By("getting currentTimestamp", func() { - ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) - *currentTimestamp = ts - Expect(err).ToNot(HaveOccurred()) - }) - By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, - namespace, - clusterName, - testUtils.AppDBName, - apiv1.ApplicationUserSecretSuffix, - ) - defer func() { - _ = conn.Close() - forward.Close() - }() - Expect(err).ToNot(HaveOccurred()) - // insert 2 more rows entries 3,4 on the "app" database - insertRecordIntoTable(tableName, 3, conn) - insertRecordIntoTable(tableName, 4, conn) - }) - By("creating second backup and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeSecondBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(2), - fmt.Sprintf("verify the number of backup %v is equals to 2", latestTar)) - }) - var restoredCluster *apiv1.Cluster - By("create a cluster from backup with PITR", func() { - var err error - restoredCluster, err = testUtils.CreateClusterFromExternalClusterBackupWithPITROnMinio( - namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) - Expect(err).NotTo(HaveOccurred()) - }) - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterRestoreName, - tableName, - "00000002", - ) - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - - It("restore cluster from barman object using replica option in spec", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - 
DatabaseName: testUtils.AppDBName, - TableName: "for_restore_repl", - } - AssertCreateTestData(env, tableLocator) - - AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - - By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeThirdBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - latestTar := minioPath(clusterName, "data.tar") - Eventually(func() (int, error) { - return testUtils.CountFilesOnMinio(minioEnv, latestTar) - }, 60).Should(BeEquivalentTo(3), - fmt.Sprintf("verify the number of backup %v is great than 3", latestTar)) - }) - - // Replicating a cluster with asynchronous replication - AssertClusterAsyncReplica( - namespace, - clusterSourceFileMinio, - externalClusterFileMinioReplica, - "for_restore_repl", - ) - }) - }) - - Context("using azure blobs as object storage", func() { - Context("storage account access authentication", Ordered, func() { - var namespace, clusterName string - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only executed on AKS clusters") - } - const namespacePrefix = "recovery-barman-object-azure" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzure) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test. - // The credentials are retrieved from the environment variables, as we can't create - // a fixture for them - By("creating the Azure Blob Storage credentials", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", - env.AzureConfiguration.StorageAccount, env.AzureConfiguration.StorageKey) - }) - - // Create the cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileAzure, env) - }) - - It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - By("backing up a cluster and verifying it exists on azure blob storage", func() { - // Create the backup - testUtils.ExecuteBackup(namespace, sourceBackupFileAzure, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - }) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterRestore(namespace, externalClusterFileAzure, tableName) - }) - - It("restores a cluster with 'PITR' from barman object using "+ - "'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName := "external-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - sourceBackupFileAzurePITR, - env.AzureConfiguration, - 1, - currentTimestamp, - ) - - restoredCluster, err 
:= testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - clusterName, - *currentTimestamp, - "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.BlobContainer, - env) - Expect(err).ToNot(HaveOccurred()) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterName, - tableName, - "00000002", - ) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) - - Context("storage account SAS Token authentication", Ordered, func() { - var namespace, clusterName string - BeforeAll(func() { - if !IsAKS() { - Skip("This test is only executed on AKS clusters") - } - const namespacePrefix = "cluster-backup-azure-blob-sas" - var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzureSAS) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // The Azure Blob Storage should have been created ad-hoc for the test, - // we get the credentials from the environment variables as we can't create - // a fixture for them - By("creating the Azure Blob Container SAS Token credentials", func() { - AssertCreateSASTokenCredentials(namespace, env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey) - }) - - // Create the Cluster - AssertCreateCluster(namespace, clusterName, clusterSourceFileAzureSAS, env) - }) - - It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - // Create a WAL on the primary and check if it arrives in the - // Azure Blob Storage within a short time - AssertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - By("backing up a cluster and verifying it exists on azure blob storage", func() { - // We create a Backup - testUtils.ExecuteBackup(namespace, sourceBackupFileAzureSAS, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) - }) - - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName) - }) - - It("restores a cluster with 'PITR' from barman object using "+ - "'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName := "external-cluster-azure-pitr" - - prepareClusterForPITROnAzureBlob( - namespace, - clusterName, - sourceBackupFileAzurePITRSAS, - env.AzureConfiguration, - 1, - currentTimestamp, - ) - - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - clusterName, - *currentTimestamp, - "backup-storage-creds-sas", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.BlobContainer, - env) 
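The PITR helpers invoked above (testUtils.CreateClusterFromBackupUsingPITR and the Azure/Azurite variants) all turn the timestamp captured before the extra inserts into a recovery target on a freshly created Cluster. A minimal sketch of the manifest shape such a helper is expected to render, assuming the api/v1 field names; the entry in spec.externalClusters that points at the barman object store, plus storage and credentials, is omitted:

package sketches

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// newPITRRestoreCluster is a hypothetical helper: the new cluster bootstraps
// from the backups of sourceCluster and stops WAL replay at targetTime, so
// rows written after the captured timestamp must not survive the restore.
func newPITRRestoreCluster(namespace, name, sourceCluster, targetTime string) *apiv1.Cluster {
	return &apiv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: apiv1.ClusterSpec{
			Instances: 1,
			Bootstrap: &apiv1.BootstrapConfiguration{
				Recovery: &apiv1.BootstrapRecovery{
					// Source must match an entry in spec.externalClusters
					// describing the object store to recover from.
					Source: sourceCluster,
					RecoveryTarget: &apiv1.RecoveryTarget{
						TargetTime: targetTime, // value of *currentTimestamp
					},
				},
			},
		},
	}
}

This is also why the tests capture currentTimestamp between the earlier inserts and the later ones: the later rows land after the recovery target, and their absence proves the restore stopped at the right point.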
- Expect(err).ToNot(HaveOccurred()) - - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterName, - tableName, - "00000002", - ) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) - }) - - Context("using Azurite blobs as object storage", Ordered, func() { - var namespace, clusterName string - BeforeAll(func() { - if IsAKS() { - Skip("This test is not run on AKS") - } - const namespacePrefix = "recovery-barman-object-azurite" - var err error - clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) - Expect(err).ToNot(HaveOccurred()) - - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - - // Create and assert ca and tls certificate secrets on Azurite - By("creating ca and tls certificate secrets", func() { - err := testUtils.CreateCertificateSecretsOnAzurite( - namespace, - clusterName, - azuriteCaSecName, - azuriteTLSSecName, - env) - Expect(err).ToNot(HaveOccurred()) - }) - // Setup Azurite and az cli along with PostgreSQL cluster - prepareClusterBackupOnAzurite( - namespace, - clusterName, - azuriteBlobSampleFile, - backupFileAzurite, - tableName, - ) - }) - - It("restore cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, externalClusterFileAzurite, tableName) - }) - - It("restores a cluster with 'PITR' from barman object using 'barmanObjectStore' "+ - " option in 'externalClusters' section", func() { - const ( - externalClusterRestoreName = "restore-external-cluster-pitr" - backupFileAzuritePITR = fixturesBackupDir + "backup-azurite-pitr.yaml" - ) - - prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp) - - // Create a cluster from a particular time using external backup. - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzurite( - namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) - Expect(err).NotTo(HaveOccurred()) - - AssertClusterWasRestoredWithPITRAndApplicationDB( - namespace, - externalClusterRestoreName, - tableName, - "00000002", - ) - - By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) - }) - }) - }) -}) diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index 45998be4ae..a22f4b98ad 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -36,6 +36,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -255,8 +256,16 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Expect(err).ToNot(HaveOccurred()) replicaNamespace, err := env.CreateUniqueTestNamespace(replicaNamespacePrefix) Expect(err).ToNot(HaveOccurred()) + By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(replicaNamespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + replicaNamespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -313,7 +322,14 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -522,11 +538,11 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f DeferCleanup(func() error { // Since we use multiple times the same cluster names for the same minio instance, we need to clean it up // between tests - _, err = testUtils.CleanFilesOnMinio(minioEnv, path.Join("minio", "cluster-backups", clusterAName)) + _, err = minio.CleanFiles(minioEnv, path.Join("minio", "cluster-backups", clusterAName)) if err != nil { return err } - _, err = testUtils.CleanFilesOnMinio(minioEnv, path.Join("minio", "cluster-backups", clusterBName)) + _, err = minio.CleanFiles(minioEnv, path.Join("minio", "cluster-backups", clusterBName)) if err != nil { return err } @@ -537,7 +553,14 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f DeferCleanup(func() { close(stopLoad) }) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go index c70c87180c..fa637fffce 100644 --- a/tests/e2e/suite_test.go +++ b/tests/e2e/suite_test.go @@ -36,6 +36,7 @@ import ( cnpgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/sternmultitailer" . 
"github.com/onsi/ginkgo/v2" @@ -57,7 +58,7 @@ var ( operatorWasRestarted bool quickDeletionPeriod = int64(1) testTimeouts map[utils.Timeout]int - minioEnv = &utils.MinioEnv{ + minioEnv = &minio.Env{ Namespace: "minio", ServiceName: "minio-service.minio", CaSecretName: "minio-server-ca-secret", @@ -98,7 +99,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { Expect(err).ToNot(HaveOccurred()) }) minioEnv.Timeout = uint(testTimeouts[utils.MinioInstallation]) - minioClient, err := utils.MinioDeploy(minioEnv, env) + minioClient, err := minio.Deploy(minioEnv, env) Expect(err).ToNot(HaveOccurred()) caSecret := minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 72987be873..cad9524df3 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -39,6 +39,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -109,7 +110,16 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) // We create the MinIO credentials required to login into the system - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, namespace) @@ -168,8 +178,9 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("creating backup %s and verifying backup is ready", backupName), func() { - testUtils.ExecuteBackup(namespace, clusterBackupManifest, false, testTimeouts[testUtils.BackupIsReady], env) - AssertBackupConditionInClusterStatus(namespace, clusterName) + testUtils.ExecuteBackup(namespace, clusterBackupManifest, false, testTimeouts[testUtils.BackupIsReady], + env) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) }) By("verifying the number of tars in minio", func() { @@ -270,7 +281,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, // This should be better handled inside ExecuteBackup AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - AssertBackupConditionInClusterStatus(namespace, clusterName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) }) By("verifying the number of tars in the latest base backup", func() { @@ -368,7 +379,16 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) // We create the required credentials for MinIO - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, namespace) @@ -396,7 +416,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, testTimeouts[testUtils.VolumeSnapshotIsReady], env, ) 
- AssertBackupConditionInClusterStatus(namespace, clusterName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) }) By("checking that volumeSnapshots are properly labeled", func() { @@ -1214,7 +1234,7 @@ func latestBaseBackupContainsExpectedTars( // we list the backup.info files to get the listing of base backups // directories in minio backupInfoFiles := filepath.Join("*", clusterName, "base", "*", "*.info") - ls, err := testUtils.ListFilesOnMinio(minioEnv, backupInfoFiles) + ls, err := minio.ListFiles(minioEnv, backupInfoFiles) g.Expect(err).ShouldNot(HaveOccurred()) frags := strings.Split(ls, "\n") slices.Sort(frags) @@ -1222,10 +1242,10 @@ func latestBaseBackupContainsExpectedTars( g.Expect(frags).To(HaveLen(numBackups), report) latestBaseBackup := filepath.Dir(frags[numBackups-1]) tarsInLastBackup := strings.TrimPrefix(filepath.Join(latestBaseBackup, "*.tar"), "minio/") - listing, err := testUtils.ListFilesOnMinio(minioEnv, tarsInLastBackup) + listing, err := minio.ListFiles(minioEnv, tarsInLastBackup) g.Expect(err).ShouldNot(HaveOccurred()) report += fmt.Sprintf("tar listing:\n%s\n", listing) - numTars, err := testUtils.CountFilesOnMinio(minioEnv, tarsInLastBackup) + numTars, err := minio.CountFiles(minioEnv, tarsInLastBackup) g.Expect(err).ShouldNot(HaveOccurred()) g.Expect(numTars).To(Equal(expectedTars), report) }, 120).Should(Succeed()) diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index c389bf7a29..f698d29c6f 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -37,6 +37,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -145,11 +146,11 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // but a single scheduled backups during the check AssertScheduledBackupsAreScheduled := func(serverName string) { By("verifying scheduled backups are still happening", func() { - latestTar := minioPath(serverName, "data.tar.gz") - currentBackups, err := testsUtils.CountFilesOnMinio(minioEnv, latestTar) + latestTar := minio.GetFilePath(serverName, "data.tar.gz") + currentBackups, err := minio.CountFiles(minioEnv, latestTar) Expect(err).ToNot(HaveOccurred()) Eventually(func() (int, error) { - return testsUtils.CountFilesOnMinio(minioEnv, latestTar) + return minio.CountFiles(minioEnv, latestTar) }, 120).Should(BeNumerically(">", currentBackups)) }) } @@ -354,7 +355,9 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // assertExpectedMatchingPodUIDs checks that the UID of each pod of a Cluster matches with a given list of UIDs. 
// expectedMatches defines how many times, when comparing the elements of the 2 lists, you are expected to have // common values - assertExpectedMatchingPodUIDs := func(namespace, clusterName string, podUIDs []types.UID, expectedMatches int) error { + assertExpectedMatchingPodUIDs := func( + namespace, clusterName string, podUIDs []types.UID, expectedMatches int, + ) error { backoffCheckingPodRestarts := wait.Backoff{ Duration: 10 * time.Second, Steps: 30, @@ -397,11 +400,11 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O return fmt.Errorf("could not cleanup, failed to delete operator namespace: %v", err) } - if _, err := testsUtils.CleanFilesOnMinio(minioEnv, minioPath1); err != nil { + if _, err := minio.CleanFiles(minioEnv, minioPath1); err != nil { return fmt.Errorf("encountered an error while cleaning up minio: %v", err) } - if _, err := testsUtils.CleanFilesOnMinio(minioEnv, minioPath2); err != nil { + if _, err := minio.CleanFiles(minioEnv, minioPath2); err != nil { return fmt.Errorf("encountered an error while cleaning up minio: %v", err) } @@ -478,7 +481,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O CreateResourceFromFile(upgradeNamespace, pgSecrets) }) By("creating the cloud storage credentials", func() { - AssertStorageCredentialsAreCreated(upgradeNamespace, "aws-creds", "minio", "minio123") + _, err := testsUtils.CreateObjectStorageSecret( + upgradeNamespace, + "aws-creds", + "minio", + "minio123", + env, + ) + Expect(err).NotTo(HaveOccurred()) }) By("create the certificates for MinIO", func() { err := minioEnv.CreateCaSecret(env, upgradeNamespace) @@ -656,7 +666,8 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O By("restoring the backup taken from the first Cluster in a new cluster", func() { restoredClusterName := "cluster-restore" CreateResourceFromFile(upgradeNamespace, restoreFile) - AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], + env) // Test data should be present on restored primary primary := restoredClusterName + "-1" diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index ff0b016ec0..74f32e8bca 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -32,6 +32,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -167,7 +168,13 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) }) - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env) + Expect(err).ToNot(HaveOccurred()) }) It("correctly executes PITR with a cold snapshot", func() { @@ -190,7 +197,7 @@ var _ = Describe("Verify Volume Snapshot", primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) if err != nil { return false, err @@ -405,7 +412,7 @@ var _ = Describe("Verify Volume Snapshot", "Backup should be completed correctly, error message is '%s'", backup.Status.Error) }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -473,7 +480,7 @@ var _ = Describe("Verify Volume Snapshot", "Backup should be completed correctly, error message is '%s'", backup.Status.Error) }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -540,7 +547,7 @@ var _ = Describe("Verify Volume Snapshot", "Backup should be completed correctly, error message is '%s'", backup.Status.Error) }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - AssertBackupConditionInClusterStatus(namespace, clusterToBackupName) + testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -602,7 +609,16 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) }) - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + By("creating the credentials for minio", func() { + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) + }) By("creating the cluster to snapshot", func() { AssertCreateCluster(namespace, clusterToSnapshotName, clusterToSnapshot, env) @@ -612,7 +628,7 @@ var _ = Describe("Verify Volume Snapshot", primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { - connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive( + connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) if err != nil { return false, err diff --git a/tests/e2e/wal_restore_parallel_test.go b/tests/e2e/wal_restore_parallel_test.go index 64c371fc82..03906b7152 100644 --- a/tests/e2e/wal_restore_parallel_test.go +++ b/tests/e2e/wal_restore_parallel_test.go @@ -23,6 +23,7 @@ 
import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -69,7 +70,14 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123") + _, err = testUtils.CreateObjectStorageSecret( + namespace, + "backup-storage-creds", + "minio", + "minio123", + env, + ) + Expect(err).ToNot(HaveOccurred()) }) By("create the certificates for MinIO", func() { @@ -104,10 +112,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL = switchWalAndGetLatestArchive(namespace, primary) - latestWALPath := minioPath(clusterName, latestWAL+".gz") + latestWALPath := minio.GetFilePath(clusterName, latestWAL+".gz") Eventually(func() (int, error) { // WALs are compressed with gzip in the fixture - return testUtils.CountFilesOnMinio(minioEnv, latestWALPath) + return minio.CountFiles(minioEnv, latestWALPath) }, RetryTimeout).Should(BeEquivalentTo(1), fmt.Sprintf("verify the existence of WAL %v in minio", latestWALPath)) }) @@ -118,15 +126,20 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun walFile3 = "0000000100000000000000F3" walFile4 = "0000000100000000000000F4" walFile5 = "0000000100000000000000F5" - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile1)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile1)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile2)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile2)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile3)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile3)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile4)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile4)). ShouldNot(HaveOccurred()) - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile5)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile5)). ShouldNot(HaveOccurred()) }) @@ -167,7 +180,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#3 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). 
Should(BeFalse(), "end-of-wal-stream flag is unset") @@ -193,7 +209,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#3 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeFalse(), "end-of-wal-stream flag is unset") @@ -241,7 +260,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeTrue(), "#5 wal is in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is set for #6 wal is not present") @@ -250,7 +272,8 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Generate a new wal file; the archive also contains WAL #6. By("forging a new wal file, the #6 wal", func() { walFile6 = "0000000100000000000000F6" - Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile6)). + Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, + walFile6)). ShouldNot(HaveOccurred()) }) @@ -273,7 +296,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeFalse(), "no wal files exist in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is still there") @@ -321,7 +347,10 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun WithTimeout(RetryTimeout). Should(BeFalse(), "no wals in the spool directory") - Eventually(func() bool { return testUtils.TestFileExist(namespace, standby, SpoolDirectory, "end-of-wal-stream") }). + Eventually(func() bool { + return testUtils.TestFileExist(namespace, standby, SpoolDirectory, + "end-of-wal-stream") + }). WithTimeout(RetryTimeout). Should(BeTrue(), "end-of-wal-stream flag is set for #7 and #8 wal is not present") diff --git a/tests/utils/azurite.go b/tests/utils/azurite.go index c7732104b1..7ea3ed7903 100644 --- a/tests/utils/azurite.go +++ b/tests/utils/azurite.go @@ -17,7 +17,11 @@ limitations under the License. 
package utils import ( + "encoding/json" + "fmt" "os" + "strings" + "time" apiv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -26,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" ) @@ -119,7 +124,7 @@ func InstallAzCli(namespace string, env *TestingEnvironment) error { return nil } -// getAzuriteClientPod get the cli client pod +// getAzuriteClientPod gets the cli client pod func getAzuriteClientPod(namespace string) corev1.Pod { seccompProfile := &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, @@ -346,3 +351,295 @@ func getStorageCredentials(namespace string) corev1.Secret { } return azuriteStorageSecrets } + +// CreateClusterFromExternalClusterBackupWithPITROnAzure creates a cluster on Azure, starting from an external cluster +// backup with PITR +func CreateClusterFromExternalClusterBackupWithPITROnAzure( + namespace, + externalClusterName, + sourceClusterName, + targetTime, + storageCredentialsSecretName, + azStorageAccount, + azBlobContainer string, + env *TestingEnvironment, +) (*v1.Cluster, error) { + storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + destinationPath := fmt.Sprintf("https://%v.blob.core.windows.net/%v/", + azStorageAccount, azBlobContainer) + + restoreCluster := &v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: externalClusterName, + Namespace: namespace, + }, + Spec: v1.ClusterSpec{ + Instances: 3, + + StorageConfiguration: v1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClassName, + }, + + PostgresConfiguration: v1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1s", + "log_replication_commands": "on", + }, + }, + + Bootstrap: &v1.BootstrapConfiguration{ + Recovery: &v1.BootstrapRecovery{ + Source: sourceClusterName, + RecoveryTarget: &v1.RecoveryTarget{ + TargetTime: targetTime, + }, + }, + }, + + ExternalClusters: []v1.ExternalCluster{ + { + Name: sourceClusterName, + BarmanObjectStore: &v1.BarmanObjectStoreConfiguration{ + DestinationPath: destinationPath, + BarmanCredentials: v1.BarmanCredentials{ + Azure: &v1.AzureCredentials{ + StorageAccount: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: storageCredentialsSecretName, + }, + Key: "ID", + }, + StorageKey: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: storageCredentialsSecretName, + }, + Key: "KEY", + }, + }, + }, + }, + }, + }, + }, + } + obj, err := CreateObject(env, restoreCluster) + if err != nil { + return nil, err + } + cluster, ok := obj.(*v1.Cluster) + if !ok { + return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) + } + return cluster, nil +} + +// CreateClusterFromExternalClusterBackupWithPITROnAzurite creates a cluster with Azurite, starting from an external +// cluster backup with PITR +func CreateClusterFromExternalClusterBackupWithPITROnAzurite( + namespace, + externalClusterName, + sourceClusterName, + targetTime string, + env *TestingEnvironment, +) (*v1.Cluster, error) { + storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + DestinationPath := fmt.Sprintf("https://azurite:10000/storageaccountname/%v", sourceClusterName) + + restoreCluster := &v1.Cluster{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: externalClusterName, + Namespace: namespace, + }, + Spec: v1.ClusterSpec{ + Instances: 3, + + StorageConfiguration: v1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClassName, + }, + + PostgresConfiguration: v1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1s", + "log_replication_commands": "on", + }, + }, + + Bootstrap: &v1.BootstrapConfiguration{ + Recovery: &v1.BootstrapRecovery{ + Source: sourceClusterName, + RecoveryTarget: &v1.RecoveryTarget{ + TargetTime: targetTime, + }, + }, + }, + + ExternalClusters: []v1.ExternalCluster{ + { + Name: sourceClusterName, + BarmanObjectStore: &v1.BarmanObjectStoreConfiguration{ + DestinationPath: DestinationPath, + EndpointCA: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "azurite-ca-secret", + }, + Key: "ca.crt", + }, + BarmanCredentials: v1.BarmanCredentials{ + Azure: &v1.AzureCredentials{ + ConnectionString: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "azurite", + }, + Key: "AZURE_CONNECTION_STRING", + }, + }, + }, + }, + }, + }, + }, + } + obj, err := CreateObject(env, restoreCluster) + if err != nil { + return nil, err + } + cluster, ok := obj.(*v1.Cluster) + if !ok { + return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) + } + return cluster, nil +} + +// ComposeAzBlobListAzuriteCmd builds the Azure storage blob list command for Azurite +func ComposeAzBlobListAzuriteCmd(clusterName, path string) string { + return fmt.Sprintf("az storage blob list --container-name %v --query \"[?contains(@.name, \\`%v\\`)].name\" "+ + "--connection-string $AZURE_CONNECTION_STRING", + clusterName, path) +} + +// ComposeAzBlobListCmd builds the Azure storage blob list command +func ComposeAzBlobListCmd( + configuration AzureConfiguration, + clusterName, + path string, +) string { + return fmt.Sprintf("az storage blob list --account-name %v "+ + "--account-key %v "+ + "--container-name %v "+ + "--prefix %v/ "+ + "--query \"[?contains(@.name, \\`%v\\`)].name\"", + configuration.StorageAccount, configuration.StorageKey, configuration.BlobContainer, clusterName, path) +} + +// CountFilesOnAzureBlobStorage counts files on Azure Blob storage +func CountFilesOnAzureBlobStorage( + configuration AzureConfiguration, + clusterName, + path string, +) (int, error) { + azBlobListCmd := ComposeAzBlobListCmd(configuration, clusterName, path) + out, _, err := RunUnchecked(azBlobListCmd) + if err != nil { + return -1, err + } + var arr []string + err = json.Unmarshal([]byte(out), &arr) + return len(arr), err +} + +// CountFilesOnAzuriteBlobStorage counts files on Azure Blob storage 
using Azurite +func CountFilesOnAzuriteBlobStorage( + namespace, + clusterName, + path string, +) (int, error) { + azBlobListCmd := ComposeAzBlobListAzuriteCmd(clusterName, path) + out, _, err := RunUnchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+ + "-- /bin/bash -c '%v'", namespace, azBlobListCmd)) + if err != nil { + return -1, err + } + var arr []string + err = json.Unmarshal([]byte(out), &arr) + return len(arr), err +} + +// verifySASTokenWriteActivity returns true if the given token has RW permissions, +// otherwise it returns false +func verifySASTokenWriteActivity(containerName string, id string, key string) bool { + _, _, err := RunUnchecked(fmt.Sprintf("az storage container create "+ + "--name %v --account-name %v "+ + "--sas-token %v", containerName, id, key)) + + return err == nil +} + +// CreateSASTokenCredentials generates Secrets for the Azure Blob Storage +func CreateSASTokenCredentials(namespace string, id string, key string, env *TestingEnvironment) error { + // Adding 24 hours to the current time + date := time.Now().UTC().Add(time.Hour * 24) + // Creating date time format for az command + expiringDate := fmt.Sprintf("%v"+"-"+"%d"+"-"+"%v"+"T"+"%v"+":"+"%v"+"Z", + date.Year(), + date.Month(), + date.Day(), + date.Hour(), + date.Minute()) + + out, _, err := Run(fmt.Sprintf( + // SAS Token at Blob Container level does not currently work in Barman Cloud + // https://github.com/EnterpriseDB/barman/issues/388 + // we will use SAS Token at Storage Account level + // ( "az storage container generate-sas --account-name %v "+ + // "--name %v "+ + // "--https-only --permissions racwdl --auth-mode key --only-show-errors "+ + // "--expiry \"$(date -u -d \"+4 hours\" '+%%Y-%%m-%%dT%%H:%%MZ')\"", + // id, blobContainerName ) + "az storage account generate-sas --account-name %v "+ + "--https-only --permissions cdlruwap --account-key %v "+ + "--resource-types co --services b --expiry %v -o tsv", + id, key, expiringDate)) + if err != nil { + return err + } + SASTokenRW := strings.TrimRight(out, "\n") + + out, _, err = Run(fmt.Sprintf( + "az storage account generate-sas --account-name %v "+ + "--https-only --permissions lr --account-key %v "+ + "--resource-types co --services b --expiry %v -o tsv", + id, key, expiringDate)) + if err != nil { + return err + } + + SASTokenRO := strings.TrimRight(out, "\n") + isReadWrite := verifySASTokenWriteActivity("restore-cluster-sas", id, SASTokenRO) + if isReadWrite { + return fmt.Errorf("expected token to be read only") + } + + _, err = CreateObjectStorageSecret(namespace, "backup-storage-creds-sas", id, SASTokenRW, env) + if err != nil { + return err + } + + _, err = CreateObjectStorageSecret(namespace, "restore-storage-creds-sas", id, SASTokenRO, env) + if err != nil { + return err + } + + return nil +} diff --git a/tests/utils/backup.go b/tests/utils/backup.go index d88c0b5504..e07f20d2a5 100644 --- a/tests/utils/backup.go +++ b/tests/utils/backup.go @@ -17,7 +17,6 @@ limitations under the License. package utils import ( - "encoding/json" "fmt" "os" @@ -27,7 +26,8 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - . "github.com/onsi/gomega" // nolint + . "github.com/onsi/ginkgo/v2" // nolint + . 
"github.com/onsi/gomega" // nolint ) // ExecuteBackup performs a backup and checks the backup status @@ -159,93 +159,6 @@ func CreateClusterFromBackupUsingPITR( return cluster, nil } -// CreateClusterFromExternalClusterBackupWithPITROnAzure creates a cluster on Azure, starting from an external cluster -// backup with PITR -func CreateClusterFromExternalClusterBackupWithPITROnAzure( - namespace, - externalClusterName, - sourceClusterName, - targetTime, - storageCredentialsSecretName, - azStorageAccount, - azBlobContainer string, - env *TestingEnvironment, -) (*apiv1.Cluster, error) { - storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - destinationPath := fmt.Sprintf("https://%v.blob.core.windows.net/%v/", - azStorageAccount, azBlobContainer) - - restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: externalClusterName, - Namespace: namespace, - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - StorageClass: &storageClassName, - }, - - PostgresConfiguration: apiv1.PostgresConfiguration{ - Parameters: map[string]string{ - "log_checkpoints": "on", - "log_lock_waits": "on", - "log_min_duration_statement": "1000", - "log_statement": "ddl", - "log_temp_files": "1024", - "log_autovacuum_min_duration": "1s", - "log_replication_commands": "on", - }, - }, - - Bootstrap: &apiv1.BootstrapConfiguration{ - Recovery: &apiv1.BootstrapRecovery{ - Source: sourceClusterName, - RecoveryTarget: &apiv1.RecoveryTarget{ - TargetTime: targetTime, - }, - }, - }, - - ExternalClusters: []apiv1.ExternalCluster{ - { - Name: sourceClusterName, - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - DestinationPath: destinationPath, - BarmanCredentials: apiv1.BarmanCredentials{ - Azure: &apiv1.AzureCredentials{ - StorageAccount: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: storageCredentialsSecretName, - }, - Key: "ID", - }, - StorageKey: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: storageCredentialsSecretName, - }, - Key: "KEY", - }, - }, - }, - }, - }, - }, - }, - } - obj, err := CreateObject(env, restoreCluster) - if err != nil { - return nil, err - } - cluster, ok := obj.(*apiv1.Cluster) - if !ok { - return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) - } - return cluster, nil -} - // CreateClusterFromExternalClusterBackupWithPITROnMinio creates a cluster on Minio, starting from an external cluster // backup with PITR func CreateClusterFromExternalClusterBackupWithPITROnMinio( @@ -335,143 +248,6 @@ func CreateClusterFromExternalClusterBackupWithPITROnMinio( return cluster, nil } -// CreateClusterFromExternalClusterBackupWithPITROnAzurite creates a cluster with Azurite, starting from an external -// cluster backup with PITR -func CreateClusterFromExternalClusterBackupWithPITROnAzurite( - namespace, - externalClusterName, - sourceClusterName, - targetTime string, - env *TestingEnvironment, -) (*apiv1.Cluster, error) { - storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - DestinationPath := fmt.Sprintf("https://azurite:10000/storageaccountname/%v", sourceClusterName) - - restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: externalClusterName, - Namespace: namespace, - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - StorageClass: &storageClassName, - }, - - PostgresConfiguration: apiv1.PostgresConfiguration{ - 
Parameters: map[string]string{ - "log_checkpoints": "on", - "log_lock_waits": "on", - "log_min_duration_statement": "1000", - "log_statement": "ddl", - "log_temp_files": "1024", - "log_autovacuum_min_duration": "1s", - "log_replication_commands": "on", - }, - }, - - Bootstrap: &apiv1.BootstrapConfiguration{ - Recovery: &apiv1.BootstrapRecovery{ - Source: sourceClusterName, - RecoveryTarget: &apiv1.RecoveryTarget{ - TargetTime: targetTime, - }, - }, - }, - - ExternalClusters: []apiv1.ExternalCluster{ - { - Name: sourceClusterName, - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - DestinationPath: DestinationPath, - EndpointCA: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "azurite-ca-secret", - }, - Key: "ca.crt", - }, - BarmanCredentials: apiv1.BarmanCredentials{ - Azure: &apiv1.AzureCredentials{ - ConnectionString: &apiv1.SecretKeySelector{ - LocalObjectReference: apiv1.LocalObjectReference{ - Name: "azurite", - }, - Key: "AZURE_CONNECTION_STRING", - }, - }, - }, - }, - }, - }, - }, - } - obj, err := CreateObject(env, restoreCluster) - if err != nil { - return nil, err - } - cluster, ok := obj.(*apiv1.Cluster) - if !ok { - return nil, fmt.Errorf("created object is not of type cluster: %T, %v", obj, obj) - } - return cluster, nil -} - -// ComposeAzBlobListAzuriteCmd builds the Azure storage blob list command for Azurite -func ComposeAzBlobListAzuriteCmd(clusterName, path string) string { - return fmt.Sprintf("az storage blob list --container-name %v --query \"[?contains(@.name, \\`%v\\`)].name\" "+ - "--connection-string $AZURE_CONNECTION_STRING", - clusterName, path) -} - -// ComposeAzBlobListCmd builds the Azure storage blob list command -func ComposeAzBlobListCmd( - configuration AzureConfiguration, - clusterName, - path string, -) string { - return fmt.Sprintf("az storage blob list --account-name %v "+ - "--account-key %v "+ - "--container-name %v "+ - "--prefix %v/ "+ - "--query \"[?contains(@.name, \\`%v\\`)].name\"", - configuration.StorageAccount, configuration.StorageKey, configuration.BlobContainer, clusterName, path) -} - -// CountFilesOnAzureBlobStorage counts files on Azure Blob storage -func CountFilesOnAzureBlobStorage( - configuration AzureConfiguration, - clusterName, - path string, -) (int, error) { - azBlobListCmd := ComposeAzBlobListCmd(configuration, clusterName, path) - out, _, err := RunUnchecked(azBlobListCmd) - if err != nil { - return -1, err - } - var arr []string - err = json.Unmarshal([]byte(out), &arr) - return len(arr), err -} - -// CountFilesOnAzuriteBlobStorage counts files on Azure Blob storage. 
using Azurite -func CountFilesOnAzuriteBlobStorage( - namespace, - clusterName, - path string, -) (int, error) { - azBlobListCmd := ComposeAzBlobListAzuriteCmd(clusterName, path) - out, _, err := RunUnchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+ - "-- /bin/bash -c '%v'", namespace, azBlobListCmd)) - if err != nil { - return -1, err - } - var arr []string - err = json.Unmarshal([]byte(out), &arr) - return len(arr), err -} - // GetConditionsInClusterStatus get conditions values as given type from cluster object status func GetConditionsInClusterStatus( namespace, @@ -593,3 +369,18 @@ func (env TestingEnvironment) GetVolumeSnapshot( } return volumeSnapshot, nil } + +// AssertBackupConditionInClusterStatus checks that the backup condition in the Cluster's Status +// eventually returns true +func AssertBackupConditionInClusterStatus(env *TestingEnvironment, namespace, clusterName string) { + By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { + Eventually(func() (string, error) { + getBackupCondition, err := GetConditionsInClusterStatus( + namespace, clusterName, env, apiv1.ConditionBackup) + if err != nil { + return "", err + } + return string(getBackupCondition.Status), nil + }, 300, 5).Should(BeEquivalentTo("True")) + }) +} diff --git a/tests/utils/minio.go b/tests/utils/minio/minio.go similarity index 76% rename from tests/utils/minio.go rename to tests/utils/minio/minio.go index d69821c6f3..a5f878ed85 100644 --- a/tests/utils/minio.go +++ b/tests/utils/minio/minio.go @@ -14,12 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +// Package minio contains all the required functions to set up a MinIO deployment and +// query this MinIO deployment using the MinIO API +package minio import ( "encoding/json" "fmt" "os" + "path/filepath" "strconv" "strings" "time" @@ -36,6 +39,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils" ) const ( @@ -43,9 +47,9 @@ const ( minioClientImage = "minio/mc:RELEASE.2022-06-11T21-10-36Z" ) -// MinioEnv contains all the information related or required by MinIO deployment and +// Env contains all the information related to or required by the MinIO deployment and // used by the functions on every test -type MinioEnv struct { +type Env struct { Client *corev1.Pod CaPair *certs.KeyPair CaSecretObj corev1.Secret @@ -56,9 +60,9 @@ type MinioEnv struct { Timeout uint } -// MinioSetup contains the resources needed for a working minio server deployment: +// Setup contains the resources needed for a working minio server deployment: // a PersistentVolumeClaim, a Deployment and a Service -type MinioSetup struct { +type Setup struct { PersistentVolumeClaim corev1.PersistentVolumeClaim Deployment appsv1.Deployment Service corev1.Service @@ -69,10 +73,10 @@ type TagSet struct { Tags map[string]string `json:"tagset"` } -// InstallMinio installs minio in a given namespace -func InstallMinio( - env *TestingEnvironment, - minioSetup MinioSetup, +// installMinio installs minio in a given namespace +func installMinio( + env *utils.TestingEnvironment, + minioSetup Setup, timeoutSeconds uint, ) error { if err := env.Client.Create(env.Ctx, &minioSetup.PersistentVolumeClaim); err != nil { return err } @@ -110,15 +114,15 @@ func InstallMinio( return err } -// MinioDefaultSetup returns the definition for the default minio setup -func MinioDefaultSetup(namespace 
string) (MinioSetup, error) { - pvc, err := MinioDefaultPVC(namespace) +// defaultSetup returns the definition for the default minio setup +func defaultSetup(namespace string) (Setup, error) { + pvc, err := defaultPVC(namespace) if err != nil { - return MinioSetup{}, err + return Setup{}, err } - deployment := MinioDefaultDeployment(namespace, pvc) - service := MinioDefaultSVC(namespace) - setup := MinioSetup{ + deployment := defaultDeployment(namespace, pvc) + service := defaultSVC(namespace) + setup := Setup{ PersistentVolumeClaim: pvc, Deployment: deployment, Service: service, @@ -126,8 +130,8 @@ func MinioDefaultSetup(namespace string) (MinioSetup, error) { return setup, nil } -// MinioDefaultDeployment returns a default Deployment for minio -func MinioDefaultDeployment(namespace string, minioPVC corev1.PersistentVolumeClaim) appsv1.Deployment { +// defaultDeployment returns a default Deployment for minio +func defaultDeployment(namespace string, minioPVC corev1.PersistentVolumeClaim) appsv1.Deployment { seccompProfile := &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, } @@ -222,8 +226,8 @@ func MinioDefaultDeployment(namespace string, minioPVC corev1.PersistentVolumeCl return minioDeployment } -// MinioDefaultSVC returns a default Service for minio -func MinioDefaultSVC(namespace string) corev1.Service { +// defaultSVC returns a default Service for minio +func defaultSVC(namespace string) corev1.Service { minioService := corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "minio-service", @@ -245,8 +249,8 @@ func MinioDefaultSVC(namespace string) corev1.Service { return minioService } -// MinioDefaultPVC returns a default PVC for minio -func MinioDefaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) { +// defaultPVC returns a default PVC for minio +func defaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) { const claimName = "minio-pv-claim" storageClass, ok := os.LookupEnv("E2E_DEFAULT_STORAGE_CLASS") if !ok { @@ -273,11 +277,11 @@ func MinioDefaultPVC(namespace string) (corev1.PersistentVolumeClaim, error) { return minioPVC, nil } -// MinioSSLSetup returns the definition for a minio setup using SSL -func MinioSSLSetup(namespace string) (MinioSetup, error) { - setup, err := MinioDefaultSetup(namespace) +// sslSetup returns the definition for a minio setup using SSL +func sslSetup(namespace string) (Setup, error) { + setup, err := defaultSetup(namespace) if err != nil { - return MinioSetup{}, err + return Setup{}, err } const tlsVolumeName = "secret-volume" const tlsVolumeMountPath = "/etc/secrets/certs" @@ -341,8 +345,8 @@ func MinioSSLSetup(namespace string) (MinioSetup, error) { return setup, nil } -// MinioDefaultClient returns the default Pod definition for a minio client -func MinioDefaultClient(namespace string) corev1.Pod { +// defaultClient returns the default Pod definition for a minio client +func defaultClient(namespace string) corev1.Pod { seccompProfile := &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, } @@ -403,8 +407,8 @@ func MinioDefaultClient(namespace string) corev1.Pod { return minioClient } -// MinioSSLClient returns the Pod definition for a minio client using SSL -func MinioSSLClient(namespace string) corev1.Pod { +// sslClient returns the Pod definition for a minio client using SSL +func sslClient(namespace string) corev1.Pod { const ( configVolumeMountPath = "/mc/.mc" configVolumeName = "mc-config" @@ -414,7 +418,7 @@ func MinioSSLClient(namespace string) corev1.Pod { ) var secretMode 
int32 = 0o600 - minioClient := MinioDefaultClient(namespace) + minioClient := defaultClient(namespace) minioClient.Spec.Volumes = append(minioClient.Spec.Volumes, corev1.Volume{ Name: configVolumeName, @@ -448,8 +452,8 @@ func MinioSSLClient(namespace string) corev1.Pod { return minioClient } -// MinioDeploy will create a full MinIO deployment defined inthe minioEnv variable -func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, error) { +// Deploy will create a full MinIO deployment defined in the minioEnv variable +func Deploy(minioEnv *Env, env *utils.TestingEnvironment) (*corev1.Pod, error) { var err error minioEnv.CaPair, err = certs.CreateRootCA(minioEnv.Namespace, "minio") if err != nil { @@ -457,7 +461,7 @@ func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, erro } minioEnv.CaSecretObj = *minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName) - if _, err = CreateObject(env, &minioEnv.CaSecretObj); err != nil { + if _, err = utils.CreateObject(env, &minioEnv.CaSecretObj); err != nil { return nil, err } @@ -474,20 +478,20 @@ func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, erro return nil, err } - setup, err := MinioSSLSetup(minioEnv.Namespace) + setup, err := sslSetup(minioEnv.Namespace) if err != nil { return nil, err } - if err = InstallMinio(env, setup, minioEnv.Timeout); err != nil { + if err = installMinio(env, setup, minioEnv.Timeout); err != nil { return nil, err } - minioClient := MinioSSLClient(minioEnv.Namespace) + minioClient := sslClient(minioEnv.Namespace) - return &minioClient, PodCreateAndWaitForReady(env, &minioClient, 240) + return &minioClient, utils.PodCreateAndWaitForReady(env, &minioClient, 240) } -func (m *MinioEnv) getCaSecret(env *TestingEnvironment, namespace string) (*corev1.Secret, error) { +func (m *Env) getCaSecret(env *utils.TestingEnvironment, namespace string) (*corev1.Secret, error) { var certSecret corev1.Secret if err := env.Client.Get(env.Ctx, types.NamespacedName{ @@ -508,24 +512,24 @@ func (m *MinioEnv) getCaSecret(env *TestingEnvironment, namespace string) (*core } // CreateCaSecret creates the certificates required to authenticate against the MinIO service -func (m *MinioEnv) CreateCaSecret(env *TestingEnvironment, namespace string) error { +func (m *Env) CreateCaSecret(env *utils.TestingEnvironment, namespace string) error { caSecret, err := m.getCaSecret(env, namespace) if err != nil { return err } - _, err = CreateObject(env, caSecret) + _, err = utils.CreateObject(env, caSecret) return err } -// CountFilesOnMinio uses the minioClient in the given `namespace` to count the +// CountFiles uses the minioClient in the given `namespace` to count the // amount of files matching the given `path` -func CountFilesOnMinio(minioEnv *MinioEnv, path string) (value int, err error) { +func CountFiles(minioEnv *Env, path string) (value int, err error) { var stdout string - stdout, _, err = RunUnchecked(fmt.Sprintf( + stdout, _, err = utils.RunUnchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, - composeFindMinioCmd(path, "minio"))) + composeFindCmd(path, "minio"))) if err != nil { return -1, err } @@ -533,41 +537,41 @@ func CountFilesOnMinio(minioEnv *MinioEnv, path string) (value int, err error) { return value, err } -// ListFilesOnMinio uses the minioClient in the given `namespace` to list the +// ListFiles uses the minioClient in the given `namespace` to list the // paths matching the given `path` -func 
ListFilesOnMinio(minioEnv *MinioEnv, path string) (string, error) { +func ListFiles(minioEnv *Env, path string) (string, error) { var stdout string - stdout, _, err := RunUnchecked(fmt.Sprintf( + stdout, _, err := utils.RunUnchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, - composeListFilesMinio(path, "minio"))) + composeListFiles(path, "minio"))) if err != nil { return "", err } return strings.Trim(stdout, "\n"), nil } -// composeListFilesMinio builds the Minio command to list the filenames matching a given path -func composeListFilesMinio(path string, serviceName string) string { +// composeListFiles builds the Minio command to list the filenames matching a given path +func composeListFiles(path string, serviceName string) string { return fmt.Sprintf("sh -c 'mc find %v --path %v'", serviceName, path) } -// composeListFilesMinio builds the Minio command to list the filenames matching a given path -func composeCleanFilesMinio(path string) string { +// composeCleanFiles builds the Minio command to remove the files matching a given path +func composeCleanFiles(path string) string { return fmt.Sprintf("sh -c 'mc rm --force --recursive %v'", path) } -// composeFindMinioCmd builds the Minio find command -func composeFindMinioCmd(path string, serviceName string) string { +// composeFindCmd builds the Minio find command +func composeFindCmd(path string, serviceName string) string { return fmt.Sprintf("sh -c 'mc find %v --path %v | wc -l'", serviceName, path) } -// GetFileTagsOnMinio will use the minioClient to retrieve the tags in a specified path -func GetFileTagsOnMinio(minioEnv *MinioEnv, path string) (TagSet, error) { +// GetFileTags will use the minioClient to retrieve the tags in a specified path +func GetFileTags(minioEnv *Env, path string) (TagSet, error) { var output TagSet // Make sure we have a registered backup to access - out, _, err := RunUncheckedRetry(fmt.Sprintf( + out, _, err := utils.RunUncheckedRetry(fmt.Sprintf( "kubectl exec -n %v %v -- sh -c 'mc find minio --path %v | head -n1'", minioEnv.Namespace, minioEnv.Client.Name, @@ -578,7 +582,7 @@ func GetFileTagsOnMinio(minioEnv *MinioEnv, path string) (TagSet, error) { walFile := strings.Trim(out, "\n") - stdout, _, err := RunUncheckedRetry(fmt.Sprintf( + stdout, _, err := utils.RunUncheckedRetry(fmt.Sprintf( "kubectl exec -n %v %v -- sh -c 'mc --json tag list %v'", minioEnv.Namespace, minioEnv.Client.Name, @@ -594,8 +598,8 @@ func GetFileTagsOnMinio(minioEnv *MinioEnv, path string) (TagSet, error) { return output, nil } -// MinioTestConnectivityUsingBarmanCloudWalArchive returns true if test connection is successful else false -func MinioTestConnectivityUsingBarmanCloudWalArchive( +// TestConnectivityUsingBarmanCloudWalArchive returns true if the test connection is successful, false otherwise +func TestConnectivityUsingBarmanCloudWalArchive( namespace, clusterName, podName, @@ -609,7 +613,7 @@ func MinioTestConnectivityUsingBarmanCloudWalArchive( "barman-cloud-wal-archive --cloud-provider aws-s3 --endpoint-url https://%s:9000 s3://cluster-backups/ %s "+ "000000010000000000000000 --test", postgres.BarmanBackupEndpointCACertificateLocation, id, key, minioSvcName, clusterName) - _, _, err := RunUnchecked(fmt.Sprintf( + _, _, err := utils.RunUnchecked(fmt.Sprintf( "kubectl exec -n %v %v -c postgres -- /bin/bash -c \"%v\"", namespace, podName, @@ -620,16 +624,25 @@ func MinioTestConnectivityUsingBarmanCloudWalArchive( -// CleanFilesOnMinio clean files on minio for a given path 
-func CleanFilesOnMinio(minioEnv *MinioEnv, path string) (string, error) { +// CleanFiles cleans files on minio for a given path +func CleanFiles(minioEnv *Env, path string) (string, error) { var stdout string - stdout, _, err := RunUnchecked(fmt.Sprintf( + stdout, _, err := utils.RunUnchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, - composeCleanFilesMinio(path))) + composeCleanFiles(path))) if err != nil { return "", err } return strings.Trim(stdout, "\n"), nil } + +// GetFilePath gets the MinIO file string for WAL/backup objects in a configured bucket +func GetFilePath(serverName, fileName string) string { + // the * regexes enable matching these typical paths: + // minio/backups/serverName/base/20220618T140300/data.tar + // minio/backups/serverName/wals/0000000100000000/000000010000000000000002.gz + // minio/backups/serverName/wals/00000002.history.gz + return filepath.Join("*", serverName, "*", fileName) +} diff --git a/tests/utils/secrets.go b/tests/utils/secrets.go index f45c2b1cb9..c6f01b3f10 100644 --- a/tests/utils/secrets.go +++ b/tests/utils/secrets.go @@ -21,6 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -101,3 +102,30 @@ func GetCredentials( password := string(secret.Data["password"]) return username, password, nil } + +// CreateObjectStorageSecret generates an Opaque Secret with a given ID and Key +func CreateObjectStorageSecret( + namespace string, + secretName string, + id string, + key string, + env *TestingEnvironment, +) (*corev1.Secret, error) { + targetSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + StringData: map[string]string{ + "ID": id, + "KEY": key, + }, + Type: corev1.SecretTypeOpaque, + } + obj, err := CreateObject(env, targetSecret) + if err != nil { + return nil, err + } + + return obj.(*corev1.Secret), nil +} From fdafa5546850c67a5207bf65bfa5091966a533c5 Mon Sep 17 00:00:00 2001 From: Peggie Date: Wed, 23 Oct 2024 12:27:01 +0200 Subject: [PATCH 103/836] feat: Public Cloud K8S versions update (#5918) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index 4c3493a7b5..873c7f6786 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,5 +1,5 @@ [ - "1.30.4", - "1.29.8", + "1.30.5", + "1.29.9", "1.28.9" ] From 4d2984d4c4278189f5cf9c78229c98e35a45aedd Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 23 Oct 2024 15:14:36 +0200 Subject: [PATCH 104/836] test: remove redundant log capture in tablespace tests (#5781) After #5790, we capture the logs for the whole cluster, so it is unnecessary to set up the log capture for the single tests. Closes #5847 Signed-off-by: Jonathan Gonzalez V. --- tests/e2e/tablespaces_test.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index cad9524df3..afbaa42c13 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -17,8 +17,6 @@ limitations under the License. 
package e2e import ( - "bytes" - "context" "fmt" "os" "path" @@ -36,7 +34,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" @@ -79,19 +76,6 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) cluster, err = env.GetCluster(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - - clusterLogs := logs.ClusterStreamingRequest{ - Cluster: cluster, - Options: &corev1.PodLogOptions{ - Follow: true, - }, - } - var buffer bytes.Buffer - go func() { - defer GinkgoRecover() - err = clusterLogs.SingleStream(context.TODO(), &buffer) - Expect(err).ToNot(HaveOccurred()) - }() } Context("on a new cluster with tablespaces", Ordered, func() { From 8ff0929affe93535b701b65b577282c87c63bb3f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:28:45 +0200 Subject: [PATCH 105/836] test: Updated Postgres versions used in E2E tests (#5863) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 73e9d1a8d0..3f2a5d2f85 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "17": [ "17.0", - "17.0-15" + "17.0-20" ], "16": [ "16.4", From 1a70c90a59211d1198caf6e6d667318509d93bd3 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 23 Oct 2024 22:26:09 +0200 Subject: [PATCH 106/836] fix(plugin): handle multiple containers in `kubectl cnpg logs` (#5931) This patch fixes an issue in the `kubectl cnpg logs` command that leads to a failure if the instance pod has more than one container. 
Closes #5905 Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- pkg/utils/logs/cluster_logs.go | 24 +++++++++----- pkg/utils/logs/cluster_logs_test.go | 51 +++++++++++++++++++---------- 2 files changed, 50 insertions(+), 25 deletions(-) diff --git a/pkg/utils/logs/cluster_logs.go b/pkg/utils/logs/cluster_logs.go index c0afc0a314..3e5b85e6c9 100644 --- a/pkg/utils/logs/cluster_logs.go +++ b/pkg/utils/logs/cluster_logs.go @@ -25,7 +25,7 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -46,7 +46,7 @@ const DefaultFollowWaiting time.Duration = 1 * time.Second // streaming type ClusterStreamingRequest struct { Cluster *apiv1.Cluster - Options *v1.PodLogOptions + Options *corev1.PodLogOptions Previous bool `json:"previous,omitempty"` FollowWaiting time.Duration // NOTE: the Client argument may be omitted, but it is good practice to pass it @@ -62,14 +62,17 @@ func (csr *ClusterStreamingRequest) getClusterNamespace() string { return csr.Cluster.Namespace } -func (csr *ClusterStreamingRequest) getLogOptions(containerName string) *v1.PodLogOptions { +func (csr *ClusterStreamingRequest) getLogOptions(containerName string) *corev1.PodLogOptions { if csr.Options == nil { - csr.Options = &v1.PodLogOptions{ + return &corev1.PodLogOptions{ Container: containerName, + Previous: csr.Previous, } } - csr.Options.Previous = csr.Previous - return csr.Options + options := csr.Options.DeepCopy() + options.Container = containerName + options.Previous = csr.Previous + return options } func (csr *ClusterStreamingRequest) getKubernetesClient() kubernetes.Interface { @@ -135,6 +138,8 @@ func (as *activeSet) add(name string) { // has returns true if and only if name is active func (as *activeSet) has(name string) bool { + as.m.Lock() + defer as.m.Unlock() _, found := as.set[name] return found } @@ -149,6 +154,8 @@ func (as *activeSet) drop(name string) { // isZero checks if there are any active processes func (as *activeSet) isZero() bool { + as.m.Lock() + defer as.m.Unlock() return len(as.set) == 0 } @@ -169,7 +176,7 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. for { var ( - podList *v1.PodList + podList *corev1.PodList err error ) if isFirstScan || csr.Options.Follow { @@ -189,6 +196,7 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. return nil } + wrappedWriter := safeWriterFrom(writer) for _, pod := range podList.Items { for _, container := range pod.Status.ContainerStatuses { if container.State.Running != nil { @@ -204,7 +212,7 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. container.Name, client, streamSet, - safeWriterFrom(writer), + wrappedWriter, ) } } diff --git a/pkg/utils/logs/cluster_logs_test.go b/pkg/utils/logs/cluster_logs_test.go index 4fa7b6f6bc..b0561d8a22 100644 --- a/pkg/utils/logs/cluster_logs_test.go +++ b/pkg/utils/logs/cluster_logs_test.go @@ -22,7 +22,7 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" @@ -33,6 +33,23 @@ import ( . 
"github.com/onsi/gomega" ) +type syncBuffer struct { + b bytes.Buffer + m sync.Mutex +} + +func (b *syncBuffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Write(p) +} + +func (b *syncBuffer) String() string { + b.m.Lock() + defer b.m.Unlock() + return b.b.String() +} + var _ = Describe("Cluster logging tests", func() { clusterNamespace := "cluster-test" clusterName := "myTestCluster" @@ -42,7 +59,7 @@ var _ = Describe("Cluster logging tests", func() { Name: clusterName, }, } - pod := &v1.Pod{ + pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, Name: clusterName + "-1", @@ -50,18 +67,18 @@ var _ = Describe("Cluster logging tests", func() { utils.ClusterLabelName: clusterName, }, }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ { Name: "postgresql", - State: v1.ContainerState{ - Running: &v1.ContainerStateRunning{}, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, }, }, }, }, } - podWithSidecars := &v1.Pod{ + podWithSidecars := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, Name: clusterName + "-1", @@ -69,18 +86,18 @@ var _ = Describe("Cluster logging tests", func() { utils.ClusterLabelName: clusterName, }, }, - Status: v1.PodStatus{ - ContainerStatuses: []v1.ContainerStatus{ + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ { Name: "postgresql", - State: v1.ContainerState{ - Running: &v1.ContainerStateRunning{}, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, }, }, { Name: "sidecar", - State: v1.ContainerState{ - Running: &v1.ContainerStateRunning{}, + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, }, }, }, @@ -96,7 +113,7 @@ var _ = Describe("Cluster logging tests", func() { defer wait.Done() streamClusterLogs := ClusterStreamingRequest{ Cluster: cluster, - Options: &v1.PodLogOptions{ + Options: &corev1.PodLogOptions{ Follow: false, }, Client: client, @@ -119,7 +136,7 @@ var _ = Describe("Cluster logging tests", func() { defer wait.Done() streamClusterLogs := ClusterStreamingRequest{ Cluster: cluster, - Options: &v1.PodLogOptions{ + Options: &corev1.PodLogOptions{ Follow: false, }, Client: client, @@ -134,7 +151,7 @@ var _ = Describe("Cluster logging tests", func() { It("should catch extra logs if given the follow option", func(ctx context.Context) { client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer + var logBuffer syncBuffer // let's set a short follow-wait, and keep the cluster streaming for two // cycles followWaiting := 200 * time.Millisecond @@ -143,7 +160,7 @@ var _ = Describe("Cluster logging tests", func() { defer GinkgoRecover() streamClusterLogs := ClusterStreamingRequest{ Cluster: cluster, - Options: &v1.PodLogOptions{ + Options: &corev1.PodLogOptions{ Follow: true, }, FollowWaiting: followWaiting, From 919d2b52a78363e3a65d4a69322665d741049240 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:33:27 +0200 Subject: [PATCH 107/836] fix(deps): update kubernetes patches to v0.31.2 (main) (#5932) https://github.com/kubernetes/api `v0.31.1` -> `v0.31.2` https://github.com/kubernetes/apiextensions-apiserver `v0.31.1` -> `v0.31.2` https://github.com/kubernetes/apimachinery `v0.31.1` -> `v0.31.2` https://github.com/kubernetes/cli-runtime `v0.31.1` -> `v0.31.2` https://github.com/kubernetes/client-go 
`v0.31.1` -> `v0.31.2` --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index f6aa11ec03..3714fa4102 100644 --- a/go.mod +++ b/go.mod @@ -40,11 +40,11 @@ require ( golang.org/x/term v0.25.0 google.golang.org/grpc v1.67.1 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.31.1 - k8s.io/apiextensions-apiserver v0.31.1 - k8s.io/apimachinery v0.31.1 - k8s.io/cli-runtime v0.31.1 - k8s.io/client-go v0.31.1 + k8s.io/api v0.31.2 + k8s.io/apiextensions-apiserver v0.31.2 + k8s.io/apimachinery v0.31.2 + k8s.io/cli-runtime v0.31.2 + k8s.io/client-go v0.31.2 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 diff --git a/go.sum b/go.sum index 1c1d7bd228..7b5f00c955 100644 --- a/go.sum +++ b/go.sum @@ -278,16 +278,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= -k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= -k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= -k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= -k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.31.2 h1:7FQt4C4Xnqx8V1GJqymInK0FFsoC+fAZtbLqgXYVOLQ= +k8s.io/cli-runtime v0.31.2/go.mod h1:XROyicf+G7rQ6FQJMbeDV9jqxzkWXTYD6Uxd15noe0Q= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= From 52b991f62a879acd3a3b192032b5dc9e594e93fa Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 24 Oct 2024 14:33:58 +0200 Subject: [PATCH 108/836] test: improve unit tests and add race condition detection (#5936) * Added `make test-race` target to the Makefile to enable running tests with the Go race detector. This helps catch potential concurrency issues. * Added the missing `RunSpec` invocation in the `internal/cmd/manager/instance/run` package. 
* Marked relevant test suites with the `Ordered` modifier to ensure proper execution order for dependent tests. * Moved shared variables initialization in a `BeforeEach` block. * Avoided disrupting the formatting of the test output by ensuring there was no output to stdout during the tests. * Renamed some imported packages to improve code clarity and readability. * Removed empty and unused test suites. * Improved test suite setup and teardown with `DeferCleanup()` to handle cleanup operations consistently and avoid manual `AfterSuite` calls. * Used `SpecContext` injected parameter instead of `context.TODO()` when a context is needed. * Applied various style fixes for consistency and code quality. Signed-off-by: Marco Nenciarini Signed-off-by: Marco Nenciarini Signed-off-by: Jaime Silvela Co-authored-by: Jaime Silvela --- Makefile | 8 +++- api/v1/cluster_funcs_test.go | 6 ++- api/v1/scheduledbackup_funcs_test.go | 6 ++- .../run}/suite_test.go | 6 +-- internal/cmd/plugin/logs/cluster_logs_test.go | 47 +++++++++++-------- internal/cmd/plugin/logs/cluster_test.go | 14 ++++-- internal/cmd/plugin/logs/suite_test.go | 14 ------ internal/cmd/plugin/suite_test.go | 14 +++--- .../controller/cluster_controller_test.go | 10 ++-- internal/controller/cluster_create_test.go | 14 +++--- internal/controller/cluster_restore_test.go | 26 ++++------ internal/controller/cluster_scale_test.go | 4 +- internal/controller/pooler_update_test.go | 9 ++-- .../controller/roles/postgres_test.go | 8 ++-- .../controller/roles/reconciler_test.go | 10 ++-- .../slots/reconciler/replicationslot_test.go | 12 ++--- .../controller/slots/runner/runner_test.go | 5 +- pkg/certs/certs_test.go | 8 ++-- pkg/certs/k8s_test.go | 6 +-- pkg/certs/tls_test.go | 6 +-- pkg/management/postgres/configuration_test.go | 1 - .../postgres/logicalimport/database_test.go | 21 ++++----- .../postgres/logicalimport/role_test.go | 15 +++--- .../postgres/logpipe/logpipe_test.go | 21 ++++----- .../postgres/metrics/collector_test.go | 2 +- pkg/management/postgres/restore_test.go | 17 ++++--- .../postgres/webserver/suite_test.go | 29 ------------ .../persistentvolumeclaim/reconciler_test.go | 2 +- .../persistentvolumeclaim/resources_test.go | 6 +-- pkg/resources/retry_test.go | 8 +--- pkg/utils/discovery_test.go | 9 +++- pkg/utils/logs/logs_test.go | 3 +- 32 files changed, 160 insertions(+), 207 deletions(-) rename internal/cmd/manager/{walarchive => instance/run}/suite_test.go (88%) delete mode 100644 pkg/management/postgres/webserver/suite_test.go diff --git a/Makefile b/Makefile index 192d152fd0..50cba47855 100644 --- a/Makefile +++ b/Makefile @@ -101,7 +101,13 @@ test: generate fmt vet manifests envtest ## Run tests. source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\ export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT=60s ;\ export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=60s ;\ - go test -coverpkg=./... --count=1 -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils ; + go test -coverpkg=./... -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils + +test-race: generate fmt vet manifests envtest ## Run tests enabling race detection. + mkdir -p ${ENVTEST_ASSETS_DIR} ;\ + source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\ + go run github.com/onsi/ginkgo/v2/ginkgo -r -p --skip-package=e2e \ + --race --keep-going --fail-on-empty --randomize-all --randomize-suites e2e-test-kind: ## Run e2e tests locally using kind. 
hack/e2e/run-e2e-kind.sh diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index 8d6f0950ac..c478c2b3ae 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -452,7 +452,7 @@ var _ = Describe("external cluster list", func() { }) }) -var _ = Describe("look up for secrets", func() { +var _ = Describe("look up for secrets", Ordered, func() { cluster := Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "clustername", @@ -479,15 +479,19 @@ var _ = Describe("look up for secrets", func() { It("retrieves client CA secret name", func() { Expect(cluster.GetClientCASecretName()).To(Equal("clustername-ca")) }) + It("retrieves server CA secret name", func() { Expect(cluster.GetServerCASecretName()).To(Equal("clustername-ca")) }) + It("retrieves replication secret name", func() { Expect(cluster.GetReplicationSecretName()).To(Equal("clustername-replication")) }) + It("retrieves replication secret name", func() { Expect(cluster.GetReplicationSecretName()).To(Equal("clustername-replication")) }) + It("retrieves all names needed to build a server CA certificate", func() { names := cluster.GetClusterAltDNSNames() Expect(names).To(HaveLen(12)) diff --git a/api/v1/scheduledbackup_funcs_test.go b/api/v1/scheduledbackup_funcs_test.go index e68b20ba42..9ef98a3692 100644 --- a/api/v1/scheduledbackup_funcs_test.go +++ b/api/v1/scheduledbackup_funcs_test.go @@ -26,9 +26,13 @@ import ( ) var _ = Describe("Scheduled backup", func() { - scheduledBackup := &ScheduledBackup{} + var scheduledBackup *ScheduledBackup backupName := "test" + BeforeEach(func() { + scheduledBackup = &ScheduledBackup{} + }) + It("properly creates a backup with no annotations", func() { backup := scheduledBackup.CreateBackup("test") Expect(backup).ToNot(BeNil()) diff --git a/internal/cmd/manager/walarchive/suite_test.go b/internal/cmd/manager/instance/run/suite_test.go similarity index 88% rename from internal/cmd/manager/walarchive/suite_test.go rename to internal/cmd/manager/instance/run/suite_test.go index e8e0072475..e1d9122745 100644 --- a/internal/cmd/manager/walarchive/suite_test.go +++ b/internal/cmd/manager/instance/run/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package walarchive +package run import ( "testing" @@ -23,7 +23,7 @@ import ( . "github.com/onsi/gomega" ) -func TestUtils(t *testing.T) { +func TestSuite(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "walarchive test suite") + RunSpecs(t, "instance run test suite") } diff --git a/internal/cmd/plugin/logs/cluster_logs_test.go b/internal/cmd/plugin/logs/cluster_logs_test.go index 326ab2a313..bcd7a87a1f 100644 --- a/internal/cmd/plugin/logs/cluster_logs_test.go +++ b/internal/cmd/plugin/logs/cluster_logs_test.go @@ -17,12 +17,11 @@ limitations under the License. package logs import ( - "context" "path" - v1 "k8s.io/api/core/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" - fake2 "k8s.io/client-go/kubernetes/fake" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeClient "k8s.io/client-go/kubernetes/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -34,18 +33,18 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = Describe("Get the logs", func() { +var _ = Describe("Get the logs", Ordered, func() { namespace := "default" clusterName := "test-cluster" - pod := &v1.Pod{ - ObjectMeta: v12.ObjectMeta{ + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName + "-1", }, } - client := fake2.NewSimpleClientset(pod) + client := fakeClient.NewSimpleClientset(pod) cluster := &apiv1.Cluster{ - ObjectMeta: v12.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName, Labels: map[string]string{ @@ -54,20 +53,24 @@ var _ = Describe("Get the logs", func() { }, Spec: apiv1.ClusterSpec{}, } - cl := clusterLogs{ - ctx: context.TODO(), - clusterName: clusterName, - namespace: namespace, - follow: true, - timestamp: true, - tailLines: -1, - client: client, - } + var cl clusterLogs plugin.Client = fake.NewClientBuilder(). WithScheme(scheme.BuildWithAllKnownScheme()). WithObjects(cluster). Build() + BeforeEach(func(ctx SpecContext) { + cl = clusterLogs{ + ctx: ctx, + clusterName: clusterName, + namespace: namespace, + follow: true, + timestamp: true, + tailLines: -1, + client: client, + } + }) + It("should get a proper cluster", func() { cluster, err := getCluster(cl) Expect(err).ToNot(HaveOccurred()) @@ -95,18 +98,24 @@ var _ = Describe("Get the logs", func() { }) It("should get the proper stream for logs", func() { + PauseOutputInterception() err := followCluster(cl) + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("should save the logs to file", func() { + tempDir := GinkgoT().TempDir() cl.outputFile = path.Join(tempDir, "test-file.logs") + PauseOutputInterception() err := saveClusterLogs(cl) + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("should fail if can't write a file", func() { - cl.outputFile = "/this-does-not-exist/test-file.log" + tempDir := GinkgoT().TempDir() + cl.outputFile = path.Join(tempDir, "this-does-not-exist/test-file.log") err := saveClusterLogs(cl) Expect(err).To(HaveOccurred()) }) diff --git a/internal/cmd/plugin/logs/cluster_test.go b/internal/cmd/plugin/logs/cluster_test.go index 1df2081fd4..d66206731a 100644 --- a/internal/cmd/plugin/logs/cluster_test.go +++ b/internal/cmd/plugin/logs/cluster_test.go @@ -17,8 +17,8 @@ limitations under the License. 
package logs import ( - v1 "k8s.io/api/core/v1" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakeClient "k8s.io/client-go/kubernetes/fake" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -35,14 +35,14 @@ var _ = Describe("Test the command", func() { clusterName := "test-cluster" namespace := "default" var cluster *apiv1.Cluster - pod := &v1.Pod{ - ObjectMeta: v12.ObjectMeta{ + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName + "-1", }, } cluster = &apiv1.Cluster{ - ObjectMeta: v12.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: clusterName, Labels: map[string]string{ @@ -62,14 +62,18 @@ var _ = Describe("Test the command", func() { It("should not fail, with cluster name as argument", func() { cmd := clusterCmd() cmd.SetArgs([]string{clusterName}) + PauseOutputInterception() err := cmd.Execute() + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) It("could follow the logs", func() { cmd := clusterCmd() cmd.SetArgs([]string{clusterName, "-f"}) + PauseOutputInterception() err := cmd.Execute() + ResumeOutputInterception() Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/internal/cmd/plugin/logs/suite_test.go b/internal/cmd/plugin/logs/suite_test.go index c5bd148d2d..476d2ff84b 100644 --- a/internal/cmd/plugin/logs/suite_test.go +++ b/internal/cmd/plugin/logs/suite_test.go @@ -17,27 +17,13 @@ limitations under the License. package logs import ( - "os" "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -var tempDir string - func TestPgbench(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Logs Suite") } - -var _ = BeforeSuite(func() { - var err error - tempDir, err = os.MkdirTemp(os.TempDir(), "logs_") - Expect(err).ToNot(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - err := os.RemoveAll(tempDir) - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/internal/cmd/plugin/suite_test.go b/internal/cmd/plugin/suite_test.go index 4628a4efab..2f0816b08b 100644 --- a/internal/cmd/plugin/suite_test.go +++ b/internal/cmd/plugin/suite_test.go @@ -48,8 +48,6 @@ func TestPlugin(t *testing.T) { } var _ = BeforeSuite(func() { - By("bootstrapping test environment") - if os.Getenv("USE_EXISTING_CLUSTER") == "true" { By("using existing config for test environment") testEnv = &envtest.Environment{} @@ -65,6 +63,12 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) + DeferCleanup(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + }) + err = apiv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) @@ -74,9 +78,3 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) }) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/internal/controller/cluster_controller_test.go b/internal/controller/cluster_controller_test.go index ef62079db2..9439f0b86b 100644 --- a/internal/controller/cluster_controller_test.go +++ b/internal/controller/cluster_controller_test.go @@ -17,7 +17,6 @@ limitations under the License. 
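The `suite_test.go` refactor above follows this shape: teardown is registered with `DeferCleanup()` inside `BeforeSuite`, removing the need for a separate `AfterSuite` block. This sketch assumes controller-runtime's envtest and omits the suite bootstrap:

```go
package example_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

var testEnv *envtest.Environment

var _ = BeforeSuite(func() {
	testEnv = &envtest.Environment{}

	cfg, err := testEnv.Start()
	Expect(err).ToNot(HaveOccurred())
	Expect(cfg).ToNot(BeNil())

	// Runs once the suite is done, keeping setup and teardown together.
	DeferCleanup(func() {
		By("tearing down the test environment")
		Expect(testEnv.Stop()).To(Succeed())
	})
})
```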
package controller import ( - "context" "time" cnpgTypes "github.com/cloudnative-pg/machinery/pkg/types" @@ -73,8 +72,7 @@ var _ = Describe("Updating target primary", func() { env = buildTestEnvironment() }) - It("selects the new target primary right away", func() { - ctx := context.TODO() + It("selects the new target primary right away", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace) @@ -132,8 +130,7 @@ var _ = Describe("Updating target primary", func() { }) }) - It("it should wait the failover delay to select the new target primary", func() { - ctx := context.TODO() + It("it should wait the failover delay to select the new target primary", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace, func(cluster *apiv1.Cluster) { cluster.Spec.FailoverDelay = 2 @@ -210,8 +207,7 @@ var _ = Describe("Updating target primary", func() { }) }) - It("Issue #1783: ensure that the scale-down behaviour remain consistent", func() { - ctx := context.TODO() + It("Issue #1783: ensure that the scale-down behaviour remain consistent", func(ctx SpecContext) { namespace := newFakeNamespace(env.client) cluster := newFakeCNPGCluster(env.client, namespace, func(cluster *apiv1.Cluster) { cluster.Spec.Instances = 2 diff --git a/internal/controller/cluster_create_test.go b/internal/controller/cluster_create_test.go index c7068a15d7..d6aa79bcbf 100644 --- a/internal/controller/cluster_create_test.go +++ b/internal/controller/cluster_create_test.go @@ -822,14 +822,12 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { namespace = "test-namespace" ) var ( - ctx context.Context proposed *corev1.Secret cli k8client.Client ) BeforeEach(func() { cli = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build() - ctx = context.TODO() const secretName = "test-secret" proposed = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -843,7 +841,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret does not exist", func() { - It("should create the secret", func() { + It("should create the secret", func(ctx SpecContext) { err := createOrPatchClusterCredentialSecret(ctx, cli, proposed) Expect(err).NotTo(HaveOccurred()) @@ -857,7 +855,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret exists and is owned by the cluster", func() { - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { existingSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, @@ -878,7 +876,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(cli.Create(ctx, existingSecret)).To(Succeed()) }) - It("should patch the secret if metadata differs", func() { + It("should patch the secret if metadata differs", func(ctx SpecContext) { Expect(proposed.Labels).To(HaveKeyWithValue("test", "label")) Expect(proposed.Annotations).To(HaveKeyWithValue("test", "annotation")) @@ -892,7 +890,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(patchedSecret.Annotations).To(HaveKeyWithValue("test", "annotation")) }) - It("should not patch the secret if metadata is the same", func() { + It("should not patch the secret if metadata is the same", func(ctx SpecContext) { var originalSecret corev1.Secret err := cli.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, &originalSecret) Expect(err).NotTo(HaveOccurred()) @@ -913,7 +911,7 @@ 
var _ = Describe("createOrPatchClusterCredentialSecret", func() { }) Context("when the secret exists but is not owned by the cluster", func() { - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { existingSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, @@ -923,7 +921,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(cli.Create(ctx, existingSecret)).To(Succeed()) }) - It("should not modify the secret", func() { + It("should not modify the secret", func(ctx SpecContext) { var originalSecret corev1.Secret err := cli.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, &originalSecret) Expect(err).NotTo(HaveOccurred()) diff --git a/internal/controller/cluster_restore_test.go b/internal/controller/cluster_restore_test.go index 616b0cf882..cb68fc565b 100644 --- a/internal/controller/cluster_restore_test.go +++ b/internal/controller/cluster_restore_test.go @@ -37,19 +37,17 @@ import ( var _ = Describe("ensureClusterIsNotFenced", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster ) - getCluster := func(clusterKey k8client.ObjectKey) (*apiv1.Cluster, error) { + getCluster := func(ctx context.Context, clusterKey k8client.ObjectKey) (*apiv1.Cluster, error) { remoteCluster := &apiv1.Cluster{} err := mockCli.Get(ctx, clusterKey, remoteCluster) return remoteCluster, err } BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -67,14 +65,14 @@ var _ = Describe("ensureClusterIsNotFenced", func() { }) Context("when no instances are fenced", func() { - It("should not modify the object", func() { - origCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + It("should not modify the object", func(ctx SpecContext) { + origCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) err = ensureClusterIsNotFenced(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) - remoteCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + remoteCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(remoteCluster.ObjectMeta).To(Equal(origCluster.ObjectMeta)) }) @@ -91,15 +89,15 @@ var _ = Describe("ensureClusterIsNotFenced", func() { Build() }) - It("should patch the cluster and remove fenced instances", func() { - origCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + It("should patch the cluster and remove fenced instances", func(ctx SpecContext) { + origCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(origCluster.Annotations).To(HaveKey(utils.FencedInstanceAnnotation)) err = ensureClusterIsNotFenced(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) - remoteCluster, err := getCluster(k8client.ObjectKeyFromObject(cluster)) + remoteCluster, err := getCluster(ctx, k8client.ObjectKeyFromObject(cluster)) Expect(err).ToNot(HaveOccurred()) Expect(remoteCluster.ObjectMeta).ToNot(Equal(origCluster.ObjectMeta)) @@ -110,13 +108,11 @@ var _ = Describe("ensureClusterIsNotFenced", func() { var _ = Describe("restoreClusterStatus", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster ) BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -131,7 +127,7 @@ var _ = Describe("restoreClusterStatus", func() { }) Context("when restoring cluster status", func() { - 
It("should patch the cluster with the updated status", func() { + It("should patch the cluster with the updated status", func(ctx SpecContext) { latestNodeSerial := 10 targetPrimaryNodeSerial := 3 @@ -151,7 +147,6 @@ var _ = Describe("restoreClusterStatus", func() { var _ = Describe("getOrphanPVCs", func() { var ( - ctx context.Context mockCli k8client.Client cluster *apiv1.Cluster goodPvcs []corev1.PersistentVolumeClaim @@ -159,7 +154,6 @@ var _ = Describe("getOrphanPVCs", func() { ) BeforeEach(func() { - ctx = context.TODO() cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -268,7 +262,7 @@ var _ = Describe("getOrphanPVCs", func() { Build() }) - It("should fetch only the pvcs that belong to the cluster and without an owner", func() { + It("should fetch only the pvcs that belong to the cluster and without an owner", func(ctx SpecContext) { remotePvcs, err := getOrphanPVCs(ctx, mockCli, cluster) Expect(err).ToNot(HaveOccurred()) Expect(remotePvcs).To(HaveLen(len(goodPvcs))) @@ -290,7 +284,7 @@ var _ = Describe("getOrphanPVCs", func() { Expect(primary).To(Equal(2)) }) - It("should correctly restore the orphan pvcs", func() { + It("should correctly restore the orphan pvcs", func(ctx SpecContext) { err := restoreOrphanPVCs(ctx, mockCli, cluster, goodPvcs) Expect(err).ToNot(HaveOccurred()) diff --git a/internal/controller/cluster_scale_test.go b/internal/controller/cluster_scale_test.go index 6b01ab1854..4ed725a82a 100644 --- a/internal/controller/cluster_scale_test.go +++ b/internal/controller/cluster_scale_test.go @@ -190,7 +190,7 @@ var _ = Describe("cluster scale pod and job deletion logic", func() { cancel() }) - It("should delete all the jobs", func() { + It("should delete all the jobs", func(ctx SpecContext) { for _, jobName := range specs.GetPossibleJobNames(instanceName) { job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -198,7 +198,7 @@ var _ = Describe("cluster scale pod and job deletion logic", func() { Namespace: cluster.Namespace, }, } - err := fakeClientSet.Create(context.TODO(), job) + err := fakeClientSet.Create(ctx, job) Expect(err).NotTo(HaveOccurred()) } diff --git a/internal/controller/pooler_update_test.go b/internal/controller/pooler_update_test.go index 6f0b599c23..8774d1ad8b 100644 --- a/internal/controller/pooler_update_test.go +++ b/internal/controller/pooler_update_test.go @@ -362,7 +362,6 @@ var _ = Describe("unit test of pooler_update reconciliation logic", func() { var _ = Describe("ensureServiceAccountPullSecret", func() { var ( - ctx context.Context r *PoolerReconciler pooler *apiv1.Pooler conf *configuration.Data @@ -385,8 +384,6 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { } BeforeEach(func() { - ctx = context.TODO() - pullSecret = generateOperatorPullSecret() conf = &configuration.Data{ @@ -417,13 +414,13 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { } }) - It("should create the pull secret", func() { + It("should create the pull secret", func(ctx SpecContext) { name, err := r.ensureServiceAccountPullSecret(ctx, pooler, conf) Expect(err).ToNot(HaveOccurred()) Expect(name).To(Equal(poolerSecretName)) }) - It("should not change the pull secret if it matches", func() { + It("should not change the pull secret if it matches", func(ctx SpecContext) { By("creating the secret before triggering the reconcile") secret := generateOperatorPullSecret() secret.Name = poolerSecretName @@ -450,7 +447,7 @@ var _ = Describe("ensureServiceAccountPullSecret", func() { 
Expect(remoteSecret).To(BeEquivalentTo(remoteSecret)) }) - It("should reconcile the secret if it doesn't match", func() { + It("should reconcile the secret if it doesn't match", func(ctx SpecContext) { By("creating the secret before triggering the reconcile") secret := generateOperatorPullSecret() secret.Name = poolerSecretName diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go index d003af03f3..60fdbbe99f 100644 --- a/internal/management/controller/roles/postgres_test.go +++ b/internal/management/controller/roles/postgres_test.go @@ -548,7 +548,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() { Expect(queryValidUntil.String()).To(BeEquivalentTo(expectedQueryValidUntil)) }) - It("Getting the proper TransactionID per rol", func() { + It("Getting the proper TransactionID per rol", func(ctx SpecContext) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) prm := NewPostgresRoleManager(db) @@ -558,16 +558,16 @@ var _ = Describe("Postgres RoleManager implementation test", func() { dbRole := roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole() mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(errors.New("Kaboom")) - _, err = prm.GetLastTransactionID(context.TODO(), dbRole) + _, err = prm.GetLastTransactionID(ctx, dbRole) Expect(err).To(HaveOccurred()) mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(sql.ErrNoRows) - _, err = prm.GetLastTransactionID(context.TODO(), dbRole) + _, err = prm.GetLastTransactionID(ctx, dbRole) Expect(err).To(HaveOccurred()) rows.AddRow("1321") mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnRows(rows) - transID, err := prm.GetLastTransactionID(context.TODO(), dbRole) + transID, err := prm.GetLastTransactionID(ctx, dbRole) Expect(err).ToNot(HaveOccurred()) Expect(transID).To(BeEquivalentTo(1321)) }) diff --git a/internal/management/controller/roles/reconciler_test.go b/internal/management/controller/roles/reconciler_test.go index 1ee151a170..a126b73ef4 100644 --- a/internal/management/controller/roles/reconciler_test.go +++ b/internal/management/controller/roles/reconciler_test.go @@ -17,8 +17,6 @@ limitations under the License. 
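For context, the `go-sqlmock` expectations used in `postgres_test.go` above follow the pattern below, condensed into a plain `testing` function for brevity; the query text and returned values are invented, not the ones the real role manager issues:

```go
package roles_test

import (
	"errors"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
)

func TestLastTransactionIDQuery(t *testing.T) {
	// QueryMatcherEqual requires the executed SQL to match the expected
	// string exactly, instead of treating it as a regular expression.
	db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	const query = "SELECT xmin FROM pg_authid WHERE rolname = $1"

	// First expectation: the driver fails, and the caller must see it.
	mock.ExpectQuery(query).WithArgs("foo").WillReturnError(errors.New("kaboom"))
	if _, err := db.Query(query, "foo"); err == nil {
		t.Fatal("expected the mocked error")
	}

	// Second expectation: a prepared row set is returned instead.
	rows := sqlmock.NewRows([]string{"xmin"}).AddRow("1321")
	mock.ExpectQuery(query).WithArgs("foo").WillReturnRows(rows)
	res, err := db.Query(query, "foo")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_ = res.Close()

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Fatal(err)
	}
}
```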
package roles import ( - "context" - "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -30,17 +28,17 @@ import ( ) var _ = Describe("Role reconciler test", func() { - It("reconcile an empty cluster", func() { + It("reconcile an empty cluster", func(ctx SpecContext) { cluster := &v1.Cluster{} instance := &postgres.Instance{} mockClient := fake.NewClientBuilder().Build() - result, err := Reconcile(context.TODO(), instance, cluster, mockClient) + result, err := Reconcile(ctx, instance, cluster, mockClient) Expect(err).ToNot(HaveOccurred()) Expect(result).To(BeEquivalentTo(reconcile.Result{})) }) - It("reconcile fails with no database connection", func() { + It("reconcile fails with no database connection", func(ctx SpecContext) { instance := &postgres.Instance{} mockClient := fake.NewClientBuilder().Build() cluster := &v1.Cluster{ @@ -59,7 +57,7 @@ var _ = Describe("Role reconciler test", func() { "failed to connect to `user=postgres database=postgres`: " + "/controller/run/.s.PGSQL.5432 (/controller/run): " + "dial error: dial unix /controller/run/.s.PGSQL.5432: connect: no such file or directory" - result, err := Reconcile(context.TODO(), instance, cluster, mockClient) + result, err := Reconcile(ctx, instance, cluster, mockClient) Expect(err.Error()).To(BeEquivalentTo(pgStringError)) Expect(result).To(BeEquivalentTo(reconcile.Result{})) }) diff --git a/internal/management/controller/slots/reconciler/replicationslot_test.go b/internal/management/controller/slots/reconciler/replicationslot_test.go index 0634641475..8e90f2d068 100644 --- a/internal/management/controller/slots/reconciler/replicationslot_test.go +++ b/internal/management/controller/slots/reconciler/replicationslot_test.go @@ -103,7 +103,7 @@ func makeClusterWithInstanceNames(instanceNames []string, primary string) apiv1. 
} var _ = Describe("HA Replication Slots reconciliation in Primary", func() { - It("can create a new replication slot for a new cluster instance", func() { + It("can create a new replication slot for a new cluster instance", func(ctx SpecContext) { fakeSlotManager := fakeReplicationSlotManager{ replicationSlots: map[fakeSlot]bool{ {name: slotPrefix + "instance1", isHA: true}: true, @@ -117,7 +117,7 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeTrue()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance2", isHA: true}]).To(BeTrue()) - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) Expect(err).ShouldNot(HaveOccurred()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeFalse()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeTrue()) @@ -125,7 +125,7 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { Expect(fakeSlotManager.replicationSlots).To(HaveLen(2)) }) - It("can delete an inactive HA replication slot that is not in the cluster", func() { + It("can delete an inactive HA replication slot that is not in the cluster", func(ctx SpecContext) { fakeSlotManager := fakeReplicationSlotManager{ replicationSlots: map[fakeSlot]bool{ {name: slotPrefix + "instance1", isHA: true}: true, @@ -138,13 +138,13 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { Expect(fakeSlotManager.replicationSlots).To(HaveLen(3)) - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) Expect(err).ShouldNot(HaveOccurred()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeFalse()) Expect(fakeSlotManager.replicationSlots).To(HaveLen(1)) }) - It("will not delete an active HA replication slot that is not in the cluster", func() { + It("will not delete an active HA replication slot that is not in the cluster", func(ctx SpecContext) { fakeSlotManager := fakeReplicationSlotManager{ replicationSlots: map[fakeSlot]bool{ {name: slotPrefix + "instance1", isHA: true}: true, @@ -157,7 +157,7 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { Expect(fakeSlotManager.replicationSlots).To(HaveLen(3)) - _, err := ReconcileReplicationSlots(context.TODO(), "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) Expect(err).ShouldNot(HaveOccurred()) Expect(fakeSlotManager.replicationSlots[fakeSlot{name: slotPrefix + "instance3", isHA: true, active: true}]). 
To(BeTrue()) diff --git a/internal/management/controller/slots/runner/runner_test.go b/internal/management/controller/slots/runner/runner_test.go index de1df33e0b..df73585c72 100644 --- a/internal/management/controller/slots/runner/runner_test.go +++ b/internal/management/controller/slots/runner/runner_test.go @@ -89,7 +89,7 @@ func (sm *fakeSlotManager) Delete(_ context.Context, slot infrastructure.Replica return nil } -var _ = Describe("Slot synchronization", func() { +var _ = Describe("Slot synchronization", Ordered, func() { localPodName := "cluster-2" localSlotName := "_cnpg_cluster_2" slot3 := "cluster-3" @@ -127,6 +127,7 @@ var _ = Describe("Slot synchronization", func() { Expect(localSlotsAfter.Has(slot4)).To(BeTrue()) Expect(local.slotsCreated).To(Equal(2)) }) + It("can update slots in local when ReplayLSN in primary advanced", func(ctx SpecContext) { // advance slot3 in primary newLSN := "0/308C4D8" @@ -144,6 +145,7 @@ var _ = Describe("Slot synchronization", func() { Expect(slot.RestartLSN).To(Equal(newLSN)) Expect(local.slotsUpdated).To(Equal(1)) }) + It("can drop slots in local when they are no longer in primary", func(ctx SpecContext) { err := primary.Delete(ctx, infrastructure.ReplicationSlot{SlotName: slot4}) Expect(err).ShouldNot(HaveOccurred()) @@ -157,6 +159,7 @@ var _ = Describe("Slot synchronization", func() { Expect(localSlotsAfter.Has(slot3)).To(BeTrue()) Expect(local.slotsDeleted).To(Equal(1)) }) + It("can drop slots in local that hold xmin", func(ctx SpecContext) { slotWithXmin := "_cnpg_xmin" err := primary.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotWithXmin}) diff --git a/pkg/certs/certs_test.go b/pkg/certs/certs_test.go index aa85c9e0da..503553552c 100644 --- a/pkg/certs/certs_test.go +++ b/pkg/certs/certs_test.go @@ -343,20 +343,22 @@ var _ = Describe("Certicate duration and expiration threshold", func() { defaultExpiringThreshold := configuration.ExpiringCheckThreshold * 24 * time.Hour tenDays := 10 * 24 * time.Hour + BeforeEach(func() { + configuration.Current = configuration.NewConfiguration() + }) + It("returns the default duration", func() { duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(defaultCertificateDuration)) }) It("returns the default duration if the configuration is a negative value", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.CertificateDuration = -1 duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(defaultCertificateDuration)) }) It("returns a valid duration of 10 days", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.CertificateDuration = 10 duration := getCertificateDuration() Expect(duration).To(BeEquivalentTo(tenDays)) @@ -368,14 +370,12 @@ var _ = Describe("Certicate duration and expiration threshold", func() { }) It("returns the default check threshold if the configuration is a negative value", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.ExpiringCheckThreshold = -1 threshold := getCheckThreshold() Expect(threshold).To(BeEquivalentTo(defaultExpiringThreshold)) }) It("returns a valid threshold of 10 days", func() { - configuration.Current = configuration.NewConfiguration() configuration.Current.ExpiringCheckThreshold = 10 threshold := getCheckThreshold() Expect(threshold).To(BeEquivalentTo(tenDays)) diff --git a/pkg/certs/k8s_test.go b/pkg/certs/k8s_test.go index 54f54c9044..043b13be82 100644 --- a/pkg/certs/k8s_test.go +++ b/pkg/certs/k8s_test.go 
@@ -163,7 +163,7 @@ var _ = Describe("Root CA secret generation", func() { }) var _ = Describe("Webhook certificate validation", func() { - When("we have a valid CA secret", func() { + When("we have a valid CA secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate @@ -196,7 +196,7 @@ var _ = Describe("Webhook certificate validation", func() { }) }) - When("we have a valid CA and webhook secret", func() { + When("we have a valid CA and webhook secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate var caSecret, webhookSecret *corev1.Secret @@ -220,7 +220,7 @@ var _ = Describe("Webhook certificate validation", func() { }) }) - When("we have a valid CA secret and expired webhook secret", func() { + When("we have a valid CA secret and expired webhook secret", Ordered, func() { kubeClient := generateFakeClient() pki := pkiEnvironmentTemplate diff --git a/pkg/certs/tls_test.go b/pkg/certs/tls_test.go index 8e99876520..66039695eb 100644 --- a/pkg/certs/tls_test.go +++ b/pkg/certs/tls_test.go @@ -35,13 +35,11 @@ import ( var _ = Describe("newTLSConfigFromSecret", func() { var ( - ctx context.Context c client.Client caSecret types.NamespacedName ) BeforeEach(func() { - ctx = context.TODO() caSecret = types.NamespacedName{Name: "test-secret", Namespace: "default"} }) @@ -276,7 +274,7 @@ MQCKGqId+Xj6O6gnoi9xhu0rbzSnMjrURoa1v2d5+O5XssE7LGtJdIKrd2p7EuwE c = fake.NewClientBuilder().Build() }) - It("should return an error", func() { + It("should return an error", func(ctx SpecContext) { tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret) Expect(err).To(HaveOccurred()) Expect(tlsConfig).To(BeNil()) @@ -295,7 +293,7 @@ MQCKGqId+Xj6O6gnoi9xhu0rbzSnMjrURoa1v2d5+O5XssE7LGtJdIKrd2p7EuwE c = fake.NewClientBuilder().WithObjects(secret).Build() }) - It("should return an error", func() { + It("should return an error", func(ctx SpecContext) { tlsConfig, err := newTLSConfigFromSecret(ctx, c, caSecret) Expect(err).To(HaveOccurred()) Expect(tlsConfig).To(BeNil()) diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go index 7d040f8071..f4a9d2f20d 100644 --- a/pkg/management/postgres/configuration_test.go +++ b/pkg/management/postgres/configuration_test.go @@ -84,7 +84,6 @@ var _ = Describe("testing the building of the ldap config string", func() { }) It("correctly builds a bindSearchAuth string", func() { str := buildLDAPConfigString(&cluster, ldapPassword) - fmt.Printf("here %s\n", str) Expect(str).To(Equal(fmt.Sprintf(`host all all 0.0.0.0/0 ldap ldapserver="%s" ldapport=%d `+ `ldapscheme="%s" ldaptls=1 ldapbasedn="%s" ldapbinddn="%s" `+ `ldapbindpasswd="%s" ldapsearchfilter="%s" ldapsearchattribute="%s"`, diff --git a/pkg/management/postgres/logicalimport/database_test.go b/pkg/management/postgres/logicalimport/database_test.go index 426703e5d7..cb3a6eb63a 100644 --- a/pkg/management/postgres/logicalimport/database_test.go +++ b/pkg/management/postgres/logicalimport/database_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package logicalimport import ( - "context" "fmt" "github.com/DATA-DOG/go-sqlmock" @@ -32,14 +31,12 @@ import ( var _ = Describe("databaseSnapshotter methods test", func() { var ( - ctx context.Context ds databaseSnapshotter fp fakePooler mock sqlmock.Sqlmock ) BeforeEach(func() { - ctx = context.TODO() ds = databaseSnapshotter{ cluster: &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -109,13 +106,13 @@ var _ = Describe("databaseSnapshotter methods test", func() { } }) - It("should execute the query properly", func() { + It("should execute the query properly", func(ctx SpecContext) { mock.ExpectExec(createQuery).WillReturnResult(sqlmock.NewResult(0, 0)) err := ds.executePostImportQueries(ctx, fp, "test") Expect(err).ToNot(HaveOccurred()) }) - It("should return any error encountered", func() { + It("should return any error encountered", func(ctx SpecContext) { expectedErr := fmt.Errorf("will fail") mock.ExpectExec(createQuery).WillReturnError(expectedErr) err := ds.executePostImportQueries(ctx, fp, "test") @@ -123,7 +120,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { }) }) - It("should run analyze", func() { + It("should run analyze", func(ctx SpecContext) { mock.ExpectExec("ANALYZE VERBOSE").WillReturnResult(sqlmock.NewResult(0, 0)) err := ds.analyze(ctx, fp, []string{"test"}) Expect(err).ToNot(HaveOccurred()) @@ -136,7 +133,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { expectedQuery = mock.ExpectQuery("SELECT extname FROM pg_extension WHERE oid >= 16384") }) - It("should drop the user-defined extensions successfully", func() { + It("should drop the user-defined extensions successfully", func(ctx SpecContext) { extensions := []string{"extension1", "extension2"} rows := sqlmock.NewRows([]string{"extname"}) @@ -150,7 +147,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should correctly handle an error when querying for extensions", func() { + It("should correctly handle an error when querying for extensions", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") expectedQuery.WillReturnError(expectedErr) @@ -158,7 +155,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(err).To(Equal(expectedErr)) }) - It("should correctly handle an error when dropping an extension", func() { + It("should correctly handle an error when dropping an extension", func(ctx SpecContext) { rows := sqlmock.NewRows([]string{"extname"}).AddRow("extension1") expectedQuery.WillReturnRows(rows) @@ -184,7 +181,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { } }) - It("should return the explicit database list if present", func() { + It("should return the explicit database list if present", func(ctx SpecContext) { explicitDatabaseList := []string{"db1", "db2"} ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = explicitDatabaseList @@ -193,7 +190,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(dbs).To(Equal(explicitDatabaseList)) }) - It("should query for databases if explicit list is not present", func() { + It("should query for databases if explicit list is not present", func(ctx SpecContext) { expectedQuery := mock.ExpectQuery(query) ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = []string{"*"} @@ -209,7 +206,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { Expect(dbs).To(Equal(queryDatabaseList)) }) - It("should return any error encountered when querying for databases", func() { + It("should return any error 
encountered when querying for databases", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") expectedQuery := mock.ExpectQuery(query) ds.cluster.Spec.Bootstrap.InitDB.Import.Databases = []string{"*"} diff --git a/pkg/management/postgres/logicalimport/role_test.go b/pkg/management/postgres/logicalimport/role_test.go index 4d2d70d940..a42fb001fd 100644 --- a/pkg/management/postgres/logicalimport/role_test.go +++ b/pkg/management/postgres/logicalimport/role_test.go @@ -17,7 +17,6 @@ limitations under the License. package logicalimport import ( - "context" "fmt" "github.com/DATA-DOG/go-sqlmock" @@ -35,7 +34,6 @@ var _ = Describe("", func() { "WHERE ur.oid >= 16384 AND um.oid >= 16384" var ( - ctx context.Context fp fakePooler mock sqlmock.Sqlmock ri []RoleInheritance @@ -43,7 +41,6 @@ var _ = Describe("", func() { ) BeforeEach(func() { - ctx = context.TODO() db, dbMock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) mock = dbMock @@ -66,7 +63,7 @@ var _ = Describe("", func() { Expect(expectationErr).ToNot(HaveOccurred()) }) - It("should clone role inheritance successfully", func() { + It("should clone role inheritance successfully", func(ctx SpecContext) { // Define the RoleInheritance result for getRoleInheritance ri := []RoleInheritance{ { @@ -95,7 +92,7 @@ var _ = Describe("", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should return any error encountered when getting role inheritance", func() { + It("should return any error encountered when getting role inheritance", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") mock.ExpectQuery(inhQuery).WillReturnError(expectedErr) @@ -103,7 +100,7 @@ var _ = Describe("", func() { Expect(err).To(Equal(expectedErr)) }) - It("should import role inheritance successfully", func() { + It("should import role inheritance successfully", func(ctx SpecContext) { query := fmt.Sprintf(`GRANT %s TO %s WITH ADMIN OPTION GRANTED BY %s`, pgx.Identifier{ri[0].RoleID}.Sanitize(), pgx.Identifier{ri[0].Member}.Sanitize(), @@ -117,7 +114,7 @@ var _ = Describe("", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should return the correct role inheritances", func() { + It("should return the correct role inheritances", func(ctx SpecContext) { mock.ExpectQuery(inhQuery). WillReturnRows(sqlmock.NewRows([]string{"roleid", "member", "admin_option", "grantor"}). AddRow("role1", "member1", true, "grantor1")) @@ -127,7 +124,7 @@ var _ = Describe("", func() { Expect(ris).To(Equal(ri)) }) - It("should return any error encountered when getting role inheritances", func() { + It("should return any error encountered when getting role inheritances", func(ctx SpecContext) { expectedErr := fmt.Errorf("querying error") mock.ExpectQuery(inhQuery).WillReturnError(expectedErr) @@ -135,7 +132,7 @@ var _ = Describe("", func() { Expect(err).To(Equal(expectedErr)) }) - It("should return any error encountered when scanning the result", func() { + It("should return any error encountered when scanning the result", func(ctx SpecContext) { mock.ExpectQuery(inhQuery).WillReturnRows(sqlmock.NewRows([]string{"wrongColumnName"}).AddRow("role1")) _, err := rm.getRoleInheritance(ctx) diff --git a/pkg/management/postgres/logpipe/logpipe_test.go b/pkg/management/postgres/logpipe/logpipe_test.go index e05bd48708..70a4902872 100644 --- a/pkg/management/postgres/logpipe/logpipe_test.go +++ b/pkg/management/postgres/logpipe/logpipe_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package logpipe import ( - "context" "errors" "os" "strings" @@ -39,9 +38,7 @@ func (writer *SpyRecordWriter) Write(record NamedRecord) { var _ = Describe("CSV file reader", func() { When("given CSV logs from logging_collector", func() { - ctx := context.TODO() - - It("can read multiple CSV lines", func() { + It("can read multiple CSV lines", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines.csv") defer func() { _ = f.Close() @@ -57,7 +54,7 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read multiple CSV lines on PostgreSQL version <= 12", func() { + It("can read multiple CSV lines on PostgreSQL version <= 12", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines_12.csv") defer func() { _ = f.Close() @@ -73,7 +70,7 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read multiple CSV lines on PostgreSQL version == 14", func() { + It("can read multiple CSV lines on PostgreSQL version == 14", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines_14.csv") defer func() { _ = f.Close() @@ -89,7 +86,7 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read pgAudit CSV lines", func() { + It("can read pgAudit CSV lines", func(ctx SpecContext) { f, err := os.Open("testdata/pgaudit.csv") defer func() { _ = f.Close() @@ -110,7 +107,7 @@ var _ = Describe("CSV file reader", func() { Expect(err).ShouldNot(HaveOccurred()) input := strings.TrimRight(string(inputBuffer), " \n") - It("there are too many fields", func() { + It("there are too many fields", func(ctx SpecContext) { spy := SpyRecordWriter{} longerInput := input + ",test" @@ -128,7 +125,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(FieldsPerRecord13 + 1)) }) - It("there are not enough fields", func() { + It("there are not enough fields", func(ctx SpecContext) { spy := SpyRecordWriter{} shorterInput := "one,two,three" @@ -146,7 +143,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(3)) }) - It("there is a trailing comma", func() { + It("there is a trailing comma", func(ctx SpecContext) { spy := SpyRecordWriter{} trailingCommaInput := input + "," @@ -164,7 +161,7 @@ var _ = Describe("CSV file reader", func() { Expect(extendedError.Fields).To(HaveLen(FieldsPerRecord13 + 1)) }) - It("there is a wrong number of fields on a line that is not the first", func() { + It("there is a wrong number of fields on a line that is not the first", func(ctx SpecContext) { spy := SpyRecordWriter{} longerInput := input + "\none,two,three" @@ -183,7 +180,7 @@ var _ = Describe("CSV file reader", func() { }) }) - It("correctly handles an empty stream", func() { + It("correctly handles an empty stream", func(ctx SpecContext) { spy := SpyRecordWriter{} p := LogPipe{ record: &LoggingRecord{}, diff --git a/pkg/management/postgres/metrics/collector_test.go b/pkg/management/postgres/metrics/collector_test.go index 8ce178d225..f8399472b1 100644 --- a/pkg/management/postgres/metrics/collector_test.go +++ b/pkg/management/postgres/metrics/collector_test.go @@ -23,7 +23,7 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = Describe("Set default queries", func() { +var _ = Describe("Set default queries", Ordered, func() { q := NewQueriesCollector("test", nil, "db") It("does assign nothing with empty default queries", func() { diff --git a/pkg/management/postgres/restore_test.go b/pkg/management/postgres/restore_test.go index bd59455472..ad19c76453 100644 --- a/pkg/management/postgres/restore_test.go +++ b/pkg/management/postgres/restore_test.go @@ -17,7 +17,6 @@ limitations under the License. package postgres import ( - "context" "os" "path" @@ -44,13 +43,13 @@ var _ = Describe("testing restore InitInfo methods", func() { _ = fileutils.RemoveFile(tempDir) }) - It("should correctly restore a custom PgWal folder without data", func() { + It("should correctly restore a custom PgWal folder without data", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, } - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeTrue()) @@ -59,7 +58,7 @@ var _ = Describe("testing restore InitInfo methods", func() { Expect(exists).To(BeTrue()) }) - It("should correctly migrate an existing wal folder to the new one", func() { + It("should correctly migrate an existing wal folder to the new one", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, @@ -93,7 +92,7 @@ var _ = Describe("testing restore InitInfo methods", func() { }) By("executing the restore custom wal dir function", func() { - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeTrue()) }) @@ -120,7 +119,7 @@ var _ = Describe("testing restore InitInfo methods", func() { }) }) - It("should not do any changes if the symlink is already present", func() { + It("should not do any changes if the symlink is already present", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, PgWal: newPgWal, @@ -135,16 +134,16 @@ var _ = Describe("testing restore InitInfo methods", func() { err = os.Symlink(newPgWal, pgWal) Expect(err).ToNot(HaveOccurred()) - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeFalse()) }) - It("should not do any changes if pgWal is not set", func() { + It("should not do any changes if pgWal is not set", func(ctx SpecContext) { initInfo := InitInfo{ PgData: pgData, } - chg, err := initInfo.restoreCustomWalDir(context.TODO()) + chg, err := initInfo.restoreCustomWalDir(ctx) Expect(err).ToNot(HaveOccurred()) Expect(chg).To(BeFalse()) }) diff --git a/pkg/management/postgres/webserver/suite_test.go b/pkg/management/postgres/webserver/suite_test.go deleted file mode 100644 index 34419aa9c8..0000000000 --- a/pkg/management/postgres/webserver/suite_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package webserver - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestMetricsServer(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Postgres Webserver test suite") -} diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go index 4ca794faa8..05937ce383 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go @@ -184,7 +184,7 @@ var _ = Describe("Reconcile resource requests", func() { }) }) -var _ = Describe("PVC reconciliation", func() { +var _ = Describe("PVC reconciliation", Ordered, func() { const clusterName = "cluster-pvc-reconciliation" fetchPVC := func(cl client.Client, pvcToFetch corev1.PersistentVolumeClaim) corev1.PersistentVolumeClaim { diff --git a/pkg/reconciler/persistentvolumeclaim/resources_test.go b/pkg/reconciler/persistentvolumeclaim/resources_test.go index bbccc10553..791f70e30a 100644 --- a/pkg/reconciler/persistentvolumeclaim/resources_test.go +++ b/pkg/reconciler/persistentvolumeclaim/resources_test.go @@ -17,8 +17,6 @@ limitations under the License. package persistentvolumeclaim import ( - "context" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,7 +29,7 @@ import ( ) var _ = Describe("PVC detection", func() { - It("will list PVCs with Jobs or Pods or which are Ready", func() { + It("will list PVCs with Jobs or Pods or which are Ready", func(ctx SpecContext) { clusterName := "myCluster" makeClusterPVC := func(serial string, isResizing bool) corev1.PersistentVolumeClaim { return makePVC(clusterName, serial, serial, NewPgDataCalculator(), isResizing) @@ -48,7 +46,7 @@ var _ = Describe("PVC detection", func() { }, } EnrichStatus( - context.TODO(), + ctx, cluster, []corev1.Pod{ makePod(clusterName, "1", specs.ClusterRoleLabelPrimary), diff --git a/pkg/resources/retry_test.go b/pkg/resources/retry_test.go index 04ae25cb90..7379c2905c 100644 --- a/pkg/resources/retry_test.go +++ b/pkg/resources/retry_test.go @@ -17,8 +17,6 @@ limitations under the License. 
package resources import ( - "context" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,11 +39,9 @@ var _ = Describe("RetryWithRefreshedResource", func() { var ( fakeClient client.Client testResource *appsv1.Deployment - ctx context.Context ) BeforeEach(func() { - ctx = context.TODO() fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build() testResource = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, @@ -70,7 +66,7 @@ var _ = Describe("RetryWithRefreshedResource", func() { }) Context("when client.Get succeeds", func() { - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { // Set up the fake client to return the resource without error Expect(fakeClient.Create(ctx, testResource)).To(Succeed()) @@ -80,7 +76,7 @@ var _ = Describe("RetryWithRefreshedResource", func() { Expect(err).ToNot(HaveOccurred()) }) - It("should invoke the callback without error and update the resource", func() { + It("should invoke the callback without error and update the resource", func(ctx SpecContext) { // ensure that the local deployment contains the old value Expect(*testResource.Spec.Replicas).To(Equal(int32(1))) diff --git a/pkg/utils/discovery_test.go b/pkg/utils/discovery_test.go index 6407a9e423..8652c3a445 100644 --- a/pkg/utils/discovery_test.go +++ b/pkg/utils/discovery_test.go @@ -43,8 +43,13 @@ var _ = DescribeTable("Kubernetes minor version detection", ) var _ = Describe("Detect resources properly when", func() { - client := fakeClient.NewSimpleClientset() - fakeDiscovery := client.Discovery().(*discoveryFake.FakeDiscovery) + var client *fakeClient.Clientset + var fakeDiscovery *discoveryFake.FakeDiscovery + + BeforeEach(func() { + client = fakeClient.NewSimpleClientset() + fakeDiscovery = client.Discovery().(*discoveryFake.FakeDiscovery) + }) It("should not detect PodMonitor resource", func() { exists, err := PodMonitorExist(client.Discovery()) diff --git a/pkg/utils/logs/logs_test.go b/pkg/utils/logs/logs_test.go index 8631c1c998..5d93c21cbc 100644 --- a/pkg/utils/logs/logs_test.go +++ b/pkg/utils/logs/logs_test.go @@ -125,10 +125,9 @@ var _ = Describe("Pod logging tests", func() { Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) }) - It("can follow pod logs", func() { + It("can follow pod logs", func(ctx SpecContext) { client := fake.NewSimpleClientset(pod) var logBuffer bytes.Buffer - ctx := context.TODO() var wait sync.WaitGroup wait.Add(1) go func() { From 2e90e9d5edd56dc8f3761e488255c7b43e287cb2 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Thu, 24 Oct 2024 16:00:31 +0200 Subject: [PATCH 109/836] docs: fix minor issues (#5903) * Mark all omitempty fields in the API as optional, so they won't appear as required in the documentation * Fix broken links to structs now in the barman-cloud API * Make Database resource appear in the API resource types Closes #5904 Signed-off-by: Francesco Canovai Signed-off-by: Gabriele Quaresima Co-authored-by: Gabriele Quaresima --- api/v1/backup_types.go | 4 + api/v1/cluster_types.go | 20 ++++ api/v1/common_types.go | 1 + api/v1/database_types.go | 4 + api/v1/pooler_types.go | 1 + api/v1/scheduledbackup_types.go | 1 + docs/config.yaml | 10 +- docs/src/backup_barmanobjectstore.md | 8 +- docs/src/cloudnative-pg.v1.md | 144 +++++++++++++++------------ docs/src/wal_archiving.md | 4 +- 10 files changed, 124 insertions(+), 73 deletions(-) diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go index 
9c3e49d2ae..f8a01fb1d3 100644 --- a/api/v1/backup_types.go +++ b/api/v1/backup_types.go @@ -182,6 +182,7 @@ type BackupSnapshotElementStatus struct { // TablespaceName is the name of the snapshotted tablespace. Only set // when type is PG_TABLESPACE + // +optional TablespaceName string `json:"tablespaceName,omitempty"` } @@ -285,9 +286,11 @@ type BackupStatus struct { Method BackupMethod `json:"method,omitempty"` // Whether the backup was online/hot (`true`) or offline/cold (`false`) + // +optional Online *bool `json:"online,omitempty"` // A map containing the plugin metadata + // +optional PluginMetadata map[string]string `json:"pluginMetadata,omitempty"` } @@ -333,6 +336,7 @@ type BackupList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of backups Items []Backup `json:"items"` diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 87171977b8..2fc34c4926 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -382,6 +382,7 @@ type ClusterSpec struct { // EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral // volumes + // +optional EphemeralVolumesSizeLimit *EphemeralVolumesSizeLimitConfiguration `json:"ephemeralVolumesSizeLimit,omitempty"` // Name of the priority class which will be used in every generated Pod, if the PriorityClass @@ -473,6 +474,7 @@ type ClusterSpec struct { // The plugins configuration, containing // any plugin to be loaded with the corresponding configuration + // +optional Plugins PluginConfigurationList `json:"plugins,omitempty"` } @@ -547,9 +549,11 @@ const ( // storage type EphemeralVolumesSizeLimitConfiguration struct { // Shm is the size limit of the shared memory volume + // +optional Shm *resource.Quantity `json:"shm,omitempty"` // TemporaryData is the size limit of the temporary data volume + // +optional TemporaryData *resource.Quantity `json:"temporaryData,omitempty"` } @@ -712,6 +716,7 @@ type ClusterStatus struct { // LastPromotionToken is the last verified promotion token that // was used to promote a replica cluster + // +optional LastPromotionToken string `json:"lastPromotionToken,omitempty"` // How many PVCs have been created by this cluster @@ -849,6 +854,7 @@ type ClusterStatus struct { Image string `json:"image,omitempty"` // PluginStatus is the status of the loaded plugins + // +optional PluginStatus []PluginStatus `json:"pluginStatus,omitempty"` // SwitchReplicaClusterStatus is the status of the switch to replica cluster @@ -967,10 +973,12 @@ type PgBouncerIntegrationStatus struct { type ReplicaClusterConfiguration struct { // Self defines the name of this cluster. It is used to determine if this is a primary // or a replica cluster, comparing it with `primary` + // +optional Self string `json:"self,omitempty"` // Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the // topology specified in externalClusters + // +optional Primary string `json:"primary,omitempty"` // The name of the external cluster which is the replication origin @@ -981,10 +989,12 @@ type ReplicaClusterConfiguration struct { // existing cluster. Replica cluster can be created from a recovery // object store or via streaming through pg_basebackup. // Refer to the Replica clusters page of the documentation for more information. 
+ // +optional Enabled *bool `json:"enabled,omitempty"` // A demotion token generated by an external cluster used to // check if the promotion requirements are met. + // +optional PromotionToken string `json:"promotionToken,omitempty"` // When replica mode is enabled, this parameter allows you to replay @@ -992,6 +1002,7 @@ type ReplicaClusterConfiguration struct { // time past the commit time. This provides an opportunity to correct // data loss errors. Note that when this parameter is set, a promotion // token cannot be used. + // +optional MinApplyDelay *metav1.Duration `json:"minApplyDelay,omitempty"` } @@ -2005,6 +2016,7 @@ type ManagedServices struct { // +optional DisabledDefaultServices []ServiceSelectorType `json:"disabledDefaultServices,omitempty"` // Additional is a list of additional managed services specified by the user. + // +optional Additional []ManagedService `json:"additional,omitempty"` } @@ -2018,6 +2030,7 @@ type ManagedService struct { // UpdateStrategy describes how the service differences should be reconciled // +kubebuilder:default:="patch" + // +optional UpdateStrategy ServiceUpdateStrategy `json:"updateStrategy,omitempty"` // ServiceTemplate is the template specification for the service. @@ -2047,6 +2060,7 @@ type PluginConfiguration struct { Enabled *bool `json:"enabled,omitempty"` // Parameters is the configuration of the plugin + // +optional Parameters map[string]string `json:"parameters,omitempty"` } @@ -2061,21 +2075,26 @@ type PluginStatus struct { // Capabilities are the list of capabilities of the // plugin + // +optional Capabilities []string `json:"capabilities,omitempty"` // OperatorCapabilities are the list of capabilities of the // plugin regarding the reconciler + // +optional OperatorCapabilities []string `json:"operatorCapabilities,omitempty"` // WALCapabilities are the list of capabilities of the // plugin regarding the WAL management + // +optional WALCapabilities []string `json:"walCapabilities,omitempty"` // BackupCapabilities are the list of capabilities of the // plugin regarding the Backup management + // +optional BackupCapabilities []string `json:"backupCapabilities,omitempty"` // Status contain the status reported by the plugin through the SetStatusInCluster interface + // +optional Status string `json:"status,omitempty"` } @@ -2204,6 +2223,7 @@ type ClusterList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of clusters Items []Cluster `json:"items"` diff --git a/api/v1/common_types.go b/api/v1/common_types.go index fb5144ae5b..b87e009b23 100644 --- a/api/v1/common_types.go +++ b/api/v1/common_types.go @@ -25,6 +25,7 @@ const VolumeSnapshotKind = "VolumeSnapshot" // not using the core data types. type Metadata struct { // The name of the resource. 
Only supported for certain types + // +optional Name string `json:"name,omitempty"` // Map of string keys and values that can be used to organize and categorize diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 243285dcbd..1089a97957 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -132,13 +132,17 @@ type DatabaseStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` // Ready is true if the database was reconciled correctly + // +optional Ready bool `json:"ready,omitempty"` // Error is the reconciliation error message + // +optional Error string `json:"error,omitempty"` } +// +genclient // +kubebuilder:object:root=true +// +kubebuilder:storageversion // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name" diff --git a/api/v1/pooler_types.go b/api/v1/pooler_types.go index 5fc3bdb9d1..b3f06fcbf2 100644 --- a/api/v1/pooler_types.go +++ b/api/v1/pooler_types.go @@ -260,6 +260,7 @@ type Pooler struct { // PoolerList contains a list of Pooler type PoolerList struct { metav1.TypeMeta `json:",inline"` + // +optional metav1.ListMeta `json:"metadata,omitempty"` Items []Pooler `json:"items"` } diff --git a/api/v1/scheduledbackup_types.go b/api/v1/scheduledbackup_types.go index 1929db5d95..b89248c49c 100644 --- a/api/v1/scheduledbackup_types.go +++ b/api/v1/scheduledbackup_types.go @@ -125,6 +125,7 @@ type ScheduledBackupList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional metav1.ListMeta `json:"metadata,omitempty"` // List of clusters Items []ScheduledBackup `json:"items"` diff --git a/docs/config.yaml b/docs/config.yaml index aa77638cf6..54ecf6e949 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -1,5 +1,6 @@ hiddenMemberFields: - "TypeMeta" + - "synchronizeReplicasCache" externalPackages: - match: ^github\.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1\.RelabelConfig$ @@ -31,7 +32,14 @@ externalPackages: hideTypePatterns: - "ParseError$" - - "List$" + # We cannot exclude all `List$` because we declare PluginConfigurationList + - "BackupList$" + - "ClusterList$" + - "ClusterImageCatalogList$" + - "DatabaseList$" + - "ImageCatalogList$" + - "PoolerList$" + - "ScheduledBackupList$" markdownDisabled: false diff --git a/docs/src/backup_barmanobjectstore.md b/docs/src/backup_barmanobjectstore.md index 34b907e0ae..5859966af3 100644 --- a/docs/src/backup_barmanobjectstore.md +++ b/docs/src/backup_barmanobjectstore.md @@ -96,9 +96,9 @@ algorithms via `barman-cloud-backup` (for backups) and * snappy The compression settings for backups and WALs are independent. See the -[DataBackupConfiguration](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-DataBackupConfiguration) and -[WALBackupConfiguration](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-WalBackupConfiguration) sections in -the API reference. +[DataBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration) and +[WALBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#WalBackupConfiguration) sections in +the barman-cloud API reference. 
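The convention this commit applies throughout the API diffs above is the kubebuilder `+optional` marker: any field serialized with `omitempty` should also be marked optional, so that neither the generated CRD schema nor the API reference lists it as required. A minimal sketch with an invented `WidgetStatus` type, mirroring the `DatabaseStatus` change:

```go
package v1

// WidgetStatus shows the pairing of `omitempty` with `+optional`.
type WidgetStatus struct {
	// Ready is true if the widget was reconciled correctly
	// +optional
	Ready bool `json:"ready,omitempty"`

	// Error is the reconciliation error message
	// +optional
	Error string `json:"error,omitempty"`
}
```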
It is important to note that archival time, restore time, and size change between the algorithms, so the compression algorithm should be chosen according @@ -198,4 +198,4 @@ spec: additionalCommandArgs: - "--max-concurrency=1" - "--read-timeout=60" -``` \ No newline at end of file +``` diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index f5678cb22e..7af6805e2e 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -9,6 +9,7 @@ - [Backup](#postgresql-cnpg-io-v1-Backup) - [Cluster](#postgresql-cnpg-io-v1-Cluster) - [ClusterImageCatalog](#postgresql-cnpg-io-v1-ClusterImageCatalog) +- [Database](#postgresql-cnpg-io-v1-Database) - [ImageCatalog](#postgresql-cnpg-io-v1-ImageCatalog) - [Pooler](#postgresql-cnpg-io-v1-Pooler) - [ScheduledBackup](#postgresql-cnpg-io-v1-ScheduledBackup) @@ -118,6 +119,44 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- +## Database {#postgresql-cnpg-io-v1-Database} + + + +

Database is the Schema for the databases API

+ + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.cnpg.io/v1
kind [Required]
string
Database
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided. Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+DatabaseSpec +
+

Specification of the desired Database. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
status
+DatabaseStatus +
+

Most recently observed status of the Database. This data may not be up to +date. Populated by the system. Read-only. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+
+ ## ImageCatalog {#postgresql-cnpg-io-v1-ImageCatalog} @@ -492,7 +531,7 @@ plugin for this backup

Type is the role of the snapshot in the cluster, such as PG_DATA, PG_WAL and PG_TABLESPACE

-tablespaceName [Required]
+tablespaceName
string @@ -809,14 +848,14 @@ parameter is omitted

The backup method being used

-online [Required]
+online
bool

Whether the backup was online/hot (true) or offline/cold (false)

-pluginMetadata [Required]
+pluginMetadata
map[string]string @@ -1638,7 +1677,7 @@ https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for more information.

-ephemeralVolumesSizeLimit [Required]
+ephemeralVolumesSizeLimit
EphemeralVolumesSizeLimitConfiguration @@ -1769,7 +1808,7 @@ advisable for any PostgreSQL cluster employed for development/staging purposes.

-plugins [Required]
+plugins
PluginConfigurationList @@ -1872,7 +1911,7 @@ any plugin to be loaded with the corresponding configuration

during a switchover or a failover

-lastPromotionToken [Required]
+lastPromotionToken
string @@ -2107,7 +2146,7 @@ This field is reported when .spec.failoverDelay is populated or dur

Image contains the image name used by the pods

-pluginStatus [Required]
+pluginStatus
[]PluginStatus @@ -2199,42 +2238,6 @@ PostgreSQL cluster from an existing storage

-## Database {#postgresql-cnpg-io-v1-Database} - - - -

Database is the Schema for the databases API

- - - - - - - - - - - - - - - -
FieldDescription
metadata [Required]
-meta/v1.ObjectMeta -
- No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
-DatabaseSpec -
-

Specification of the desired Database. -More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

-
status
-DatabaseStatus -
-

Most recently observed status of the Database. This data may not be up to -date. Populated by the system. Read-only. -More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

-
- ## DatabaseReclaimPolicy {#postgresql-cnpg-io-v1-DatabaseReclaimPolicy} (Alias of `string`) @@ -2438,14 +2441,14 @@ database is not valid

desired state that was synchronized

-ready [Required]
+ready
bool

Ready is true if the database was reconciled correctly

-error [Required]
+error
string @@ -2514,14 +2517,14 @@ storage

- - - - - - - - - + + + @@ -2441,18 +2448,18 @@ database is not valid

desired state that was synchronized

- - @@ -2493,6 +2500,8 @@ desired state that was synchronized

**Appears in:** +- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) + - [RoleConfiguration](#postgresql-cnpg-io-v1-RoleConfiguration) diff --git a/docs/src/declarative_database_management.md b/docs/src/declarative_database_management.md index 27fc7638a2..b740292660 100644 --- a/docs/src/declarative_database_management.md +++ b/docs/src/declarative_database_management.md @@ -24,7 +24,7 @@ spec: ``` Once the reconciliation cycle is completed successfully, the `Database` -status will show a `ready` field set to `true` and an empty `error` field. +status will show a `applied` field set to `true` and an empty `message` field. ### Database Deletion and Reclaim Policies diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index d2c4256bc7..7fb0b2a6f4 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -27,6 +27,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -122,11 +123,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c // Cannot do anything on a replica cluster if cluster.IsReplica() { - return r.failedReconciliation( - ctx, - &database, - errClusterIsReplica, - ) + return r.replicaClusterReconciliation(ctx, &database) } // Add the finalizer if we don't have it @@ -233,14 +230,14 @@ func (r *DatabaseReconciler) failedReconciliation( err error, ) (ctrl.Result, error) { oldDatabase := database.DeepCopy() - database.Status.Error = err.Error() - database.Status.Ready = false + database.Status.Message = fmt.Sprintf("reconciliation error: %s", err.Error()) + database.Status.Applied = ptr.To(false) var statusError *instance.StatusError if errors.As(err, &statusError) { // The body line of the instance manager contains the human // readable error - database.Status.Error = statusError.Body + database.Status.Message = statusError.Body } if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil { @@ -258,8 +255,8 @@ func (r *DatabaseReconciler) succeededReconciliation( database *apiv1.Database, ) (ctrl.Result, error) { oldDatabase := database.DeepCopy() - database.Status.Error = "" - database.Status.Ready = true + database.Status.Message = "" + database.Status.Applied = ptr.To(true) database.Status.ObservedGeneration = database.Generation if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil { @@ -271,6 +268,25 @@ func (r *DatabaseReconciler) succeededReconciliation( }, nil } +// replicaClusterReconciliation sets the status for a reconciliation that's +// executed in a replica Cluster +func (r *DatabaseReconciler) replicaClusterReconciliation( + ctx context.Context, + database *apiv1.Database, +) (ctrl.Result, error) { + oldDatabase := database.DeepCopy() + database.Status.Message = errClusterIsReplica.Error() + database.Status.Applied = nil + + if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{ + RequeueAfter: databaseReconciliationInterval, + }, nil +} + // NewDatabaseReconciler creates a new database reconciler func NewDatabaseReconciler( mgr manager.Manager, @@ -312,6 +328,10 @@ func (r *DatabaseReconciler) 
reconcileDatabase(ctx context.Context, obj *apiv1.D return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err) } + if obj.Spec.Ensure == apiv1.EnsureAbsent { + return dropDatabase(ctx, db, obj) + } + dbExists, err := detectDatabase(ctx, db, obj) if err != nil { return fmt.Errorf("while detecting the database %q: %w", obj.Spec.Name, err) diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index b55ac5a659..1a5f527918 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -114,7 +114,11 @@ func createDatabase( contextLogger.Error(err, "while creating database", "query", sqlCreateDatabase.String()) } - return err + if err != nil { + return fmt.Errorf("while creating database %q: %w", + obj.Spec.Name, err) + } + return nil } func updateDatabase( @@ -171,7 +175,7 @@ func updateDatabase( if _, err := db.ExecContext(ctx, changeOwnerSQL); err != nil { contextLogger.Error(err, "while altering database", "query", changeOwnerSQL) - return fmt.Errorf("while altering database %q owner %s to: %w", + return fmt.Errorf("while altering database %q owner to %s: %w", obj.Spec.Name, obj.Spec.Owner, err) } } @@ -184,7 +188,7 @@ func updateDatabase( if _, err := db.ExecContext(ctx, changeTablespaceSQL); err != nil { contextLogger.Error(err, "while altering database", "query", changeTablespaceSQL) - return fmt.Errorf("while altering database %q tablespace %s: %w", + return fmt.Errorf("while altering database %q tablespace to %s: %w", obj.Spec.Name, obj.Spec.Tablespace, err) } } diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index ecf9d4cd60..c9c86ac216 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -26,6 +26,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -145,8 +146,8 @@ var _ = Describe("Managed Database status", func() { }, &updatedDatabase) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Ready).Should(BeTrue()) - Expect(updatedDatabase.Status.Error).Should(BeEmpty()) + Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeTrue())) + Expect(updatedDatabase.Status.Message).Should(BeEmpty()) Expect(updatedDatabase.Finalizers).NotTo(BeEmpty()) }) @@ -179,8 +180,8 @@ var _ = Describe("Managed Database status", func() { }, &updatedDatabase) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Ready).Should(BeFalse()) - Expect(updatedDatabase.Status.Error).Should(ContainSubstring(expectedError.Error())) + Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeFalse())) + Expect(updatedDatabase.Status.Message).Should(ContainSubstring(expectedError.Error())) }) It("on deletion it removes finalizers and drops DB", func(ctx SpecContext) { @@ -214,8 +215,8 @@ var _ = Describe("Managed Database status", func() { }, &updatedDatabase) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Ready).Should(BeTrue()) - Expect(updatedDatabase.Status.Error).Should(BeEmpty()) + Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeTrue())) + 
Expect(updatedDatabase.Status.Message).Should(BeEmpty()) Expect(updatedDatabase.Finalizers).NotTo(BeEmpty()) // the next 3 lines are a hacky bit to make sure the next reconciler @@ -300,8 +301,8 @@ var _ = Describe("Managed Database status", func() { }, &updatedDatabase) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Ready).Should(BeFalse()) - Expect(updatedDatabase.Status.Error).Should(BeEmpty()) + Expect(updatedDatabase.Status.Applied).Should(BeNil()) + Expect(updatedDatabase.Status.Message).Should(BeEmpty()) }) It("skips reconciliation if database object isn't found (deleted database)", func(ctx SpecContext) { @@ -335,8 +336,8 @@ var _ = Describe("Managed Database status", func() { It("properly marks the status on a succeeded reconciliation", func(ctx SpecContext) { _, err := r.succeededReconciliation(ctx, database) Expect(err).ToNot(HaveOccurred()) - Expect(database.Status.Ready).To(BeTrue()) - Expect(database.Status.Error).To(BeEmpty()) + Expect(database.Status.Applied).To(HaveValue(BeTrue())) + Expect(database.Status.Message).To(BeEmpty()) }) It("properly marks the status on a failed reconciliation", func(ctx SpecContext) { @@ -344,8 +345,46 @@ var _ = Describe("Managed Database status", func() { _, err := r.failedReconciliation(ctx, database, exampleError) Expect(err).ToNot(HaveOccurred()) - Expect(database.Status.Ready).To(BeFalse()) - Expect(database.Status.Error).To(BeEquivalentTo(exampleError.Error())) + Expect(database.Status.Applied).To(HaveValue(BeFalse())) + Expect(database.Status.Message).To(ContainSubstring(exampleError.Error())) + }) + + It("properly marks the status on a replica Cluster reconciliation", func(ctx SpecContext) { + _, err := r.replicaClusterReconciliation(ctx, database) + Expect(err).ToNot(HaveOccurred()) + Expect(database.Status.Applied).To(BeNil()) + Expect(database.Status.Message).To(BeEquivalentTo(errClusterIsReplica.Error())) + }) + + It("drops database with ensure absent option", func(ctx SpecContext) { + // Mocking dropDatabase + expectedValue := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "DROP DATABASE IF EXISTS %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) + + // Update the obj to set EnsureAbsent + database.Spec.Ensure = apiv1.EnsureAbsent + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + + // Reconcile and get the updated object + _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: database.Namespace, + Name: database.Name, + }}) + Expect(err).ToNot(HaveOccurred()) + + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: database.Namespace, + Name: database.Name, + }, database) + Expect(err).ToNot(HaveOccurred()) + + Expect(database.Status.Applied).To(HaveValue(BeTrue())) + Expect(database.Status.Message).To(BeEmpty()) + Expect(database.Status.ObservedGeneration).To(BeEquivalentTo(1)) }) It("marks as failed if the target Database is already being managed", func(ctx SpecContext) { @@ -363,7 +402,7 @@ var _ = Describe("Managed Database status", func() { Owner: "app", }, Status: apiv1.DatabaseStatus{ - Ready: true, + Applied: ptr.To(true), ObservedGeneration: 1, }, } @@ -402,8 +441,32 @@ var _ = Describe("Managed Database status", func() { expectedError := fmt.Sprintf("database %q is already managed by Database object %q", dbDuplicate.Spec.Name, currentManager.Name) - Expect(dbDuplicate.Status.Ready).To(BeFalse()) - Expect(dbDuplicate.Status.Error).To(BeEquivalentTo(expectedError)) + 
Expect(dbDuplicate.Status.Applied).To(HaveValue(BeFalse())) + Expect(dbDuplicate.Status.Message).To(ContainSubstring(expectedError)) Expect(dbDuplicate.Status.ObservedGeneration).To(BeZero()) }) + + It("properly signals a database is on a replica cluster", func(ctx SpecContext) { + initialCluster := cluster.DeepCopy() + cluster.Spec.ReplicaCluster = &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + } + Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed()) + + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: database.Namespace, + Name: database.Spec.Name, + }}) + Expect(err).ToNot(HaveOccurred()) + + var updatedDatabase apiv1.Database + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: database.Namespace, + Name: database.Name, + }, &updatedDatabase) + Expect(err).ToNot(HaveOccurred()) + + Expect(updatedDatabase.Status.Applied).Should(BeNil()) + Expect(updatedDatabase.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary")) + }) }) diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index ba2fb47c2f..a7f9c574eb 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -110,7 +110,8 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test Eventually(func(g Gomega) { err := env.Client.Get(env.Ctx, databaseNamespacedName, &database) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(database.Status.Ready).Should(BeTrue()) + g.Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + g.Expect(database.Status.Message).Should(BeEmpty()) }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) @@ -188,7 +189,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test Eventually(func(g Gomega) { err := env.Client.Get(env.Ctx, databaseNamespacedName, dbObj) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(dbObj.Status.Ready).Should(BeTrue()) + g.Expect(dbObj.Status.Applied).Should(HaveValue(BeTrue())) }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) By("deleting the namespace and making sure it succeeds before timeout", func() { From f7359548e8ae6f6e5929515425f0813c7ca13591 Mon Sep 17 00:00:00 2001 From: smiyc <36233521+smiyc@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:04:10 +0100 Subject: [PATCH 113/836] fix(metrics): handle FIRST keyword in synchronousStandbyNames parsing (#5955) This patch corrects a bug where the metrics collector fails to parse the `synchronous_standby_names` configurations when `.spec.postgresql.synchronous.method` is set to `first`. 
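For illustration only, here is a self-contained sketch of the corrected parsing logic, with names simplified from the actual `pg_collector.go` implementation:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Matches both quorum-based ("ANY n (...)") and priority-based
// ("FIRST n (...)") synchronous_standby_names values.
var syncStandbyNamesRegex = regexp.MustCompile(`(?:ANY|FIRST) ([0-9]+) \(.*\)`)

// requestedSyncStandbys extracts the number of requested synchronous
// standbys from a synchronous_standby_names setting.
func requestedSyncStandbys(setting string) (int, error) {
	match := syncStandbyNamesRegex.FindStringSubmatch(setting)
	if len(match) != 2 {
		return 0, fmt.Errorf("not matching synchronous standby names regex: %s", setting)
	}
	return strconv.Atoi(match[1])
}

func main() {
	fmt.Println(requestedSyncStandbys(`FIRST 2 ("node-2","node-3")`)) // 2 <nil>
	fmt.Println(requestedSyncStandbys(`ANY 1 ("node-2")`))            // 1 <nil>
}
```

With this change, quorum-based (`ANY`) and priority-based (`FIRST`) settings are treated uniformly, since only the requested standby count is extracted.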
Closes #5538 Signed-off-by: Daniel Chambre Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Jaime Silvela Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- .../webserver/metricserver/pg_collector.go | 9 ++- .../metricserver/pg_collector_test.go | 56 +++++++++++++++++-- 2 files changed, 57 insertions(+), 8 deletions(-) diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index a7a74373b3..64f469f2bb 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -40,7 +40,7 @@ import ( // or the operator const PrometheusNamespace = "cnpg" -var synchronousStandbyNamesRegex = regexp.MustCompile(`ANY ([0-9]+) \(.*\)`) +var synchronousStandbyNamesRegex = regexp.MustCompile(`(?:ANY|FIRST) ([0-9]+) \(.*\)`) // Exporter exports a set of metrics and collectors on a given postgres instance type Exporter struct { @@ -517,7 +517,7 @@ func (e *Exporter) collectFromPrimaryFirstPointOnTimeRecovery() { } func (e *Exporter) collectFromPrimarySynchronousStandbysNumber(db *sql.DB) { - nStandbys, err := getSynchronousStandbysNumber(db) + nStandbys, err := getRequestedSynchronousStandbysNumber(db) if err != nil { log.Error(err, "unable to collect metrics") e.Metrics.Error.Set(1) @@ -546,7 +546,10 @@ func collectPGVersion(e *Exporter) error { return nil } -func getSynchronousStandbysNumber(db *sql.DB) (int, error) { +// getRequestedSynchronousStandbysNumber returns the number of requested synchronous standbys +// Example: FIRST 2 (node1,node2) will return 2, ANY 4 (node1) will return 4. +// If the query fails, it will return 0 and an error. +func getRequestedSynchronousStandbysNumber(db *sql.DB) (int, error) { var syncReplicasFromConfig string err := db.QueryRow(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)). Scan(&syncReplicasFromConfig) diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector_test.go b/pkg/management/postgres/webserver/metricserver/pg_collector_test.go index 509c5e297f..914944403c 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector_test.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector_test.go @@ -33,7 +33,7 @@ import ( . "github.com/onsi/gomega" ) -var _ = Describe("ensure timestamp metric it's set properly", func() { +var _ = Describe("test metrics parsing", func() { var exporter *Exporter BeforeEach(func() { @@ -97,12 +97,12 @@ var _ = Describe("ensure timestamp metric it's set properly", func() { } }) - It("It correctly parse the sync replicas", func() { + It("correctly parses the number of sync replicas when quorum-based", func() { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) rows := sqlmock.NewRows([]string{"synchronous_standby_names"}). 
- AddRow("ANY 2 ( \"cluster-example-2\",\"cluster-example-3\")") + AddRow(`ANY 2 ( "cluster-example-2","cluster-example-3")`) mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) exporter.collectFromPrimarySynchronousStandbysNumber(db) @@ -117,12 +117,58 @@ var _ = Describe("ensure timestamp metric it's set properly", func() { } }) - It("register -1 in case it can't parse the sync replicas string", func() { + It("correctly parses the number of sync replicas when preferential", func() { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) rows := sqlmock.NewRows([]string{"synchronous_standby_names"}). - AddRow("( \"cluster-example-2\",\"cluster-example-3\")") + AddRow(`FIRST 2 ( "cluster-example-2","cluster-example-3")`) + mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) + + exporter.collectFromPrimarySynchronousStandbysNumber(db) + + registry := prometheus.NewRegistry() + registry.MustRegister(exporter.Metrics.SyncReplicas) + metrics, _ := registry.Gather() + + for _, metric := range metrics { + m := metric.GetMetric() + Expect(m[0].GetGauge().GetValue()).To(BeEquivalentTo(2)) + } + }) + + It("should return an error when encountering unexpected results", func() { + By("not matching the synchronous standby names regex", func() { + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + // This row will generate only two strings in the array + rows := sqlmock.NewRows([]string{"synchronous_standby_names"}).AddRow("ANY q (xx)") + mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) + _, err = getRequestedSynchronousStandbysNumber(db) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not matching synchronous standby names regex: ANY q (xx)")) + }) + + By("not matching the number of sync replicas", func() { + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + // This row will generate only two strings in the array + rows := sqlmock.NewRows([]string{"synchronous_standby_names"}).AddRow("ANY 2 (xx, ") + mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) + _, err = getRequestedSynchronousStandbysNumber(db) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not matching synchronous standby names regex: ANY 2 (xx")) + }) + }) + + It("sets the number of sync replicas as -1 if it can't parse the sync replicas string", func() { + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + rows := sqlmock.NewRows([]string{"synchronous_standby_names"}). + AddRow(`( "cluster-example-2","cluster-example-3")`) mock.ExpectQuery(fmt.Sprintf("SHOW %s", postgresconf.SynchronousStandbyNames)).WillReturnRows(rows) exporter.collectFromPrimarySynchronousStandbysNumber(db) From e5eb26e9d8f69deb3a4a08a6c58b357c07a2673e Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 29 Oct 2024 15:02:25 +0100 Subject: [PATCH 114/836] fix(plugin): improve plugin errors (#5836) Some errors were hiding the real reason behind them. Most of them were exposed when the user executing the command didn't have permissions to access the requested resource. 
Now we also append the error from the API to show explicitly the error when trying to access the resources. Closes #5823 --------- Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Armando Ruocco Signed-off-by: Jaime Silvela Co-authored-by: Armando Ruocco Co-authored-by: Jaime Silvela --- internal/cmd/plugin/hibernate/on.go | 6 +++++- internal/cmd/plugin/logical/database.go | 2 +- internal/cmd/plugin/logical/externalcluster.go | 2 +- .../cmd/plugin/logical/subscription/syncsequences/cmd.go | 3 ++- internal/cmd/plugin/maintenance/maintenance.go | 2 +- internal/cmd/plugin/pgbench/cmd.go | 2 +- internal/cmd/plugin/promote/promote.go | 4 ++-- internal/cmd/plugin/psql/cmd.go | 2 +- internal/cmd/plugin/status/status.go | 3 ++- pkg/utils/fencing.go | 2 +- 10 files changed, 17 insertions(+), 11 deletions(-) diff --git a/internal/cmd/plugin/hibernate/on.go b/internal/cmd/plugin/hibernate/on.go index 65b36e9c88..f9b685b5c5 100644 --- a/internal/cmd/plugin/hibernate/on.go +++ b/internal/cmd/plugin/hibernate/on.go @@ -25,6 +25,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" @@ -213,8 +214,11 @@ func (on *onCommand) rollbackFenceClusterIfNeeded() { // waitInstancesToBeFenced waits for all instances to be shut down func (on *onCommand) waitInstancesToBeFencedStep() error { + isRetryable := func(err error) bool { + return !apierrors.IsForbidden(err) && !apierrors.IsUnauthorized(err) + } for _, instance := range on.managedInstances { - if err := retry.OnError(hibernationBackoff, resources.RetryAlways, func() error { + if err := retry.OnError(hibernationBackoff, isRetryable, func() error { running, err := pluginresources.IsInstanceRunning(on.ctx, instance) if err != nil { return fmt.Errorf("error checking instance status (%v): %w", instance.Name, err) diff --git a/internal/cmd/plugin/logical/database.go b/internal/cmd/plugin/logical/database.go index d9f388a2ba..c3b97d2d4c 100644 --- a/internal/cmd/plugin/logical/database.go +++ b/internal/cmd/plugin/logical/database.go @@ -40,7 +40,7 @@ func GetApplicationDatabaseName(ctx context.Context, clusterName string) (string &cluster, ) if err != nil { - return "", fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace) + return "", fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, plugin.Namespace, err) } return cluster.GetApplicationDatabaseName(), nil diff --git a/internal/cmd/plugin/logical/externalcluster.go b/internal/cmd/plugin/logical/externalcluster.go index 8a21726743..8f8bdcc54a 100644 --- a/internal/cmd/plugin/logical/externalcluster.go +++ b/internal/cmd/plugin/logical/externalcluster.go @@ -46,7 +46,7 @@ func GetConnectionString( &cluster, ) if err != nil { - return "", fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace) + return "", fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, plugin.Namespace, err) } externalCluster, ok := cluster.ExternalCluster(externalClusterName) diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go index a73fe0ff09..fbb6230794 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go @@ -57,7 +57,8 @@ func NewCmd() *cobra.Command { &cluster, ) if err != nil { - return 
fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace) + return fmt.Errorf("cluster %s not found in namespace %s: %w", + clusterName, plugin.Namespace, err) } if len(dbName) == 0 { diff --git a/internal/cmd/plugin/maintenance/maintenance.go b/internal/cmd/plugin/maintenance/maintenance.go index 7060d87375..414a43f3a2 100644 --- a/internal/cmd/plugin/maintenance/maintenance.go +++ b/internal/cmd/plugin/maintenance/maintenance.go @@ -83,7 +83,7 @@ func Maintenance(ctx context.Context, for _, item := range clusterList.Items { err := patchNodeMaintenanceWindow(ctx, item, setInProgressTo, reusePVC) if err != nil { - return fmt.Errorf("unable to set progress to cluster %v in namespace %v", item.Name, item.Namespace) + return fmt.Errorf("unable to set progress to cluster %v in namespace %v: %w", item.Name, item.Namespace, err) } } diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go index 9d10449d98..7f79ed4c4a 100644 --- a/internal/cmd/plugin/pgbench/cmd.go +++ b/internal/cmd/plugin/pgbench/cmd.go @@ -88,7 +88,7 @@ func validateCommandArgs(cmd *cobra.Command, args []string) error { } if cmd.ArgsLenAtDash() > 1 { - return fmt.Errorf("pgBenchCommands should be passed after -- delimiter") + return fmt.Errorf("pgBenchCommands should be passed after the -- delimiter") } return nil diff --git a/internal/cmd/plugin/promote/promote.go b/internal/cmd/plugin/promote/promote.go index 0e45c4a45b..daba43bf78 100644 --- a/internal/cmd/plugin/promote/promote.go +++ b/internal/cmd/plugin/promote/promote.go @@ -37,7 +37,7 @@ func Promote(ctx context.Context, clusterName string, serverName string) error { // Get the Cluster object err := plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, &cluster) if err != nil { - return fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace) + return fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, plugin.Namespace, err) } // If server name is equal to target primary, there is no need to promote @@ -51,7 +51,7 @@ func Promote(ctx context.Context, clusterName string, serverName string) error { var pod v1.Pod err = plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: serverName}, &pod) if err != nil { - return fmt.Errorf("new primary node %s not found in namespace %s", serverName, plugin.Namespace) + return fmt.Errorf("new primary node %s not found in namespace %s: %w", serverName, plugin.Namespace, err) } // The Pod exists, let's update status fields diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go index 85e4df4a96..8cae04a0ea 100644 --- a/internal/cmd/plugin/psql/cmd.go +++ b/internal/cmd/plugin/psql/cmd.go @@ -92,7 +92,7 @@ func validatePsqlArgs(cmd *cobra.Command, args []string) error { } if cmd.ArgsLenAtDash() > 1 { - return fmt.Errorf("psqlArgs should be passed after -- delimitator") + return fmt.Errorf("psqlArgs should be passed after the -- delimiter") } return nil diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 3ae465b96a..8b05a67d5c 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -114,7 +114,8 @@ func Status( // Get the Cluster object err := plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, &cluster) if err != nil { - return err + return fmt.Errorf("while trying to get cluster %s in namespace %s: %w", + clusterName, plugin.Namespace, err) } status 
:= extractPostgresqlStatus(ctx, cluster) diff --git a/pkg/utils/fencing.go b/pkg/utils/fencing.go index c7ec6c37aa..13e9aca7bd 100644 --- a/pkg/utils/fencing.go +++ b/pkg/utils/fencing.go @@ -192,7 +192,7 @@ func (fb *FencingMetadataExecutor) Execute(ctx context.Context, key types.Namesp if name != FenceAllInstances { var pod corev1.Pod if err := fb.cli.Get(ctx, client.ObjectKey{Namespace: key.Namespace, Name: name}, &pod); err != nil { - return fmt.Errorf("node %s not found in namespace %s", name, key.Namespace) + return fmt.Errorf("node %s not found in namespace %s: %w", name, key.Namespace, err) } } } From d4740fc7cb6489bb2f389b462fb9ed30a31e3e07 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 08:11:19 +0100 Subject: [PATCH 115/836] fix(deps): update module sigs.k8s.io/controller-runtime to v0.19.1 (main) (#5959) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3714fa4102..e5c3535123 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( k8s.io/cli-runtime v0.31.2 k8s.io/client-go v0.31.2 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 - sigs.k8s.io/controller-runtime v0.19.0 + sigs.k8s.io/controller-runtime v0.19.1 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 7b5f00c955..6c616a7b4e 100644 --- a/go.sum +++ b/go.sum @@ -294,8 +294,8 @@ k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUx k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= -sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk= +sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= From 5b80e215c13cf31d4d5b963695135e40179c5870 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:51:31 +0100 Subject: [PATCH 116/836] chore(deps): update dependency operator-framework/operator-registry to v1.48.0 (main) (#5978) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 50cba47855..1a4698e810 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ GORELEASER_VERSION ?= v2.3.2 SPELLCHECK_VERSION ?= 0.43.1 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 -OPM_VERSION ?= v1.47.0 +OPM_VERSION ?= v1.48.0 PREFLIGHT_VERSION ?= 1.10.2 OPENSHIFT_VERSIONS ?= v4.12-v4.17 ARCH ?= amd64 From 9def2d6f9a1aa321e834615e2da224e318c9d926 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 30 Oct 2024 13:48:22 +0100 Subject: [PATCH 117/836] docs: add permissions required by kubectl CNPG plugin (#5829) document every permission required by every command in the CNPG plugin to let users assign specific permissions for specific tasks. 
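As an example of how these scoped permissions are meant to be consumed (a sketch only; the user name is hypothetical), the restricted `cnpg-log` Role shown later in this patch can be granted with a standard RoleBinding:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cnpg-log-binding
  namespace: default         # namespace of the target clusters
subjects:
  - kind: User
    name: alice              # hypothetical user running the plugin
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: cnpg-log             # the restricted Role defined in this patch
  apiGroup: rbac.authorization.k8s.io
```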
Closes #5330 Signed-off-by: Gabriele Quaresima Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Jaime Silvela Signed-off-by: Francesco Canovai --- .wordlist-en-custom.txt | 5 ++ docs/mkdocs.yml | 4 +- docs/src/kubectl-plugin.md | 115 ++++++++++++++++++++++++++++++ internal/cmd/plugin/report/olm.go | 2 +- 4 files changed, 124 insertions(+), 2 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index e564fc34f0..df5953f2a3 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -525,6 +525,7 @@ authQuery authQuerySecret authn authz +autocompletion autoscaler autovacuum availableArchitectures @@ -610,6 +611,7 @@ clusterName clusterimagecatalogs clusterlist clusterrole +clusterserviceversions clusterspec clusterstatus cmd @@ -818,6 +820,7 @@ initdb initialise initializingPVC inplace +installplans instanceID instanceName instanceNames @@ -923,6 +926,7 @@ mountPath msg mspan multinamespace +mutatingwebhookconfigurations myAKSCluster myResourceGroup namespace @@ -1279,6 +1283,7 @@ usernamepassword usr utils validUntil +validatingwebhookconfigurations valueFrom viceversa virtualized diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index b3dd9c55f7..471250cf7d 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -2,7 +2,8 @@ site_name: CloudNativePG site_author: The CloudNativePG Contributors docs_dir: src -theme: readthedocs +theme: + name: readthedocs extra_css: - css/override.css @@ -11,6 +12,7 @@ markdown_extensions: - admonition - def_list - attr_list + - footnotes nav: - index.md diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index a9564d545d..35c66f2494 100755 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -1366,3 +1366,118 @@ The `cnpg` plugin can be easily integrated in [K9s](https://k9scli.io/), a popular terminal-based UI to interact with Kubernetes clusters. See [`k9s/plugins.yml`](samples/k9s/plugins.yml) for details. + +## Permissions required by the plugin + +The plugin requires a set of Kubernetes permissions that depends on the command +to execute. These permissions may affect resources and sub-resources like Pods, +PDBs, PVCs, and enable actions like `get`, `delete`, `patch`. The following +table contains the full details: + +| Command | Resource Permissions | +|:----------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| backup | clusters: get
backups: create | +| certificate | clusters: get
secrets: get,create | +| destroy | pods: get,delete
jobs: delete,list
PVCs: list,delete,update | +| fencing | clusters: get,patch
pods: get | +| fio | PVCs: create
configmaps: create
deployments: create | +| hibernate | clusters: get,patch,delete
pods: list,get,delete
pods/exec: create
jobs: list
PVCs: get,list,update,patch,delete | +| install | none | +| logs | clusters: get
pods: list
pods/log: get | +| maintenance | clusters: get,patch,list
| +| pgadmin4 | clusters: get
configmaps: create
deployments: create
services: create
secrets: create | +| pgbench | clusters: get
jobs: create
| +| promote | clusters: get
clusters/status: patch
pods: get | +| psql | pods: get,list
pods/exec: create | +| publication | clusters: get
pods: get,list
pods/exec: create | +| reload | clusters: get,patch | +| report cluster | clusters: get
pods: list
pods/log: get
jobs: list
events: list
PVCs: list | +| report operator | configmaps: get
deployments: get
events: list
pods: list
pods/log: get
secrets: get
services: get
mutatingwebhookconfigurations: list[^1]
validatingwebhookconfigurations: list[^1]
If OLM is present on the K8s cluster, also:
clusterserviceversions: list
installplans: list
subscriptions: list | +| restart | clusters: get,patch
pods: get,delete | +| status | clusters: get
pods: list
pods/exec: create
pods/proxy: create
PDBs: list | +| subscription | clusters: get
pods: get,list
pods/exec: create | +| version | none | + +[^1]: The permissions are cluster scope ClusterRole resources. + +///Footnotes Go Here/// + +Additionally, assigning the `list` permission on the `clusters` will enable +autocompletion for multiple commands. + +### Role examples + +It is possible to create roles with restricted permissions. +The following example creates a role that only has access to the cluster logs: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cnpg-log +rules: + - verbs: + - get + apiGroups: + - postgresql.cnpg.io + resources: + - clusters + - verbs: + - list + apiGroups: + - '' + resources: + - pods + - verbs: + - get + apiGroups: + - '' + resources: + - pods/log +``` + +The next example shows a role with the minimal permissions required to get +the cluster status using the plugin's `status` command: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cnpg-status +rules: + - verbs: + - get + apiGroups: + - postgresql.cnpg.io + resources: + - clusters + - verbs: + - list + apiGroups: + - '' + resources: + - pods + - verbs: + - create + apiGroups: + - '' + resources: + - pods/exec + - verbs: + - create + apiGroups: + - '' + resources: + - pods/proxy + - verbs: + - list + apiGroups: + - policy + resources: + - poddisruptionbudgets +``` + +!!! Important + Keeping the verbs restricted per `resources` and per `apiGroups` helps to + prevent inadvertently granting more than intended permissions. diff --git a/internal/cmd/plugin/report/olm.go b/internal/cmd/plugin/report/olm.go index cc20ff54ae..5e179540cc 100644 --- a/internal/cmd/plugin/report/olm.go +++ b/internal/cmd/plugin/report/olm.go @@ -46,7 +46,7 @@ func getOlmResourceList( resourceList, err := dynamicClient.Resource(gvr).Namespace(namespace). List(ctx, metav1.ListOptions{LabelSelector: getLabelOperatorsNamespace()}) if err != nil { - return nil, fmt.Errorf("could note get resource: %v, %v", gvr, err) + return nil, fmt.Errorf("could not list resource: %v, %v", gvr, err) } return resourceList, nil From f70e7a806b325ae67dd8975f4d69da6ab298e813 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:52:07 +0100 Subject: [PATCH 118/836] chore(config): migrate renovate config (#5986) The Renovate config in this repository needs migrating. Typically this is because one or more configuration options you are using have been renamed. --- .github/renovate.json5 | 901 +++++++++++++++++++++-------------------- 1 file changed, 466 insertions(+), 435 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 1c5fe0970b..00735afaea 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -1,439 +1,470 @@ { - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "extends": [ - "config:base" + $schema: 'https://docs.renovatebot.com/renovate-schema.json', + extends: [ + 'config:recommended', ], - "rebaseWhen": "never", -// The maximum number of PRs to be created in parallel - "prConcurrentLimit": 5, -// The branches renovate should target -// PLEASE UPDATE THIS WHEN RELEASING. 
- "baseBranches": ["main","release-1.22", "release-1.23", "release-1.24"], - "ignorePaths": ["docs/**", "releases/**", "contribute/**", "licenses/**", "pkg/versions/**"], - "postUpdateOptions": ["gomodTidy"], - "semanticCommits": "enabled", -// All PRs should have a label - "labels": ["automated", "do not backport", "no-issue"], - "regexManagers": [ - { - // We want a PR to bump Kustomize version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "KUSTOMIZE_VERSION \\?= (?.*?)\\n" - ], - "datasourceTemplate": "go", - "depNameTemplate": "sigs.k8s.io/kustomize/kustomize/v5", - }, { - // We want a PR to bump controller-gen version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "CONTROLLER_TOOLS_VERSION \\?= (?.*?)\\n" - ], - "datasourceTemplate": "go", - "depNameTemplate": "sigs.k8s.io/controller-tools", - }, { - // We want a PR to bump goreleaser version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "GORELEASER_VERSION \\?= (?.*?)\\n" - ], - "datasourceTemplate": "go", - "versioningTemplate": "loose", - "depNameTemplate": "github.com/goreleaser/goreleaser", - }, { - // We want a PR to bump the external-snapshotter version - "fileMatch": [ - "^.github/workflows/continuous-delivery.yml", - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "EXTERNAL_SNAPSHOTTER_VERSION: \"(?.*?)\"", - "EXTERNAL_SNAPSHOTTER_VERSION=(?.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/external-snapshotter", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the external-provisioner version - "fileMatch": [ - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "EXTERNAL_PROVISIONER_VERSION=(?.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/external-provisioner", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the external-resizer version - "fileMatch": [ - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "EXTERNAL_RESIZER_VERSION=(?.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/external-resizer", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the external-attacher version - "fileMatch": [ - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "EXTERNAL_ATTACHER_VERSION=(?.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/external-attacher", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the csi-driver-host-path version - "fileMatch": [ - "^hack/setup-cluster\.sh$", - ], - "matchStrings": [ - "CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?.*?)\\n", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "kubernetes-csi/csi-driver-host-path", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump the rook version - "fileMatch": [ - "^.github/workflows/continuous-delivery.yml", - ], - "matchStrings": [ - "ROOK_VERSION: \"(?.*?)\"", - ], - "datasourceTemplate": "github-releases", - "versioningTemplate": "loose", - "depNameTemplate": "rook/rook", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump kind version - "fileMatch": [ - 
"^.github/workflows/continuous-delivery.yml", - "^.github/workflows/continuous-integration.yml", - ], - "matchStrings": [ - "KIND_VERSION: \"(?.*?)\"", - ], - "datasourceTemplate": "github-tags", - "depNameTemplate": "kubernetes-sigs/kind", - }, { - // We want a PR to bump kind node version - "fileMatch": [ - "^hack/setup-cluster.sh$", - "^hack/e2e/run-e2e-kind.sh$", - ], - "matchStrings": [ - "KIND_NODE_DEFAULT_VERSION=(?.*?)\\n", - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - "depNameTemplate": "kindest/node", - }, { - // We want a PR to bump k3d node version - "fileMatch": [ - "^hack/setup-cluster.sh$", - "^hack/e2e/run-e2e-k3d.sh$", - ], - "matchStrings": [ - "K3D_NODE_DEFAULT_VERSION=(?.*?)\\n", - ], - "versioningTemplate": "regex:^v(?\\d+)(\\.(?\\d+))?(\\.(?\\d+))(\\+k3s?(?\\d+))?$", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)", - "datasourceTemplate": "github-releases", - "depNameTemplate": "k3s-io/k3s", - }, { - // We want a PR to bump spellcheck version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "SPELLCHECK_VERSION \\?= (?.*?)\\n" - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - "depNameTemplate": "jonasbn/github-action-spellcheck", - }, { - // We want a PR to bump woke version in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "WOKE_VERSION \\?= (?.*?)\\n" - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - "depNameTemplate": "getwoke/woke", - }, { - // We want a PR to bump operator-sdk in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "OPERATOR_SDK_VERSION \\?= (?.*?)\\n" - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "operator-framework/operator-sdk", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump operator package manager (opm) in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "OPM_VERSION \\?= (?.*?)\\n" - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "operator-framework/operator-registry", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - // We want a PR to bump redhat-preflight in the Makefile - "fileMatch": [ - "^Makefile$", - ], - "matchStrings": [ - "PREFLIGHT_VERSION \\?= (?.*?)\\n" - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "redhat-openshift-ecosystem/openshift-preflight", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?\\d+\\.\\d+\\.\\d+)" - }, { - "fileMatch": [ - "^config\\/olm-scorecard\\/patches\\/basic\\.config\\.yaml$", - "^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$", - ], - "matchStrings": [ - "image: quay.io/operator-framework/scorecard-test:(?.*?)\\n", - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - "depNameTemplate": "quay.io/operator-framework/scorecard-test", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - },{ - // We want a PR to bump Default Container Images versions. - "fileMatch": [ - "^pkg\\/versions\\/versions\\.go$", - "^pkg\\/specs\\/pgbouncer\\/deployments\\.go$" - ], - "matchStrings": [ - "DefaultImageName = \"(?.+?):(?.*?)\"\\n", - "DefaultPgbouncerImage = \"(?.+?):(?.*?)\"\\n", - ], - "datasourceTemplate": "docker", - "versioningTemplate": "loose", - }, { -// We want a PR to bump Go versions used through env variables in any GitHub -// Actions, taking it from the official GitHub repository. 
- "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], - "matchStrings": [ - "GOLANG_VERSION: \"(?.*?)\\.x\"", - ], - "datasourceTemplate": "golang-version", - "depNameTemplate": "golang", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?\\d+\\.\\d+)" - }, { -// We want a PR to bump golangci-lint versions used through env variables in -// any GitHub Actions or Makefile, taking it from the official GitHub -// repository tags. - "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], - "matchStrings": [ - "GOLANGCI_LINT_VERSION: \"v(?.*?)\"", - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "golangci/golangci-lint", - "versioningTemplate": "loose", - "extractVersionTemplate": "^v(?\\d+\\.\\d+\\.\\d+)" - }, { - "fileMatch": ["^.github/workflows/continuous-delivery.yml",], - "matchStrings": [ - "VELERO_VERSION: \"v(?.*?)\"", - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "vmware-tanzu/velero", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - }, { - "fileMatch": ["^.github/workflows/continuous-delivery.yml",], - "matchStrings": [ - "VELERO_AWS_PLUGIN_VERSION: \"v(?.*?)\"", - ], - "datasourceTemplate": "github-releases", - "depNameTemplate": "vmware-tanzu/velero-plugin-for-aws", - "versioningTemplate": "loose", - "extractVersionTemplate": "^(?v\\d+\\.\\d+\\.\\d+)" - } + rebaseWhen: 'never', + prConcurrentLimit: 5, + baseBranches: [ + 'main', + 'release-1.22', + 'release-1.23', + 'release-1.24', + ], + ignorePaths: [ + 'docs/**', + 'releases/**', + 'contribute/**', + 'licenses/**', + 'pkg/versions/**', + ], + postUpdateOptions: [ + 'gomodTidy', + ], + semanticCommits: 'enabled', + labels: [ + 'automated', + 'do not backport', + 'no-issue', + ], + customManagers: [ + { + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'KUSTOMIZE_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'go', + depNameTemplate: 'sigs.k8s.io/kustomize/kustomize/v5', + }, + { + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'CONTROLLER_TOOLS_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'go', + depNameTemplate: 'sigs.k8s.io/controller-tools', + }, + { + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'GORELEASER_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'go', + versioningTemplate: 'loose', + depNameTemplate: 'github.com/goreleaser/goreleaser', + }, + { + customType: 'regex', + fileMatch: [ + '^.github/workflows/continuous-delivery.yml', + '^hack/setup-cluster.sh$', + ], + matchStrings: [ + 'EXTERNAL_SNAPSHOTTER_VERSION: "(?.*?)"', + 'EXTERNAL_SNAPSHOTTER_VERSION=(?.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/external-snapshotter', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^hack/setup-cluster.sh$', + ], + matchStrings: [ + 'EXTERNAL_PROVISIONER_VERSION=(?.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/external-provisioner', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^hack/setup-cluster.sh$', + ], + matchStrings: [ + 'EXTERNAL_RESIZER_VERSION=(?.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/external-resizer', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + 
customType: 'regex', + fileMatch: [ + '^hack/setup-cluster.sh$', + ], + matchStrings: [ + 'EXTERNAL_ATTACHER_VERSION=(?.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/external-attacher', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^hack/setup-cluster.sh$', + ], + matchStrings: [ + 'CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?.*?)\\n', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'kubernetes-csi/csi-driver-host-path', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^.github/workflows/continuous-delivery.yml', + ], + matchStrings: [ + 'ROOK_VERSION: "(?.*?)"', + ], + datasourceTemplate: 'github-releases', + versioningTemplate: 'loose', + depNameTemplate: 'rook/rook', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^.github/workflows/continuous-delivery.yml', + '^.github/workflows/continuous-integration.yml', + ], + matchStrings: [ + 'KIND_VERSION: "(?.*?)"', + ], + datasourceTemplate: 'github-tags', + depNameTemplate: 'kubernetes-sigs/kind', + }, + { + customType: 'regex', + fileMatch: [ + '^hack/setup-cluster.sh$', + '^hack/e2e/run-e2e-kind.sh$', + ], + matchStrings: [ + 'KIND_NODE_DEFAULT_VERSION=(?.*?)\\n', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'loose', + depNameTemplate: 'kindest/node', + }, + { + customType: 'regex', + fileMatch: [ + '^hack/setup-cluster.sh$', + '^hack/e2e/run-e2e-k3d.sh$', + ], + matchStrings: [ + 'K3D_NODE_DEFAULT_VERSION=(?.*?)\\n', + ], + versioningTemplate: 'regex:^v(?\\d+)(\\.(?\\d+))?(\\.(?\\d+))(\\+k3s?(?\\d+))?$', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + datasourceTemplate: 'github-releases', + depNameTemplate: 'k3s-io/k3s', + }, + { + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'SPELLCHECK_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'loose', + depNameTemplate: 'jonasbn/github-action-spellcheck', + }, + { + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'WOKE_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'loose', + depNameTemplate: 'getwoke/woke', + }, + { + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'OPERATOR_SDK_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'github-releases', + depNameTemplate: 'operator-framework/operator-sdk', + versioningTemplate: 'loose', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'OPM_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'github-releases', + depNameTemplate: 'operator-framework/operator-registry', + versioningTemplate: 'loose', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'PREFLIGHT_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'github-releases', + depNameTemplate: 'redhat-openshift-ecosystem/openshift-preflight', + versioningTemplate: 'loose', + extractVersionTemplate: '^(?\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^config\\/olm-scorecard\\/patches\\/basic\\.config\\.yaml$', + '^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$', + ], + matchStrings: [ + 'image: 
quay.io/operator-framework/scorecard-test:(?.*?)\\n', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'loose', + depNameTemplate: 'quay.io/operator-framework/scorecard-test', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^pkg\\/versions\\/versions\\.go$', + '^pkg\\/specs\\/pgbouncer\\/deployments\\.go$', + ], + matchStrings: [ + 'DefaultImageName = "(?.+?):(?.*?)"\\n', + 'DefaultPgbouncerImage = "(?.+?):(?.*?)"\\n', + ], + datasourceTemplate: 'docker', + versioningTemplate: 'loose', + }, + { + customType: 'regex', + fileMatch: [ + '^\\.github\\/workflows\\/[^/]+\\.ya?ml$', + ], + matchStrings: [ + 'GOLANG_VERSION: "(?.*?)\\.x"', + ], + datasourceTemplate: 'golang-version', + depNameTemplate: 'golang', + versioningTemplate: 'loose', + extractVersionTemplate: '^(?\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^\\.github\\/workflows\\/[^/]+\\.ya?ml$', + ], + matchStrings: [ + 'GOLANGCI_LINT_VERSION: "v(?.*?)"', + ], + datasourceTemplate: 'github-releases', + depNameTemplate: 'golangci/golangci-lint', + versioningTemplate: 'loose', + extractVersionTemplate: '^v(?\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^.github/workflows/continuous-delivery.yml', + ], + matchStrings: [ + 'VELERO_VERSION: "v(?.*?)"', + ], + datasourceTemplate: 'github-releases', + depNameTemplate: 'vmware-tanzu/velero', + versioningTemplate: 'loose', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + { + customType: 'regex', + fileMatch: [ + '^.github/workflows/continuous-delivery.yml', + ], + matchStrings: [ + 'VELERO_AWS_PLUGIN_VERSION: "v(?.*?)"', + ], + datasourceTemplate: 'github-releases', + depNameTemplate: 'vmware-tanzu/velero-plugin-for-aws', + versioningTemplate: 'loose', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + }, + ], + packageRules: [ + { + matchDatasources: [ + 'docker', + ], + allowedVersions: '!/alpha/', + }, + { + matchDatasources: [ + 'go', + ], + matchDepNames: [ + 'k8s.io/client-go', + ], + allowedVersions: '<1.0', + }, + { + matchDatasources: [ + 'go', + ], + groupName: 'kubernetes patches', + matchUpdateTypes: [ + 'patch', + 'digest', + ], + matchPackageNames: [ + 'k8s.io{/,}**', + 'sigs.k8s.io{/,}**', + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + ], + matchDepNames: [ + '!sigs.k8s.io/kustomize/kustomize/v5', + '!sigs.k8s.io/controller-tools', + ], + }, + { + matchDatasources: [ + 'go', + ], + matchUpdateTypes: [ + 'major', + 'minor', + ], + matchPackageNames: [ + 'k8s.io{/,}**', + 'sigs.k8s.io{/,}**', + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + ], + }, + { + matchDatasources: [ + 'go', + ], + matchUpdateTypes: [ + 'major', + ], + matchPackageNames: [ + '*', + '!k8s.io{/,}**', + '!sigs.k8s.io{/,}**', + '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + ], + }, + { + matchDatasources: [ + 'go', + ], + matchUpdateTypes: [ + 'minor', + 'patch', + 'digest', + ], + groupName: 'all non-major go dependencies', + matchPackageNames: [ + '*', + '!k8s.io{/,}**', + '!sigs.k8s.io{/,}**', + '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + '!github.com/cloudnative-pg/{/,}**', + ], + }, + { + matchDepTypes: [ + 'action', + ], + matchUpdateTypes: [ + 'minor', + 'patch', + ], + groupName: 'all non-major github action', + pinDigests: false, + }, + { + matchDepTypes: [ + 'action', + ], + pinDigests: false, + }, + { + groupName: 'kubernetes CSI', + 
separateMajorMinor: false, + pinDigests: false, + matchPackageNames: [ + 'kubernetes-csi{/,}**', + 'rook{/,}**', + ], + }, + { + groupName: 'backup test tools', + separateMajorMinor: false, + pinDigests: false, + matchPackageNames: [ + 'vmware-tanzu{/,}**', + ], + }, + { + groupName: 'operator framework', + separateMajorMinor: false, + pinDigests: false, + matchPackageNames: [ + 'operator-framework{/,}**', + 'redhat-openshift-ecosystem{/,}**', + 'quay.io/operator-framework{/,}**', + ], + }, + { + groupName: 'spellcheck', + separateMajorMinor: false, + pinDigests: false, + matchPackageNames: [ + 'jonasbn/github-action-spellcheck{/,}**', + 'rojopolis/spellcheck-github-actions{/,}**', + ], + }, + { + groupName: 'cnpg', + matchPackageNames: [ + 'github.com/cloudnative-pg/', + ], + separateMajorMinor: false, + pinDigests: false, + }, ], - "packageRules": [ - { - "matchDatasources": [ - "docker" - ], - "allowedVersions": "!/alpha/", - }, - { -// We need to ignore k8s.io/client-go older versions as they switched to -// semantic version and old tags are still available in the repo. - "matchDatasources": [ - "go" - ], - "matchDepNames": [ - "k8s.io/client-go" - ], - "allowedVersions": "<1.0" - }, { -// We want a single PR for all the patches bumps of kubernetes related -// dependencies, as usually these are all strictly related. - "matchDatasources": [ - "go" - ], - "groupName": "kubernetes patches", - "matchUpdateTypes": [ - "patch", - "digest" - ], - "matchPackagePrefixes": [ - "k8s.io", - "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" - ], - "excludeDepNames": [ - "sigs.k8s.io/kustomize/kustomize/v5", - "sigs.k8s.io/controller-tools" - ] - }, { -// We want dedicated PRs for each minor and major bumps to kubernetes related -// dependencies. - "matchDatasources": [ - "go" - ], - "matchUpdateTypes": [ - "major", - "minor" - ], - "matchPackagePrefixes": [ - "k8s.io", - "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" - ] - }, { -// We want dedicated PRs for each bump to non-kubernetes Go dependencies. - "matchDatasources": [ - "go" - ], - "matchPackagePatterns": [ - "*" - ], - "excludePackagePrefixes": [ - "k8s.io", - "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring" - ], - "matchUpdateTypes": [ - "major", - ], - }, { -// We want a single PR for all minor and patch bumps to non-kubernetes Go -// dependencies. 
- "matchDatasources": [ - "go" - ], - "matchPackagePatterns": [ - "*" - ], - "excludePackagePrefixes": [ - "k8s.io", - "sigs.k8s.io", - "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring", - "github.com/cloudnative-pg/" - ], - "matchUpdateTypes": [ - "minor", - "patch", - "digest" - ], - "groupName": "all non-major go dependencies" - }, { -// We want a single PR for all minor and patch bumps of GitHub Actions - "matchDepTypes": [ - "action" - ], - "matchUpdateTypes": [ - "minor", - "patch" - ], - "groupName": "all non-major github action", - "pinDigests": false - },{ -// We want dedicated PRs for each major bump to GitHub Actions - "matchDepTypes": [ - "action" - ], - "pinDigests": false - },{ -// PR group for Kubernetes CSI - "groupName": "kubernetes CSI", - "matchPackagePrefixes": [ - "kubernetes-csi", - "rook", - ], - "separateMajorMinor": "false", - "pinDigests": false - }, { -// PR group for backup test tools - "groupName": "backup test tools", - "matchPackagePrefixes": [ - "vmware-tanzu", - ], - "separateMajorMinor": "false", - "pinDigests": false - }, - { -// PR group for all the operator framework related things - "groupName": "operator framework", - "matchPackagePrefixes": [ - "operator-framework", - "redhat-openshift-ecosystem", - "quay.io/operator-framework", - ], - "separateMajorMinor": "false", - "pinDigests": false - }, - { -// PR group for spellcheck - "groupName": "spellcheck", - "matchPackagePrefixes": [ - "jonasbn/github-action-spellcheck", - "rojopolis/spellcheck-github-actions", - ], - "separateMajorMinor": "false", - "pinDigests": false, - }, - { -// PR group for CNPG dependencies - "groupName": "cnpg", - "matchPackageNames": [ - "github.com/cloudnative-pg/", - ], - "separateMajorMinor": "false", - "pinDigests": false, - }, - ] } From 73df771ebbf4909e144750989e0e58c4a0aa0bdb Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 18:39:17 +0100 Subject: [PATCH 119/836] chore(deps): update spellcheck to v0.44.0 (main) (#5982) --- .github/workflows/spellcheck.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 6dc47bd04b..07b87f3bdf 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -28,4 +28,4 @@ jobs: uses: actions/checkout@v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@0.43.1 + uses: rojopolis/spellcheck-github-actions@0.44.0 diff --git a/Makefile b/Makefile index 1a4698e810..07e55cdde8 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.4 GORELEASER_VERSION ?= v2.3.2 -SPELLCHECK_VERSION ?= 0.43.1 +SPELLCHECK_VERSION ?= 0.44.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 OPM_VERSION ?= v1.48.0 From 83feb2cddb31d666c64e3883ed554b430f7b1400 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Thu, 31 Oct 2024 10:22:21 +0100 Subject: [PATCH 120/836] fix(docs): properly indent a block in logging page (#5970) Signed-off-by: Gabriele Bartolini --- docs/src/logging.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/logging.md b/docs/src/logging.md index 8543446929..69433688b4 100644 --- a/docs/src/logging.md +++ b/docs/src/logging.md @@ -37,9 +37,9 @@ specification using the `logLevel` option. 
Available log levels are: `error`, `warning`, `info` (default), `debug`, and `trace`. !!! Important - Currently, the log level can only be set at the time the instance starts. - Changes to the log level in the cluster specification after the cluster has - started will only apply to new pods, not existing ones. + Currently, the log level can only be set at the time the instance starts. + Changes to the log level in the cluster specification after the cluster has + started will only apply to new pods, not existing ones. ## Operator Logs From 97f1e9ef76d14033d3bf513dadbecda344707978 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 31 Oct 2024 17:33:07 +0100 Subject: [PATCH 121/836] fix(webhook): validate the number of `synchronous` replicas (#5985) Closes #5964 ## Release Notes Fix an issue where the user could specify one or fewer instances along with the `synchronous` stanza. When specified together, these parameters would generate an incorrect replica configuration. Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini --- api/v1/cluster_webhook.go | 23 +++++++++++ api/v1/cluster_webhook_test.go | 70 ++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go index 60add4a31a..ee2a572867 100644 --- a/api/v1/cluster_webhook.go +++ b/api/v1/cluster_webhook.go @@ -356,6 +356,7 @@ func (r *Cluster) Validate() (allErrs field.ErrorList) { r.validateBackupConfiguration, r.validateRetentionPolicy, r.validateConfiguration, + r.validateSynchronousReplicaConfiguration, r.validateLDAP, r.validateReplicationSlots, r.validateEnv, @@ -1074,6 +1075,28 @@ func (r *Cluster) validateResources() field.ErrorList { return result } +func (r *Cluster) validateSynchronousReplicaConfiguration() field.ErrorList { + if r.Spec.PostgresConfiguration.Synchronous == nil { + return nil + } + + var result field.ErrorList + + if r.Spec.PostgresConfiguration.Synchronous.Number >= (r.Spec.Instances + + len(r.Spec.PostgresConfiguration.Synchronous.StandbyNamesPost) + + len(r.Spec.PostgresConfiguration.Synchronous.StandbyNamesPre)) { + err := field.Invalid( + field.NewPath("spec", "postgresql", "synchronous"), + r.Spec.PostgresConfiguration.Synchronous, + "Invalid synchronous configuration: the number of synchronous replicas must be less than the "+ + "total number of instances and the provided standby names.", + ) + result = append(result, err) + } + + return result +} + // validateConfiguration determines whether a PostgreSQL configuration is valid func (r *Cluster) validateConfiguration() field.ErrorList { var result field.ErrorList diff --git a/api/v1/cluster_webhook_test.go b/api/v1/cluster_webhook_test.go index f6bea56fca..cdbf5585c2 100644 --- a/api/v1/cluster_webhook_test.go +++ b/api/v1/cluster_webhook_test.go @@ -1940,6 +1940,76 @@ var _ = Describe("Number of synchronous replicas", func() { }) }) +var _ = Describe("validateSynchronousReplicaConfiguration", func() { + It("returns no error when synchronous configuration is nil", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + PostgresConfiguration: PostgresConfiguration{ + Synchronous: nil, + }, + }, + } + errors := cluster.validateSynchronousReplicaConfiguration() + Expect(errors).To(BeEmpty()) + }) + + It("returns an error when number of synchronous replicas is greater than the total instances and standbys", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + 
Instances: 2, + PostgresConfiguration: PostgresConfiguration{ + Synchronous: &SynchronousReplicaConfiguration{ + Number: 5, + StandbyNamesPost: []string{"standby1"}, + StandbyNamesPre: []string{"standby2"}, + }, + }, + }, + } + errors := cluster.validateSynchronousReplicaConfiguration() + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Detail).To( + Equal("Invalid synchronous configuration: the number of synchronous replicas must be less than the " + + "total number of instances and the provided standby names.")) + }) + + It("returns an error when number of synchronous replicas is equal to total instances and standbys", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Instances: 3, + PostgresConfiguration: PostgresConfiguration{ + Synchronous: &SynchronousReplicaConfiguration{ + Number: 5, + StandbyNamesPost: []string{"standby1"}, + StandbyNamesPre: []string{"standby2"}, + }, + }, + }, + } + errors := cluster.validateSynchronousReplicaConfiguration() + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Detail).To(Equal("Invalid synchronous configuration: the number of synchronous replicas " + + "must be less than the total number of instances and the provided standby names.")) + }) + + It("returns no error when number of synchronous replicas is less than total instances and standbys", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Instances: 2, + PostgresConfiguration: PostgresConfiguration{ + Synchronous: &SynchronousReplicaConfiguration{ + Number: 2, + StandbyNamesPost: []string{"standby1"}, + StandbyNamesPre: []string{"standby2"}, + }, + }, + }, + } + errors := cluster.validateSynchronousReplicaConfiguration() + Expect(errors).To(BeEmpty()) + }) +}) + var _ = Describe("storage configuration validation", func() { It("complains if the size is being reduced", func() { clusterOld := Cluster{ From be6134579f8720ad389e81680b0f2b12a21e3d6f Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Thu, 31 Oct 2024 17:43:30 +0100 Subject: [PATCH 122/836] feat: make data durability configurable for sync replication (#5878) This patch introduces an option to relax strict data durability requirements within the new synchronous replication interface. By setting `.spec.postgresql.synchronous.dataDurability` to `preferred`, the operator limits the required number of synchronous standby replicas to match the number of available instances, prioritizing self-healing over strict data durability. The `preferred` method requires both `standbyNamesPre` and `standbyNamesPost` to be empty. Closes: #5793 ## Release Notes Introduce the `dataDurability` option in the `.spec.postgresql.synchronous` stanza to control the trade-off between data safety and availability for synchronous replication. The option can be set to `required` or `preferred`, with the default being `required` if not specified. 
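For illustration only (this manifest is not part of the patch; the cluster
name and storage size are made up), a minimal `Cluster` opting into the
relaxed behavior could look like:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  # hypothetical cluster name, used only for this example
  name: cluster-preferred
spec:
  instances: 3
  storage:
    size: 1Gi
  postgresql:
    synchronous:
      method: any
      number: 1
      # 'preferred' relaxes strict durability: the operator shrinks the set
      # of required synchronous standbys when replicas become unavailable,
      # instead of blocking writes ('required' is the default)
      dataDurability: preferred
```

Note that, as enforced by the validation rule in this patch, `preferred` can
only be used when `standbyNamesPre` and `standbyNamesPost` are left empty.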
Signed-off-by: Leonardo Cecchi Signed-off-by: Francesco Canovai Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Bartolini Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Francesco Canovai Co-authored-by: Jaime Silvela Co-authored-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 2 + api/v1/cluster_types.go | 27 ++ .../bases/postgresql.cnpg.io_clusters.yaml | 21 + docs/src/cloudnative-pg.v1.md | 29 ++ docs/src/replication.md | 375 ++++++++++++------ pkg/postgres/replication/explicit.go | 56 ++- pkg/postgres/replication/explicit_test.go | 321 ++++++++++----- pkg/postgres/replication/legacy.go | 4 +- .../sync_replicas/preferred.yaml.template | 24 ++ tests/e2e/syncreplicas_test.go | 55 ++- 10 files changed, 694 insertions(+), 220 deletions(-) create mode 100644 tests/e2e/fixtures/sync_replicas/preferred.yaml.template diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index df5953f2a3..45c9728531 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -118,6 +118,7 @@ DISA DNS DataBackupConfiguration DataBase +DataDurabilityLevel DataSource DatabaseReclaimPolicy DatabaseRoleRef @@ -674,6 +675,7 @@ cyber dT danglingPVC dataChecksums +dataDurability databackupconfiguration databaseReclaimPolicy datacenter diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 2fc34c4926..70b53221c4 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1173,10 +1173,24 @@ const ( SynchronousReplicaConfigurationMethodAny = SynchronousReplicaConfigurationMethod("any") ) +// DataDurabilityLevel specifies how strictly to enforce synchronous replication +// when cluster instances are unavailable. Options are `required` or `preferred`. +type DataDurabilityLevel string + +const ( + // DataDurabilityLevelRequired means that data durability is strictly enforced + DataDurabilityLevelRequired DataDurabilityLevel = "required" + + // DataDurabilityLevelPreferred means that data durability is enforced + // only when healthy replicas are available + DataDurabilityLevelPreferred DataDurabilityLevel = "preferred" +) + // SynchronousReplicaConfiguration contains the configuration of the // PostgreSQL synchronous replication feature. // Important: at this moment, also `.spec.minSyncReplicas` and `.spec.maxSyncReplicas` // need to be considered. +// +kubebuilder:validation:XValidation:rule="self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) || self.standbyNamesPost.size()==0))",message="dataDurability set to 'preferred' requires empty 'standbyNamesPre' and empty 'standbyNamesPost'" type SynchronousReplicaConfiguration struct { // Method to select synchronous replication standbys from the listed // servers, accepting 'any' (quorum-based synchronous replication) or @@ -1206,6 +1220,19 @@ type SynchronousReplicaConfiguration struct { // only useful for priority-based synchronous replication). // +optional StandbyNamesPost []string `json:"standbyNamesPost,omitempty"` + + // If set to "required", data durability is strictly enforced. Write operations + // with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + // block if there are insufficient healthy replicas, ensuring data persistence. 
+ // If set to "preferred", data durability is maintained when healthy replicas + // are available, but the required number of instances will adjust dynamically + // if replicas become unavailable. This setting relaxes strict durability enforcement + // to allow for operational continuity. This setting is only applicable if both + // `standbyNamesPre` and `standbyNamesPost` are unset (empty). + // +kubebuilder:validation:Enum=required;preferred + // +kubebuilder:default:=required + // +optional + DataDurability DataDurabilityLevel `json:"dataDurability,omitempty"` } // PostgresConfiguration defines the PostgreSQL configuration diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index d2f810b24e..ab30fed50d 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -4058,6 +4058,21 @@ spec: description: Configuration of the PostgreSQL synchronous replication feature properties: + dataDurability: + default: required + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string maxStandbyNamesFromCluster: description: |- Specifies the maximum number of local cluster pods that can be @@ -4102,6 +4117,12 @@ spec: - method - number type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) type: object primaryUpdateMethod: default: restart diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 45d99cc4f4..e922148fbc 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2199,6 +2199,21 @@ Map keys are the config map names, map values are the versions

[Generated API reference tables from docs/src/cloudnative-pg.v1.md; the HTML
table markup of this hunk series was lost in extraction. The recoverable
changes are: the "[Required]" marker is dropped from the `shm`,
`temporaryData`, `updateStrategy`, `additional`, `name`, `capabilities`,
`operatorCapabilities`, `walCapabilities`, `backupCapabilities`, `status`,
`self`, `primary`, `enabled`, `promotionToken`, and `minApplyDelay` field
rows; a new "PluginConfigurationList" section is added (an alias of
`[]github.com/cloudnative-pg/cloudnative-pg/api/v1.PluginConfiguration`,
appearing in ClusterSpec, described as "PluginConfigurationList represent a
set of plugin with their configuration parameters"); and the undocumented
`synchronizeReplicasCache` row ("No description provided.") is removed from
the replication slots table.]
diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md index bc67b13757..1f7b60e0c7 100644 --- a/docs/src/wal_archiving.md +++ b/docs/src/wal_archiving.md @@ -13,8 +13,8 @@ the ["Backup on object stores" section](backup_barmanobjectstore.md) to set up the WAL archive. !!! Info - Please refer to [`BarmanObjectStoreConfiguration`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BarmanObjectStoreConfiguration) - in the API reference for a full list of options. + Please refer to [`BarmanObjectStoreConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration) + in the barman-cloud API for a full list of options. If required, you can choose to compress WAL files as soon as they are uploaded and/or encrypt them: From 47d6fa77eb7cadd699c1e6093041b7452ce018bd Mon Sep 17 00:00:00 2001 From: Anton Lindholm Date: Fri, 25 Oct 2024 09:07:41 +0300 Subject: [PATCH 110/836] docs: add Walkbase to `ADOPTERS.md` (#5951) Signed-off-by: Anton Lindholm --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 38662aa076..c2038c9480 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -55,3 +55,4 @@ This list is sorted in chronological order, based on the submission date. | [GEICO Tech](https://www.geico.com/tech/) | @ardentperf | 2024-09-24 | GEICO Tech is building the most consumer-centric insurance offerings in America. CloudNativePG is used to provide a highly available Kubernetes-based Postgres service, both in the cloud and on-premises. | | [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. | | [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. | +| [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. | From 8e47a6a85319cf087021ae014123a13916db0688 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Fri, 25 Oct 2024 14:32:17 +0200 Subject: [PATCH 111/836] test(database): increase coverage of corner cases (#5749) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch extends test coverage to manage edge cases in database and cluster lifecycle events, including scenarios where resources become unavailable and ensuring finalizer removal post-drop. Includes a standard success case to verify expected functionality. 
Closes #5681 Signed-off-by: Jaime Silvela Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- .../controller/database_controller_test.go | 192 +++++++++++++++++- 1 file changed, 190 insertions(+), 2 deletions(-) diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index 3b252db76e..ecf9d4cd60 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -23,6 +23,7 @@ import ( "github.com/DATA-DOG/go-sqlmock" "github.com/jackc/pgx/v5" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -78,8 +79,9 @@ var _ = Describe("Managed Database status", func() { ClusterRef: corev1.LocalObjectReference{ Name: cluster.Name, }, - Name: "db-one", - Owner: "app", + ReclaimPolicy: apiv1.DatabaseReclaimDelete, + Name: "db-one", + Owner: "app", }, } @@ -112,6 +114,42 @@ var _ = Describe("Managed Database status", func() { Expect(dbMock.ExpectationsWereMet()).To(Succeed()) }) + It("adds finalizer and sets status ready on success", func(ctx SpecContext) { + Expect(database.Finalizers).To(BeEmpty()) + + // Mocking DetectDB + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(`SELECT count(*) + FROM pg_database + WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) + + // Mocking CreateDB + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + // Reconcile and get the updated object + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: database.Namespace, + Name: database.Name, + }}) + Expect(err).ToNot(HaveOccurred()) + + var updatedDatabase apiv1.Database + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: database.Namespace, + Name: database.Name, + }, &updatedDatabase) + Expect(err).ToNot(HaveOccurred()) + + Expect(updatedDatabase.Status.Ready).Should(BeTrue()) + Expect(updatedDatabase.Status.Error).Should(BeEmpty()) + Expect(updatedDatabase.Finalizers).NotTo(BeEmpty()) + }) + It("database object inherits error after patching", func(ctx SpecContext) { // Mocking DetectDB expectedValue := sqlmock.NewRows([]string{""}).AddRow("1") @@ -127,6 +165,7 @@ var _ = Describe("Managed Database status", func() { ) dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError) + // Reconcile and get the updated object _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ Namespace: database.Namespace, Name: database.Name, @@ -144,6 +183,155 @@ var _ = Describe("Managed Database status", func() { Expect(updatedDatabase.Status.Error).Should(ContainSubstring(expectedError.Error())) }) + It("on deletion it removes finalizers and drops DB", func(ctx SpecContext) { + Expect(database.Finalizers).To(BeEmpty()) + + // Mocking DetectDB + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(`SELECT count(*) + FROM pg_database + WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) + + // Mocking CreateDB + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s", + 
pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + // Reconcile and get the updated object + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: database.Namespace, + Name: database.Name, + }}) + Expect(err).ToNot(HaveOccurred()) + + var updatedDatabase apiv1.Database + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: database.Namespace, + Name: database.Name, + }, &updatedDatabase) + Expect(err).ToNot(HaveOccurred()) + + Expect(updatedDatabase.Status.Ready).Should(BeTrue()) + Expect(updatedDatabase.Status.Error).Should(BeEmpty()) + Expect(updatedDatabase.Finalizers).NotTo(BeEmpty()) + + // the next 3 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. + // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + currentDatabase := updatedDatabase.DeepCopy() + updatedDatabase.Status.ObservedGeneration = 2 + Expect(fakeClient.Status().Patch(ctx, &updatedDatabase, client.MergeFrom(currentDatabase))).To(Succeed()) + + // We now look at the behavior when we delete the Database object + Expect(fakeClient.Delete(ctx, database)).To(Succeed()) + + // the Database object is Deleted, but its finalizer prevents removal from + // the API + var fadingDatabase apiv1.Database + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: database.Namespace, + Name: database.Name, + }, &fadingDatabase) + Expect(err).ToNot(HaveOccurred()) + Expect(fadingDatabase.DeletionTimestamp).NotTo(BeZero()) + Expect(fadingDatabase.Finalizers).NotTo(BeEmpty()) + + // Mocking Drop Database + expectedDrop := fmt.Sprintf("DROP DATABASE IF EXISTS %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1)) + + // Reconcile and get the updated object + _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: database.Namespace, + Name: database.Name, + }}) + Expect(err).ToNot(HaveOccurred()) + + var finalDatabase apiv1.Database + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: database.Namespace, + Name: database.Name, + }, &finalDatabase) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("skips reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) { + // since the fakeClient has the `cluster-example` cluster, let's reference + // another cluster `cluster-other` that is not found by the fakeClient + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-other-1"). 
+ WithClusterName("cluster-other") + + f := fakeInstanceData{ + Instance: pgInstance, + db: db, + } + + r = &DatabaseReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: &f, + } + + // patching the Database object to reference the newly created Cluster + originalDatabase := database.DeepCopy() + database.Spec.ClusterRef.Name = "cluster-other" + Expect(fakeClient.Patch(ctx, database, client.MergeFrom(originalDatabase))).To(Succeed()) + + // Reconcile and get the updated object + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: database.Namespace, + Name: database.Name, + }}) + Expect(err).ToNot(HaveOccurred()) + + var updatedDatabase apiv1.Database + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: database.Namespace, + Name: database.Name, + }, &updatedDatabase) + Expect(err).ToNot(HaveOccurred()) + + Expect(updatedDatabase.Status.Ready).Should(BeFalse()) + Expect(updatedDatabase.Status.Error).Should(BeEmpty()) + }) + + It("skips reconciliation if database object isn't found (deleted database)", func(ctx SpecContext) { + // Initialize a new Database but without creating it in the K8S Cluster + otherDatabase := &apiv1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: "db-other", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.DatabaseSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "db-one", + Owner: "app", + }, + } + + // Reconcile the database that hasn't been created in the K8S Cluster + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: otherDatabase.Namespace, + Name: otherDatabase.Name, + }}) + + // Expect the reconciler to exit silently since the object doesn't exist + Expect(err).ToNot(HaveOccurred()) + Expect(result).Should(BeZero()) // nothing to do, since the DB is being deleted + }) + It("properly marks the status on a succeeded reconciliation", func(ctx SpecContext) { _, err := r.succeededReconciliation(ctx, database) Expect(err).ToNot(HaveOccurred()) From 573ca984aa942cc9830eb3516849875af2a44a43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Fri, 25 Oct 2024 15:21:51 +0200 Subject: [PATCH 112/836] feat(database): introduce `ensure` field for controlling database presence (#5750) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Introduces a new `ensure` field in the Database specification to specify whether a Database should be `present` or `absent`, with the default set to `present`. * Changes the name of `.status.ready` to `.status.applied`. This field will be cleared if we are reconciling on a replica cluster. * Renames `.status.error` to `.status.message`, since not every reconciliation output is necessarily an error message. 
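As a sketch (not part of the patch; the object, cluster, and role names are
illustrative, borrowed from the tests), a `Database` resource using the new
field to drop a database could look like:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  # illustrative object name
  name: db-one
spec:
  cluster:
    # illustrative cluster reference
    name: cluster-example
  name: db-one
  owner: app
  # 'absent' asks the operator to drop the database if it exists;
  # omitting the field defaults to 'present'
  ensure: absent
```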
Closes: #5722 Signed-off-by: Niccolò Fei Signed-off-by: Gabriele Quaresima Signed-off-by: Jaime Silvela Signed-off-by: Marco Nenciarini Co-authored-by: Gabriele Quaresima Co-authored-by: Jaime Silvela Co-authored-by: Marco Nenciarini --- api/v1/database_types.go | 18 ++-- api/v1/zz_generated.deepcopy.go | 7 +- .../bases/postgresql.cnpg.io_databases.yaml | 28 ++++-- docs/src/cloudnative-pg.v1.md | 17 +++- docs/src/declarative_database_management.md | 2 +- .../controller/database_controller.go | 40 ++++++-- .../controller/database_controller_sql.go | 10 +- .../controller/database_controller_test.go | 93 ++++++++++++++++--- .../declarative_database_management_test.go | 5 +- 9 files changed, 168 insertions(+), 52 deletions(-) diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 1089a97957..12786a38f6 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -40,6 +40,12 @@ type DatabaseSpec struct { // The corresponding cluster ClusterRef corev1.LocalObjectReference `json:"cluster"` + // Ensure the PostgreSQL database is `present` or `absent` - defaults to "present" + // +kubebuilder:default:="present" + // +kubebuilder:validation:Enum=present;absent + // +optional + Ensure EnsureOption `json:"ensure,omitempty"` + // The name inside PostgreSQL // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" // +kubebuilder:validation:XValidation:rule="self != 'postgres'",message="the name postgres is reserved" @@ -131,13 +137,13 @@ type DatabaseStatus struct { // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` - // Ready is true if the database was reconciled correctly + // Applied is true if the database was reconciled correctly // +optional - Ready bool `json:"ready,omitempty"` + Applied *bool `json:"applied,omitempty"` - // Error is the reconciliation error message + // Message is the reconciliation output message // +optional - Error string `json:"error,omitempty"` + Message string `json:"message,omitempty"` } // +genclient @@ -147,8 +153,8 @@ type DatabaseStatus struct { // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name" // +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name" -// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready" -// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error",description="Latest error message" +// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message" // Database is the Schema for the databases API type Database struct { diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index b9b4cf7690..b2ca687cbb 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1028,7 +1028,7 @@ func (in *Database) DeepCopyInto(out *Database) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database. @@ -1130,6 +1130,11 @@ func (in *DatabaseSpec) DeepCopy() *DatabaseSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) { *out = *in + if in.Applied != nil { + in, out := &in.Applied, &out.Applied + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus. diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index ea1fbfdba5..820ee383f8 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -24,12 +24,12 @@ spec: - jsonPath: .spec.name name: PG Name type: string - - jsonPath: .status.ready - name: Ready + - jsonPath: .status.applied + name: Applied type: boolean - - description: Latest error message - jsonPath: .status.error - name: Error + - description: Latest reconciliation message + jsonPath: .status.message + name: Message type: string name: v1 schema: @@ -105,6 +105,14 @@ spec: x-kubernetes-validations: - message: encoding is immutable rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present" + enum: + - present + - absent + type: string icu_locale: description: The ICU_LOCALE (cannot be changed) type: string @@ -180,8 +188,11 @@ spec: date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: - error: - description: Error is the reconciliation error message + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message type: string observedGeneration: description: |- @@ -189,9 +200,6 @@ spec: desired state that was synchronized format: int64 type: integer - ready: - description: Ready is true if the database was reconciled correctly - type: boolean type: object required: - metadata diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 7af6805e2e..45d99cc4f4 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2296,6 +2296,13 @@ PostgreSQL cluster from an existing storage

[Generated API reference tables from docs/src/cloudnative-pg.v1.md; the HTML
table markup of this hunk series was lost in extraction. The recoverable
changes are: in the Database tables, a new `ensure` field of type
`EnsureOption` ("Ensure the PostgreSQL database is `present` or `absent` -
defaults to 'present'") is added after the cluster reference, the `ready`
status field (bool, "Ready is true if the database was reconciled correctly")
is replaced by `applied` ("Applied is true if the database was reconciled
correctly"), and the `error` status field ("Error is the reconciliation error
message") is replaced by `message` (string, "Message is the reconciliation
output message"); a new "DataDurabilityLevel" section is added (an alias of
`string`, appearing in SynchronousReplicaConfiguration, described as
"DataDurabilityLevel specifies how strictly to enforce synchronous replication
when cluster instances are unavailable. Options are `required` or
`preferred`."); and a new `dataDurability` field of type `DataDurabilityLevel`
is documented in SynchronousReplicaConfiguration, with the same description as
in the `cluster_types.go` comment above.]
+ + diff --git a/docs/src/replication.md b/docs/src/replication.md index b0e61e380e..fac1db21c6 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -1,46 +1,46 @@ # Replication Physical replication is one of the strengths of PostgreSQL and one of the -reasons why some of the largest organizations in the world have chosen -it for the management of their data in business continuity contexts. -Primarily used to achieve high availability, physical replication also allows -scale-out of read-only workloads and offloading of some work from the primary. +reasons why some of the largest organizations in the world have chosen it for +the management of their data in business continuity contexts. Primarily used to +achieve high availability, physical replication also allows scale-out of +read-only workloads and offloading of some work from the primary. !!! Important This section is about replication within the same `Cluster` resource managed in the same Kubernetes cluster. For information about how to replicate with another Postgres `Cluster` resource, even across different - Kubernetes clusters, please refer to the ["Replica clusters"](replica_cluster.md) - section. + Kubernetes clusters, please refer to the + ["Replica clusters"](replica_cluster.md) section. ## Application-level replication -Having contributed throughout the years to the replication feature in PostgreSQL, -we have decided to build high availability in CloudNativePG on top of -the native physical replication technology, and integrate it -directly in the Kubernetes API. +Having contributed throughout the years to the replication feature in +PostgreSQL, we have decided to build high availability in CloudNativePG on top +of the native physical replication technology, and integrate it directly in the +Kubernetes API. -In Kubernetes terms, this is referred to as **application-level replication**, in -contrast with *storage-level replication*. +In Kubernetes terms, this is referred to as **application-level replication**, +in contrast with *storage-level replication*. ## A very mature technology PostgreSQL has a very robust and mature native framework for replicating data -from the primary instance to one or more replicas, built around the -concept of transactional changes continuously stored in the WAL (Write Ahead Log). +from the primary instance to one or more replicas, built around the concept of +transactional changes continuously stored in the WAL (Write Ahead Log). Started as the evolution of crash recovery and point in time recovery technologies, physical replication was first introduced in PostgreSQL 8.2 -(2006) through WAL shipping from the primary to a warm standby in -continuous recovery. +(2006) through WAL shipping from the primary to a warm standby in continuous +recovery. PostgreSQL 9.0 (2010) introduced WAL streaming and read-only replicas through *hot standby*. In 2011, PostgreSQL 9.1 brought synchronous replication at the -transaction level, supporting RPO=0 clusters. Cascading replication was added -in PostgreSQL 9.2 (2012). The foundations for logical replication were -established in PostgreSQL 9.4 (2014), and version 10 (2017) introduced native -support for the publisher/subscriber pattern to replicate data from an origin -to a destination. The table below summarizes these milestones. +transaction level, supporting RPO=0 clusters. Cascading replication was added in +PostgreSQL 9.2 (2012). 
The foundations for logical replication were established +in PostgreSQL 9.4 (2014), and version 10 (2017) introduced native support for +the publisher/subscriber pattern to replicate data from an origin to a +destination. The table below summarizes these milestones. | Version | Year | Feature | |:-------:|:----:|-----------------------------------------------------------------------| @@ -56,9 +56,9 @@ versions. ## Streaming replication support -At the moment, CloudNativePG natively and transparently manages -physical streaming replicas within a cluster in a declarative way, based on -the number of provided `instances` in the `spec`: +At the moment, CloudNativePG natively and transparently manages physical +streaming replicas within a cluster in a declarative way, based on the number of +provided `instances` in the `spec`: ``` replicas = instances - 1 (where instances > 0) @@ -69,13 +69,13 @@ called `streaming_replica` as follows: ```sql CREATE USER streaming_replica WITH REPLICATION; - -- NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOBYPASSRLS +-- NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOBYPASSRLS ``` Out of the box, the operator automatically sets up streaming replication within the cluster over an encrypted channel and enforces TLS client certificate -authentication for the `streaming_replica` user - as highlighted by the following -excerpt taken from `pg_hba.conf`: +authentication for the `streaming_replica` user - as highlighted by the +following excerpt taken from `pg_hba.conf`: ``` # Require client certificate authentication for the streaming_replica user @@ -101,9 +101,9 @@ the primary's storage, even after a failover or switchover. ### Continuous backup integration In case continuous backup is configured in the cluster, CloudNativePG -transparently configures replicas to take advantage of `restore_command` when -in continuous recovery. As a result, PostgreSQL can use the WAL archive -as a fallback option whenever pulling WALs via streaming replication fails. +transparently configures replicas to take advantage of `restore_command` when in +continuous recovery. As a result, PostgreSQL can use the WAL archive as a +fallback option whenever pulling WALs via streaming replication fails. ## Synchronous Replication @@ -111,16 +111,19 @@ CloudNativePG supports both [quorum-based and priority-based synchronous replication for PostgreSQL](https://www.postgresql.org/docs/current/warm-standby.html#SYNCHRONOUS-REPLICATION). !!! Warning - Please be aware that synchronous replication will halt your write - operations if the required number of standby nodes to replicate WAL data for - transaction commits is unavailable. In such cases, write operations for your - applications will hang. This behavior differs from the previous implementation - in CloudNativePG but aligns with the expectations of a PostgreSQL DBA for this - capability. - -While direct configuration of the `synchronous_standby_names` option is -prohibited, CloudNativePG allows you to customize its content and extend -synchronous replication beyond the `Cluster` resource through the + By default, synchronous replication pauses write operations if the required + number of standby nodes for WAL replication during transaction commits is + unavailable. This behavior prioritizes data durability and aligns with + PostgreSQL DBA best practices. However, if self-healing is a higher priority + than strict data durability in your setup, this setting can be adjusted. 
For + details on managing this behavior, refer to the [Data Durability and Synchronous Replication](#data-durability-and-synchronous-replication) + section. + +Direct configuration of the `synchronous_standby_names` option is not +permitted. However, CloudNativePG automatically populates this option with the +names of local pods, while also allowing customization to extend synchronous +replication beyond the `Cluster` resource. +This can be achieved through the [`.spec.postgresql.synchronous` stanza](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-SynchronousReplicaConfiguration). Synchronous replication is disabled by default (the `synchronous` stanza is not @@ -132,17 +135,39 @@ defined). When defined, two options are mandatory: ### Quorum-based Synchronous Replication -PostgreSQL's quorum-based synchronous replication makes transaction commits -wait until their WAL records are replicated to at least a certain number of -standbys. To use this method, set `method` to `any`. +In PostgreSQL, quorum-based synchronous replication ensures that transaction +commits wait until their WAL records are replicated to a specified number of +standbys. To enable this, set the `method` to `any`. -#### Migrating from the Deprecated Synchronous Replication Implementation +This replication method is the most common setup for a CloudNativePG cluster. -This section provides instructions on migrating your existing quorum-based -synchronous replication, defined using the deprecated form, to the new and more -robust capability in CloudNativePG. +#### Example -Suppose you have the following manifest: +The example below, based on a typical `cluster-example` configuration with +three instances, sets up quorum-based synchronous replication with at least one +instance: + +```yaml +postgresql: + synchronous: + method: any + number: 1 +``` + +With this configuration, CloudNativePG automatically sets the content of +`synchronous_standby_names` as follows: + +```console +ANY 1 (cluster-example-2, cluster-example-3, cluster-example-1) +``` + +#### Migrating from Deprecated Synchronous Replication Implementation + +This section outlines how to migrate from the deprecated quorum-based +synchronous replication format to the newer, more robust implementation in +CloudNativePG. + +Given the following manifest: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -151,7 +176,6 @@ metadata: name: angus spec: instances: 3 - minSyncReplicas: 1 maxSyncReplicas: 1 @@ -159,7 +183,7 @@ spec: size: 1G ``` -You can convert it to the new quorum-based format as follows: +You can update it to the new format as follows: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -176,14 +200,11 @@ spec: synchronous: method: any number: 1 + dataDurability: required ``` -!!! Important - The primary difference with the new capability is that PostgreSQL will - always prioritize data durability over high availability. Consequently, if no - replica is available, write operations on the primary will be blocked. However, - this behavior is consistent with the expectations of a PostgreSQL DBA for this - capability. +To prioritize self-healing over strict data durability, set `dataDurability` +to `preferred` instead. ### Priority-based Synchronous Replication @@ -222,31 +243,16 @@ the PostgreSQL cluster. You can customize the content of !!! Warning You are responsible for ensuring the correct names in `standbyNamesPre` and - `standbyNamesPost`. CloudNativePG expects that you manage any standby with an - `application_name` listed here, ensuring their high availability. 
Incorrect - entries can jeopardize your PostgreSQL database uptime. + `standbyNamesPost`. CloudNativePG expects that you manage any standby with + an `application_name` listed here, ensuring their high availability. + Incorrect entries can jeopardize your PostgreSQL database uptime. -### Examples +#### Examples Here are some examples, all based on a `cluster-example` with three instances: If you set: -```yaml -postgresql: - synchronous: - method: any - number: 1 -``` - -The content of `synchronous_standby_names` will be: - -```console -ANY 1 (cluster-example-2, cluster-example-3) -``` - -If you set: - ```yaml postgresql: synchronous: @@ -302,14 +308,150 @@ The `synchronous_standby_names` option will look like: FIRST 2 (angus, cluster-example-2, malcolm) ``` +### Data Durability and Synchronous Replication + +The `dataDurability` option in the `.spec.postgresql.synchronous` stanza +controls the trade-off between data safety and availability for synchronous +replication. It can be set to `required` or `preferred`, with the default being +`required` if not specified. + +!!! Important + `preferred` can only be used when `standbyNamesPre` and `standbyNamesPost` + are unset. + +#### Required Data Durability + +When `dataDurability` is set to `required`, PostgreSQL only considers +transactions committed once WAL (Write-Ahead Log) records have been replicated +to the specified number of synchronous standbys. This setting prioritizes data +safety over availability, meaning write operations will pause if the required +number of synchronous standbys is unavailable. This ensures zero data loss +(RPO=0) but may reduce database availability during network disruptions or +standby failures. + +Synchronous standbys are selected in this priority order: + +1. Healthy instances +2. Unhealthy instances +3. Primary + +The list is then truncated based on `maxStandbyNamesFromCluster` if this value +is set, prioritizing healthy instances and ensuring `synchronous_standby_names` +is populated. + +##### Example + +Consider the following example: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: foo +spec: + instances: 3 + postgresql: + synchronous: + method: any + number: 1 + dataDurability: required +``` + +1. Initial state. The content of `synchronous_standby_names` is: + + ``` + ANY 1 ("foo-2","foo-3","foo-1") + ``` + +2. `foo-2` becomes unavailable. It gets pushed back in priority: + + ``` + ANY 1 ("foo-3","foo-2","foo-1") + ``` + +3. `foo-3` also becomes unavailable. The list contains no healthy standbys: + + ``` + ANY 1 ("foo-2","foo-3","foo-1") + ``` + + At this point no write operations will be allowed until at least one of the + standbys is available again. + +4. When the standbys are available again, `synchronous_standby_names` will + be back to the initial state. + +#### Preferred Data Durability + +When `dataDurability` is set to `preferred`, the required number of synchronous +instances adjusts based on the number of available standbys. PostgreSQL will +attempt to replicate WAL records to the designated number of synchronous +standbys, but write operations will continue even if fewer than the requested +number of standbys are available. + +This setting balances data safety with availability, enabling applications to +continue writing during temporary standby unavailability—hence, it’s also known +as *self-healing mode*. + +!!! Warning + This mode may result in data loss if all standbys become unavailable. 
+
+With `preferred` data durability, **only healthy replicas** are included in
+`synchronous_standby_names`.
+
+##### Example
+
+Consider the following example. For demonstration, we’ll use a cluster named
+`bar` with 5 instances and 2 synchronous standbys:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: bar
+spec:
+  instances: 5
+  postgresql:
+    synchronous:
+      method: any
+      number: 2
+      dataDurability: preferred
+```
+
+1. Initial state. The content of `synchronous_standby_names` is:
+
+   ```
+   ANY 2 ("bar-2","bar-3", "bar-4", "bar-5")
+   ```
+
+2. `bar-2` and `bar-3` become unavailable. They are removed from the list:
+
+   ```
+   ANY 2 ("bar-4", "bar-5")
+   ```
+
+3. `bar-4` also becomes unavailable. It gets removed from the list. Since the
+   number of available standbys is less than the requested number, the requested
+   amount gets reduced:
+
+   ```
+   ANY 1 ("bar-5")
+   ```
+
+4. `bar-5` also becomes unavailable. `synchronous_standby_names` becomes empty,
+   disabling synchronous replication completely. Write operations will continue,
+   but with the risk of potential data loss in case of a primary failure.
+5. When the replicas are back, `synchronous_standby_names` will be back to
+   the initial state.
+
 ## Synchronous Replication (Deprecated)
 
 !!! Warning
     Prior to CloudNativePG 1.24, only the quorum-based synchronous replication
-    implementation was supported. Although this method is now deprecated, it will
-    not be removed anytime soon.
-    The new method prioritizes data durability over self-healing and offers
-    more robust features, including priority-based synchronous replication and full
+    implementation was supported. Although this method is now deprecated, it
+    will not be removed anytime soon.
+    The new method prioritizes data durability over self-healing and offers more
+    robust features, including priority-based synchronous replication and full
     control over the `synchronous_standby_names` option. It is recommended to
     gradually migrate to the new configuration method for synchronous
     replication, as explained in the previous paragraph.
@@ -380,12 +522,13 @@ Postgres pod are.
   legacy implementation of synchronous replication (see
   ["Synchronous Replication (Deprecated)"](replication.md#synchronous-replication-deprecated)).
 
-As an example use-case for this feature: in a cluster with a single sync replica,
-we would be able to ensure the sync replica will be in a different availability
-zone from the primary instance, usually identified by the `topology.kubernetes.io/zone`
+As an example use-case for this feature: in a cluster with a single sync
+replica, we would be able to ensure the sync replica will be in a different
+availability zone from the primary instance, usually identified by
+the `topology.kubernetes.io/zone`
 [label on a node](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone).
-This would increase the robustness of the cluster in case of an outage in a single
-availability zone, especially in terms of recovery point objective (RPO).
+This would increase the robustness of the cluster in case of an outage in a
+single availability zone, especially in terms of recovery point objective (RPO).
 
 The idea of anti-affinity is to ensure that sync replicas that participate in
 the quorum are chosen from pods running on nodes that have different values for
@@ -400,8 +543,8 @@ the replicas are eligible for synchronous replication.
The example below shows how this can be done through the `syncReplicaElectionConstraint` section within `.spec.postgresql`. -`nodeLabelsAntiAffinity` allows you to specify those node labels that need to -be evaluated to make sure that synchronous replication will be dynamically +`nodeLabelsAntiAffinity` allows you to specify those node labels that need to be +evaluated to make sure that synchronous replication will be dynamically configured by the operator between the current primary and the replicas which are located on nodes having a value of the availability zone label different from that of the node where the primary is: @@ -426,22 +569,24 @@ as storage, CPU, or memory. [Replication slots](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION-SLOTS) are a native PostgreSQL feature introduced in 9.4 that provides an automated way to ensure that the primary does not remove WAL segments until all the attached -streaming replication clients have received them, and that the primary -does not remove rows which could cause a recovery conflict even when the -standby is (temporarily) disconnected. +streaming replication clients have received them, and that the primary does not +remove rows which could cause a recovery conflict even when the standby is ( +temporarily) disconnected. A replication slot exists solely on the instance that created it, and PostgreSQL -does not replicate it on the standby servers. As a result, after a failover -or a switchover, the new primary does not contain the replication slot from -the old primary. This can create problems for the streaming replication clients -that were connected to the old primary and have lost their slot. +does not replicate it on the standby servers. As a result, after a failover or a +switchover, the new primary does not contain the replication slot from the old +primary. This can create problems for the streaming replication clients that +were connected to the old primary and have lost their slot. CloudNativePG provides a turn-key solution to synchronize the content of physical replication slots from the primary to each standby, addressing two use cases: - the replication slots automatically created for the High Availability of the - Postgres cluster (see ["Replication slots for High Availability" below](#replication-slots-for-high-availability) for details) + Postgres cluster ( + see ["Replication slots for High Availability" below](#replication-slots-for-high-availability) + for details) - [user-defined replication slots](#user-defined-replication-slots) created on the primary @@ -449,22 +594,22 @@ cases: CloudNativePG fills this gap by introducing the concept of cluster-managed replication slots, starting with high availability clusters. This feature -automatically manages physical replication slots for each hot standby replica -in the High Availability cluster, both in the primary and the standby. +automatically manages physical replication slots for each hot standby replica in +the High Availability cluster, both in the primary and the standby. In CloudNativePG, we use the terms: - **Primary HA slot**: a physical replication slot whose lifecycle is entirely - managed by the current primary of the cluster and whose purpose is to map to - a specific standby in streaming replication. Such a slot lives on the primary + managed by the current primary of the cluster and whose purpose is to map to a + specific standby in streaming replication. Such a slot lives on the primary only. 
-- **Standby HA slot**: a physical replication slot for a standby whose - lifecycle is entirely managed by another standby in the cluster, based on the - content of the `pg_replication_slots` view in the primary, and updated at regular +- **Standby HA slot**: a physical replication slot for a standby whose lifecycle + is entirely managed by another standby in the cluster, based on the content of + the `pg_replication_slots` view in the primary, and updated at regular intervals using `pg_replication_slot_advance()`. -This feature is enabled by default and can be disabled via configuration. -For details, please refer to the +This feature is enabled by default and can be disabled via configuration. For +details, please refer to the ["replicationSlots" section in the API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ReplicationSlotsConfiguration). Here follows a brief description of the main options: @@ -472,13 +617,13 @@ Here follows a brief description of the main options: : if `true`, the feature is enabled (`true` is the default) `.spec.replicationSlots.highAvailability.slotPrefix` -: the prefix that identifies replication slots managed by the operator - for this feature (default: `_cnpg_`) +: the prefix that identifies replication slots managed by the operator for this +feature (default: `_cnpg_`) `.spec.replicationSlots.updateInterval` : how often the standby synchronizes the position of the local copy of the - replication slots with the position on the current primary, expressed in - seconds (default: 30) +replication slots with the position on the current primary, expressed in +seconds (default: 30) Although it is not recommended, if you desire a different behavior, you can customize the above options. @@ -580,18 +725,18 @@ spec: ### Capping the WAL size retained for replication slots -When replication slots is enabled, you might end up running out of disk -space due to PostgreSQL trying to retain WAL files requested by a replication -slot. This might happen due to a standby that is (temporarily?) down, or -lagging, or simply an orphan replication slot. +When replication slots is enabled, you might end up running out of disk space +due to PostgreSQL trying to retain WAL files requested by a replication slot. +This might happen due to a standby that is (temporarily?) down, or lagging, or +simply an orphan replication slot. Starting with PostgreSQL 13, you can take advantage of the [`max_slot_wal_keep_size`](https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-MAX-SLOT-WAL-KEEP-SIZE) configuration option controlling the maximum size of WAL files that replication -slots are allowed to retain in the `pg_wal` directory at checkpoint time. -By default, in PostgreSQL `max_slot_wal_keep_size` is set to `-1`, meaning that -replication slots may retain an unlimited amount of WAL files. -As a result, our recommendation is to explicitly set `max_slot_wal_keep_size` +slots are allowed to retain in the `pg_wal` directory at checkpoint time. By +default, in PostgreSQL `max_slot_wal_keep_size` is set to `-1`, meaning that +replication slots may retain an unlimited amount of WAL files. As a result, our +recommendation is to explicitly set `max_slot_wal_keep_size` when replication slots support is enabled. 
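As a companion to the recommendation above (the `ini` configuration example follows below), here is a minimal Go sketch that observes slot-induced WAL retention from the primary. It assumes the `github.com/lib/pq` driver (already among this repository's dependencies) and a placeholder connection string; `wal_status` and `safe_wal_size` are the PostgreSQL 13+ columns of `pg_replication_slots` that report how close each slot is to the `max_slot_wal_keep_size` limit:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // driver import; the DSN below is a placeholder
)

func main() {
	// Hypothetical DSN: point it at the current primary.
	db, err := sql.Open("postgres", "host=localhost user=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// wal_status and safe_wal_size are available from PostgreSQL 13,
	// the same version that introduced max_slot_wal_keep_size.
	rows, err := db.Query(
		`SELECT slot_name, active, wal_status, COALESCE(safe_wal_size, -1)
		   FROM pg_replication_slots`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name, status string
		var active bool
		var safeWALSize int64
		if err := rows.Scan(&name, &active, &status, &safeWALSize); err != nil {
			log.Fatal(err)
		}
		// A slot whose wal_status is "unreserved" or "lost" is at (or past)
		// the retention limit, and its standby needs attention.
		fmt.Printf("%s active=%v wal_status=%s safe_wal_size=%d\n",
			name, active, status, safeWALSize)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```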
For example: ```ini diff --git a/pkg/postgres/replication/explicit.go b/pkg/postgres/replication/explicit.go index dfeb7a2857..834b7d2cb6 100644 --- a/pkg/postgres/replication/explicit.go +++ b/pkg/postgres/replication/explicit.go @@ -31,19 +31,37 @@ import ( const placeholderInstanceNameSuffix = "-placeholder" func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) string { + switch cluster.Spec.PostgresConfiguration.Synchronous.DataDurability { + case apiv1.DataDurabilityLevelPreferred: + return explicitSynchronousStandbyNamesDataDurabilityPreferred(cluster) + + default: + return explicitSynchronousStandbyNamesDataDurabilityRequired(cluster) + } +} + +func explicitSynchronousStandbyNamesDataDurabilityRequired(cluster *apiv1.Cluster) string { config := cluster.Spec.PostgresConfiguration.Synchronous // Create the list of pod names clusterInstancesList := getSortedInstanceNames(cluster) + + // Cap the number of standby names using the configuration on the cluster if config.MaxStandbyNamesFromCluster != nil && len(clusterInstancesList) > *config.MaxStandbyNamesFromCluster { clusterInstancesList = clusterInstancesList[:*config.MaxStandbyNamesFromCluster] } // Add prefix and suffix - instancesList := config.StandbyNamesPre + instancesList := make([]string, 0, + len(clusterInstancesList)+len(config.StandbyNamesPre)+len(config.StandbyNamesPost)) + instancesList = append(instancesList, config.StandbyNamesPre...) instancesList = append(instancesList, clusterInstancesList...) instancesList = append(instancesList, config.StandbyNamesPost...) + // An empty instances list would generate a PostgreSQL syntax error + // because configuring synchronous replication with an empty replica + // list is not allowed. + // Adding this as a safeguard, but this should never get into a postgres configuration. if len(instancesList) == 0 { instancesList = []string{ cluster.Name + placeholderInstanceNameSuffix, @@ -63,6 +81,42 @@ func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) string { strings.Join(escapedReplicas, ",")) } +func explicitSynchronousStandbyNamesDataDurabilityPreferred(cluster *apiv1.Cluster) string { + config := cluster.Spec.PostgresConfiguration.Synchronous + + // Create the list of healthy replicas + instancesList := getSortedNonPrimaryHealthyInstanceNames(cluster) + + // Cap the number of standby names using the configuration on the cluster + if config.MaxStandbyNamesFromCluster != nil && len(instancesList) > *config.MaxStandbyNamesFromCluster { + instancesList = instancesList[:*config.MaxStandbyNamesFromCluster] + } + + // Escape the pod list + escapedReplicas := make([]string, len(instancesList)) + for idx, name := range instancesList { + escapedReplicas[idx] = escapePostgresConfLiteral(name) + } + + // If data durability is not enforced, we cap the number of synchronous + // replicas to be required to the number of available replicas. + syncReplicaNumber := config.Number + if syncReplicaNumber > len(instancesList) { + syncReplicaNumber = len(instancesList) + } + + // An empty instances list is not allowed in synchronous_standby_names + if len(instancesList) == 0 { + return "" + } + + return fmt.Sprintf( + "%s %v (%v)", + config.Method.ToPostgreSQLConfigurationKeyword(), + syncReplicaNumber, + strings.Join(escapedReplicas, ",")) +} + // getSortedInstanceNames gets a list of all the known PostgreSQL instances in an // order that would be meaningful to be used by `synchronous_standby_names`.
// diff --git a/pkg/postgres/replication/explicit_test.go b/pkg/postgres/replication/explicit_test.go index fa474b531f..4870f945dd 100644 --- a/pkg/postgres/replication/explicit_test.go +++ b/pkg/postgres/replication/explicit_test.go @@ -26,112 +26,231 @@ import ( ) var _ = Describe("synchronous replica configuration with the new API", func() { - It("creates configuration with the ANY clause", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodAny, - Number: 2, - MaxStandbyNamesFromCluster: nil, - StandbyNamesPre: []string{}, - StandbyNamesPost: []string{}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[apiv1.PodStatus][]string{ - apiv1.PodHealthy: {"one", "two", "three"}, - }, - } - - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("ANY 2 (\"three\",\"two\",\"one\")")) - }) + When("data durability is required", func() { + It("creates configuration with the ANY clause", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodAny, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } - It("creates configuration with the FIRST clause", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: nil, - StandbyNamesPre: []string{}, - StandbyNamesPost: []string{}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[apiv1.PodStatus][]string{ - apiv1.PodHealthy: {"one", "two", "three"}, - }, - } - - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\",\"one\")")) - }) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("ANY 2 (\"three\",\"two\",\"one\")")) + }) - It("considers the maximum number of standby names", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: ptr.To(1), - StandbyNamesPre: []string{}, - StandbyNamesPost: []string{}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[apiv1.PodStatus][]string{ - apiv1.PodHealthy: {"one", "two", "three"}, - }, - } - - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\")")) - }) + It("creates configuration with the FIRST clause", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } - It("prepends the prefix and append the suffix", func() { - cluster 
:= createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: ptr.To(1), - StandbyNamesPre: []string{"prefix", "here"}, - StandbyNamesPost: []string{"suffix", "there"}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[apiv1.PodStatus][]string{ - apiv1.PodHealthy: {"one", "two", "three"}, - }, - } - - Expect(explicitSynchronousStandbyNames(cluster)).To( - Equal("FIRST 2 (\"prefix\",\"here\",\"three\",\"suffix\",\"there\")")) - }) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\",\"one\")")) + }) + + It("considers the maximum number of standby names", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\")")) + }) + + It("prepends the prefix and append the suffix", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + StandbyNamesPre: []string{"prefix", "here"}, + StandbyNamesPost: []string{"suffix", "there"}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To( + Equal("FIRST 2 (\"prefix\",\"here\",\"three\",\"suffix\",\"there\")")) + }) + + It("enforce synchronous replication even if there are no healthy replicas", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + } + cluster.Status = apiv1.ClusterStatus{} + + Expect(explicitSynchronousStandbyNames(cluster)).To( + Equal("FIRST 2 (\"example-placeholder\")")) + }) - It("returns an empty value when no instance is available", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: ptr.To(1), - } - cluster.Status = apiv1.ClusterStatus{} - - Expect(explicitSynchronousStandbyNames(cluster)).To( - Equal("FIRST 2 (\"example-placeholder\")")) + It("includes pods that do not report the status", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: 
{"one", "three"}, + }, + InstanceNames: []string{"one", "two", "three"}, + } + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\",\"one\")")) + }) }) - It("includes pods that do not report the status", func() { - cluster := createFakeCluster("example") - cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ - Method: apiv1.SynchronousReplicaConfigurationMethodFirst, - Number: 2, - MaxStandbyNamesFromCluster: nil, - StandbyNamesPre: []string{}, - StandbyNamesPost: []string{}, - } - cluster.Status = apiv1.ClusterStatus{ - CurrentPrimary: "one", - InstancesStatus: map[apiv1.PodStatus][]string{ - apiv1.PodHealthy: {"one", "three"}, - }, - InstanceNames: []string{"one", "two", "three"}, - } - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\",\"one\")")) + When("Data durability is preferred", func() { + It("creates configuration with the ANY clause", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodAny, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + // Important: the name of the primary is not included in the list + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("ANY 2 (\"three\",\"two\")")) + }) + + It("creates configuration with the FIRST clause", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + // Important: the name of the primary is not included in the list + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\")")) + }) + + It("considers the maximum number of standby names", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "a-primary", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"a-primary", "two", "three"}, + }, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 1 (\"three\")")) + }) + + It("ignores the prefix and the suffix", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + StandbyNamesPre: []string{"prefix", "here"}, + StandbyNamesPost: []string{"suffix", "there"}, + } + 
cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "two", "three"}, + }, + } + + Expect(explicitSynchronousStandbyNames(cluster)).To( + Equal("FIRST 2 (\"three\",\"two\")")) + }) + + It("disables synchronous replication when no instance is available", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: ptr.To(1), + } + cluster.Status = apiv1.ClusterStatus{} + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("")) + }) + + It("does not include pods that do not report the status", func() { + cluster := createFakeCluster("example") + cluster.Spec.PostgresConfiguration.Synchronous = &apiv1.SynchronousReplicaConfiguration{ + DataDurability: apiv1.DataDurabilityLevelPreferred, + Method: apiv1.SynchronousReplicaConfigurationMethodFirst, + Number: 2, + MaxStandbyNamesFromCluster: nil, + StandbyNamesPre: []string{}, + StandbyNamesPost: []string{}, + } + cluster.Status = apiv1.ClusterStatus{ + CurrentPrimary: "one", + InstancesStatus: map[apiv1.PodStatus][]string{ + apiv1.PodHealthy: {"one", "three"}, + }, + InstanceNames: []string{"one", "two", "three"}, + } + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 1 (\"three\")")) + }) }) }) diff --git a/pkg/postgres/replication/legacy.go b/pkg/postgres/replication/legacy.go index 81c61d47e9..569930732b 100644 --- a/pkg/postgres/replication/legacy.go +++ b/pkg/postgres/replication/legacy.go @@ -95,7 +95,7 @@ func getSyncReplicasData(cluster *apiv1.Cluster) (syncReplicas int, electableSyn // getElectableSyncReplicas computes the names of the instances that can be elected to sync replicas func getElectableSyncReplicas(cluster *apiv1.Cluster) []string { - nonPrimaryInstances := getSortedNonPrimaryInstanceNames(cluster) + nonPrimaryInstances := getSortedNonPrimaryHealthyInstanceNames(cluster) topology := cluster.Status.Topology // We need to include every replica inside the list of possible synchronous standbys if we have no constraints @@ -145,7 +145,7 @@ func getElectableSyncReplicas(cluster *apiv1.Cluster) []string { return electableReplicas } -func getSortedNonPrimaryInstanceNames(cluster *apiv1.Cluster) []string { +func getSortedNonPrimaryHealthyInstanceNames(cluster *apiv1.Cluster) []string { var nonPrimaryInstances []string for _, instance := range cluster.Status.InstancesStatus[apiv1.PodHealthy] { if cluster.Status.CurrentPrimary != instance { diff --git a/tests/e2e/fixtures/sync_replicas/preferred.yaml.template b/tests/e2e/fixtures/sync_replicas/preferred.yaml.template new file mode 100644 index 0000000000..86aee12481 --- /dev/null +++ b/tests/e2e/fixtures/sync_replicas/preferred.yaml.template @@ -0,0 +1,24 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-sync-replica +spec: + instances: 3 + + postgresql: + synchronous: + method: any + number: 2 + dataDurability: preferred + parameters: + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1G diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go index e0384579a9..1b9f47c94b 100644 
--- a/tests/e2e/syncreplicas_test.go +++ b/tests/e2e/syncreplicas_test.go @@ -20,10 +20,12 @@ import ( "fmt" "strconv" "strings" + "time" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" @@ -110,7 +112,8 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }, RetryTimeout, 5).Should(BeNil()) // Scale the cluster down to 2 pods - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace, clusterName)) + _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace, + clusterName)) Expect(err).ToNot(HaveOccurred()) timeout := 120 // Wait for pod 3 to be completely terminated @@ -228,5 +231,55 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { compareSynchronousStandbyNames(namespace, clusterName, "\"postSyncReplica\")") }) }) + + Context("data durability is preferred", func() { + It("will decrease the number of sync replicas to the number of available replicas", func() { + const ( + namespacePrefix = "sync-replicas-preferred" + sampleFile = fixturesDir + "/sync_replicas/preferred.yaml.template" + ) + clusterName, err := env.GetResourceNameFromYAML(sampleFile) + Expect(err).ToNot(HaveOccurred()) + + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, sampleFile, env) + + By("verifying we have 2 quorum-based replicas", func() { + getSyncReplicationCount(namespace, clusterName, "quorum", 2) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 2") + }) + + By("fencing a replica and verifying we have only 1 quorum-based replica", func() { + Expect(utils.FencingOn(env, fmt.Sprintf("%v-3", clusterName), + namespace, clusterName, utils.UsingAnnotation)).Should(Succeed()) + getSyncReplicationCount(namespace, clusterName, "quorum", 1) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") + }) + By("fencing the second replica and verifying we unset synchronous_standby_names", func() { + Expect(utils.FencingOn(env, fmt.Sprintf("%v-2", clusterName), + namespace, clusterName, utils.UsingAnnotation)).Should(Succeed()) + Eventually(func() string { + commandTimeout := time.Second * 10 + primary, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + stdout, _, err := env.ExecCommand(env.Ctx, *primary, specs.PostgresContainerName, + &commandTimeout, + "psql", "-U", "postgres", "-tAc", "show synchronous_standby_names") + Expect(err).ToNot(HaveOccurred()) + return strings.Trim(stdout, "\n") + }, 160).Should(BeEmpty()) + }) + By("unfencing the replicas and verifying we have 2 quorum-based replicas", func() { + Expect(utils.FencingOff(env, fmt.Sprintf("%v-3", clusterName), + namespace, clusterName, utils.UsingAnnotation)).Should(Succeed()) + Expect(utils.FencingOff(env, fmt.Sprintf("%v-2", clusterName), + namespace, clusterName, utils.UsingAnnotation)).Should(Succeed()) + getSyncReplicationCount(namespace, clusterName, "quorum", 2) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 2") + }) + }) + }) }) }) From 31807f33dc5cffbe4e951e65887e8cc967c90ab7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:03:11 +0100 Subject: [PATCH 123/836] chore(deps): update module
sigs.k8s.io/controller-tools to v0.16.5 (main) (#5977) --- Makefile | 2 +- config/crd/bases/postgresql.cnpg.io_backups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusters.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_databases.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_poolers.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 07e55cdde8..04793b62d3 100644 --- a/Makefile +++ b/Makefile @@ -42,7 +42,7 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.5.0 -CONTROLLER_TOOLS_VERSION ?= v0.16.4 +CONTROLLER_TOOLS_VERSION ?= v0.16.5 GORELEASER_VERSION ?= v2.3.2 SPELLCHECK_VERSION ?= 0.44.0 WOKE_VERSION ?= 0.19.0 diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index a6f002623f..d4d5b3bc97 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.16.5 name: backups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml index 0bbb4455be..06d1592286 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.16.5 name: clusterimagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index ab30fed50d..257529e5ef 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.16.5 name: clusters.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index 820ee383f8..d50fb58224 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.16.5 name: databases.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml index 1205cd2261..cf90a01fa3 100644 --- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: 
CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.16.5 name: imagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index 59d32f7571..ac283c038f 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.16.5 name: poolers.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml index 6c43327c8e..534a4e423c 100644 --- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.4 + controller-gen.kubebuilder.io/version: v0.16.5 name: scheduledbackups.postgresql.cnpg.io spec: group: postgresql.cnpg.io From dde18fc84e0cf5e01e7bb91e8e85877e6fdded8f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 17:11:14 +0100 Subject: [PATCH 124/836] fix(deps): update all non-major go dependencies (main) (#6004) https://github.com/onsi/ginkgo `v2.20.2` -> `v2.21.0` https://github.com/onsi/gomega `v1.34.2` -> `v1.35.1` github.com/google/pprof `v0.0.0-20240910150728-a0b0bb1d4134` -> `v0.0.0-20241029153458-d1b30febd7db` golang.org/x/tools `v0.25.0` -> `v0.26.0` --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index e5c3535123..af78b66281 100644 --- a/go.mod +++ b/go.mod @@ -25,8 +25,8 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.20.2 - github.com/onsi/gomega v1.34.2 + github.com/onsi/ginkgo/v2 v2.21.0 + github.com/onsi/gomega v1.35.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 @@ -72,7 +72,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect @@ -111,7 +111,7 @@ require ( golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.25.0 // indirect + golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect google.golang.org/protobuf v1.35.1 // indirect diff --git a/go.sum b/go.sum index 6c616a7b4e..52eedaa126 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz 
v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 h1:c5FlPPgxOn7kJz3VoPLkQYQXGBS3EklQ4Zfi57uOuqQ= -github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -146,10 +146,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= -github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= -github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= -github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -250,8 +250,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From b6db193344ca14f903faf7f5170c61b0a3aa3716 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" 
<29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 20:22:19 +0100 Subject: [PATCH 125/836] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.78.0 (main) (#6005) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index af78b66281..8a7f678246 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 diff --git a/go.sum b/go.sum index 52eedaa126..d26e9fc31e 100644 --- a/go.sum +++ b/go.sum @@ -157,8 +157,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2 h1:F/MALZ518KfI1zEg+Kg8/uTzoXKDyqw+LNC/5irJlJE= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.77.2/go.mod h1:D0KY8md81DQKdaR/cXwnhoWB3MYYyc/UjvqE8GFkIvA= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0 h1:b2L36QF60oB8Ty97UOCOnN2VnRbT6eaxzYda9kmk9zE= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= From 8f59212a56464e3d1f93acf17f30794aeb73cc59 Mon Sep 17 00:00:00 2001 From: Peggie Date: Fri, 1 Nov 2024 22:58:04 +0100 Subject: [PATCH 126/836] feat: Public Cloud K8S versions update (#5938) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. 
Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 1 + .github/gke_versions.json | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index 873c7f6786..3ffbbe129d 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,4 +1,5 @@ [ + "1.31.1", "1.30.5", "1.29.9", "1.28.9" diff --git a/.github/gke_versions.json b/.github/gke_versions.json index f4408312dc..3121122733 100644 --- a/.github/gke_versions.json +++ b/.github/gke_versions.json @@ -1,4 +1,5 @@ [ + "1.31", "1.30", "1.29", "1.28" From dcea2d5f7cc51f20b67d7f85ef9b3fd656cd768c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 2 Nov 2024 01:41:57 +0100 Subject: [PATCH 127/836] test: Updated Postgres versions used in E2E tests (#5937) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 3f2a5d2f85..fa6d5ae4ed 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "17": [ "17.0", - "17.0-20" + "17.0-27" ], "16": [ "16.4", From fb651159e42f787e05121a9e02516ed1f579dd00 Mon Sep 17 00:00:00 2001 From: Zekiye Aydemir Date: Mon, 4 Nov 2024 19:32:30 +0300 Subject: [PATCH 128/836] fix(plugin): check for empty pod list in "cnpg psql" (#6023) This patch adds a check for an empty pod list in NewCommand function and raises an error if no pods are found. Closes #5913 Signed-off-by: Zekiye Aydemir --- internal/cmd/plugin/psql/psql.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/cmd/plugin/psql/psql.go b/internal/cmd/plugin/psql/psql.go index 1207f50188..758a9f97b6 100644 --- a/internal/cmd/plugin/psql/psql.go +++ b/internal/cmd/plugin/psql/psql.go @@ -83,6 +83,11 @@ func NewCommand( return nil, err } + // Check if the pod list is empty + if len(pods.Items) == 0 { + return nil, fmt.Errorf("cluster does not exist or is not accessible") + } + kubectlPath, err := exec.LookPath(kubectlCommand) if err != nil { return nil, fmt.Errorf("while getting kubectl path: %w", err) From d22482a650248288ced7b249e21a454fdfe2ba38 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 09:13:35 +0400 Subject: [PATCH 129/836] chore(deps): update module github.com/goreleaser/goreleaser to v2.4.4 (main) (#6019) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 04793b62d3..9f06b5ae5f 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.5 -GORELEASER_VERSION ?= v2.3.2 +GORELEASER_VERSION ?= v2.4.4 SPELLCHECK_VERSION ?= 0.44.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 From 3a2f99d904cf6d30893716bfe0e1a49042a4e68a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:02:53 +0100 Subject: [PATCH 130/836] fix(deps): update github.com/cloudnative-pg/cnpg-i digest to ac20c72 (main) (#6032) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- 
go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8a7f678246..e9a12f527b 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c - github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6 + github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3 github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 diff --git a/go.sum b/go.sum index d26e9fc31e..bb75da667a 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c h1:JQK5GOXSukWTInG5GzgmlTwY/rs5yO446+xy09NqbLg= github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6 h1:QokKbYfQ0sRWMHDB0sVUL1H/kGQki+AXBfBRp7J+9Og= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241016132832-8d61352831c6/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3 h1:0y2XdAd050gXAa/myF4ELAiguaba6/XZHAElnOtfyA8= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b h1:4Q2VQsPlLHliJdi87zodQ0FHLd1cJINMm4N70eu8rRg= github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= From ac08737b0d91325436ffc218f456c80db067042e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:26:10 +0100 Subject: [PATCH 131/836] fix(deps): update github.com/cloudnative-pg/barman-cloud digest to ae6c240 (main) (#6029) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index e9a12f527b..69777470ba 100644 --- a/go.mod +++ b/go.mod @@ -10,9 +10,9 @@ require ( github.com/avast/retry-go/v4 v4.6.0 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c + github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3 - github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b + github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index bb75da667a..2687385ae8 100644 --- a/go.sum +++ b/go.sum @@ -18,12 +18,12 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 
h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c h1:JQK5GOXSukWTInG5GzgmlTwY/rs5yO446+xy09NqbLg= -github.com/cloudnative-pg/barman-cloud v0.0.0-20241016085606-44f56f711a5c/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= +github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 h1:HX5pXyzVAqfjcDgCa1l8b4sumf7XYnGqiP+6XMgbB2E= +github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3 h1:0y2XdAd050gXAa/myF4ELAiguaba6/XZHAElnOtfyA8= github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= -github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b h1:4Q2VQsPlLHliJdi87zodQ0FHLd1cJINMm4N70eu8rRg= -github.com/cloudnative-pg/machinery v0.0.0-20241014090714-c27747f9974b/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns= +github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836 h1:Hhg+I2QcaPNN5XaSsYb7Xw3PbQlvCA9eDY+SvVf902Q= +github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= From 34a1093199f4e591a5ace9149973220e01b249ee Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 16:21:25 +0100 Subject: [PATCH 132/836] fix(deps): update github.com/cloudnative-pg/cnpg-i digest to c704f46 (main) (#6033) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 69777470ba..9de3e882b0 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 - github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3 + github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 diff --git a/go.sum b/go.sum index 2687385ae8..cc512e6223 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 h1:HX5pXyzVAqfjcDgCa1l8b4sumf7XYnGqiP+6XMgbB2E= github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3 h1:0y2XdAd050gXAa/myF4ELAiguaba6/XZHAElnOtfyA8= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241031183132-ac20c72b8fc3/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 
h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836 h1:Hhg+I2QcaPNN5XaSsYb7Xw3PbQlvCA9eDY+SvVf902Q= github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= From 58eb4dcab5a5e13850de523090618d1cb9c32e26 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 6 Nov 2024 10:41:12 +0100 Subject: [PATCH 133/836] feat: restore job hooks (#5821) This patch allows a CNPG-i plugin to enhance the cluster bootstrap process by injecting a sidecar into the operator's `full-recovery` job and communicating with it using the CNPG-i protocol. Signed-off-by: Armando Ruocco Signed-off-by: Francesco Canovai Signed-off-by: Leonardo Cecchi Co-authored-by: Francesco Canovai Co-authored-by: Leonardo Cecchi --- .wordlist-en-custom.txt | 1 + api/v1/cluster_funcs.go | 51 ++++++ api/v1/cluster_types.go | 9 +- api/v1/cluster_webhook.go | 6 +- api/v1/zz_generated.deepcopy.go | 28 +++- .../bases/postgresql.cnpg.io_clusters.yaml | 20 +++ docs/src/cloudnative-pg.v1.md | 116 +++++--------- internal/cmd/manager/instance/restore/cmd.go | 2 + internal/cmd/manager/walrestore/cmd.go | 5 +- internal/cnpi/plugin/client/backup.go | 1 + internal/cnpi/plugin/client/contracts.go | 8 + internal/cnpi/plugin/client/restore_job.go | 61 ++++++++ internal/cnpi/plugin/client/suite_test.go | 9 ++ internal/cnpi/plugin/connection/connection.go | 82 +++++++--- internal/cnpi/plugin/connection/unix.go | 15 +- internal/controller/cluster_controller.go | 6 +- pkg/management/postgres/restore.go | 145 ++++++++++++++---- pkg/management/postgres/webserver/local.go | 4 + pkg/utils/context.go | 3 + 19 files changed, 438 insertions(+), 134 deletions(-) create mode 100644 internal/cnpi/plugin/client/restore_job.go diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 45c9728531..51fefca5de 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -154,6 +154,7 @@ EphemeralVolumeSource EphemeralVolumesSizeLimit EphemeralVolumesSizeLimitConfiguration ExternalCluster +ExternalClusterList FQDN Fei Filesystem diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index fdfa88c158..093cb41837 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -33,6 +33,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" @@ -81,6 +82,20 @@ func (pluginList PluginConfigurationList) GetEnabledPluginNames() (result []stri return pluginNames } +// GetEnabledPluginNames gets the name of the plugins that are +// involved in the reconciliation of this external cluster list. This +// list is usually composed by the plugins that need to be active to +// recover data from the external clusters. 
+func (externalClusterList ExternalClusterList) GetEnabledPluginNames() (result []string) { + pluginNames := make([]string, 0, len(externalClusterList)) + for _, externalCluster := range externalClusterList { + if externalCluster.PluginConfiguration != nil { + pluginNames = append(pluginNames, externalCluster.PluginConfiguration.Name) + } + } + return pluginNames +} + // GetShmLimit gets the `/dev/shm` memory size limit func (e *EphemeralVolumesSizeLimitConfiguration) GetShmLimit() *resource.Quantity { if e == nil { @@ -1347,6 +1362,42 @@ func (cluster *Cluster) IsReadOnlyServiceEnabled() bool { return !slices.Contains(cluster.Spec.Managed.Services.DisabledDefaultServices, ServiceSelectorTypeRO) } +// GetRecoverySourcePlugin returns the configuration of the plugin being +// the recovery source of the cluster. If no such plugin have been configured, +// nil is returned +func (cluster *Cluster) GetRecoverySourcePlugin() *PluginConfiguration { + if cluster.Spec.Bootstrap == nil || cluster.Spec.Bootstrap.Recovery == nil { + return nil + } + + recoveryConfig := cluster.Spec.Bootstrap.Recovery + if len(recoveryConfig.Source) == 0 { + // Plugin-based recovery is supported only with + // An external cluster definition + return nil + } + + recoveryExternalCluster, found := cluster.ExternalCluster(recoveryConfig.Source) + if !found { + // This error should have already been detected + // by the validating webhook. + return nil + } + + return recoveryExternalCluster.PluginConfiguration +} + +// EnsureGVKIsPresent ensures that the GroupVersionKind (GVK) metadata is present in the Backup object. +// This is necessary because informers do not automatically include metadata inside the object. +// By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object. 
+func (cluster *Cluster) EnsureGVKIsPresent() { + cluster.SetGroupVersionKind(schema.GroupVersionKind{ + Group: GroupVersion.Group, + Version: GroupVersion.Version, + Kind: ClusterKind, + }) +} + // BuildPostgresOptions create the list of options that // should be added to the PostgreSQL configuration to // recover given a certain target diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 70b53221c4..c701345216 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -422,7 +422,7 @@ type ClusterSpec struct { // The list of external clusters which are used in the configuration // +optional - ExternalClusters []ExternalCluster `json:"externalClusters,omitempty"` + ExternalClusters ExternalClusterList `json:"externalClusters,omitempty"` // The instances' log level, one of the following values: error, warning, info (default), debug, trace // +kubebuilder:default:=info @@ -1960,6 +1960,9 @@ type ClusterMonitoringTLSConfiguration struct { Enabled bool `json:"enabled,omitempty"` } +// ExternalClusterList is a list of external clusters +type ExternalClusterList []ExternalCluster + // ExternalCluster represents the connection parameters to an // external cluster which is used in the other sections of the configuration type ExternalCluster struct { @@ -1998,6 +2001,10 @@ type ExternalCluster struct { // The configuration for the barman-cloud tool suite // +optional BarmanObjectStore *BarmanObjectStoreConfiguration `json:"barmanObjectStore,omitempty"` + + // The configuration of the plugin that is taking care + // of WAL archiving and backups for this external cluster + PluginConfiguration *PluginConfiguration `json:"plugin,omitempty"` } // EnsureOption represents whether we should enforce the presence or absence of diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go index ee2a572867..f3f8ee9d63 100644 --- a/api/v1/cluster_webhook.go +++ b/api/v1/cluster_webhook.go @@ -1876,12 +1876,14 @@ func (r *Cluster) validateExternalClusters() field.ErrorList { func (r *Cluster) validateExternalCluster(externalCluster *ExternalCluster, path *field.Path) field.ErrorList { var result field.ErrorList - if externalCluster.ConnectionParameters == nil && externalCluster.BarmanObjectStore == nil { + if externalCluster.ConnectionParameters == nil && + externalCluster.BarmanObjectStore == nil && + externalCluster.PluginConfiguration == nil { result = append(result, field.Invalid( path, externalCluster, - "one of connectionParameters and barmanObjectStore is required")) + "one of connectionParameters, plugin and barmanObjectStore is required")) } return result diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index b2ca687cbb..47e5083fc9 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -790,7 +790,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { } if in.ExternalClusters != nil { in, out := &in.ExternalClusters, &out.ExternalClusters - *out = make([]ExternalCluster, len(*in)) + *out = make(ExternalClusterList, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1236,6 +1236,11 @@ func (in *ExternalCluster) DeepCopyInto(out *ExternalCluster) { *out = new(pkgapi.BarmanObjectStoreConfiguration) (*in).DeepCopyInto(*out) } + if in.PluginConfiguration != nil { + in, out := &in.PluginConfiguration, &out.PluginConfiguration + *out = new(PluginConfiguration) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
ExternalCluster. @@ -1248,6 +1253,27 @@ func (in *ExternalCluster) DeepCopy() *ExternalCluster { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExternalClusterList) DeepCopyInto(out *ExternalClusterList) { + { + in := &in + *out = make(ExternalClusterList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalClusterList. +func (in ExternalClusterList) DeepCopy() ExternalClusterList { + if in == nil { + return nil + } + out := new(ExternalClusterList) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageCatalog) DeepCopyInto(out *ImageCatalog) { *out = *in diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 257529e5ef..53bd6f571e 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -2878,6 +2878,26 @@ spec: - key type: object x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object sslCert: description: |- The reference to an SSL certificate to be used to connect to this diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index e922148fbc..0868475df7 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -1735,7 +1735,7 @@ it can be with a switchover (switchover) or in-place (restart externalClusters
-[]ExternalCluster +ExternalClusterList

The list of external clusters which are used in the configuration

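For readers tracking this API change, a minimal sketch of how the new list type and its plugin field fit together, assuming the Go types added earlier in this patch (the external cluster name, plugin name, and parameter key below are illustrative placeholders, not values mandated by the API):

spec := ClusterSpec{
	ExternalClusters: ExternalClusterList{
		{
			Name: "recovery-source",
			PluginConfiguration: &PluginConfiguration{
				Name:       "example-wal-plugin",
				Parameters: map[string]string{"objectStore": "store"},
			},
		},
	},
}
// Collects the plugin names declared on external clusters;
// here it yields ["example-wal-plugin"].
names := spec.ExternalClusters.GetEnabledPluginNames()
_ = names

This is the list that the operator and the WAL restore command merge with cluster.Spec.Plugins.GetEnabledPluginNames(), as the controller changes later in this patch show.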
@@ -2558,82 +2558,6 @@ storage

-## ExternalCluster {#postgresql-cnpg-io-v1-ExternalCluster} - - -**Appears in:** - -- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) - - -

ExternalCluster represents the connection parameters to an -external cluster which is used in the other sections of the configuration

- - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldDescription
name [Required]
-string -
-

The server name, required

-
connectionParameters
-map[string]string -
-

The list of connection parameters, such as dbname, host, username, etc

-
sslCert
-core/v1.SecretKeySelector -
-

The reference to an SSL certificate to be used to connect to this -instance

-
sslKey
-core/v1.SecretKeySelector -
-

The reference to an SSL private key to be used to connect to this -instance

-
sslRootCert
-core/v1.SecretKeySelector -
-

The reference to an SSL CA public key to be used to connect to this -instance

-
password
-core/v1.SecretKeySelector -
-

The reference to the password to be used to connect to the server. -If a password is provided, CloudNativePG creates a PostgreSQL -passfile at /controller/external/NAME/pass (where "NAME" is the -cluster's name). This passfile is automatically referenced in the -connection string when establishing a connection to the remote -PostgreSQL server from the current PostgreSQL Cluster. This ensures -secure and efficient password management for external clusters.

-
barmanObjectStore
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration -
-

The configuration for the barman-cloud tool suite

-
- ## ImageCatalogRef {#postgresql-cnpg-io-v1-ImageCatalogRef} @@ -3505,6 +3429,44 @@ the operator calls PgBouncer's PAUSE and RESUME comman +## PluginConfiguration {#postgresql-cnpg-io-v1-PluginConfiguration} + + +**Appears in:** + + + +

PluginConfiguration specifies a plugin that needs to be loaded for this +cluster to be reconciled

+ + + + + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name is the plugin name

+
enabled
+bool +
+

Enabled is true if this plugin will be used

+
parameters
+map[string]string +
+

Parameters is the configuration of the plugin

+
+ ## PluginConfigurationList {#postgresql-cnpg-io-v1-PluginConfigurationList} (Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.PluginConfiguration`) diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go index f2d4d63052..26dd30a4d1 100644 --- a/internal/cmd/manager/instance/restore/cmd.go +++ b/internal/cmd/manager/instance/restore/cmd.go @@ -95,6 +95,8 @@ func restoreSubCommand(ctx context.Context, info postgres.InitInfo) error { return err } + contextLogger.Info("restore command execution completed without errors") + return nil } diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index 9ca043ee0e..56bcdf447d 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -255,7 +255,10 @@ func restoreWALViaPlugins( defer plugins.Close() availablePluginNamesSet := stringset.From(availablePluginNames) - enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames()) + + enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames() + enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...) + enabledPluginNamesSet := stringset.From(enabledPluginNames) client, err := pluginClient.WithPlugins( ctx, diff --git a/internal/cnpi/plugin/client/backup.go b/internal/cnpi/plugin/client/backup.go index 131438247a..54aab9cc4f 100644 --- a/internal/cnpi/plugin/client/backup.go +++ b/internal/cnpi/plugin/client/backup.go @@ -87,6 +87,7 @@ type BackupResponse struct { // This field is set to true for online/hot backups and to false otherwise. Online bool + // This field contains the metadata to be associated with this backup Metadata map[string]string } diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go index c1b141f0e9..7ecf00960e 100644 --- a/internal/cnpi/plugin/client/contracts.go +++ b/internal/cnpi/plugin/client/contracts.go @@ -19,10 +19,12 @@ package client import ( "context" + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" ) @@ -35,6 +37,7 @@ type Client interface { LifecycleCapabilities WalCapabilities BackupCapabilities + RestoreJobHooksCapabilities } // Connection describes a set of behaviour needed to properly handle the plugin connections @@ -144,3 +147,8 @@ type BackupCapabilities interface { parameters map[string]string, ) (*BackupResponse, error) } + +// RestoreJobHooksCapabilities describes a set of behaviour needed to run the Restore +type RestoreJobHooksCapabilities interface { + Restore(ctx context.Context, cluster *apiv1.Cluster) (*restore.RestoreResponse, error) +} diff --git a/internal/cnpi/plugin/client/restore_job.go b/internal/cnpi/plugin/client/restore_job.go new file mode 100644 index 0000000000..028c0d3e95 --- /dev/null +++ b/internal/cnpi/plugin/client/restore_job.go @@ -0,0 +1,61 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "encoding/json" + "errors" + "slices" + + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// ErrNoPluginSupportsRestoreJobHooksCapability is raised when no plugin supports the restore job hooks capability +var ErrNoPluginSupportsRestoreJobHooksCapability = errors.New("no plugin supports the restore job hooks capability") + +func (data *data) Restore( + ctx context.Context, + cluster *apiv1.Cluster, +) (*restore.RestoreResponse, error) { + cluster.EnsureGVKIsPresent() + + for idx := range data.plugins { + plugin := data.plugins[idx] + + if !slices.Contains(plugin.RestoreJobHooksCapabilities(), restore.RestoreJobHooksCapability_KIND_RESTORE) { + continue + } + + clusterDefinition, err := json.Marshal(cluster) + if err != nil { + return nil, err + } + request := restore.RestoreRequest{ + ClusterDefinition: clusterDefinition, + } + res, err := plugin.RestoreJobHooksClient().Restore(ctx, &request) + if err != nil { + return nil, err + } + return res, nil + } + + return nil, ErrNoPluginSupportsRestoreJobHooksCapability +} diff --git a/internal/cnpi/plugin/client/suite_test.go b/internal/cnpi/plugin/client/suite_test.go index 490518410f..ba9e9a64db 100644 --- a/internal/cnpi/plugin/client/suite_test.go +++ b/internal/cnpi/plugin/client/suite_test.go @@ -25,6 +25,7 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" "github.com/cloudnative-pg/cnpg-i/pkg/operator" "github.com/cloudnative-pg/cnpg-i/pkg/reconciler" + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/cnpg-i/pkg/wal" "google.golang.org/grpc" @@ -103,6 +104,14 @@ type fakeConnection struct { operatorClient *fakeOperatorClient } +func (f *fakeConnection) RestoreJobHooksClient() restore.RestoreJobHooksClient { + panic("implement me") +} + +func (f *fakeConnection) RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind { + panic("implement me") +} + func (f *fakeConnection) setStatusResponse(status []byte) { f.operatorClient.status = &operator.SetStatusInClusterResponse{ JsonStatus: status, diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go index 1a8d46e7a9..0e9826d530 100644 --- a/internal/cnpi/plugin/connection/connection.go +++ b/internal/cnpi/plugin/connection/connection.go @@ -28,6 +28,7 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" "github.com/cloudnative-pg/cnpg-i/pkg/operator" "github.com/cloudnative-pg/cnpg-i/pkg/reconciler" + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/cnpg-i/pkg/wal" "google.golang.org/grpc" ) @@ -57,6 +58,7 @@ type Interface interface { WALClient() wal.WALClient BackupClient() backup.BackupClient ReconcilerHooksClient() reconciler.ReconcilerHooksClient + RestoreJobHooksClient() restore.RestoreJobHooksClient PluginCapabilities() []identity.PluginCapability_Service_Type OperatorCapabilities() []operator.OperatorCapability_RPC_Type @@ -64,6 +66,7 @@ type Interface interface { LifecycleCapabilities() 
[]*lifecycle.OperatorLifecycleCapabilities BackupCapabilities() []backup.BackupCapability_RPC_Type ReconcilerCapabilities() []reconciler.ReconcilerHooksCapability_Kind + RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind Ping(ctx context.Context) error Close() error @@ -77,15 +80,17 @@ type data struct { walClient wal.WALClient backupClient backup.BackupClient reconcilerHooksClient reconciler.ReconcilerHooksClient - - name string - version string - capabilities []identity.PluginCapability_Service_Type - operatorCapabilities []operator.OperatorCapability_RPC_Type - walCapabilities []wal.WALCapability_RPC_Type - lifecycleCapabilities []*lifecycle.OperatorLifecycleCapabilities - backupCapabilities []backup.BackupCapability_RPC_Type - reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind + restoreJobHooksClient restore.RestoreJobHooksClient + + name string + version string + capabilities []identity.PluginCapability_Service_Type + operatorCapabilities []operator.OperatorCapability_RPC_Type + walCapabilities []wal.WALCapability_RPC_Type + lifecycleCapabilities []*lifecycle.OperatorLifecycleCapabilities + backupCapabilities []backup.BackupCapability_RPC_Type + reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind + restoreJobHooksCapabilities []restore.RestoreJobHooksCapability_Kind } func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, error) { @@ -102,16 +107,18 @@ func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, return data{}, fmt.Errorf("while querying plugin identity: %w", err) } - result := data{} - result.connection = connection - result.name = pluginInfoResponse.Name - result.version = pluginInfoResponse.Version - result.identityClient = identity.NewIdentityClient(connection) - result.operatorClient = operator.NewOperatorClient(connection) - result.lifecycleClient = lifecycle.NewOperatorLifecycleClient(connection) - result.walClient = wal.NewWALClient(connection) - result.backupClient = backup.NewBackupClient(connection) - result.reconcilerHooksClient = reconciler.NewReconcilerHooksClient(connection) + result := data{ + connection: connection, + name: pluginInfoResponse.Name, + version: pluginInfoResponse.Version, + identityClient: identity.NewIdentityClient(connection), + operatorClient: operator.NewOperatorClient(connection), + lifecycleClient: lifecycle.NewOperatorLifecycleClient(connection), + walClient: wal.NewWALClient(connection), + backupClient: backup.NewBackupClient(connection), + reconcilerHooksClient: reconciler.NewReconcilerHooksClient(connection), + restoreJobHooksClient: restore.NewRestoreJobHooksClient(connection), + } return result, err } @@ -232,6 +239,27 @@ func (pluginData *data) loadBackupCapabilities(ctx context.Context) error { return nil } +func (pluginData *data) loadRestoreJobHooksCapabilities(ctx context.Context) error { + var restoreJobHooksCapabilitiesResponse *restore.RestoreJobHooksCapabilitiesResult + var err error + + if restoreJobHooksCapabilitiesResponse, err = pluginData.restoreJobHooksClient.GetCapabilities( + ctx, + &restore.RestoreJobHooksCapabilitiesRequest{}, + ); err != nil { + return fmt.Errorf("while querying plugin restore job hooks capabilities: %w", err) + } + + pluginData.restoreJobHooksCapabilities = make( + []restore.RestoreJobHooksCapability_Kind, + len(restoreJobHooksCapabilitiesResponse.Capabilities)) + for i := range pluginData.restoreJobHooksCapabilities { + pluginData.restoreJobHooksCapabilities[i] =
restoreJobHooksCapabilitiesResponse.Capabilities[i].Kind + } + + return nil +} + // Metadata extracts the plugin metadata reading from // the internal metadata func (pluginData *data) Metadata() Metadata { @@ -288,6 +316,10 @@ func (pluginData *data) BackupClient() backup.BackupClient { return pluginData.backupClient } +func (pluginData *data) RestoreJobHooksClient() restore.RestoreJobHooksClient { + return pluginData.restoreJobHooksClient +} + func (pluginData *data) ReconcilerHooksClient() reconciler.ReconcilerHooksClient { return pluginData.reconcilerHooksClient } @@ -316,6 +348,10 @@ func (pluginData *data) ReconcilerCapabilities() []reconciler.ReconcilerHooksCap return pluginData.reconcilerCapabilities } +func (pluginData *data) RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind { + return pluginData.restoreJobHooksCapabilities +} + func (pluginData *data) Ping(ctx context.Context) error { _, err := pluginData.identityClient.Probe(ctx, &identity.ProbeRequest{}) return err @@ -374,5 +410,13 @@ func LoadPlugin(ctx context.Context, handler Handler) (Interface, error) { } } + // If the plugin implements the restore job hooks, load its + // capabilities + if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_RESTORE_JOB) { + if err = result.loadRestoreJobHooksCapabilities(ctx); err != nil { + return nil, err + } + } + return &result, nil } diff --git a/internal/cnpi/plugin/connection/unix.go b/internal/cnpi/plugin/connection/unix.go index 93495ba4eb..1a485c179e 100644 --- a/internal/cnpi/plugin/connection/unix.go +++ b/internal/cnpi/plugin/connection/unix.go @@ -20,11 +20,14 @@ package connection import ( "context" "fmt" + "time" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/timeout" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // ProtocolUnix is for plugins that are reachable over a @@ -38,11 +41,15 @@ func (p ProtocolUnix) Dial(ctx context.Context) (Handler, error) { contextLogger.Debug("Connecting to plugin via local socket", "path", dialPath) + timeoutValue := defaultTimeout + value, ok := ctx.Value(utils.GRPCTimeoutKey).(time.Duration) + if ok { + contextLogger.Debug("Using custom timeout value", "timeout", value) + timeoutValue = value + } + return grpc.NewClient( dialPath, grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithUnaryInterceptor( - timeout.UnaryClientInterceptor(defaultTimeout), - ), - ) + grpc.WithUnaryInterceptor(timeout.UnaryClientInterceptor(timeoutValue))) } diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 1c5974cb7d..f433ab5d69 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -173,8 +173,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct ctx = cluster.SetInContext(ctx) - // Load the required plugins - pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + // Load the plugins required to bootstrap and reconcile this cluster + enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames() + enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...) + pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, enabledPluginNames...) 
if err != nil { var errUnknownPlugin *repository.ErrUnknownPlugin if errors.As(err, &errUnknownPlugin) { diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 347c997786..1e43cb6996 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -37,15 +37,20 @@ import ( barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials" barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer" + restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external" @@ -228,6 +233,7 @@ func (info InitInfo) createBackupObjectForSnapshotRestore( // Restore restores a PostgreSQL cluster from a backup into the object storage func (info InitInfo) Restore(ctx context.Context) error { + contextLogger := log.FromContext(ctx) typedClient, err := management.NewControllerRuntimeClient() if err != nil { return err @@ -248,29 +254,53 @@ func (info InitInfo) Restore(ctx context.Context) error { info.ApplicationDatabase = cluster.GetApplicationDatabaseName() } - // Before starting the restore we check if the archive destination is safe to use - // otherwise, we stop creating the cluster - err = info.checkBackupDestination(ctx, typedClient, cluster) - if err != nil { - return err - } + var envs []string + var config string - // If we need to download data from a backup, we do it - backup, env, err := info.loadBackup(ctx, typedClient, cluster) - if err != nil { - return err - } + // nolint:nestif + if pluginConfiguration := cluster.GetRecoverySourcePlugin(); pluginConfiguration != nil { + contextLogger.Info("Restore through plugin detected, proceeding...") + res, err := restoreViaPlugin(ctx, cluster, pluginConfiguration) + if err != nil { + return err + } + if res == nil { + return errors.New("empty response from restoreViaPlugin, programmatic error") + } + envs = res.Envs + config = res.RestoreConfig + } else { + // Before starting the restore we check if the archive destination is safe to use + // otherwise, we stop creating the cluster + err = info.checkBackupDestination(ctx, typedClient, cluster) + if err != nil { + return err + } - if err := info.ensureArchiveContainsLastCheckpointRedoWAL(ctx, cluster, env, backup); err != nil { - return err - } + // If we need to download data from a backup, we do it + backup, env, err := info.loadBackup(ctx, typedClient, cluster) + if err != nil { + return err + } - if err := info.restoreDataDir(ctx, backup, env); err != nil { - return err - } + if err := info.ensureArchiveContainsLastCheckpointRedoWAL(ctx, cluster, env, backup); err != nil { + return err + } - if _, err := info.restoreCustomWalDir(ctx); err != nil { - 
return err + if err := info.restoreDataDir(ctx, backup, env); err != nil { + return err + } + + if _, err := info.restoreCustomWalDir(ctx); err != nil { + return err + } + + conf, err := getRestoreWalConfig(ctx, backup) + if err != nil { + return err + } + config = conf + envs = env } if err := info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { @@ -304,11 +334,11 @@ func (info InitInfo) Restore(ctx context.Context) error { return err } - if err := info.writeRestoreWalConfig(ctx, backup, cluster); err != nil { + if err := info.writeCustomRestoreWalConfig(cluster, config); err != nil { return err } - return info.ConfigureInstanceAfterRestore(ctx, cluster, env) + return info.ConfigureInstanceAfterRestore(ctx, cluster, envs) } func (info InitInfo) ensureArchiveContainsLastCheckpointRedoWAL( @@ -581,6 +611,33 @@ func (info InitInfo) writeRestoreWalConfig( backup *apiv1.Backup, cluster *apiv1.Cluster, ) error { + conf, err := getRestoreWalConfig(ctx, backup) + if err != nil { + return err + } + recoveryFileContents := fmt.Sprintf( + "%s\n"+ + "%s", + conf, + cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions()) + + return info.writeRecoveryConfiguration(cluster, recoveryFileContents) +} + +func (info InitInfo) writeCustomRestoreWalConfig(cluster *apiv1.Cluster, conf string) error { + recoveryFileContents := fmt.Sprintf( + "%s\n"+ + "%s", + conf, + cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions()) + + return info.writeRecoveryConfiguration(cluster, recoveryFileContents) +} + +// getRestoreWalConfig obtains the content to append to `custom.conf` allowing PostgreSQL +// to complete the WAL recovery from the object storage and then start +// as a new primary +func getRestoreWalConfig(ctx context.Context, backup *apiv1.Backup) (string, error) { var err error cmd := []string{barmanCapabilities.BarmanCloudWalRestore} @@ -593,19 +650,17 @@ func (info InitInfo) writeRestoreWalConfig( cmd, err = barmanCommand.AppendCloudProviderOptionsFromBackup( ctx, cmd, backup.Status.BarmanCredentials) if err != nil { - return err + return "", err } cmd = append(cmd, "%f", "%p") recoveryFileContents := fmt.Sprintf( "recovery_target_action = promote\n"+ - "restore_command = '%s'\n"+ - "%s", - strings.Join(cmd, " "), - cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions()) + "restore_command = '%s'\n", + strings.Join(cmd, " ")) - return info.writeRecoveryConfiguration(cluster, recoveryFileContents) + return recoveryFileContents, nil } func (info InitInfo) writeRecoveryConfiguration(cluster *apiv1.Cluster, recoveryFileContents string) error { @@ -985,3 +1040,39 @@ func waitUntilRecoveryFinishes(db *sql.DB) error { return nil }) } + +// restoreViaPlugin tries to restore the cluster using a plugin if available and enabled. +// Returns the restore response produced by the plugin, and any error encountered.
+func restoreViaPlugin( + ctx context.Context, + cluster *apiv1.Cluster, + plugin *apiv1.PluginConfiguration, +) (*restore.RestoreResponse, error) { + contextLogger := log.FromContext(ctx) + + // TODO: timeout should be configurable by the user + ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 100*time.Minute) + + plugins := repository.New() + availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) + if err != nil { + contextLogger.Error(err, "Error while loading local plugins") + } + defer plugins.Close() + + availablePluginNamesSet := stringset.From(availablePluginNames) + contextLogger.Info("available plugins", "plugins", availablePluginNamesSet) + + pClient, err := pluginClient.WithPlugins( + ctx, + plugins, + plugin.Name, + ) + if err != nil { + contextLogger.Error(err, "Error while loading required plugins") + return nil, err + } + defer pClient.Close(ctx) + + return pClient.Restore(ctx, cluster) +} diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go index f818623c44..7981576c9f 100644 --- a/pkg/management/postgres/webserver/local.go +++ b/pkg/management/postgres/webserver/local.go @@ -23,6 +23,7 @@ import ( "fmt" "net/http" "strings" + "time" "github.com/cloudnative-pg/machinery/pkg/log" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -33,6 +34,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) type localWebserverEndpoints struct { @@ -230,5 +232,7 @@ func (ws *localWebserverEndpoints) startPluginBackup( cluster *apiv1.Cluster, backup *apiv1.Backup, ) { + // TODO: timeout should be configurable by the user + ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 100*time.Minute) NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder).Start(ctx) } diff --git a/pkg/utils/context.go b/pkg/utils/context.go index e91aebab61..1f5b25a06e 100644 --- a/pkg/utils/context.go +++ b/pkg/utils/context.go @@ -24,3 +24,6 @@ const ContextKeyCluster contextKey = "cluster" // PluginClientKey is the context key holding cluster data const PluginClientKey contextKey = "pluginClient" + +// GRPCTimeoutKey is the context key holding the gRPC timeout +const GRPCTimeoutKey contextKey = "grpcTimeout" From 7331a41a99f87d5446d22d3deb7230b7b3e98c9e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 17:42:01 +0400 Subject: [PATCH 134/836] fix(deps): update kubernetes patches (main) (#6027) https://github.com/prometheus-operator/prometheus-operator `v0.78.0` -> `v0.78.1` https://github.com/kubernetes/utils `49e7df5` -> `6fe5fd8` --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 9de3e882b0..cdb1248b22 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 @@ -45,7 +45,7 @@ require ( k8s.io/apimachinery v0.31.2 k8s.io/cli-runtime v0.31.2 k8s.io/client-go v0.31.2 - 
k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 + k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 sigs.k8s.io/controller-runtime v0.19.1 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index cc512e6223..5fc91d02a5 100644 --- a/go.sum +++ b/go.sum @@ -157,8 +157,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0 h1:b2L36QF60oB8Ty97UOCOnN2VnRbT6eaxzYda9kmk9zE= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.0/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 h1:Fm9Z+FabnB+6EoGq15j+pyLmaK6hYrYOpBlTzOLTQ+E= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -292,8 +292,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= -k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= -k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 h1:jGnCPejIetjiy2gqaJ5V0NLwTpF4wbQ6cZIItJCSHno= +k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk= sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= From 6b5da958154db3650240e83e3d182120c1346bee Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Thu, 7 Nov 2024 11:08:50 +0100 Subject: [PATCH 135/836] refactor: bring role reconciler Postgres functions in line with other reconcilers (#5958) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Use an instance interface rather than a `postgres.Instance` in the Reconcile, to make it testable - Rewrite the Postgres-facing functions to take a `sql.DB` parameter - Discard the custom mocks and use sqlmock for unit tests - Do proper error propagation on unexpected errors when reconciling with the DB Closes #5957 Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Signed-off-by: Niccolò Fei Co-authored-by: Armando Ruocco Co-authored-by: Niccolò Fei --- .../management/controller/roles/contract.go | 23 - .../management/controller/roles/postgres.go | 83 +- .../controller/roles/postgres_test.go | 83 +- .../management/controller/roles/reconciler.go 
| 3 +- .../controller/roles/reconciler_test.go | 2 +- .../management/controller/roles/runnable.go | 101 ++- .../controller/roles/runnable_test.go | 777 +++++++----------- .../management/controller/roles/suite_test.go | 23 + 8 files changed, 414 insertions(+), 681 deletions(-) diff --git a/internal/management/controller/roles/contract.go b/internal/management/controller/roles/contract.go index 294023c4e5..d72cf3e675 100644 --- a/internal/management/controller/roles/contract.go +++ b/internal/management/controller/roles/contract.go @@ -17,7 +17,6 @@ limitations under the License. package roles import ( - "context" "database/sql" "reflect" "sort" @@ -121,25 +120,3 @@ func (d *DatabaseRole) isEquivalentTo(inSpec apiv1.RoleConfiguration) bool { return reflect.DeepEqual(role, spec) && d.hasSameValidUntilAs(inSpec) } - -// RoleManager abstracts the functionality of reconciling with PostgreSQL roles -type RoleManager interface { - // List the roles in the database - List(ctx context.Context) ([]DatabaseRole, error) - // Update the role in the database - Update(ctx context.Context, role DatabaseRole) error - // Create the role in the database - Create(ctx context.Context, role DatabaseRole) error - // Delete the role in the database - Delete(ctx context.Context, role DatabaseRole) error - // GetLastTransactionID returns the last TransactionID as the `xmin` - // from the database - // See https://www.postgresql.org/docs/current/datatype-oid.html for reference - GetLastTransactionID(ctx context.Context, role DatabaseRole) (int64, error) - // UpdateComment Update the comment of role in the database - UpdateComment(ctx context.Context, role DatabaseRole) error - // UpdateMembership Update the In Role membership of role in the database - UpdateMembership(ctx context.Context, role DatabaseRole, rolesToGrant []string, rolesToRevoke []string) error - // GetParentRoles returns the roles the given role is a member of - GetParentRoles(ctx context.Context, role DatabaseRole) ([]string, error) -} diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go index 7ef2ea2ff0..b6d7b1bf13 100644 --- a/internal/management/controller/roles/postgres.go +++ b/internal/management/controller/roles/postgres.go @@ -19,6 +19,7 @@ package roles import ( "context" "database/sql" + "errors" "fmt" "strings" @@ -28,26 +29,14 @@ import ( "github.com/lib/pq" ) -// PostgresRoleManager is a RoleManager for a database instance -type PostgresRoleManager struct { - superUserDB *sql.DB -} - -// NewPostgresRoleManager returns an implementation of RoleManager for postgres -func NewPostgresRoleManager(superDB *sql.DB) RoleManager { - return PostgresRoleManager{ - superUserDB: superDB, - } -} - // List the available roles excluding all the roles that start with `pg_` -func (sm PostgresRoleManager) List( - ctx context.Context, -) ([]DatabaseRole, error) { +func List(ctx context.Context, db *sql.DB) ([]DatabaseRole, error) { logger := log.FromContext(ctx).WithName("roles_reconciler") - wrapErr := func(err error) error { return fmt.Errorf("while listing DB roles for DRM: %w", err) } + wrapErr := func(err error) error { + return fmt.Errorf("while listing DB roles for role reconciler: %w", err) + } - rows, err := sm.superUserDB.QueryContext( + rows, err := db.QueryContext( ctx, `SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls, @@ -109,11 +98,11 @@ func (sm PostgresRoleManager) List( } // Update 
the role -func (sm PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) error { +func Update(ctx context.Context, db *sql.DB, role DatabaseRole) error { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while updating role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while updating role %s with role reconciler: %w", role.Name, err) } var query strings.Builder @@ -124,7 +113,7 @@ func (sm PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) err // will change no matter what, the next reconciliation cycle we would update the password appendPasswordOption(role, &query) - _, err := sm.superUserDB.ExecContext(ctx, query.String()) + _, err := db.ExecContext(ctx, query.String()) if err != nil { return wrapErr(err) } @@ -133,11 +122,11 @@ func (sm PostgresRoleManager) Update(ctx context.Context, role DatabaseRole) err // Create the role // TODO: do we give the role any database-level permissions? -func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) error { +func Create(ctx context.Context, db *sql.DB, role DatabaseRole) error { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while creating role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while creating role %s with role reconciler: %w", role.Name, err) } var query strings.Builder @@ -150,7 +139,7 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err // NOTE: defensively we might think of doing CREATE ... IF EXISTS // but at least during development, we want to catch the error // Even after, this may be "the kubernetes way" - if _, err := sm.superUserDB.ExecContext(ctx, query.String()); err != nil { + if _, err := db.ExecContext(ctx, query.String()); err != nil { return wrapErr(err) } @@ -159,7 +148,7 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err query.WriteString(fmt.Sprintf("COMMENT ON ROLE %s IS %s", pgx.Identifier{role.Name}.Sanitize(), pq.QuoteLiteral(role.Comment))) - if _, err := sm.superUserDB.ExecContext(ctx, query.String()); err != nil { + if _, err := db.ExecContext(ctx, query.String()); err != nil { return wrapErr(err) } } @@ -168,16 +157,16 @@ func (sm PostgresRoleManager) Create(ctx context.Context, role DatabaseRole) err } // Delete the role -func (sm PostgresRoleManager) Delete(ctx context.Context, role DatabaseRole) error { +func Delete(ctx context.Context, db *sql.DB, role DatabaseRole) error { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while deleting role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while deleting role %s with role reconciler: %w", role.Name, err) } query := fmt.Sprintf("DROP ROLE %s", pgx.Identifier{role.Name}.Sanitize()) contextLog.Debug("Dropping", "query", query) - _, err := sm.superUserDB.ExecContext(ctx, query) + _, err := db.ExecContext(ctx, query) if err != nil { return wrapErr(err) } @@ -187,18 +176,18 @@ func (sm PostgresRoleManager) Delete(ctx context.Context, role DatabaseRole) err // GetLastTransactionID get the last xmin for the role, to help keep track of // whether the role has been changed in on the Database since last reconciliation -func (sm PostgresRoleManager) GetLastTransactionID(ctx context.Context, 
role DatabaseRole) (int64, error) { +func GetLastTransactionID(ctx context.Context, db *sql.DB, role DatabaseRole) (int64, error) { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while getting last xmin for role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while getting last xmin for role %s with role reconciler: %w", role.Name, err) } var xmin int64 - err := sm.superUserDB.QueryRowContext(ctx, + err := db.QueryRowContext(ctx, `SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1`, role.Name).Scan(&xmin) - if err == sql.ErrNoRows { + if errors.Is(err, sql.ErrNoRows) { return 0, wrapErr(err) } if err != nil { @@ -209,17 +198,17 @@ func (sm PostgresRoleManager) GetLastTransactionID(ctx context.Context, role Dat } // UpdateComment of the role -func (sm PostgresRoleManager) UpdateComment(ctx context.Context, role DatabaseRole) error { +func UpdateComment(ctx context.Context, db *sql.DB, role DatabaseRole) error { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while updating comment for role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while updating comment for role %s with role reconciler: %w", role.Name, err) } query := fmt.Sprintf("COMMENT ON ROLE %s IS %s", pgx.Identifier{role.Name}.Sanitize(), pq.QuoteLiteral(role.Comment)) contextLog.Debug("Updating comment", "query", query) - _, err := sm.superUserDB.ExecContext(ctx, query) + _, err := db.ExecContext(ctx, query) if err != nil { return wrapErr(err) } @@ -232,8 +221,9 @@ func (sm PostgresRoleManager) UpdateComment(ctx context.Context, role DatabaseRo // IMPORTANT: the various REVOKE and GRANT commands that may be required to // reconcile the role will be done in a single transaction. 
So, if any one // of them fails, the role will not get updated -func (sm PostgresRoleManager) UpdateMembership( +func UpdateMembership( ctx context.Context, + db *sql.DB, role DatabaseRole, rolesToGrant []string, rolesToRevoke []string, @@ -241,7 +231,7 @@ func (sm PostgresRoleManager) UpdateMembership( contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while updating memberships for role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while updating memberships for role %s with role reconciler: %w", role.Name, err) } if len(rolesToRevoke)+len(rolesToGrant) == 0 { contextLog.Debug("No membership change query to execute for role") @@ -261,20 +251,20 @@ func (sm PostgresRoleManager) UpdateMembership( ) } - tx, err := sm.superUserDB.BeginTx(ctx, nil) + tx, err := db.BeginTx(ctx, nil) if err != nil { return wrapErr(err) } defer func() { rollbackErr := tx.Rollback() - if rollbackErr != nil && rollbackErr != sql.ErrTxDone { + if rollbackErr != nil && !errors.Is(rollbackErr, sql.ErrTxDone) { contextLog.Error(rollbackErr, "rolling back transaction") } }() for _, sqlQuery := range queries { contextLog.Debug("Executing query", "sqlQuery", sqlQuery) - if _, err := sm.superUserDB.ExecContext(ctx, sqlQuery); err != nil { + if _, err := db.ExecContext(ctx, sqlQuery); err != nil { contextLog.Error(err, "executing query", "sqlQuery", sqlQuery, "err", err) return wrapErr(err) } @@ -283,14 +273,11 @@ func (sm PostgresRoleManager) UpdateMembership( } // GetParentRoles get the in roles of this role -func (sm PostgresRoleManager) GetParentRoles( - ctx context.Context, - role DatabaseRole, -) ([]string, error) { +func GetParentRoles(ctx context.Context, db *sql.DB, role DatabaseRole) ([]string, error) { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Trace("Invoked", "role", role) wrapErr := func(err error) error { - return fmt.Errorf("while getting parents for role %s with DRM: %w", role.Name, err) + return fmt.Errorf("while getting parents for role %s with role reconciler: %w", role.Name, err) } query := `SELECT mem.inroles FROM pg_catalog.pg_authid as auth @@ -301,8 +288,8 @@ func (sm PostgresRoleManager) GetParentRoles( WHERE rolname = $1` contextLog.Debug("get parent role", "query", query) var parentRoles pq.StringArray - err := sm.superUserDB.QueryRowContext(ctx, query, role.Name).Scan(&parentRoles) - if err == sql.ErrNoRows { + err := db.QueryRowContext(ctx, query, role.Name).Scan(&parentRoles) + if errors.Is(err, sql.ErrNoRows) { return nil, wrapErr(err) } if err != nil { @@ -364,9 +351,7 @@ func appendRoleOptions(role DatabaseRole, query *strings.Builder) { query.WriteString(fmt.Sprintf(" CONNECTION LIMIT %d", role.ConnectionLimit)) } -func appendPasswordOption(role DatabaseRole, - query *strings.Builder, -) { +func appendPasswordOption(role DatabaseRole, query *strings.Builder) { switch { case role.ignorePassword: // Postgres may allow to set the VALID UNTIL of a role independently of diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go index 60fdbbe99f..01f3dd1dc9 100644 --- a/internal/management/controller/roles/postgres_test.go +++ b/internal/management/controller/roles/postgres_test.go @@ -127,37 +127,18 @@ var _ = Describe("Postgres RoleManager implementation test", func() { wantedRoleWithDefaultConnectionLimit.Name) wantedRoleCommentStmt := fmt.Sprintf( - "COMMENT ON ROLE \"%s\" IS 
%s", + wantedRoleCommentTpl, wantedRole.Name, pq.QuoteLiteral(wantedRole.Comment)) wantedRoleExpectedAltStmt := fmt.Sprintf( "ALTER ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 2 ", wantedRole.Name) unWantedRoleExpectedDelStmt := fmt.Sprintf("DROP ROLE \"%s\"", unWantedRole.Name) - expectedSelStmt := `SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, - rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls, - pg_catalog.shobj_description(auth.oid, 'pg_authid') as comment, auth.xmin, - mem.inroles - FROM pg_catalog.pg_authid as auth - LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member - ) mem ON member = oid - WHERE rolname not like 'pg\_%'` - - expectedMembershipStmt := `SELECT mem.inroles - FROM pg_catalog.pg_authid as auth - LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member - ) mem ON member = oid - WHERE rolname = $1` // Testing List It("List can read the list of roles from the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) testDate := time.Date(2023, 4, 4, 0, 0, 0, 0, time.UTC) @@ -182,7 +163,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() { }, false, []byte("This is streaming_replica user"), 22, []byte(`{"role1","role2"}`)) mock.ExpectQuery(expectedSelStmt).WillReturnRows(rows) mock.ExpectExec("CREATE ROLE foo").WillReturnResult(sqlmock.NewResult(11, 1)) - roles, err := prm.List(ctx) + roles, err := List(ctx, db) Expect(err).ShouldNot(HaveOccurred()) Expect(roles).To(HaveLen(3)) password1 := sql.NullString{ @@ -231,46 +212,42 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("List returns error if there is a problem with the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectQuery(expectedSelStmt).WillReturnError(dbError) - roles, err := prm.List(ctx) + roles, err := List(ctx, db) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(BeEquivalentTo("while listing DB roles for DRM: Kaboom")) + Expect(err.Error()).To(BeEquivalentTo("while listing DB roles for role reconciler: Kaboom")) Expect(roles).To(BeEmpty()) }) // Testing Create It("Create will send a correct CREATE to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) mock.ExpectExec(wantedRoleCommentStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Create(ctx, internalWantedRole.toDatabaseRole()) + err = Create(ctx, db, internalWantedRole.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("Create will return error if there is a problem creating the role in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectExec(wantedRoleExpectedCrtStmt). 
WillReturnError(dbError) - err = prm.Create(ctx, internalWantedRole.toDatabaseRole()) + err = Create(ctx, db, internalWantedRole.toDatabaseRole()) Expect(err).To(HaveOccurred()) Expect(errors.Unwrap(err)).To(BeEquivalentTo(dbError)) }) It("Create will send a correct CREATE with password to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleWithPassExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) @@ -281,13 +258,12 @@ var _ = Describe("Postgres RoleManager implementation test", func() { // In this unit test we are not testing the retrieval of secrets, so let's // fetch the password content by hand dbRole.password = sql.NullString{Valid: true, String: "myPassword"} - err = prm.Create(ctx, dbRole) + err = Create(ctx, db, dbRole) Expect(err).ShouldNot(HaveOccurred()) }) It("Create will send a correct CREATE with perpetual password to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleWithoutValidUntilExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) @@ -300,32 +276,30 @@ var _ = Describe("Postgres RoleManager implementation test", func() { // In this unit test we are not testing the retrieval of secrets, so let's // fetch the password content by hand dbRole.password = sql.NullString{Valid: true, String: "myPassword"} - err = prm.Create(ctx, dbRole) + err = Create(ctx, db, dbRole) Expect(err).ShouldNot(HaveOccurred()) }) It("Create will send a correct CREATE with password deletion to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleWithPassDeletionExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) mock.ExpectExec(wantedRoleCommentStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Create(ctx, + err = Create(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRoleWithPassDeletion}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("Create will send a correct CREATE with password deletion to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleWithDefaultConnectionLimitExpectedCrtStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Create(ctx, + err = Create(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRoleWithDefaultConnectionLimit}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) @@ -333,24 +307,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("Delete will send a correct DROP to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(unWantedRoleExpectedDelStmt). 
WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Delete(ctx, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole()) + err = Delete(ctx, db, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("Delete will return error if there is a problem deleting the role in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectExec(unWantedRoleExpectedDelStmt). WillReturnError(dbError) - err = prm.Delete(ctx, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole()) + err = Delete(ctx, db, roleConfigurationAdapter{RoleConfiguration: unWantedRole}.toDatabaseRole()) Expect(err).To(HaveOccurred()) coreErr := errors.Unwrap(err) Expect(coreErr).To(BeEquivalentTo(dbError)) @@ -359,23 +331,21 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("Update will send a correct ALTER to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleExpectedAltStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.Update(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) + err = Update(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("Update will return error if there is a problem updating the role in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectExec(wantedRoleExpectedAltStmt). WillReturnError(dbError) - err = prm.Update(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) + err = Update(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) Expect(err).To(HaveOccurred()) Expect(errors.Is(err, dbError)).To(BeTrue()) }) @@ -384,24 +354,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("UpdateComment will send a correct COMMENT to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectExec(wantedRoleCommentStmt). WillReturnResult(sqlmock.NewResult(2, 3)) - err = prm.UpdateComment(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) + err = UpdateComment(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) Expect(err).ShouldNot(HaveOccurred()) }) It("UpdateComment will return error if there is a problem updating the role in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) dbError := errors.New("Kaboom") mock.ExpectExec(wantedRoleCommentStmt). 
WillReturnError(dbError) - err = prm.UpdateComment(ctx, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) + err = UpdateComment(ctx, db, roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole()) Expect(err).To(HaveOccurred()) Expect(errors.Is(err, dbError)).To(BeTrue()) }) @@ -409,7 +377,6 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("GetParentRoles will return the roles a given role belongs to", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) rows := sqlmock.NewRows([]string{ "inroles", @@ -417,7 +384,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() { AddRow([]byte(`{"role1","role2"}`)) mock.ExpectQuery(expectedMembershipStmt).WithArgs("foo").WillReturnRows(rows) - roles, err := prm.GetParentRoles(ctx, DatabaseRole{Name: "foo"}) + roles, err := GetParentRoles(ctx, db, DatabaseRole{Name: "foo"}) Expect(err).ShouldNot(HaveOccurred()) Expect(roles).To(HaveLen(2)) Expect(roles).To(ConsistOf("role1", "role2")) @@ -426,10 +393,9 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("GetParentRoles will error if there is a problem querying the database", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) mock.ExpectQuery(expectedMembershipStmt).WithArgs("foo").WillReturnError(fmt.Errorf("kaboom")) - roles, err := prm.GetParentRoles(ctx, DatabaseRole{Name: "foo"}) + roles, err := GetParentRoles(ctx, db, DatabaseRole{Name: "foo"}) Expect(err).Should(HaveOccurred()) Expect(roles).To(BeEmpty()) }) @@ -437,7 +403,6 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("UpdateMembership will send correct GRANT and REVOKE statements to the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) expectedMembershipExecs := []string{ `GRANT "pg_monitor" TO "foo"`, @@ -454,14 +419,13 @@ var _ = Describe("Postgres RoleManager implementation test", func() { mock.ExpectCommit() - err = prm.UpdateMembership(ctx, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"}) + err = UpdateMembership(ctx, db, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"}) Expect(err).ShouldNot(HaveOccurred()) }) It("UpdateMembership will roll back if there is an error in the DB", func(ctx context.Context) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) okMembership := `GRANT "pg_monitor" TO "foo"` badMembership := `GRANT "quux" TO "foo"` @@ -474,7 +438,7 @@ var _ = Describe("Postgres RoleManager implementation test", func() { mock.ExpectRollback() - err = prm.UpdateMembership(ctx, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"}) + err = UpdateMembership(ctx, db, DatabaseRole{Name: "foo"}, []string{"pg_monitor", "quux"}, []string{"bar"}) Expect(err).Should(HaveOccurred()) }) @@ -551,23 +515,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() { It("Getting the proper TransactionID per rol", func(ctx SpecContext) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) 
Expect(err).ToNot(HaveOccurred()) - prm := NewPostgresRoleManager(db) rows := mock.NewRows([]string{"xmin"}) lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1" dbRole := roleConfigurationAdapter{RoleConfiguration: wantedRole}.toDatabaseRole() mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(errors.New("Kaboom")) - _, err = prm.GetLastTransactionID(ctx, dbRole) + _, err = GetLastTransactionID(ctx, db, dbRole) Expect(err).To(HaveOccurred()) mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnError(sql.ErrNoRows) - _, err = prm.GetLastTransactionID(ctx, dbRole) + _, err = GetLastTransactionID(ctx, db, dbRole) Expect(err).To(HaveOccurred()) rows.AddRow("1321") mock.ExpectQuery(lastTransactionQuery).WithArgs("foo").WillReturnRows(rows) - transID, err := prm.GetLastTransactionID(ctx, dbRole) + transID, err := GetLastTransactionID(ctx, db, dbRole) Expect(err).ToNot(HaveOccurred()) Expect(transID).To(BeEquivalentTo(1321)) }) diff --git a/internal/management/controller/roles/reconciler.go b/internal/management/controller/roles/reconciler.go index 09190ab23e..af850c7f72 100644 --- a/internal/management/controller/roles/reconciler.go +++ b/internal/management/controller/roles/reconciler.go @@ -56,8 +56,7 @@ func Reconcile( } contextLogger.Debug("getting the managed roles status") - roleManager := NewPostgresRoleManager(db) - rolesInDB, err := roleManager.List(ctx) + rolesInDB, err := List(ctx, db) if err != nil { return reconcile.Result{}, err } diff --git a/internal/management/controller/roles/reconciler_test.go b/internal/management/controller/roles/reconciler_test.go index a126b73ef4..8e49d9a692 100644 --- a/internal/management/controller/roles/reconciler_test.go +++ b/internal/management/controller/roles/reconciler_test.go @@ -53,7 +53,7 @@ var _ = Describe("Role reconciler test", func() { }, }, } - pgStringError := "while listing DB roles for DRM: " + + pgStringError := "while listing DB roles for role reconciler: " + "failed to connect to `user=postgres database=postgres`: " + "/controller/run/.s.PGSQL.5432 (/controller/run): " + "dial error: dial unix /controller/run/.s.PGSQL.5432: connect: no such file or directory" diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go index 1eed8f037d..58c127da00 100644 --- a/internal/management/controller/roles/runnable.go +++ b/internal/management/controller/roles/runnable.go @@ -49,12 +49,21 @@ const ( roleUpdateMemberships roleAction = "UPDATE_MEMBERSHIPS" ) +type instanceInterface interface { + GetSuperUserDB() (*sql.DB, error) + IsPrimary() (bool, error) + RoleSynchronizerChan() <-chan *apiv1.ManagedConfiguration + IsServerHealthy() error + GetClusterName() string + GetNamespaceName() string +} + // A RoleSynchronizer is a Kubernetes manager.Runnable // that makes sure the Roles in the PostgreSQL databases are in sync with the spec // // c.f. 
https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#Runnable type RoleSynchronizer struct { - instance *postgres.Instance + instance instanceInterface client client.Client } @@ -130,12 +139,6 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed return nil } - superUserDB, err := sr.instance.GetSuperUserDB() - if err != nil { - return fmt.Errorf("while reconciling managed roles: %w", err) - } - roleManager := NewPostgresRoleManager(superUserDB) - var remoteCluster apiv1.Cluster if err = sr.client.Get(ctx, types.NamespacedName{ Name: sr.instance.GetClusterName(), @@ -148,7 +151,11 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed if rolePasswords == nil { rolePasswords = map[string]apiv1.PasswordState{} } - appliedState, irreconcilableRoles, err := sr.synchronizeRoles(ctx, roleManager, config, rolePasswords) + superUserDB, err := sr.instance.GetSuperUserDB() + if err != nil { + return fmt.Errorf("while getting superuser connection: %w", err) + } + appliedState, irreconcilableRoles, err := sr.synchronizeRoles(ctx, superUserDB, config, rolePasswords) if err != nil { return fmt.Errorf("while syncrhonizing managed roles: %w", err) } @@ -174,9 +181,13 @@ func getRoleNames(roles []roleConfigurationAdapter) []string { } // synchronizeRoles aligns roles in the database to the spec +// It returns +// - the PasswordState for any updated roles +// - any roles that had expectable postgres errors +// - any unexpected error func (sr *RoleSynchronizer) synchronizeRoles( ctx context.Context, - roleManager RoleManager, + db *sql.DB, config *apiv1.ManagedConfiguration, storedPasswordState map[string]apiv1.PasswordState, ) (map[string]apiv1.PasswordState, map[string][]string, error) { @@ -185,22 +196,18 @@ func (sr *RoleSynchronizer) synchronizeRoles( if err != nil { return nil, nil, err } - rolesInDB, err := roleManager.List(ctx) + rolesInDB, err := List(ctx, db) if err != nil { return nil, nil, err } rolesByAction := evaluateNextRoleActions( ctx, config, rolesInDB, storedPasswordState, latestSecretResourceVersion) + + passwordStates, irreconcilableRoles, err := sr.applyRoleActions(ctx, db, rolesByAction) if err != nil { - return nil, nil, fmt.Errorf("while syncrhonizing managed roles: %w", err) + return nil, nil, err } - passwordStates, irreconcilableRoles := sr.applyRoleActions( - ctx, - roleManager, - rolesByAction, - ) - // Merge the status from database into spec. We should keep all the status // otherwise in the next loop the user without status will be marked as need update for role, stateInDatabase := range passwordStates { @@ -213,31 +220,33 @@ func (sr *RoleSynchronizer) synchronizeRoles( // It returns the apiv1.PasswordState for each role, as well as a map of roles that // cannot be reconciled for expectable errors, e.g. dropping a role owning content // -// NOTE: applyRoleActions will not error out if a single role operation fails. -// This is designed so that a role configuration that cannot be honored by PostgreSQL -// cannot stop the reconciliation loop and prevent other roles from being applied +// NOTE: applyRoleActions will carry on after an expectable error, i.e. an error +// due to an invalid request for postgres. This is so that other actions will not +// be blocked by a user error. +// It will, however, error out on unexpected errors. 
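As a minimal sketch of the split described in the note above: an "expectable" error is a PostgreSQL error whose SQLSTATE flags a user-level problem, and anything else is unexpected. The helper name and the exact code set below are illustrative assumptions only; in this package the actual classification is done by `parseRoleError`.

```go
// Minimal sketch, assuming "errors" and "github.com/jackc/pgx/v5/pgconn"
// are imported. Expectable errors are collected per role so reconciliation
// can carry on; anything else aborts the loop and is returned to the caller.
func isExpectableRoleError(err error) bool {
	var pgErr *pgconn.PgError
	if !errors.As(err, &pgErr) {
		return false // not a PostgreSQL error at all: unexpected
	}
	switch pgErr.Code {
	case "2BP01", // dependent_objects_still_exist, e.g. dropping a role that owns a database
		"0LP01", // invalid_grant_operation
		"42704": // undefined_object, e.g. granting a role that does not exist
		return true
	default:
		return false
	}
}
```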
func (sr *RoleSynchronizer) applyRoleActions( ctx context.Context, - roleManager RoleManager, + db *sql.DB, rolesByAction rolesByAction, -) (map[string]apiv1.PasswordState, map[string][]string) { +) (map[string]apiv1.PasswordState, map[string][]string, error) { contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Debug("applying role actions") irreconcilableRoles := make(map[string][]string) appliedChanges := make(map[string]apiv1.PasswordState) - handleRoleError := func(errToEvaluate error, roleName string, action roleAction) { + handleRoleError := func(errToEvaluate error, roleName string, action roleAction) error { // log unexpected errors, collect expectable PostgreSQL errors if errToEvaluate == nil { - return + return nil } roleError, err := parseRoleError(errToEvaluate, roleName, action) if err != nil { contextLog.Error(err, "while performing "+string(action), "role", roleName) - return + return err } irreconcilableRoles[roleName] = append(irreconcilableRoles[roleName], roleError.Error()) + return nil } for action, roles := range rolesByAction { @@ -251,44 +260,48 @@ func (sr *RoleSynchronizer) applyRoleActions( "roles", getRoleNames(roles), "action", action) for _, role := range roles { + var ( + err error + appliedState apiv1.PasswordState + grants, revokes []string + ) switch action { case roleCreate, roleUpdate: - appliedState, err := sr.applyRoleCreateUpdate(ctx, roleManager, role, action) + appliedState, err = sr.applyRoleCreateUpdate(ctx, db, role, action) if err == nil { appliedChanges[role.Name] = appliedState } - handleRoleError(err, role.Name, action) case roleDelete: - err := roleManager.Delete(ctx, role.toDatabaseRole()) - handleRoleError(err, role.Name, action) + err = Delete(ctx, db, role.toDatabaseRole()) case roleSetComment: // NOTE: adding/updating a comment on a role does not alter its TransactionID - err := roleManager.UpdateComment(ctx, role.toDatabaseRole()) - handleRoleError(err, role.Name, action) + err = UpdateComment(ctx, db, role.toDatabaseRole()) case roleUpdateMemberships: // NOTE: revoking / granting to a role does not alter its TransactionID dbRole := role.toDatabaseRole() - grants, revokes, err := getRoleMembershipDiff(ctx, roleManager, role, dbRole) - if err != nil { - contextLog.Error(err, "while performing "+string(action), "role", role.Name) - continue + grants, revokes, err = getRoleMembershipDiff(ctx, db, role, dbRole) + if unhandledErr := handleRoleError(err, role.Name, action); unhandledErr != nil { + return nil, nil, unhandledErr } - err = roleManager.UpdateMembership(ctx, dbRole, grants, revokes) - handleRoleError(err, role.Name, action) + + err = UpdateMembership(ctx, db, dbRole, grants, revokes) + } + if unhandledErr := handleRoleError(err, role.Name, action); unhandledErr != nil { + return nil, nil, unhandledErr } } } - return appliedChanges, irreconcilableRoles + return appliedChanges, irreconcilableRoles, nil } func getRoleMembershipDiff( ctx context.Context, - roleManager RoleManager, + db *sql.DB, role roleConfigurationAdapter, dbRole DatabaseRole, ) ([]string, []string, error) { - inRoleInDB, err := roleManager.GetParentRoles(ctx, dbRole) + inRoleInDB, err := GetParentRoles(ctx, db, dbRole) if err != nil { return nil, nil, err } @@ -302,7 +315,7 @@ func getRoleMembershipDiff( // Returns the PasswordState, as well as any error encountered func (sr *RoleSynchronizer) applyRoleCreateUpdate( ctx context.Context, - roleManager RoleManager, + db *sql.DB, role roleConfigurationAdapter, action roleAction, ) 
(apiv1.PasswordState, error) { @@ -332,15 +345,15 @@ func (sr *RoleSynchronizer) applyRoleCreateUpdate( var err error switch action { case roleCreate: - err = roleManager.Create(ctx, databaseRole) + err = Create(ctx, db, databaseRole) case roleUpdate: - err = roleManager.Update(ctx, databaseRole) + err = Update(ctx, db, databaseRole) } if err != nil { return apiv1.PasswordState{}, err } - transactionID, err := roleManager.GetLastTransactionID(ctx, databaseRole) + transactionID, err := GetLastTransactionID(ctx, db, databaseRole) if err != nil { return apiv1.PasswordState{}, err } diff --git a/internal/management/controller/roles/runnable_test.go b/internal/management/controller/roles/runnable_test.go index 4ba41763c0..370ac6bab2 100644 --- a/internal/management/controller/roles/runnable_test.go +++ b/internal/management/controller/roles/runnable_test.go @@ -18,11 +18,17 @@ package roles import ( "context" + "database/sql" "fmt" + "time" + "github.com/DATA-DOG/go-sqlmock" "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" + "github.com/lib/pq" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -33,241 +39,87 @@ import ( . "github.com/onsi/gomega" ) -type funcCall struct{ verb, roleName string } - -type mockRoleManager struct { - roles map[string]DatabaseRole - callHistory []funcCall -} - -func (m *mockRoleManager) List(_ context.Context) ([]DatabaseRole, error) { - m.callHistory = append(m.callHistory, funcCall{"list", ""}) - re := make([]DatabaseRole, len(m.roles)) - i := 0 - for _, r := range m.roles { - re[i] = r - i++ - } - return re, nil -} - -func (m *mockRoleManager) Update( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"update", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to update unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManager) UpdateComment( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"updateComment", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to update comment of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManager) Create( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"create", role.Name}) - _, found := m.roles[role.Name] - if found { - return fmt.Errorf("tring to create existing role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManager) Delete( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"delete", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to delete unknown role: %s", role.Name) - } - delete(m.roles, role.Name) - return nil -} - -func (m *mockRoleManager) GetLastTransactionID(_ context.Context, _ DatabaseRole) (int64, error) { - return 0, nil -} - -func (m *mockRoleManager) UpdateMembership( - _ context.Context, - role DatabaseRole, - _ []string, - _ []string, -) error { - m.callHistory = append(m.callHistory, funcCall{"updateMembership", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("trying to update Role Members of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} 
- -func (m *mockRoleManager) GetParentRoles(_ context.Context, role DatabaseRole) ([]string, error) { - m.callHistory = append(m.callHistory, funcCall{"getParentRoles", role.Name}) - _, found := m.roles[role.Name] - if !found { - return nil, fmt.Errorf("trying to get parent of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil, nil -} - -// mock.ExpectExec(unWantedRoleExpectedDelStmt). -// WillReturnError(&pgconn.PgError{Code: "2BP01"}) - -type mockRoleManagerWithError struct { - roles map[string]DatabaseRole - callHistory []funcCall -} - -func (m *mockRoleManagerWithError) List(_ context.Context) ([]DatabaseRole, error) { - m.callHistory = append(m.callHistory, funcCall{"list", ""}) - re := make([]DatabaseRole, len(m.roles)) - i := 0 - for _, r := range m.roles { - re[i] = r - i++ - } - return re, nil -} - -func (m *mockRoleManagerWithError) Update( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"update", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to update unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil -} - -func (m *mockRoleManagerWithError) UpdateComment( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"updateComment", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to update comment of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil +type fakeInstanceData struct { + *postgres.Instance + db *sql.DB } -func (m *mockRoleManagerWithError) Create( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"create", role.Name}) - _, found := m.roles[role.Name] - if found { - return fmt.Errorf("tring to create existing role: %s", role.Name) - } - m.roles[role.Name] = role - return nil +func (f *fakeInstanceData) GetSuperUserDB() (*sql.DB, error) { + return f.db, nil } -func (m *mockRoleManagerWithError) Delete( - _ context.Context, role DatabaseRole, -) error { - m.callHistory = append(m.callHistory, funcCall{"delete", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("tring to delete unknown role: %s", role.Name) - } - return fmt.Errorf("could not delete role 'foo': %w", - &pgconn.PgError{ - Code: "2BP01", Detail: "owner of database edbDatabase", - Message: `role "dante" cannot be dropped because some objects depend on it`, +var _ = Describe("Role synchronizer tests", func() { + var ( + db *sql.DB + mock sqlmock.Sqlmock + err error + roleSynchronizer RoleSynchronizer + ) + + BeforeEach(func() { + db, mock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + DeferCleanup(func() { + Expect(mock.ExpectationsWereMet()).To(Succeed()) }) -} - -func (m *mockRoleManagerWithError) GetLastTransactionID(_ context.Context, _ DatabaseRole) (int64, error) { - return 0, nil -} - -func (m *mockRoleManagerWithError) UpdateMembership( - _ context.Context, - role DatabaseRole, - _ []string, - _ []string, -) error { - m.callHistory = append(m.callHistory, funcCall{"updateMembership", role.Name}) - _, found := m.roles[role.Name] - if !found { - return fmt.Errorf("trying to update Role Members of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return &pgconn.PgError{Code: "42704", Message: "unknown role 'blah'"} -} - -func (m *mockRoleManagerWithError) GetParentRoles(_ context.Context, role DatabaseRole) 
([]string, error) { - m.callHistory = append(m.callHistory, funcCall{"getParentRoles", role.Name}) - _, found := m.roles[role.Name] - if !found { - return nil, fmt.Errorf("trying to get parent of unknown role: %s", role.Name) - } - m.roles[role.Name] = role - return nil, nil -} -var _ = Describe("Role synchronizer tests", func() { - roleSynchronizer := RoleSynchronizer{ - instance: postgres.NewInstance().WithNamespace("myPod"), - } + testDate := time.Date(2023, 4, 4, 0, 0, 0, 0, time.UTC) + + rowsInMockDatabase := sqlmock.NewRows([]string{ + "rolname", "rolsuper", "rolinherit", "rolcreaterole", "rolcreatedb", + "rolcanlogin", "rolreplication", "rolconnlimit", "rolpassword", "rolvaliduntil", "rolbypassrls", "comment", + "xmin", "inroles", + }). + AddRow("postgres", true, false, true, true, true, false, -1, []byte("12345"), + nil, false, []byte("This is postgres user"), 11, []byte("{}")). + AddRow("streaming_replica", false, false, true, true, false, true, 10, []byte("54321"), + pgtype.Timestamp{ + Valid: true, + Time: testDate, + InfinityModifier: pgtype.Finite, + }, false, []byte("This is streaming_replica user"), 22, []byte(`{"role1","role2"}`)). + AddRow("role_to_ignore", true, false, true, true, true, false, -1, []byte("12345"), + nil, false, []byte("This is a custom role in the DB"), 11, []byte("{}")). + AddRow("role_to_test1", true, true, false, false, false, false, -1, []byte("12345"), + nil, false, []byte("This is a role to test with"), 11, []byte("{}")). + AddRow("role_to_test2", true, true, false, false, false, false, -1, []byte("12345"), + nil, false, []byte("This is a role to test with"), 11, []byte("{inrole}")) + mock.ExpectQuery(expectedSelStmt).WillReturnRows(rowsInMockDatabase) + + roleSynchronizer = RoleSynchronizer{ + instance: &fakeInstanceData{ + Instance: postgres.NewInstance().WithNamespace("default"), + db: db, + }, + } + }) When("role configurations are realizable", func() { It("it will Create ensure:present roles in spec missing from DB", func(ctx context.Context) { + mock.ExpectExec("CREATE ROLE \"foo_bar\" NOBYPASSRLS NOCREATEDB NOCREATEROLE INHERIT " + + "NOLOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 0"). 
+ WillReturnResult(sqlmock.NewResult(11, 1)) managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ - { - Name: "edb_test", - Ensure: apiv1.EnsurePresent, - }, { Name: "foo_bar", Ensure: apiv1.EnsurePresent, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + rows := mock.NewRows([]string{"xmin"}).AddRow("12") + lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1" + mock.ExpectQuery(lastTransactionQuery).WithArgs("foo_bar").WillReturnRows(rows) + passwordState, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, + map[string]apiv1.PasswordState{}) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - []funcCall{ - {"list", ""}, - {"create", "edb_test"}, - {"create", "foo_bar"}, + Expect(rolesWithErrors).To(BeEmpty()) + Expect(passwordState).To(BeEquivalentTo(map[string]apiv1.PasswordState{ + "foo_bar": { + TransactionID: 12, + SecretResourceVersion: "", }, - )) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"create", "edb_test"}, - funcCall{"create", "foo_bar"}, - )) + })) }) It("it will ignore ensure:absent roles in spec missing from DB", func(ctx context.Context) { @@ -279,324 +131,255 @@ var _ = Describe("Role synchronizer tests", func() { }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{}) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""})) }) - It("it will ignore DB roles that are not in spec", func(ctx context.Context) { + It("it will call the necessary grants to update membership", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Ensure: apiv1.EnsureAbsent, - }, - }, - } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "ignorezMoi": { - Name: "ignorezMoi", + Name: "role_to_test1", Superuser: true, + Inherit: ptr.To(true), + InRoles: []string{ + "role1", + "role2", + }, + Comment: "This is a role to test with", + ConnectionLimit: -1, }, }, } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + noParents := sqlmock.NewRows([]string{"inroles"}).AddRow([]byte(`{}`)) + mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test1").WillReturnRows(noParents) + mock.ExpectBegin() + expectedMembershipExecs := []string{ + `GRANT "role1" TO "role_to_test1"`, + `GRANT "role2" TO "role_to_test1"`, + } + + for _, ex := range expectedMembershipExecs { + mock.ExpectExec(ex). 
+ WillReturnResult(sqlmock.NewResult(2, 3)) + } + + mock.ExpectCommit() + + _, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above + }, + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""})) + Expect(rolesWithErrors).To(BeEmpty()) }) - It("it will call the updateMembership method", func(ctx context.Context) { - trueValue := true + It("it will call the necessary revokes to update membership", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Superuser: true, - Inherit: &trueValue, - InRoles: []string{ - "role1", - "role2", - }, + Name: "role_to_test2", + Superuser: true, + Inherit: ptr.To(true), + InRoles: []string{}, + Comment: "This is a role to test with", + ConnectionLimit: -1, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "edb_test": { - Name: "edb_test", - Superuser: true, - Inherit: true, - }, + rows := sqlmock.NewRows([]string{ + "inroles", + }). + AddRow([]byte(`{"foo"}`)) + mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test2").WillReturnRows(rows) + mock.ExpectBegin() + + mock.ExpectExec(`REVOKE "foo" FROM "role_to_test2"`). + WillReturnResult(sqlmock.NewResult(2, 3)) + + mock.ExpectCommit() + + _, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test2": { + TransactionID: 11, // defined in the mock query to the DB above }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""}, - funcCall{"getParentRoles", "edb_test"}, - funcCall{"updateMembership", "edb_test"})) + Expect(rolesWithErrors).To(BeEmpty()) }) It("it will call the updateComment method", func(ctx context.Context) { - trueValue := true managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Superuser: true, - Inherit: &trueValue, - Comment: "my comment", - }, - }, - } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "edb_test": { - Name: "edb_test", - Superuser: true, - Inherit: true, - Comment: "my tailor is rich", + Name: "role_to_test1", + Superuser: true, + Inherit: ptr.To(true), + Comment: "my comment", + ConnectionLimit: -1, }, }, } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + wantedRoleCommentStmt := fmt.Sprintf( + wantedRoleCommentTpl, + managedConf.Roles[0].Name, pq.QuoteLiteral(managedConf.Roles[0].Comment)) + mock.ExpectExec(wantedRoleCommentStmt).WillReturnResult(sqlmock.NewResult(2, 3)) + _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above + }, + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""}, - funcCall{"updateComment", "edb_test"})) }) It("it will no-op if the roles are reconciled", func(ctx context.Context) { - trueValue := true managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Superuser: true, - Inherit: &trueValue, + Name: "role_to_test1", + Superuser: true, + Inherit: ptr.To(true), + Comment: "This is a role to 
test with", + ConnectionLimit: -1, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "edb_test": { - Name: "edb_test", - Superuser: true, - Inherit: true, - }, + _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""})) }) It("it will Delete ensure:absent roles that are in the DB", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", + Name: "role_to_test1", Ensure: apiv1.EnsureAbsent, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_test": { - Name: "edb_test", - Superuser: true, - }, + roleDeletionStmt := fmt.Sprintf("DROP ROLE \"%s\"", "role_to_test1") + mock.ExpectExec(roleDeletionStmt).WillReturnResult(sqlmock.NewResult(2, 3)) + _, _, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"delete", "edb_test"}, - )) }) It("it will Update ensure:present roles that are in the DB but have different fields", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", - Ensure: apiv1.EnsurePresent, - CreateDB: true, - BypassRLS: true, + Name: "role_to_test1", + Superuser: false, + Inherit: ptr.To(false), + Comment: "This is a role to test with", + BypassRLS: true, + CreateRole: true, + Login: true, + ConnectionLimit: 2, }, }, } - rm := mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_test": { - Name: "edb_test", - Superuser: true, + alterStmt := fmt.Sprintf( + "ALTER ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION NOSUPERUSER CONNECTION LIMIT 2 ", + "role_to_test1") + mock.ExpectExec(alterStmt).WillReturnResult(sqlmock.NewResult(2, 3)) + rows := mock.NewRows([]string{"xmin"}).AddRow("12") + lastTransactionQuery := "SELECT xmin FROM pg_catalog.pg_authid WHERE rolname = $1" + mock.ExpectQuery(lastTransactionQuery).WithArgs("role_to_test1").WillReturnRows(rows) + passwordState, rolesWithErrors, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, + map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above }, - }, - } - _, _, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"update", "edb_test"}, - )) + Expect(rolesWithErrors).To(BeEmpty()) + Expect(passwordState).To(BeEquivalentTo(map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 12, + SecretResourceVersion: "", + }, + })) }) }) When("role configurations are unrealizable", func() { - It("it will record that updateMembership could not succeed", func(ctx 
context.Context) { - trueValue := true + It("it will carry on and capture postgres errors per role", func(ctx context.Context) { managedConf := apiv1.ManagedConfiguration{ Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", + Name: "role_to_test1", Superuser: true, - Inherit: &trueValue, + Inherit: ptr.To(true), InRoles: []string{ "role1", "role2", }, + Comment: "This is a role to test with", + ConnectionLimit: -1, }, - }, - } - rm := mockRoleManagerWithError{ - roles: map[string]DatabaseRole{ - "edb_test": { - Name: "edb_test", - Superuser: true, - Inherit: true, - }, - }, - } - _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) - Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf(funcCall{"list", ""}, - funcCall{"getParentRoles", "edb_test"}, - funcCall{"updateMembership", "edb_test"})) - Expect(unrealizable).To(HaveLen(1)) - Expect(unrealizable["edb_test"]).To(HaveLen(1)) - Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo( - "could not perform UPDATE_MEMBERSHIPS on role edb_test: unknown role 'blah'")) - }) - - It("it will record that Delete could not succeed", func(ctx context.Context) { - managedConf := apiv1.ManagedConfiguration{ - Roles: []apiv1.RoleConfiguration{ { - Name: "edb_test", + Name: "role_to_test2", Ensure: apiv1.EnsureAbsent, }, }, } - rm := mockRoleManagerWithError{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_test": { - Name: "edb_test", - Superuser: true, - }, - }, + + noParents := sqlmock.NewRows([]string{"inroles"}).AddRow([]byte(`{}`)) + mock.ExpectQuery(expectedMembershipStmt).WithArgs("role_to_test1").WillReturnRows(noParents) + mock.ExpectBegin() + + mock.ExpectExec(`GRANT "role1" TO "role_to_test1"`). + WillReturnResult(sqlmock.NewResult(2, 3)) + + impossibleGrantError := pgconn.PgError{ + Code: "0LP01", // 0LP01 -> invalid_grant_operation + Message: "unknown role 'role2'", } - _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) - Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"delete", "edb_test"}, - )) - Expect(unrealizable).To(HaveLen(1)) - Expect(unrealizable["edb_test"]).To(HaveLen(1)) - Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo( - "could not perform DELETE on role edb_test: owner of database edbDatabase")) - }) + mock.ExpectExec(`GRANT "role2" TO "role_to_test1"`). 
+ WillReturnError(&impossibleGrantError) - It("it will continue the synchronization even if it finds errors", func(ctx context.Context) { - trueValue := true - managedConf := apiv1.ManagedConfiguration{ - Roles: []apiv1.RoleConfiguration{ - { - Name: "edb_test", - Ensure: apiv1.EnsureAbsent, - }, - { - Name: "another_test", - Ensure: apiv1.EnsurePresent, - Superuser: true, - Inherit: &trueValue, - InRoles: []string{ - "role1", - "role2", - }, - }, - }, + mock.ExpectRollback() + + impossibleDeleteError := pgconn.PgError{ + Code: "2BP01", // 2BP01 -> dependent_objects_still_exist + Detail: "owner of database edbDatabase", } - rm := mockRoleManagerWithError{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_test": { - Name: "edb_test", - Superuser: true, - }, - "another_test": { - Name: "another_test", - Superuser: true, - Inherit: true, - }, + + roleDeletionStmt := fmt.Sprintf("DROP ROLE \"%s\"", "role_to_test2") + mock.ExpectExec(roleDeletionStmt).WillReturnError(&impossibleDeleteError) + + _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, db, &managedConf, map[string]apiv1.PasswordState{ + "role_to_test1": { + TransactionID: 11, // defined in the mock query to the DB above }, - } - _, unrealizable, err := roleSynchronizer.synchronizeRoles(ctx, &rm, &managedConf, map[string]apiv1.PasswordState{}) + }) + Expect(err).ShouldNot(HaveOccurred()) - Expect(rm.callHistory).To(ConsistOf( - funcCall{"list", ""}, - funcCall{"delete", "edb_test"}, - funcCall{"getParentRoles", "another_test"}, - funcCall{"updateMembership", "another_test"}, - )) Expect(unrealizable).To(HaveLen(2)) - Expect(unrealizable["edb_test"]).To(HaveLen(1)) - Expect(unrealizable["edb_test"][0]).To(BeEquivalentTo( - "could not perform DELETE on role edb_test: owner of database edbDatabase")) - Expect(unrealizable["another_test"]).To(HaveLen(1)) - Expect(unrealizable["another_test"][0]).To(BeEquivalentTo( - "could not perform UPDATE_MEMBERSHIPS on role another_test: unknown role 'blah'")) + Expect(unrealizable["role_to_test1"]).To(HaveLen(1)) + Expect(unrealizable["role_to_test1"][0]).To(BeEquivalentTo( + "could not perform UPDATE_MEMBERSHIPS on role role_to_test1: unknown role 'role2'")) + Expect(unrealizable["role_to_test2"]).To(HaveLen(1)) + Expect(unrealizable["role_to_test2"][0]).To(BeEquivalentTo( + "could not perform DELETE on role role_to_test2: owner of database edbDatabase")) }) }) }) -var _ = DescribeTable("Role status getter tests", - func(spec *apiv1.ManagedConfiguration, db mockRoleManager, expected map[string]apiv1.RoleStatus) { +var _ = DescribeTable("Role status tests", + func(spec *apiv1.ManagedConfiguration, roles []DatabaseRole, expected map[string]apiv1.RoleStatus) { ctx := context.TODO() - roles, err := db.List(ctx) - Expect(err).ToNot(HaveOccurred()) - statusMap := evaluateNextRoleActions(ctx, spec, roles, map[string]apiv1.PasswordState{ "roleWithChangedPassInSpec": { TransactionID: 101, @@ -637,17 +420,15 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "ensurePresent": { - Name: "ensurePresent", - Superuser: true, - Inherit: true, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "ensurePresent", + Superuser: true, + Inherit: true, }, }, map[string]apiv1.RoleStatus{ @@ -676,20 +457,18 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: 
map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "unwantedInDB": { - Name: "unwantedInDB", - Superuser: true, - }, - "drifted": { - Name: "drifted", - Superuser: false, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "unwantedInDB", + Superuser: true, + }, + { + Name: "drifted", + Superuser: false, }, }, map[string]apiv1.RoleStatus{ @@ -709,21 +488,19 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "edb_admin": { - Name: "edb_admin", - Superuser: true, - Inherit: true, - }, - "missingFromSpec": { - Name: "missingFromSpec", - Superuser: false, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "edb_admin", + Superuser: true, + Inherit: true, + }, + { + Name: "missingFromSpec", + Superuser: false, }, }, map[string]apiv1.RoleStatus{ @@ -743,18 +520,16 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "roleWithChangedPassInDB": { - Name: "roleWithChangedPassInDB", - Superuser: true, - transactionID: 102, - Inherit: true, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "roleWithChangedPassInDB", + Superuser: true, + transactionID: 102, + Inherit: true, }, }, map[string]apiv1.RoleStatus{ @@ -772,18 +547,16 @@ var _ = DescribeTable("Role status getter tests", }, }, }, - mockRoleManager{ - roles: map[string]DatabaseRole{ - "postgres": { - Name: "postgres", - Superuser: true, - }, - "roleWithChangedPassInSpec": { - Name: "roleWithChangedPassInSpec", - Superuser: true, - transactionID: 101, - Inherit: true, - }, + []DatabaseRole{ + { + Name: "postgres", + Superuser: true, + }, + { + Name: "roleWithChangedPassInSpec", + Superuser: true, + transactionID: 101, + Inherit: true, }, }, map[string]apiv1.RoleStatus{ diff --git a/internal/management/controller/roles/suite_test.go b/internal/management/controller/roles/suite_test.go index a6dd16bc1d..82061021e3 100644 --- a/internal/management/controller/roles/suite_test.go +++ b/internal/management/controller/roles/suite_test.go @@ -23,6 +23,29 @@ import ( . 
"github.com/onsi/gomega" ) +const ( + expectedSelStmt = `SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolconnlimit, rolpassword, rolvaliduntil, rolbypassrls, + pg_catalog.shobj_description(auth.oid, 'pg_authid') as comment, auth.xmin, + mem.inroles + FROM pg_catalog.pg_authid as auth + LEFT JOIN ( + SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member + FROM pg_auth_members GROUP BY member + ) mem ON member = oid + WHERE rolname not like 'pg\_%'` + + expectedMembershipStmt = `SELECT mem.inroles + FROM pg_catalog.pg_authid as auth + LEFT JOIN ( + SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member + FROM pg_auth_members GROUP BY member + ) mem ON member = oid + WHERE rolname = $1` + + wantedRoleCommentTpl = "COMMENT ON ROLE \"%s\" IS %s" +) + func TestReconciler(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Internal Management Controller Roles Reconciler Suite") From b4b47447b039d79fd4858cedb2bf60a5900ba9ab Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 7 Nov 2024 12:30:54 +0100 Subject: [PATCH 136/836] docs(kubectl-plugin): update formatting and version (#6026) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the formatting of the architecture list and update the version to match the current one. Signed-off-by: Marco Nenciarini Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- docs/src/kubectl-plugin.md | 220 +++++++++++++++++++------------------ hack/release.sh | 2 + 2 files changed, 113 insertions(+), 109 deletions(-) diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 35c66f2494..793fd706ff 100755 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -30,52 +30,53 @@ them in your systems. #### Debian packages -For example, let's install the 1.22.2 release of the plugin, for an Intel based +For example, let's install the 1.24.1 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. -``` sh -$ wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.22.1/kubectl-cnpg_1.22.2_linux_x86_64.deb +```sh +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.deb \ + --output-document kube-plugin.deb ``` -Then, install from the local file using `dpkg`: +Then, with superuser privileges, install from the local file using `dpkg`: -``` sh -$ dpkg -i kubectl-cnpg_1.22.2_linux_x86_64.deb -(Reading database ... 702524 files and directories currently installed.) -Preparing to unpack kubectl-cnpg_1.22.2_linux_x86_64.deb ... -Unpacking cnpg (1.22.2) over (1.22.2) ... -Setting up cnpg (1.22.2) .. +```console +$ sudo dpkg -i kube-plugin.deb +Selecting previously unselected package cnpg. +(Reading database ... 6688 files and directories currently installed.) +Preparing to unpack kube-plugin.deb ... +Unpacking cnpg (1.24.1) ... +Setting up cnpg (1.24.1) ... ``` #### RPM packages -As in the example for `.deb` packages, let's install the 1.22.2 release for an +As in the example for `.rpm` packages, let's install the 1.24.1 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. 
-``` sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.22.2/kubectl-cnpg_1.22.2_linux_x86_64.rpm \ +```sh +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.rpm \ --output kube-plugin.rpm ``` -Then install with `yum`, and you're ready to use: +Then, with superuser privileges, install with `yum`, and you're ready to use: -``` sh -$ yum --disablerepo=* localinstall kube-plugin.rpm -yum --disablerepo=* localinstall kube-plugin.rpm +```console +$ sudo yum --disablerepo=* localinstall kube-plugin.rpm Failed to set locale, defaulting to C.UTF-8 Dependencies resolved. ==================================================================================================== Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.22.2-1 @commandline 17 M + cnpg x86_64 1.24.1-1 @commandline 20 M Transaction Summary ==================================================================================================== Install 1 Package -Total size: 14 M -Installed size: 43 M +Total size: 20 M +Installed size: 78 M Is this ok [y/N]: y ``` @@ -126,19 +127,19 @@ CloudNativePG Plugin is currently built for the following operating system and architectures: * Linux - * amd64 - * arm 5/6/7 - * arm64 - * s390x - * ppc64le + * amd64 + * arm 5/6/7 + * arm64 + * s390x + * ppc64le * macOS - * amd64 - * arm64 + * amd64 + * arm64 * Windows - * 386 - * amd64 - * arm 5/6/7 - * arm64 + * 386 + * amd64 + * arm 5/6/7 + * arm64 ### Configuring auto-completion @@ -146,7 +147,7 @@ To configure auto-completion for the plugin, a helper shell script needs to be installed into your current PATH. Assuming the latter contains `/usr/local/bin`, this can be done with the following commands: -```shell +```sh cat > kubectl_complete-cnpg < ``` @@ -185,7 +186,7 @@ installation namespace, namespaces to watch, and so on. For details and available options, run: -```shell +```sh kubectl cnpg install generate --help ``` @@ -206,7 +207,7 @@ The main options are: An example of the `generate` command, which will generate a YAML manifest that will install the operator, is as follows: -```shell +```sh kubectl cnpg install generate \ -n king \ --version 1.23 \ @@ -246,11 +247,11 @@ cluster, including: from the `Current LSN` field in the instances status as it is taken at two different time intervals. -```shell +```sh kubectl cnpg status sandbox ``` -```shell +```output Cluster Summary Name: default/sandbox System ID: 7423474350493388827 @@ -276,19 +277,19 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.24.0 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.24.1 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or `-v` for short). 
The level of detail increases each time the flag is repeated: -```shell +```sh kubectl cnpg status sandbox --verbose ``` -```shell +```output Cluster Summary Name: default/sandbox System ID: 7423474350493388827 @@ -332,9 +333,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.24.0 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.24.1 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker ``` With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can @@ -347,13 +348,13 @@ The command also supports output in `yaml` and `json` format. The meaning of this command is to `promote` a pod in the cluster to primary, so you can start with maintenance work or test a switch-over situation in your cluster -```shell +```sh kubectl cnpg promote cluster-example cluster-example-2 ``` Or you can use the instance node number to promote -```shell +```sh kubectl cnpg promote cluster-example 2 ``` @@ -365,19 +366,19 @@ a TLS authentication certificate. To get a certificate, you need to provide a name for the secret to store the credentials, the cluster name, and a user for this certificate -```shell +```sh kubectl cnpg certificate cluster-cert --cnpg-cluster cluster-example --cnpg-user appuser ``` After the secret it's created, you can get it using `kubectl` -```shell +```sh kubectl get secret cluster-cert ``` And the content of the same in plain text using the following commands: -```shell +```sh kubectl get secret cluster-cert -o json | jq -r '.data | map(@base64d) | .[]' ``` @@ -394,7 +395,7 @@ The `kubectl cnpg restart` command can be used in two cases: the cluster's primary or deleting and recreating the pod if it is a replica. -```shell +```sh # this command will restart a whole cluster in a rollout fashion kubectl cnpg restart [clusterName] @@ -418,7 +419,7 @@ to cluster dependent objects, such as ConfigMaps containing custom monitoring qu The following command will reload all configurations for a given cluster: -```shell +```sh kubectl cnpg reload [cluster_name] ``` @@ -443,13 +444,13 @@ all the cluster in the list. If you want to set in maintenance all the PostgreSQL in your Kubernetes cluster, just need to write the following command: -```shell +```sh kubectl cnpg maintenance set --all-namespaces ``` And you'll have the list of all the cluster to update -```shell +```output The following are the new values for the clusters Namespace Cluster Name Maintenance reusePVC --------- ------------ ----------- -------- @@ -501,32 +502,32 @@ default time-stamped filename is created for the zip file. namespace as the clusters. E.g. 
the default installation namespace is cnpg-system -```shell +```sh kubectl cnpg report operator -n ``` results in -```shell +```output Successfully written report to "report_operator_.zip" (format: "yaml") ``` With the `-f` flag set: -```shell +```sh kubectl cnpg report operator -n -f reportRedacted.zip ``` Unzipping the file will produce a time-stamped top-level folder to keep the directory tidy: -```shell +```sh unzip reportRedacted.zip ``` will result in: -```shell +```output Archive: reportRedacted.zip creating: report_operator_/ creating: report_operator_/manifests/ @@ -542,7 +543,7 @@ Archive: reportRedacted.zip If you activated the `--logs` option, you'd see an extra subdirectory: -```shell +```output Archive: report_operator_.zip creating: report_operator_/operator-logs/ @@ -555,14 +556,14 @@ Archive: report_operator_.zip In all cases, it will also try to get the CURRENT operator logs. If current and previous logs are available, it will show them both. -``` json +```output ====== Begin of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.19.1","build":{"Version":"1.19.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` @@ -571,7 +572,7 @@ and `====== End …` guards, with no content inside. You can verify that the confidential information is REDACTED by default: -```shell +```sh cd report_operator_/manifests/ head cnpg-ca-secret.yaml ``` @@ -590,18 +591,18 @@ metadata: With the `-S` (`--stopRedaction`) option activated, secrets are shown: -```shell +```sh kubectl cnpg report operator -n -f reportNonRedacted.zip -S ``` You'll get a reminder that you're about to view confidential information: -```shell +```output WARNING: secret Redaction is OFF. Use it with caution Successfully written report to "reportNonRedacted.zip" (format: "yaml") ``` -```shell +```sh unzip reportNonRedacted.zip head cnpg-ca-secret.yaml ``` @@ -639,7 +640,7 @@ so the `-S` is disabled. Usage: -```shell +```sh kubectl cnpg report cluster [flags] ``` @@ -647,17 +648,17 @@ Note that, unlike the `operator` sub-command, for the `cluster` sub-command you need to provide the cluster name, and very likely the namespace, unless the cluster is in the default one. 
-```shell +```sh kubectl cnpg report cluster example -f report.zip -n example_namespace ``` and then: -```shell +```sh unzip report.zip ``` -```shell +```output Archive: report.zip creating: report_cluster_example_/ creating: report_cluster_example_/manifests/ @@ -669,21 +670,21 @@ Archive: report.zip Remember that you can use the `--logs` flag to add the pod and job logs to the ZIP. -```shell +```sh kubectl cnpg report cluster example -n example_namespace --logs ``` will result in: -```shell +```output Successfully written report to "report_cluster_example_.zip" (format: "yaml") ``` -```shell +```sh unzip report_cluster_.zip ``` -```shell +```output Archive: report_cluster_example_.zip creating: report_cluster_example_/ creating: report_cluster_example_/manifests/ @@ -718,7 +719,7 @@ the `-h` flag: `kubectl cnpg logs cluster -h` The `logs` command will display logs in JSON-lines format, unless the -`--timestamps` flag is used, in which case, a human readable timestamp will be +`--timestamps` flag is used, in which case, a human-readable timestamp will be prepended to each line. In this case, lines will no longer be valid JSON, and tools such as `jq` may not work as desired. @@ -741,7 +742,7 @@ The `--tail` flag can be used to specify how many log lines will be retrieved from each pod in the cluster. By default, the `logs cluster` sub-command will display all the logs from each pod in the cluster. If combined with the "follow" flag `-f`, the number of logs specified by `--tail` will be retrieved until the -current time, and and from then the new logs will be followed. +current time, and from then the new logs will be followed. NOTE: unlike other `cnpg` plugin commands, the `-f` is used to denote "follow" rather than specify a file. This keeps with the convention of `kubectl logs`, @@ -749,24 +750,24 @@ which takes `-f` to mean the logs should be followed. Usage: -```shell +```sh kubectl cnpg logs cluster [flags] ``` Using the `-f` option to follow: -```shell +```sh kubectl cnpg report cluster cluster-example -f ``` Using `--tail` option to display 3 lines from each pod and the `-f` option to follow: -```shell +```sh kubectl cnpg report cluster cluster-example -f --tail 3 ``` -``` json +```output {"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] LOG: ending log output to stderr","source":"/controller/log/postgres","logging_pod":"cluster-example-3"} {"level":"info","ts":"2023-06-30T13:37:33Z","logger":"postgres","msg":"2023-06-30 13:37:33.142 UTC [26] HINT: Future log output will go to log destination \"csvlog\".","source":"/controller/log/postgres","logging_pod":"cluster-example-3"} … @@ -775,8 +776,8 @@ kubectl cnpg report cluster cluster-example -f --tail 3 With the `-o` option omitted, and with `--output` specified: -``` sh -kubectl cnpg logs cluster cluster-example --output my-cluster.log +```console +$ kubectl cnpg logs cluster cluster-example --output my-cluster.log Successfully written logs to "my-cluster.log" ``` @@ -789,7 +790,7 @@ into a human-readable output, and attempts to sort the entries by timestamp. 
It can be used in combination with `kubectl cnpg logs cluster`, as shown in the following example: -``` sh +```console $ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty 2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting CloudNativePG Instance Manager 2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL @@ -802,7 +803,7 @@ Alternatively, it can be used in combination with other commands that produce CNPG logs in JSON format, such as `stern`, or `kubectl logs`, as in the following example: -``` sh +```console $ kubectl logs cluster-example-1 | kubectl cnpg logs pretty 2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Starting CloudNativePG Instance Manager 2024-10-15T17:35:00.336 INFO cluster-example-1 instance-manager Checking for free disk space for WALs before starting PostgreSQL @@ -816,7 +817,7 @@ to display logs for specific pods or loggers, or to filter logs by severity level. Here's an example: -``` sh +```console $ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty --pods cluster-example-1 --loggers postgres --log-level info 2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] LOG: redirecting log output to logging collector process 2024-10-15T17:35:00.509 INFO cluster-example-1 postgres 2024-10-15 17:35:00.509 UTC [29] HINT: Future log output will appear in directory "/controller/log"... @@ -833,7 +834,7 @@ mode. The sub-command will add a group separator line, `---`, at the end of each sorted group. The size of the grouping can be configured via the `--sorting-group-size` flag (default: 1000), as illustrated in the following example: -``` sh +```console $ kubectl cnpg logs cluster cluster-example | kubectl cnpg logs pretty --sorting-group-size=3 2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Starting CloudNativePG Instance Manager 2024-10-15T17:35:20.426 INFO cluster-example-2 instance-manager Checking for free disk space for WALs before starting PostgreSQL @@ -867,14 +868,14 @@ detached PVCs. Usage: -``` +```sh kubectl cnpg destroy [CLUSTER_NAME] [INSTANCE_ID] ``` The following example removes the `cluster-example-2` pod and the associated PVCs: -``` +```sh kubectl cnpg destroy cluster-example 2 ``` @@ -893,7 +894,7 @@ instance. You can hibernate a cluster with: -``` +```sh kubectl cnpg hibernate on ``` @@ -916,13 +917,13 @@ available status, including content from `pg_controldata`. In case of error the operator will not be able to revert the procedure. You can still force the operation with: -``` +```sh kubectl cnpg hibernate on cluster-example --force ``` A hibernated cluster can be resumed with: -``` +```sh kubectl cnpg hibernate off ``` @@ -930,7 +931,7 @@ Once the cluster has been hibernated, it's possible to show the last configuration and the status that PostgreSQL had after it was shut down. That can be done with: -``` +```sh kubectl cnpg hibernate status ``` @@ -939,7 +940,7 @@ kubectl cnpg hibernate status Pgbench can be run against an existing PostgreSQL cluster with following command: -``` +```sh kubectl cnpg pgbench -- --time 30 --client 1 --jobs 1 ``` @@ -950,7 +951,7 @@ details. fio can be run on an existing storage class with following command: -``` +```sh kubectl cnpg fio -n ``` @@ -963,20 +964,20 @@ an existing Postgres cluster by creating a new `Backup` resource. 
The following example requests an on-demand backup for a given cluster: -```shell +```sh kubectl cnpg backup [cluster_name] ``` or, if using volume snapshots: -```shell +```sh kubectl cnpg backup [cluster_name] -m volumeSnapshot ``` The created backup will be named after the request time: -```shell -kubectl cnpg backup cluster-example +```console +$ kubectl cnpg backup cluster-example backup/cluster-example-20230121002300 created ``` @@ -1002,8 +1003,8 @@ it from the actual pod. This means that you will be using the `postgres` user. As you will be connecting as `postgres` user, in production environments this method should be used with extreme care, by authorized personnel only. -```shell -kubectl cnpg psql cluster-example +```console +$ kubectl cnpg psql cluster-example psql (17.0 (Debian 17.0-1.pgdg110+1)) Type "help" for help. @@ -1014,8 +1015,9 @@ postgres=# By default, the command will connect to the primary instance. The user can select to work against a replica by using the `--replica` option: -```shell -kubectl cnpg psql --replica cluster-example +```console +$ kubectl cnpg psql --replica cluster-example + psql (17.0 (Debian 17.0-1.pgdg110+1)) Type "help" for help. @@ -1181,7 +1183,7 @@ to `source-cluster`. We can run: -``` sh +```sh kubectl cnpg publication create destination-cluster \ --external-cluster=source-cluster --all-tables ``` @@ -1191,7 +1193,7 @@ the SQL commands on the `destination-cluster`. Or instead, we can run: -``` sh +```sh kubectl cnpg publication create source-cluster \ --publication=app --all-tables ``` @@ -1276,7 +1278,7 @@ As in the section on publications, we have a `source-cluster` and a The following command: -``` sh +```sh kubectl cnpg subscription create destination-cluster \ --external-cluster=source-cluster \ --publication=app --subscription=app @@ -1350,7 +1352,7 @@ subscription, both called `app`, are already present. The following command will synchronize the sequences involved in the `app` subscription, from the source cluster into the destination cluster. -``` sh +```sh kubectl cnpg subscription sync-sequences destination-cluster \ --subscription=app ``` diff --git a/hack/release.sh b/hack/release.sh index 4b5802d83b..0c0fc0596b 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -106,6 +106,8 @@ sed -i -e "s@release-[0-9.]*/releases/cnpg-[0-9.]*.yaml@${branch}/releases/cnpg- -e "s@artifacts/release-[0-9.]*/@artifacts/${branch}/@g" \ docs/src/installation_upgrade.md +sed -i -e "s@1\.[0-9]\+\.[0-9]\+@${release_version}@g" docs/src/kubectl-plugin.md + CONFIG_TMP_DIR=$(mktemp -d) cp -r config/* "${CONFIG_TMP_DIR}" ( From 3cfb17d6df27657fe3dfbbbaf56a20142a5d25ef Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Thu, 7 Nov 2024 12:32:45 +0100 Subject: [PATCH 137/836] refactor: simplify replication slots code, clarify tests (#6003) - eliminate the "Postgres manager" and use `sql.DB` as parameters instead. - rewrite the unit tests with sqlmock. 
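To make the new test style concrete, here is a minimal, self-contained sketch of the sqlmock pattern adopted throughout the rewritten tests — an illustration, not code from this patch. A test creates a fake `*sql.DB`, declares the statement it expects (matched as a regular expression), exercises the code under test against that handle, and finally verifies that every expectation was met. The slot name `slot1` is illustrative; in the real tests the exercised call is a function such as `infrastructure.Delete(ctx, db, slot)`.

```go
package main

import (
	"context"
	"fmt"

	"github.com/DATA-DOG/go-sqlmock"
)

func main() {
	// Create a fake *sql.DB; no running PostgreSQL instance is required.
	db, mock, err := sqlmock.New()
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Declare the statement we expect the code under test to execute,
	// together with its arguments and the result it should return.
	mock.ExpectExec("SELECT pg_drop_replication_slot").
		WithArgs("slot1").
		WillReturnResult(sqlmock.NewResult(1, 1))

	// Exercise the code path (inlined here for brevity).
	if _, err := db.ExecContext(context.Background(),
		"SELECT pg_drop_replication_slot($1)", "slot1"); err != nil {
		panic(err)
	}

	// Returns a non-nil error if any declared expectation was unmet.
	fmt.Println("unmet expectations:", mock.ExpectationsWereMet())
}
```

Compared with the hand-written fake managers this series removes, the sqlmock approach also asserts the exact SQL each code path issues, which is what lets the interface layer disappear in favor of plain `*sql.DB` parameters.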
Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- .../controller/instance_controller.go | 7 +- .../slots/infrastructure/contract.go | 36 --- .../slots/infrastructure/postgresmanager.go | 54 +--- .../infrastructure/postgresmanager_test.go | 72 ++--- .../slots/infrastructure/suite_test.go | 22 -- .../slots/reconciler/replicationslot.go | 21 +- .../slots/reconciler/replicationslot_test.go | 232 ++++++++--------- .../controller/slots/runner/runner.go | 39 +-- .../controller/slots/runner/runner_test.go | 245 ++++++++---------- 9 files changed, 304 insertions(+), 424 deletions(-) delete mode 100644 internal/management/controller/slots/infrastructure/contract.go diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 71f207bb17..9a48a4c849 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -44,7 +44,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/roles" - "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/infrastructure" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/reconciler" "github.com/cloudnative-pg/cloudnative-pg/internal/management/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" @@ -238,10 +237,14 @@ func (r *InstanceReconciler) Reconcile( r.configureSlotReplicator(cluster) + postgresDB, err := r.instance.ConnectionPool().Connection("postgres") + if err != nil { + return reconcile.Result{}, fmt.Errorf("while getting the postgres connection: %w", err) + } if result, err := reconciler.ReconcileReplicationSlots( ctx, r.instance.GetPodName(), - infrastructure.NewPostgresManager(r.instance.ConnectionPool()), + postgresDB, cluster, ); err != nil || !result.IsZero() { return result, err diff --git a/internal/management/controller/slots/infrastructure/contract.go b/internal/management/controller/slots/infrastructure/contract.go deleted file mode 100644 index d0e3d0d992..0000000000 --- a/internal/management/controller/slots/infrastructure/contract.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package infrastructure - -import ( - "context" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// Manager abstracts the operations that need to be sent to -// the database instance for the management of Replication Slots -type Manager interface { - // List the available replication slots - List(ctx context.Context, config *apiv1.ReplicationSlotsConfiguration) (ReplicationSlotList, error) - // Update the replication slot - Update(ctx context.Context, slot ReplicationSlot) error - // Create the replication slot - Create(ctx context.Context, slot ReplicationSlot) error - // Delete the replication slot - Delete(ctx context.Context, slot ReplicationSlot) error -} diff --git a/internal/management/controller/slots/infrastructure/postgresmanager.go b/internal/management/controller/slots/infrastructure/postgresmanager.go index 74360cf783..726a33986f 100644 --- a/internal/management/controller/slots/infrastructure/postgresmanager.go +++ b/internal/management/controller/slots/infrastructure/postgresmanager.go @@ -18,40 +18,16 @@ package infrastructure import ( "context" + "database/sql" "strings" "github.com/cloudnative-pg/machinery/pkg/log" v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool" ) -// PostgresManager is a Manager for a database instance -type PostgresManager struct { - pool pool.Pooler -} - -// NewPostgresManager returns an implementation of Manager for postgres -func NewPostgresManager(pool pool.Pooler) Manager { - return PostgresManager{ - pool: pool, - } -} - -func (sm PostgresManager) String() string { - return sm.pool.GetDsn("postgres") -} - // List the available replication slots -func (sm PostgresManager) List( - ctx context.Context, - config *v1.ReplicationSlotsConfiguration, -) (ReplicationSlotList, error) { - db, err := sm.pool.Connection("postgres") - if err != nil { - return ReplicationSlotList{}, err - } - +func List(ctx context.Context, db *sql.DB, config *v1.ReplicationSlotsConfiguration) (ReplicationSlotList, error) { rows, err := db.QueryContext( ctx, `SELECT slot_name, slot_type, active, coalesce(restart_lsn::TEXT, '') AS restart_lsn, @@ -100,49 +76,35 @@ func (sm PostgresManager) List( } // Update the replication slot -func (sm PostgresManager) Update(ctx context.Context, slot ReplicationSlot) error { +func Update(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { contextLog := log.FromContext(ctx).WithName("updateSlot") contextLog.Trace("Invoked", "slot", slot) if slot.RestartLSN == "" { return nil } - db, err := sm.pool.Connection("postgres") - if err != nil { - return err - } - _, err = db.ExecContext(ctx, "SELECT pg_replication_slot_advance($1, $2)", slot.SlotName, slot.RestartLSN) + _, err := db.ExecContext(ctx, "SELECT pg_replication_slot_advance($1, $2)", slot.SlotName, slot.RestartLSN) return err } // Create the replication slot -func (sm PostgresManager) Create(ctx context.Context, slot ReplicationSlot) error { +func Create(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { contextLog := log.FromContext(ctx).WithName("createSlot") contextLog.Trace("Invoked", "slot", slot) - db, err := sm.pool.Connection("postgres") - if err != nil { - return err - } - - _, err = db.ExecContext(ctx, "SELECT pg_create_physical_replication_slot($1, $2)", + _, err := db.ExecContext(ctx, "SELECT pg_create_physical_replication_slot($1, $2)", slot.SlotName, slot.RestartLSN != "") return err } // Delete the replication slot -func (sm PostgresManager) Delete(ctx 
context.Context, slot ReplicationSlot) error { +func Delete(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { contextLog := log.FromContext(ctx).WithName("dropSlot") contextLog.Trace("Invoked", "slot", slot) if slot.Active { return nil } - db, err := sm.pool.Connection("postgres") - if err != nil { - return err - } - - _, err = db.ExecContext(ctx, "SELECT pg_drop_replication_slot($1)", slot.SlotName) + _, err := db.ExecContext(ctx, "SELECT pg_drop_replication_slot($1)", slot.SlotName) return err } diff --git a/internal/management/controller/slots/infrastructure/postgresmanager_test.go b/internal/management/controller/slots/infrastructure/postgresmanager_test.go index 251832847c..5fdbf41718 100644 --- a/internal/management/controller/slots/infrastructure/postgresmanager_test.go +++ b/internal/management/controller/slots/infrastructure/postgresmanager_test.go @@ -17,7 +17,6 @@ limitations under the License. package infrastructure import ( - "context" "database/sql" "errors" @@ -31,17 +30,15 @@ import ( var _ = Describe("PostgresManager", func() { var ( - manager Manager - mock sqlmock.Sqlmock - db *sql.DB - slot ReplicationSlot + mock sqlmock.Sqlmock + db *sql.DB + slot ReplicationSlot ) BeforeEach(func() { var err error db, mock, err = sqlmock.New() Expect(err).NotTo(HaveOccurred()) - manager = NewPostgresManager(&mockPooler{db: db}) slot = ReplicationSlot{ SlotName: "slot1", Type: SlotTypePhysical, @@ -55,26 +52,29 @@ var _ = Describe("PostgresManager", func() { }) Context("Create", func() { - It("should successfully create a replication slot", func() { - mock.ExpectExec("SELECT pg_create_physical_replication_slot"). + const expectedSQL = "SELECT pg_create_physical_replication_slot" + It("should successfully create a replication slot", func(ctx SpecContext) { + mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN != ""). WillReturnResult(sqlmock.NewResult(1, 1)) - err := manager.Create(context.Background(), slot) + err := Create(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) - It("should return error when the database execution fails", func() { - mock.ExpectExec("SELECT pg_create_physical_replication_slot"). + It("should return error when the database execution fails", func(ctx SpecContext) { + mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN != ""). WillReturnError(errors.New("mock error")) - err := manager.Create(context.Background(), slot) + err := Create(ctx, db, slot) Expect(err).To(HaveOccurred()) }) }) Context("List", func() { + const expectedSQL = "^SELECT (.+) FROM pg_replication_slots" + var config *v1.ReplicationSlotsConfiguration BeforeEach(func() { config = &v1.ReplicationSlotsConfiguration{ @@ -86,15 +86,15 @@ var _ = Describe("PostgresManager", func() { } }) - It("should successfully list replication slots", func() { + It("should successfully list replication slots", func(ctx SpecContext) { rows := sqlmock.NewRows([]string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"}). AddRow("_cnpg_slot1", string(SlotTypePhysical), true, "lsn1", false). AddRow("slot2", string(SlotTypePhysical), true, "lsn2", false) - mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + mock.ExpectQuery(expectedSQL). 
WillReturnRows(rows) - result, err := manager.List(context.Background(), config) + result, err := List(ctx, db, config) Expect(err).NotTo(HaveOccurred()) Expect(result.Items).To(HaveLen(2)) Expect(result.Has("_cnpg_slot1")).To(BeTrue()) @@ -113,65 +113,69 @@ var _ = Describe("PostgresManager", func() { Expect(slot2.IsHA).To(BeFalse()) }) - It("should return error when database query fails", func() { - mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + It("should return error when database query fails", func(ctx SpecContext) { + mock.ExpectQuery(expectedSQL). WillReturnError(errors.New("mock error")) - _, err := manager.List(context.Background(), config) + _, err := List(ctx, db, config) Expect(err).To(HaveOccurred()) }) }) Context("Update", func() { - It("should successfully update a replication slot", func() { - mock.ExpectExec("SELECT pg_replication_slot_advance"). + const expectedSQL = "SELECT pg_replication_slot_advance" + + It("should successfully update a replication slot", func(ctx SpecContext) { + mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN). WillReturnResult(sqlmock.NewResult(1, 1)) - err := manager.Update(context.Background(), slot) + err := Update(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) - It("should return error when the database execution fails", func() { - mock.ExpectExec("SELECT pg_replication_slot_advance"). + It("should return error when the database execution fails", func(ctx SpecContext) { + mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN). WillReturnError(errors.New("mock error")) - err := manager.Update(context.Background(), slot) + err := Update(ctx, db, slot) Expect(err).To(HaveOccurred()) }) - It("should not update a replication slot when RestartLSN is empty", func() { + It("should not update a replication slot when RestartLSN is empty", func(ctx SpecContext) { slot.RestartLSN = "" - err := manager.Update(context.Background(), slot) + err := Update(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) }) Context("Delete", func() { - It("should successfully delete a replication slot", func() { + const expectedSQL = "SELECT pg_drop_replication_slot" + + It("should successfully delete a replication slot", func(ctx SpecContext) { slot.Active = false - mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot.SlotName). + mock.ExpectExec(expectedSQL).WithArgs(slot.SlotName). WillReturnResult(sqlmock.NewResult(1, 1)) - err := manager.Delete(context.Background(), slot) + err := Delete(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) - It("should return error when the database execution fails", func() { + It("should return error when the database execution fails", func(ctx SpecContext) { slot.Active = false - mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot.SlotName). + mock.ExpectExec(expectedSQL).WithArgs(slot.SlotName). 
WillReturnError(errors.New("mock error")) - err := manager.Delete(context.Background(), slot) + err := Delete(ctx, db, slot) Expect(err).To(HaveOccurred()) }) - It("should not delete an active replication slot", func() { + It("should not delete an active replication slot", func(ctx SpecContext) { slot.RestartLSN = "" - err := manager.Delete(context.Background(), slot) + err := Delete(ctx, db, slot) Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/internal/management/controller/slots/infrastructure/suite_test.go b/internal/management/controller/slots/infrastructure/suite_test.go index ec8b6e54af..30bf0edf16 100644 --- a/internal/management/controller/slots/infrastructure/suite_test.go +++ b/internal/management/controller/slots/infrastructure/suite_test.go @@ -17,8 +17,6 @@ limitations under the License. package infrastructure import ( - "database/sql" - "errors" "testing" . "github.com/onsi/ginkgo/v2" @@ -29,23 +27,3 @@ func TestReconciler(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Internal Management Controller Slots Infrastructure Suite") } - -// mockPooler is a mock implementation of the Pooler interface -type mockPooler struct { - db *sql.DB -} - -func (mp *mockPooler) Connection(_ string) (*sql.DB, error) { - if mp.db == nil { - return nil, errors.New("connection error") - } - return mp.db, nil -} - -func (mp *mockPooler) GetDsn(_ string) string { - return "mocked DSN" -} - -func (mp *mockPooler) ShutdownConnections() { - // no-op in mock -} diff --git a/internal/management/controller/slots/reconciler/replicationslot.go b/internal/management/controller/slots/reconciler/replicationslot.go index 7871358414..6d7382330b 100644 --- a/internal/management/controller/slots/reconciler/replicationslot.go +++ b/internal/management/controller/slots/reconciler/replicationslot.go @@ -18,6 +18,7 @@ package reconciler import ( "context" + "database/sql" "fmt" "time" @@ -32,7 +33,7 @@ import ( func ReconcileReplicationSlots( ctx context.Context, instanceName string, - manager infrastructure.Manager, + db *sql.DB, cluster *apiv1.Cluster, ) (reconcile.Result, error) { if cluster.Spec.ReplicationSlots == nil || @@ -48,11 +49,11 @@ func ReconcileReplicationSlots( // we also clean up the slots that fall under the user defined replication slots feature here. 
// TODO: split-out user defined replication slots code if !cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() { - return dropReplicationSlots(ctx, manager, cluster, isPrimary) + return dropReplicationSlots(ctx, db, cluster, isPrimary) } if isPrimary { - return reconcilePrimaryHAReplicationSlots(ctx, manager, cluster) + return reconcilePrimaryHAReplicationSlots(ctx, db, cluster) } return reconcile.Result{}, nil @@ -61,13 +62,13 @@ func ReconcileReplicationSlots( // reconcilePrimaryHAReplicationSlots reconciles the HA replication slots of the primary instance func reconcilePrimaryHAReplicationSlots( ctx context.Context, - manager infrastructure.Manager, + db *sql.DB, cluster *apiv1.Cluster, ) (reconcile.Result, error) { contextLogger := log.FromContext(ctx) contextLogger.Debug("Updating primary HA replication slots") - currentSlots, err := manager.List(ctx, cluster.Spec.ReplicationSlots) + currentSlots, err := infrastructure.List(ctx, db, cluster.Spec.ReplicationSlots) if err != nil { return reconcile.Result{}, fmt.Errorf("reconciling primary replication slots: %w", err) } @@ -88,7 +89,7 @@ func reconcilePrimaryHAReplicationSlots( } // At this point, the cluster instance does not have a HA replication slot - if err := manager.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotName}); err != nil { + if err := infrastructure.Create(ctx, db, infrastructure.ReplicationSlot{SlotName: slotName}); err != nil { return reconcile.Result{}, fmt.Errorf("creating primary HA replication slots: %w", err) } } @@ -115,7 +116,7 @@ func reconcilePrimaryHAReplicationSlots( } contextLogger.Trace("Attempt to delete replication slot", "slot", slot) - if err := manager.Delete(ctx, slot); err != nil { + if err := infrastructure.Delete(ctx, db, slot); err != nil { return reconcile.Result{}, fmt.Errorf("failure deleting replication slot %q: %w", slot.SlotName, err) } } @@ -133,7 +134,7 @@ func reconcilePrimaryHAReplicationSlots( // we also clean up the slots that fall under the user defined replication slots feature here. func dropReplicationSlots( ctx context.Context, - manager infrastructure.Manager, + db *sql.DB, cluster *apiv1.Cluster, isPrimary bool, ) (reconcile.Result, error) { @@ -144,7 +145,7 @@ func dropReplicationSlots( dropUserSlots := !cluster.Spec.ReplicationSlots.SynchronizeReplicas.GetEnabled() // we fetch all replication slots - slots, err := manager.List(ctx, cluster.Spec.ReplicationSlots) + slots, err := infrastructure.List(ctx, db, cluster.Spec.ReplicationSlots) if err != nil { return reconcile.Result{}, err } @@ -169,7 +170,7 @@ func dropReplicationSlots( } contextLogger.Trace("Attempt to delete replication slot", "slot", slot) - if err := manager.Delete(ctx, slot); err != nil { + if err := infrastructure.Delete(ctx, db, slot); err != nil { return reconcile.Result{}, fmt.Errorf("while disabling standby HA replication slots: %w", err) } } diff --git a/internal/management/controller/slots/reconciler/replicationslot_test.go b/internal/management/controller/slots/reconciler/replicationslot_test.go index 8e90f2d068..c124597df7 100644 --- a/internal/management/controller/slots/reconciler/replicationslot_test.go +++ b/internal/management/controller/slots/reconciler/replicationslot_test.go @@ -17,11 +17,12 @@ limitations under the License. package reconciler import ( - "context" + "database/sql" + "database/sql/driver" "errors" - "strings" "time" + "github.com/DATA-DOG/go-sqlmock" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -31,58 +32,9 @@ import ( . 
"github.com/onsi/gomega" ) -type fakeSlot struct { - name string - active bool - isHA bool -} - -type fakeReplicationSlotManager struct { - replicationSlots map[fakeSlot]bool - triggerListError bool - triggerDeleteError bool -} - const slotPrefix = "_cnpg_" -func (fk fakeReplicationSlotManager) Create(_ context.Context, slot infrastructure.ReplicationSlot) error { - isHA := strings.HasPrefix(slot.SlotName, slotPrefix) - fk.replicationSlots[fakeSlot{name: slot.SlotName, isHA: isHA}] = true - return nil -} - -func (fk fakeReplicationSlotManager) Delete(_ context.Context, slot infrastructure.ReplicationSlot) error { - if fk.triggerDeleteError { - return errors.New("triggered delete error") - } - delete(fk.replicationSlots, fakeSlot{name: slot.SlotName, active: slot.Active, isHA: slot.IsHA}) - return nil -} - -func (fk fakeReplicationSlotManager) Update(_ context.Context, _ infrastructure.ReplicationSlot) error { - return nil -} - -func (fk fakeReplicationSlotManager) List( - _ context.Context, - _ *apiv1.ReplicationSlotsConfiguration, -) (infrastructure.ReplicationSlotList, error) { - var slotList infrastructure.ReplicationSlotList - if fk.triggerListError { - return slotList, errors.New("triggered list error") - } - - for slot := range fk.replicationSlots { - slotList.Items = append(slotList.Items, infrastructure.ReplicationSlot{ - SlotName: slot.name, - RestartLSN: "", - Type: infrastructure.SlotTypePhysical, - Active: slot.active, - IsHA: slot.isHA, - }) - } - return slotList, nil -} +var repSlotColumns = []string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"} func makeClusterWithInstanceNames(instanceNames []string, primary string) apiv1.Cluster { return apiv1.Cluster{ @@ -102,134 +54,156 @@ func makeClusterWithInstanceNames(instanceNames []string, primary string) apiv1. } } +func newRepSlot(name string, active bool, restartLSN string) []driver.Value { + return []driver.Value{ + slotPrefix + name, string(infrastructure.SlotTypePhysical), active, restartLSN, false, + } +} + var _ = Describe("HA Replication Slots reconciliation in Primary", func() { + var ( + db *sql.DB + mock sqlmock.Sqlmock + ) + BeforeEach(func() { + var err error + db, mock, err = sqlmock.New() + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func() { + Expect(mock.ExpectationsWereMet()).To(Succeed()) + }) It("can create a new replication slot for a new cluster instance", func(ctx SpecContext) { - fakeSlotManager := fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: slotPrefix + "instance1", isHA: true}: true, - {name: slotPrefix + "instance2", isHA: true}: true, - }, - } + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", true, "lsn1")...). + AddRow(newRepSlot("instance2", true, "lsn2")...) - cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2", "instance3"}, "instance1") + mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + WillReturnRows(rows) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(2)) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeTrue()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance2", isHA: true}]).To(BeTrue()) + mock.ExpectExec("SELECT pg_create_physical_replication_slot"). + WithArgs(slotPrefix+"instance3", false). 
+ WillReturnResult(sqlmock.NewResult(1, 1)) - _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) + cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2", "instance3"}, "instance1") + + _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster) Expect(err).ShouldNot(HaveOccurred()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance1", isHA: true}]).To(BeFalse()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeTrue()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance2", isHA: true}]).To(BeTrue()) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(2)) }) It("can delete an inactive HA replication slot that is not in the cluster", func(ctx SpecContext) { - fakeSlotManager := fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: slotPrefix + "instance1", isHA: true}: true, - {name: slotPrefix + "instance2", isHA: true}: true, - {name: slotPrefix + "instance3", isHA: true}: true, - }, - } + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", true, "lsn1")...). + AddRow(newRepSlot("instance2", true, "lsn2")...). + AddRow(newRepSlot("instance3", false, "lsn2")...) - cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") + mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + WillReturnRows(rows) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(3)) + mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance3"). + WillReturnResult(sqlmock.NewResult(1, 1)) - _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) + cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") + + _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster) Expect(err).ShouldNot(HaveOccurred()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: "_cnpg_instance3", isHA: true}]).To(BeFalse()) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(1)) }) It("will not delete an active HA replication slot that is not in the cluster", func(ctx SpecContext) { - fakeSlotManager := fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: slotPrefix + "instance1", isHA: true}: true, - {name: slotPrefix + "instance2", isHA: true}: true, - {name: slotPrefix + "instance3", isHA: true, active: true}: true, - }, - } + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", true, "lsn1")...). + AddRow(newRepSlot("instance2", true, "lsn2")...). + AddRow(newRepSlot("instance3", true, "lsn2")...) - cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") + mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + WillReturnRows(rows) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(3)) + cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") - _, err := ReconcileReplicationSlots(ctx, "instance1", fakeSlotManager, &cluster) + _, err := ReconcileReplicationSlots(ctx, "instance1", db, &cluster) Expect(err).ShouldNot(HaveOccurred()) - Expect(fakeSlotManager.replicationSlots[fakeSlot{name: slotPrefix + "instance3", isHA: true, active: true}]). 
- To(BeTrue()) - Expect(fakeSlotManager.replicationSlots).To(HaveLen(2)) }) }) var _ = Describe("dropReplicationSlots", func() { - It("returns error when listing slots fails", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: make(map[fakeSlot]bool), - triggerListError: true, - } + const selectPgRepSlot = "^SELECT (.+) FROM pg_replication_slots" + + var ( + db *sql.DB + mock sqlmock.Sqlmock + ) + BeforeEach(func() { + var err error + db, mock, err = sqlmock.New() + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func() { + Expect(mock.ExpectationsWereMet()).To(Succeed()) + }) + + It("returns error when listing slots fails", func(ctx SpecContext) { cluster := makeClusterWithInstanceNames([]string{}, "") - _, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + mock.ExpectQuery(selectPgRepSlot).WillReturnError(errors.New("triggered list error")) + + _, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("triggered list error")) }) - It("skips deletion of active HA slots and reschedules", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: "slot1", active: true, isHA: true}: true, - }, - } + It("skips deletion of active HA slots and reschedules", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", true, "lsn1")...) + mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows) + cluster := makeClusterWithInstanceNames([]string{}, "") - res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + res, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).NotTo(HaveOccurred()) Expect(res.RequeueAfter).To(Equal(time.Second)) }) - It("skips the deletion of user defined replication slots on the primary", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: "slot1", active: true}: true, - }, - } + It("skips the deletion of user defined replication slots on the primary", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow("custom-slot", string(infrastructure.SlotTypePhysical), true, "lsn1", false) + mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + WillReturnRows(rows) + cluster := makeClusterWithInstanceNames([]string{}, "") - res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + res, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).NotTo(HaveOccurred()) Expect(res.RequeueAfter).To(Equal(time.Duration(0))) Expect(res.IsZero()).To(BeTrue()) }) - It("returns error when deleting a slot fails", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: "slot1", active: false, isHA: true}: true, - }, - triggerDeleteError: true, - } + It("returns error when deleting a slot fails", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", false, "lsn1")...) + mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows) + + mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance1"). 
+ WillReturnError(errors.New("delete error")) + cluster := makeClusterWithInstanceNames([]string{}, "") - _, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + _, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("triggered delete error")) + Expect(err.Error()).To(ContainSubstring("delete error")) }) - It("deletes inactive slots and does not reschedule", func() { - fakeManager := &fakeReplicationSlotManager{ - replicationSlots: map[fakeSlot]bool{ - {name: "slot1", active: false, isHA: true}: true, - }, - } + It("deletes inactive slots and does not reschedule", func(ctx SpecContext) { + rows := sqlmock.NewRows(repSlotColumns). + AddRow(newRepSlot("instance1", false, "lsn1")...) + mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows) + + mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance1"). + WillReturnResult(sqlmock.NewResult(1, 1)) + cluster := makeClusterWithInstanceNames([]string{}, "") - res, err := dropReplicationSlots(context.Background(), fakeManager, &cluster, true) + res, err := dropReplicationSlots(ctx, db, &cluster, true) Expect(err).NotTo(HaveOccurred()) Expect(res.RequeueAfter).To(Equal(time.Duration(0))) - Expect(fakeManager.replicationSlots).NotTo(HaveKey(fakeSlot{name: "slot1", active: false})) }) }) diff --git a/internal/management/controller/slots/runner/runner.go b/internal/management/controller/slots/runner/runner.go index 221a5195e0..9b200c76af 100644 --- a/internal/management/controller/slots/runner/runner.go +++ b/internal/management/controller/slots/runner/runner.go @@ -18,6 +18,7 @@ package runner import ( "context" + "database/sql" "fmt" "time" @@ -108,10 +109,23 @@ func (sr *Replicator) reconcile(ctx context.Context, config *apiv1.ReplicationSl primaryPool := sr.instance.PrimaryConnectionPool() localPool := sr.instance.ConnectionPool() + primaryDB, err := primaryPool.Connection("postgres") + if err != nil { + return err + } + localDB, err := localPool.Connection("postgres") + if err != nil { + return err + } + contextLog.Trace("Invoked", + "primary", primaryPool.GetDsn("postgres"), + "local", localPool.GetDsn("postgres"), + "podName", sr.instance.GetPodName(), + "config", config) err = synchronizeReplicationSlots( ctx, - infrastructure.NewPostgresManager(primaryPool), - infrastructure.NewPostgresManager(localPool), + primaryDB, + localDB, sr.instance.GetPodName(), config, ) @@ -122,25 +136,20 @@ func (sr *Replicator) reconcile(ctx context.Context, config *apiv1.ReplicationSl // nolint: gocognit func synchronizeReplicationSlots( ctx context.Context, - primarySlotManager infrastructure.Manager, - localSlotManager infrastructure.Manager, + primaryDB *sql.DB, + localDB *sql.DB, podName string, config *apiv1.ReplicationSlotsConfiguration, ) error { contextLog := log.FromContext(ctx).WithName("synchronizeReplicationSlots") - contextLog.Trace("Invoked", - "primary", primarySlotManager, - "local", localSlotManager, - "podName", podName, - "config", config) - slotsInPrimary, err := primarySlotManager.List(ctx, config) + slotsInPrimary, err := infrastructure.List(ctx, primaryDB, config) if err != nil { return fmt.Errorf("getting replication slot status from primary: %v", err) } contextLog.Trace("primary slot status", "slotsInPrimary", slotsInPrimary) - slotsInLocal, err := localSlotManager.List(ctx, config) + slotsInLocal, err := infrastructure.List(ctx, localDB, config) if err != nil { return fmt.Errorf("getting replication slot status 
from local: %v", err) } @@ -167,12 +176,12 @@ func synchronizeReplicationSlots( } if !slotsInLocal.Has(slot.SlotName) { - err := localSlotManager.Create(ctx, slot) + err := infrastructure.Create(ctx, localDB, slot) if err != nil { return err } } - err := localSlotManager.Update(ctx, slot) + err := infrastructure.Update(ctx, localDB, slot) if err != nil { return err } @@ -184,14 +193,14 @@ func synchronizeReplicationSlots( // * slots holding xmin (this can happen on a former primary, and will prevent VACUUM from // removing tuples deleted by any later transaction.) if !slotsInPrimary.Has(slot.SlotName) || slot.SlotName == mySlotName || slot.HoldsXmin { - if err := localSlotManager.Delete(ctx, slot); err != nil { + if err := infrastructure.Delete(ctx, localDB, slot); err != nil { return err } } // when the user turns off the feature we should delete all the created replication slots that aren't from HA if !slot.IsHA && !config.SynchronizeReplicas.GetEnabled() { - if err := localSlotManager.Delete(ctx, slot); err != nil { + if err := infrastructure.Delete(ctx, localDB, slot); err != nil { return err } } diff --git a/internal/management/controller/slots/runner/runner_test.go b/internal/management/controller/slots/runner/runner_test.go index df73585c72..87ebe69350 100644 --- a/internal/management/controller/slots/runner/runner_test.go +++ b/internal/management/controller/slots/runner/runner_test.go @@ -17,9 +17,9 @@ limitations under the License. package runner import ( - "context" - "fmt" + "database/sql" + "github.com/DATA-DOG/go-sqlmock" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -29,153 +29,138 @@ import ( . "github.com/onsi/gomega" ) -type fakeSlot struct { - name string - restartLSN string - holdsXmin bool -} - -type fakeSlotManager struct { - slots map[string]fakeSlot - slotsUpdated int - slotsCreated int - slotsDeleted int -} - -func (sm *fakeSlotManager) List( - _ context.Context, - _ *apiv1.ReplicationSlotsConfiguration, -) (infrastructure.ReplicationSlotList, error) { - var slotList infrastructure.ReplicationSlotList - for _, slot := range sm.slots { - slotList.Items = append(slotList.Items, infrastructure.ReplicationSlot{ - SlotName: slot.name, - RestartLSN: slot.restartLSN, - Type: infrastructure.SlotTypePhysical, - Active: false, - HoldsXmin: slot.holdsXmin, - }) - } - return slotList, nil -} - -func (sm *fakeSlotManager) Update(_ context.Context, slot infrastructure.ReplicationSlot) error { - localSlot, found := sm.slots[slot.SlotName] - if !found { - return fmt.Errorf("while updating slot: Slot %s not found", slot.SlotName) - } - if localSlot.restartLSN != slot.RestartLSN { - sm.slots[slot.SlotName] = fakeSlot{name: slot.SlotName, restartLSN: slot.RestartLSN} - sm.slotsUpdated++ - } - return nil -} - -func (sm *fakeSlotManager) Create(_ context.Context, slot infrastructure.ReplicationSlot) error { - if _, found := sm.slots[slot.SlotName]; found { - return fmt.Errorf("while creating slot: Slot %s already exists", slot.SlotName) - } - sm.slots[slot.SlotName] = fakeSlot{name: slot.SlotName, restartLSN: slot.RestartLSN} - sm.slotsCreated++ - return nil -} - -func (sm *fakeSlotManager) Delete(_ context.Context, slot infrastructure.ReplicationSlot) error { - if _, found := sm.slots[slot.SlotName]; !found { - return fmt.Errorf("while deleting slot: Slot %s not found", slot.SlotName) - } - delete(sm.slots, slot.SlotName) - sm.slotsDeleted++ - return nil -} - var _ = Describe("Slot synchronization", Ordered, func() { - localPodName := "cluster-2" - 
localSlotName := "_cnpg_cluster_2" - slot3 := "cluster-3" - slot4 := "cluster-4" - - primary := &fakeSlotManager{ - slots: map[string]fakeSlot{ - localSlotName: {name: localSlotName, restartLSN: "0/301C4D8"}, - slot3: {name: slot3, restartLSN: "0/302C4D8"}, - slot4: {name: slot4, restartLSN: "0/303C4D8"}, - }, - } - local := &fakeSlotManager{ - slots: map[string]fakeSlot{}, - } - config := apiv1.ReplicationSlotsConfiguration{ - HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ - Enabled: ptr.To(true), - SlotPrefix: "_cnpg_", - }, - } + const ( + selectPgReplicationSlots = "^SELECT (.+) FROM pg_replication_slots" + selectPgSlotAdvance = "SELECT pg_replication_slot_advance" + + localPodName = "cluster-2" + localSlotName = "_cnpg_cluster_2" + slot3 = "cluster-3" + slot4 = "cluster-4" + lsnSlot3 = "0/302C4D8" + lsnSlot4 = "0/303C4D8" + ) + + var ( + config = apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(true), + SlotPrefix: "_cnpg_", + }, + } + columns = []string{"slot_name", "slot_type", "active", "restart_lsn", "holds_xmin"} + ) + + var ( + dbLocal, dbPrimary *sql.DB + mockLocal, mockPrimary sqlmock.Sqlmock + ) + + BeforeEach(func() { + var err error + dbLocal, mockLocal, err = sqlmock.New() + Expect(err).NotTo(HaveOccurred()) + dbPrimary, mockPrimary, err = sqlmock.New() + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func() { + Expect(mockLocal.ExpectationsWereMet()).To(Succeed(), "failed expectations in LOCAL") + Expect(mockPrimary.ExpectationsWereMet()).To(Succeed(), "failed expectations in PRIMARY") + }) It("can create slots in local from those on primary", func(ctx SpecContext) { - localSlotsBefore, err := local.List(ctx, &config) - Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsBefore.Items).Should(BeEmpty()) - - err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config) + // the primary contains slots + mockPrimary.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false). + AddRow(slot3, string(infrastructure.SlotTypePhysical), true, lsnSlot3, false). + AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false)) + + // but the local contains none + mockLocal.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns)) + + mockLocal.ExpectExec("SELECT pg_create_physical_replication_slot"). + WithArgs(slot3, true). + WillReturnResult(sqlmock.NewResult(1, 1)) + + mockLocal.ExpectExec(selectPgSlotAdvance). + WithArgs(slot3, lsnSlot3). + WillReturnResult(sqlmock.NewResult(1, 1)) + + mockLocal.ExpectExec("SELECT pg_create_physical_replication_slot"). + WithArgs(slot4, true). + WillReturnResult(sqlmock.NewResult(1, 1)) + + mockLocal.ExpectExec(selectPgSlotAdvance). + WithArgs(slot4, lsnSlot4). 
+ WillReturnResult(sqlmock.NewResult(1, 1)) + + err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config) Expect(err).ShouldNot(HaveOccurred()) - - localSlotsAfter, err := local.List(ctx, &config) - Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsAfter.Items).Should(HaveLen(2)) - Expect(localSlotsAfter.Has(slot3)).To(BeTrue()) - Expect(localSlotsAfter.Has(slot4)).To(BeTrue()) - Expect(local.slotsCreated).To(Equal(2)) }) It("can update slots in local when ReplayLSN in primary advanced", func(ctx SpecContext) { - // advance slot3 in primary newLSN := "0/308C4D8" - err := primary.Update(ctx, infrastructure.ReplicationSlot{SlotName: slot3, RestartLSN: newLSN}) - Expect(err).ShouldNot(HaveOccurred()) - err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config) + // Simulate we advance slot3 in primary + mockPrimary.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false). + AddRow(slot3, string(infrastructure.SlotTypePhysical), true, newLSN, false). + AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false)) + // But local has the old values + mockLocal.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(slot3, string(infrastructure.SlotTypePhysical), true, lsnSlot3, false). + AddRow(slot4, string(infrastructure.SlotTypePhysical), true, lsnSlot4, false)) + + mockLocal.ExpectExec(selectPgSlotAdvance). + WithArgs(slot3, newLSN). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockLocal.ExpectExec(selectPgSlotAdvance). + WithArgs(slot4, lsnSlot4). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config) Expect(err).ShouldNot(HaveOccurred()) - - localSlotsAfter, err := local.List(ctx, &config) - Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsAfter.Items).Should(HaveLen(2)) - Expect(localSlotsAfter.Has(slot3)).To(BeTrue()) - slot := localSlotsAfter.Get(slot3) - Expect(slot.RestartLSN).To(Equal(newLSN)) - Expect(local.slotsUpdated).To(Equal(1)) }) - It("can drop slots in local when they are no longer in primary", func(ctx SpecContext) { - err := primary.Delete(ctx, infrastructure.ReplicationSlot{SlotName: slot4}) - Expect(err).ShouldNot(HaveOccurred()) + It("can drop inactive slots in local when they are no longer in primary", func(ctx SpecContext) { + // Simulate primary has no longer slot4 + mockPrimary.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false)) + // But local still has it + mockLocal.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(slot4, string(infrastructure.SlotTypePhysical), false, lsnSlot4, false)) - err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config) - Expect(err).ShouldNot(HaveOccurred()) + mockLocal.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot4). 
+ WillReturnResult(sqlmock.NewResult(1, 1)) - localSlotsAfter, err := local.List(ctx, &config) + err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config) Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsAfter.Items).Should(HaveLen(1)) - Expect(localSlotsAfter.Has(slot3)).To(BeTrue()) - Expect(local.slotsDeleted).To(Equal(1)) }) It("can drop slots in local that hold xmin", func(ctx SpecContext) { slotWithXmin := "_cnpg_xmin" - err := primary.Create(ctx, infrastructure.ReplicationSlot{SlotName: slotWithXmin}) - Expect(err).ShouldNot(HaveOccurred()) - local.slots[slotWithXmin] = fakeSlot{name: slotWithXmin, holdsXmin: true} - localSlotsBefore, err := local.List(ctx, &config) - Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsBefore.Has(slotWithXmin)).To(BeTrue()) - - err = synchronizeReplicationSlots(ctx, primary, local, localPodName, &config) - Expect(err).ShouldNot(HaveOccurred()) - - localSlotsAfter, err := local.List(ctx, &config) + mockPrimary.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false). + AddRow(slotWithXmin, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", true)) + mockLocal.ExpectQuery(selectPgReplicationSlots). + WillReturnRows(sqlmock.NewRows(columns). + AddRow(localSlotName, string(infrastructure.SlotTypePhysical), true, "0/301C4D8", false). + AddRow(slotWithXmin, string(infrastructure.SlotTypePhysical), false, "0/301C4D8", true)) // inactive but with Xmin + + mockLocal.ExpectExec(selectPgSlotAdvance).WithArgs(slotWithXmin, "0/301C4D8"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockLocal.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotWithXmin). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config) Expect(err).ShouldNot(HaveOccurred()) - Expect(localSlotsAfter.Has(slotWithXmin)).To(BeFalse()) - Expect(localSlotsAfter.Items).Should(HaveLen(1)) - Expect(local.slotsDeleted).To(Equal(2)) }) }) From 7cc309fb0fee6f9f309653a2c3932dd7a3db1c86 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 11:04:29 +0400 Subject: [PATCH 138/836] chore(deps): update spellcheck to v0.45.0 (main) (#6039) jonasbn/github-action-spellcheck `0.44.0` -> `0.45.0` https://redirect.github.com/rojopolis/spellcheck-github-actions `0.44.0` -> `0.45.0` --- .github/workflows/spellcheck.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 07b87f3bdf..1fd12f3085 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -28,4 +28,4 @@ jobs: uses: actions/checkout@v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@0.44.0 + uses: rojopolis/spellcheck-github-actions@0.45.0 diff --git a/Makefile b/Makefile index 9f06b5ae5f..82201e9083 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.5 GORELEASER_VERSION ?= v2.4.4 -SPELLCHECK_VERSION ?= 0.44.0 +SPELLCHECK_VERSION ?= 0.45.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 OPM_VERSION ?= v1.48.0 From 23aea138340882431df7a5f3a3e27d1e04d80769 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." 
Date: Tue, 12 Nov 2024 07:48:15 +0100 Subject: [PATCH 139/836] fix(test): avoid checking permissions in cloud services (#6062) We set the cluster configuration of OpenShift to `credentialsMode: Mint` to avoid checking the permissions when creating a new OpenShift instance. Closes #6061 Signed-off-by: Jonathan Gonzalez V. --- hack/install-config.yaml.template | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/install-config.yaml.template b/hack/install-config.yaml.template index 840388b89b..34e80e3580 100644 --- a/hack/install-config.yaml.template +++ b/hack/install-config.yaml.template @@ -31,3 +31,4 @@ platform: publish: External pullSecret: '${REDHAT_PULL}' sshKey: ${SSH_PUBLIC_KEY} +credentialsMode: Mint From 7634f7e11b003d3a597c2adbe0f8bcc4b9d39c17 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Wed, 13 Nov 2024 07:46:48 +0100 Subject: [PATCH 140/836] fix: correct transaction code and resolve non deterministic role apply (#6064) - Updated transaction management in role reconciler to use `TX` instead of `DB` within a transaction loop, this was avoiding the expected transaction rollback - Fixed flaky unit tests by ensuring the SQL operation order is enforced, stabilizing test outcomes, this was done by making deterministic the function used to apply roles, applyRoleActions() Signed-off-by: Jaime Silvela --- .../management/controller/roles/postgres.go | 2 +- .../management/controller/roles/runnable.go | 71 +++++++++---------- 2 files changed, 36 insertions(+), 37 deletions(-) diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go index b6d7b1bf13..eb1dcf913c 100644 --- a/internal/management/controller/roles/postgres.go +++ b/internal/management/controller/roles/postgres.go @@ -264,7 +264,7 @@ func UpdateMembership( for _, sqlQuery := range queries { contextLog.Debug("Executing query", "sqlQuery", sqlQuery) - if _, err := db.ExecContext(ctx, sqlQuery); err != nil { + if _, err := tx.ExecContext(ctx, sqlQuery); err != nil { contextLog.Error(err, "executing query", "sqlQuery", sqlQuery, "err", err) return wrapErr(err) } diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go index 58c127da00..1d97e6bfaf 100644 --- a/internal/management/controller/roles/runnable.go +++ b/internal/management/controller/roles/runnable.go @@ -249,42 +249,12 @@ func (sr *RoleSynchronizer) applyRoleActions( return nil } - for action, roles := range rolesByAction { - switch action { - case roleIgnore, roleIsReconciled, roleIsReserved: - contextLog.Debug("no action required", "action", action) - continue - } - - contextLog.Info("roles in DB out of sync with Spec, evaluating action", - "roles", getRoleNames(roles), "action", action) - - for _, role := range roles { - var ( - err error - appliedState apiv1.PasswordState - grants, revokes []string - ) - switch action { - case roleCreate, roleUpdate: - appliedState, err = sr.applyRoleCreateUpdate(ctx, db, role, action) - if err == nil { - appliedChanges[role.Name] = appliedState - } - case roleDelete: - err = Delete(ctx, db, role.toDatabaseRole()) - case roleSetComment: - // NOTE: adding/updating a comment on a role does not alter its TransactionID - err = UpdateComment(ctx, db, role.toDatabaseRole()) - case roleUpdateMemberships: - // NOTE: revoking / granting to a role does not alter its TransactionID - dbRole := role.toDatabaseRole() - grants, revokes, err = getRoleMembershipDiff(ctx, db, role, dbRole) - if unhandledErr := handleRoleError(err, 
role.Name, action); unhandledErr != nil { - return nil, nil, unhandledErr - } - - err = UpdateMembership(ctx, db, dbRole, grants, revokes) + actionsCreateUpdate := []roleAction{roleCreate, roleUpdate} + for _, action := range actionsCreateUpdate { + for _, role := range rolesByAction[action] { + appliedState, err := sr.applyRoleCreateUpdate(ctx, db, role, action) + if err == nil { + appliedChanges[role.Name] = appliedState } if unhandledErr := handleRoleError(err, role.Name, action); unhandledErr != nil { return nil, nil, unhandledErr @@ -292,6 +262,35 @@ func (sr *RoleSynchronizer) applyRoleActions( } } + for _, role := range rolesByAction[roleSetComment] { + // NOTE: adding/updating a comment on a role does not alter its TransactionID + err := UpdateComment(ctx, db, role.toDatabaseRole()) + if unhandledErr := handleRoleError(err, role.Name, roleSetComment); unhandledErr != nil { + return nil, nil, unhandledErr + } + } + + for _, role := range rolesByAction[roleUpdateMemberships] { + // NOTE: revoking / granting to a role does not alter its TransactionID + dbRole := role.toDatabaseRole() + grants, revokes, err := getRoleMembershipDiff(ctx, db, role, dbRole) + if unhandledErr := handleRoleError(err, role.Name, roleUpdateMemberships); unhandledErr != nil { + return nil, nil, unhandledErr + } + + err = UpdateMembership(ctx, db, dbRole, grants, revokes) + if unhandledErr := handleRoleError(err, role.Name, roleUpdateMemberships); unhandledErr != nil { + return nil, nil, unhandledErr + } + } + + for _, role := range rolesByAction[roleDelete] { + err := Delete(ctx, db, role.toDatabaseRole()) + if unhandledErr := handleRoleError(err, role.Name, roleDelete); unhandledErr != nil { + return nil, nil, unhandledErr + } + } + return appliedChanges, irreconcilableRoles, nil } From 54b234787d3f0f5ec519008d67d87b4a457a1e7d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 09:34:34 +0100 Subject: [PATCH 141/836] chore(deps): update dependency go to v1.23.3 (main) (#6050) --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index cdb1248b22..2adca64b38 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/cloudnative-pg/cloudnative-pg go 1.23 -toolchain go1.23.2 +toolchain go1.23.3 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 From ac681936011631528fa51246370b0e1f2ebaaaa6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 10:20:51 +0100 Subject: [PATCH 142/836] chore(deps): update dependency rook/rook to v1.15.5 (main) (#6070) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f5bc575720..b1891561f3 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ env: GOLANG_VERSION: "1.23.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.24.0" - ROOK_VERSION: "v1.15.4" + ROOK_VERSION: "v1.15.5" EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" From 7d9b9937d56ff460202bcc3d3c08d24b99b91898 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 13 Nov 2024 10:28:10 +0100 Subject: [PATCH 143/836] chore(security): improve Snyk scan (#6059) Use snyk `code test` for static analysis, in addition to `test`. 
Closes #6058 Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/snyk.yml | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index e41fca8302..9fdb83fe0b 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -16,15 +16,29 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Run Snyk to check for vulnerabilities + - name: Static Code Analysis uses: snyk/actions/golang@0.4.0 continue-on-error: true env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} with: - args: --sarif-file-output=snyk.sarif + command: 'code test' + args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning uses: github/codeql-action/upload-sarif@v3 with: - sarif_file: snyk.sarif + sarif_file: snyk-static.sarif + + - name: Vulnerability scan + uses: snyk/actions/golang@0.4.0 + continue-on-error: true + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + with: + args: --sarif-file-output=snyk-test.sarif + + - name: Upload result to GitHub Code Scanning + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: snyk-test.sarif From 43b694379bfc17841323230b8348ba031455d503 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:37:59 +0100 Subject: [PATCH 144/836] chore(deps): update kindest/node docker tag to v1.31.2 (main) (#6075) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 3a977f6628..e795c4a4dc 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.31.1 +KIND_NODE_DEFAULT_VERSION=v1.31.2 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index a5444c392f..698baa2e3f 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.31.1 +KIND_NODE_DEFAULT_VERSION=v1.31.2 K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.1.0 From 8608232c28131d5df88a80ccdec631f198947150 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 14:45:12 +0100 Subject: [PATCH 145/836] chore(deps): update rajatjindal/krew-release-bot action to v0.0.47 (main) (#6079) --- .github/workflows/release-publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 11cf5be297..d1b429c154 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -165,7 +165,7 @@ jobs: if: | needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' - uses: rajatjindal/krew-release-bot@v0.0.46 + uses: rajatjindal/krew-release-bot@v0.0.47 with: krew_template_file: dist/krew/cnpg.yaml - From 92f26c60f4f98a61c9b3d9032449eac234bdae6e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 14 Nov 2024 10:06:15 +0100 Subject: [PATCH 146/836] 
chore(deps): update dependency kubernetes-sigs/kind to v0.25.0 (main) (#6095)

---
 .github/workflows/continuous-delivery.yml    | 2 +-
 .github/workflows/continuous-integration.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index b1891561f3..d4d118b458 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -36,7 +36,7 @@ on:
 env:
   GOLANG_VERSION: "1.23.x"
   KUBEBUILDER_VERSION: "2.3.1"
-  KIND_VERSION: "v0.24.0"
+  KIND_VERSION: "v0.25.0"
   ROOK_VERSION: "v1.15.5"
   EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0"
   OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 212f877293..592525397c 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -19,7 +19,7 @@ env:
   GOLANG_VERSION: "1.23.x"
   GOLANGCI_LINT_VERSION: "v1.61.0"
   KUBEBUILDER_VERSION: "2.3.1"
-  KIND_VERSION: "v0.24.0"
+  KIND_VERSION: "v0.25.0"
   OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
   API_DOC_NAME: "cloudnative-pg.v1.md"
   SLACK_USERNAME: "cnpg-bot"

From 3c2c3f695eafbf1a0db96954f1796ff37cbd0733 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 14 Nov 2024 11:49:32 +0100
Subject: [PATCH 147/836] chore(deps): update dependency vmware-tanzu/velero to
 v1.15.0 (main) (#6102)

---
 .github/workflows/continuous-delivery.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index d4d118b458..a8d8d97ecf 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -1343,7 +1343,7 @@ jobs:
         name: Setup Velero
         uses: nick-fields/retry@v3
         env:
-          VELERO_VERSION: "v1.14.1"
+          VELERO_VERSION: "v1.15.0"
           VELERO_AWS_PLUGIN_VERSION: "v1.10.1"
         with:
           timeout_minutes: 10

From 726a97358656f1ca900d0c51573461780119f1d1 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Fri, 15 Nov 2024 07:21:00 +0100
Subject: [PATCH 148/836] fix(tests): update the way we check the EKS versions
 (#6101)

AWS changed the entire format and content of the documentation for EKS,
and the way we detected the supported versions was no longer working. We
now use the new format and the new path to get the versions.

Closes #6100

Signed-off-by: Jonathan Gonzalez V.
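For illustration, a minimal sketch of the new parsing pipeline (the sample
input lines are an assumption about the AsciiDoc layout; the sed, awk, and
jq stages are the ones used in the workflow change below):

    # hypothetical excerpt of kubernetes-versions-standard.adoc
    printf '%s\n' 'intro text' '|`Kubernetes` 1.31 |' '|`Kubernetes` 1.29 |' \
      | sed -e 's/.*`Kubernetes` \([0-9].[0-9][0-9]\).*/\1/;/^[0-9]\./!d' \
      | uniq \
      | awk -vv=1.30 '$0>=v {print $0}' \
      | jq -Rn '[inputs]'
    # prints ["1.31"]: versions below MINIMAL_K8S (1.30 in this sketch) are dropped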
---
 .github/workflows/k8s-versions-check.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml
index 4178b76391..9db538b271 100644
--- a/.github/workflows/k8s-versions-check.yml
+++ b/.github/workflows/k8s-versions-check.yml
@@ -42,8 +42,8 @@ jobs:
         # There is no command to get EKS k8s versions, we have to parse the documentation
         name: Get updated EKS versions
         run: |
-          DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/main/doc_source/kubernetes-versions.md"
-          curl --silent "${DOC_URL}" | grep -E '^\+ `[0-9]\.[0-9]{2}`$' | sed -e 's/[\ +`]//g' | \
+          DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/mainline/latest/ug/clusters/kubernetes-versions-standard.adoc"
+          curl --silent "${DOC_URL}" | sed -e 's/.*`Kubernetes` \([0-9].[0-9][0-9]\).*/\1/;/^[0-9]\./!d' | uniq | \
           awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \
           jq -Rn '[inputs]' | tee .github/eks_versions.json
       if: github.event.inputs.limit == null || github.event.inputs.limit == 'eks'

From b3b6dbe7899d27dd65b42d4ecd89c9d0c3d780b0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 15 Nov 2024 10:48:49 +0100
Subject: [PATCH 149/836] chore(deps): update dependency
 vmware-tanzu/velero-plugin-for-aws to v1.11.0 (main) (#6116)

---
 .github/workflows/continuous-delivery.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index a8d8d97ecf..996673da01 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -1344,7 +1344,7 @@ jobs:
         uses: nick-fields/retry@v3
         env:
           VELERO_VERSION: "v1.15.0"
-          VELERO_AWS_PLUGIN_VERSION: "v1.10.1"
+          VELERO_AWS_PLUGIN_VERSION: "v1.11.0"
         with:
           timeout_minutes: 10
           max_attempts: 3

From bfc966caa27c08b34a372eb6c0f7fb0a0d31d2ef Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sat, 16 Nov 2024 10:46:58 +0100
Subject: [PATCH 150/836] chore(deps): update all non-major go dependencies
 (main) (#6115)

https://github.com/goreleaser/goreleaser `v2.4.4` -> `v2.4.5`
golang.org/x/term `v0.25.0` -> `v0.26.0`
https://github.com/grpc/grpc-go `v1.67.1` -> `v1.68.0`
golang.org/x/sys `v0.26.0` -> `v0.27.0`
google.golang.org/genproto/googleapis/rpc `v0.0.0-20240814211410-ddb44dafa142` -> `v0.0.0-20240903143218-8af14fe29dc1`

---
 Makefile |  2 +-
 go.mod   |  8 ++++----
 go.sum   | 16 ++++++++--------
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/Makefile b/Makefile
index 82201e9083..b535b35bb4 100644
--- a/Makefile
+++ b/Makefile
@@ -43,7 +43,7 @@ BUILD_IMAGE ?= true
 POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
 KUSTOMIZE_VERSION ?= v5.5.0
 CONTROLLER_TOOLS_VERSION ?= v0.16.5
-GORELEASER_VERSION ?= v2.4.4
+GORELEASER_VERSION ?= v2.4.5
 SPELLCHECK_VERSION ?= 0.45.0
 WOKE_VERSION ?= 0.19.0
 OPERATOR_SDK_VERSION ?= v1.37.0
diff --git a/go.mod b/go.mod
index 2adca64b38..14918aad86 100644
--- a/go.mod
+++ b/go.mod
@@ -37,8 +37,8 @@ require (
 	go.uber.org/atomic v1.11.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/term v0.25.0
-	google.golang.org/grpc v1.67.1
+	golang.org/x/term v0.26.0
+	google.golang.org/grpc v1.68.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.31.2
 	k8s.io/apiextensions-apiserver v0.31.2
@@ -108,12 +108,12 @@ require (
 	golang.org/x/net v0.30.0 //
indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 5fc91d02a5..d9cc553609 100644 --- a/go.sum +++ b/go.sum @@ -236,10 +236,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= @@ -258,10 +258,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= +google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 214af9c68707826405ee0b75ff93e279cdc83fc0 Mon 
Sep 17 00:00:00 2001 From: Jaime Silvela Date: Mon, 18 Nov 2024 10:43:24 +0100 Subject: [PATCH 151/836] chore: simplify tablespaces interfaces for testing (#5635) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The tablespaces feature utilizes an unnecessary interface to mock the database. This ticket replaces it with sqlmock, improving unit tests by focusing on testing the top-level `Reconcile` instead of the internals. Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Quaresima Signed-off-by: Niccolò Fei Co-authored-by: Gabriele Quaresima Co-authored-by: Niccolò Fei --- .../controller/tablespaces/actions.go | 13 +- .../controller/tablespaces/controller_test.go | 445 ++++++++++++------ .../tablespaces/infrastructure/contract.go | 14 - .../tablespaces/infrastructure/postgres.go | 29 +- .../infrastructure/postgres_test.go | 38 +- .../controller/tablespaces/manager.go | 33 +- .../controller/tablespaces/reconciler.go | 15 +- .../controller/tablespaces/storage.go | 2 + .../postgres/readiness/readiness.go | 13 +- 9 files changed, 367 insertions(+), 235 deletions(-) diff --git a/internal/management/controller/tablespaces/actions.go b/internal/management/controller/tablespaces/actions.go index d886dc4968..64fcc059d6 100644 --- a/internal/management/controller/tablespaces/actions.go +++ b/internal/management/controller/tablespaces/actions.go @@ -18,6 +18,7 @@ package tablespaces import ( "context" + "database/sql" "github.com/cloudnative-pg/machinery/pkg/log" @@ -27,7 +28,7 @@ import ( type tablespaceReconcilerStep interface { execute(ctx context.Context, - tbsManager infrastructure.TablespaceManager, + db *sql.DB, tbsStorageManager tablespaceStorageManager, ) apiv1.TablespaceState } @@ -38,7 +39,7 @@ type createTablespaceAction struct { func (r *createTablespaceAction) execute( ctx context.Context, - tbsManager infrastructure.TablespaceManager, + db *sql.DB, tbsStorageManager tablespaceStorageManager, ) apiv1.TablespaceState { contextLog := log.FromContext(ctx).WithName("tbs_create_reconciler") @@ -59,7 +60,7 @@ func (r *createTablespaceAction) execute( Name: r.tablespace.Name, Owner: r.tablespace.Owner.Name, } - err := tbsManager.Create(ctx, tablespace) + err := infrastructure.Create(ctx, db, tablespace) if err != nil { contextLog.Error(err, "while performing action", "tablespace", r.tablespace.Name) return apiv1.TablespaceState{ @@ -83,7 +84,7 @@ type updateTablespaceAction struct { func (r *updateTablespaceAction) execute( ctx context.Context, - tbsManager infrastructure.TablespaceManager, + db *sql.DB, _ tablespaceStorageManager, ) apiv1.TablespaceState { contextLog := log.FromContext(ctx).WithName("tbs_update_reconciler") @@ -93,7 +94,7 @@ func (r *updateTablespaceAction) execute( Name: r.tablespace.Name, Owner: r.tablespace.Owner.Name, } - err := tbsManager.Update(ctx, tablespace) + err := infrastructure.Update(ctx, db, tablespace) if err != nil { contextLog.Error( err, "while performing action", @@ -119,7 +120,7 @@ type noopTablespaceAction struct { func (r *noopTablespaceAction) execute( _ context.Context, - _ infrastructure.TablespaceManager, + _ *sql.DB, _ tablespaceStorageManager, ) apiv1.TablespaceState { return apiv1.TablespaceState{ diff --git a/internal/management/controller/tablespaces/controller_test.go b/internal/management/controller/tablespaces/controller_test.go index 4c5bf682ec..4bb80e5409 100644 --- a/internal/management/controller/tablespaces/controller_test.go +++ b/internal/management/controller/tablespaces/controller_test.go 
@@ -18,52 +18,27 @@ package tablespaces import ( "context" + "database/sql" + "errors" "fmt" "slices" + "github.com/DATA-DOG/go-sqlmock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/tablespaces/infrastructure" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -type mockTablespaceManager struct { - tablespaces map[string]infrastructure.Tablespace - callHistory []string -} - -func (m *mockTablespaceManager) List(_ context.Context) ([]infrastructure.Tablespace, error) { - m.callHistory = append(m.callHistory, "list") - re := make([]infrastructure.Tablespace, len(m.tablespaces)) - i := 0 - for _, r := range m.tablespaces { - re[i] = r - i++ - } - return re, nil -} - -func (m *mockTablespaceManager) Update( - _ context.Context, _ infrastructure.Tablespace, -) error { - m.callHistory = append(m.callHistory, "update") - return nil -} - -func (m *mockTablespaceManager) Create( - _ context.Context, tablespace infrastructure.Tablespace, -) error { - m.callHistory = append(m.callHistory, "create") - _, found := m.tablespaces[tablespace.Name] - if found { - return fmt.Errorf("trying to create existing tablespace: %s", tablespace.Name) - } - m.tablespaces[tablespace.Name] = tablespace - return nil -} - +// mockTablespaceStorageManager is a storage manager where storage exists by +// default unless explicitly mounted as unavailable type mockTablespaceStorageManager struct { unavailableStorageLocations []string } @@ -79,156 +54,322 @@ func (mst mockTablespaceStorageManager) getStorageLocation(tablespaceName string return fmt.Sprintf("/%s", tablespaceName) } -var _ = Describe("Tablespace synchronizer tests", func() { +type fakeInstance struct { + *postgres.Instance + db *sql.DB +} + +func (f fakeInstance) GetSuperUserDB() (*sql.DB, error) { + return f.db, nil +} + +func (f fakeInstance) CanCheckReadiness() bool { + return true +} + +func (f fakeInstance) IsPrimary() (bool, error) { + return true, nil +} + +const ( + expectedListStmt = ` + SELECT + pg_tablespace.spcname spcname, + COALESCE(pg_roles.rolname, '') rolname + FROM pg_tablespace + LEFT JOIN pg_roles ON pg_tablespace.spcowner = pg_roles.oid + WHERE spcname NOT LIKE $1 + ` + expectedCreateStmt = "CREATE TABLESPACE \"%s\" OWNER \"%s\" " + + "LOCATION '%s'" + + expectedUpdateStmt = "ALTER TABLESPACE \"%s\" OWNER TO \"%s\"" + + expectedReadinessCheck = ` + SELECT + NOT pg_is_in_recovery() + OR (SELECT coalesce(setting, '') = '' FROM pg_settings WHERE name = 'primary_conninfo') + OR pg_last_wal_replay_lsn() IS NOT NULL + ` +) + +func getCluster(ctx context.Context, c client.Client, cluster *apiv1.Cluster) (*apiv1.Cluster, error) { + var updatedCluster apiv1.Cluster + err := c.Get(ctx, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cluster.Name, + }, &updatedCluster) + return &updatedCluster, err +} + +// tablespaceTest represents all the variable bits that go into a test of the +// tablespace reconciler +type tablespaceTest struct { + tablespacesInSpec []apiv1.TablespaceConfiguration + postgresExpectations func(sqlmock.Sqlmock) + shouldRequeue bool + storageManager tablespaceStorageManager + expectedTablespaceStatus 
[]apiv1.TablespaceState +} + +// assertTablespaceReconciled is the full test, going from setting up the mocks +// and the cluster to verifying all expectations are met +func assertTablespaceReconciled(ctx context.Context, tt tablespaceTest) { + db, dbMock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), sqlmock.MonitorPingsOption(true)) + Expect(err).ToNot(HaveOccurred()) + + DeferCleanup(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + } + cluster.Spec.Tablespaces = tt.tablespacesInSpec + + fakeClient := fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithObjects(cluster). + WithStatusSubresource(&apiv1.Cluster{}). + Build() + + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithClusterName("cluster-example") + + instance := fakeInstance{ + Instance: pgInstance, + db: db, + } + tablespaceReconciler := TablespaceReconciler{ - instance: postgres.NewInstance().WithNamespace("myPod"), + instance: &instance, + client: fakeClient, + storageManager: tt.storageManager, + } + + // these bits happen because the reconciler checks for instance readiness + dbMock.ExpectPing() + expectedReadiness := sqlmock.NewRows([]string{""}).AddRow("t") + dbMock.ExpectQuery(expectedReadinessCheck).WillReturnRows(expectedReadiness) + + tt.postgresExpectations(dbMock) + + results, err := tablespaceReconciler.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + if tt.shouldRequeue { + Expect(results).NotTo(BeZero()) + } else { + Expect(results).To(BeZero()) } + updatedCluster, err := getCluster(ctx, fakeClient, cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Status.TablespacesStatus).To(Equal(tt.expectedTablespaceStatus)) +} + +var _ = Describe("Tablespace synchronizer tests", func() { When("tablespace configurations are realizable", func() { It("will do nothing if the DB contains the tablespaces in spec", func(ctx context.Context) { - tablespacesSpec := []apiv1.TablespaceConfiguration{ - { - Name: "foo", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", - }, - Owner: apiv1.DatabaseRoleRef{ - Name: "app", + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { + Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + Owner: apiv1.DatabaseRoleRef{ + Name: "app", + }, }, }, - } - tbsManager := mockTablespaceManager{ - tablespaces: map[string]infrastructure.Tablespace{ - "foo": { + postgresExpectations: func(mock sqlmock.Sqlmock) { + // we expect the reconciler to list the tablespaces on the DB + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}). 
+ AddRow("foo", "app") + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + }, + shouldRequeue: false, + expectedTablespaceStatus: []apiv1.TablespaceState{ + { Name: "foo", Owner: "app", + State: "reconciled", }, }, - } - tbsInDatabase, err := tbsManager.List(ctx) - Expect(err).ShouldNot(HaveOccurred()) - tbsSteps := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec) - result := tablespaceReconciler.applySteps(ctx, &tbsManager, - mockTablespaceStorageManager{}, tbsSteps) - Expect(result).To(ConsistOf(apiv1.TablespaceState{ - Name: "foo", - Owner: "app", - State: apiv1.TablespaceStatusReconciled, - Error: "", - })) - Expect(tbsManager.callHistory).To(HaveLen(1)) - Expect(tbsManager.callHistory).To(ConsistOf("list")) + }) }) It("will change the owner when needed", func(ctx context.Context) { - tablespacesSpec := []apiv1.TablespaceConfiguration{ - { - Name: "foo", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", - }, - Owner: apiv1.DatabaseRoleRef{ - Name: "new_user", + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { + Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + Owner: apiv1.DatabaseRoleRef{ + Name: "new_user", + }, }, }, - } - tbsManager := mockTablespaceManager{ - tablespaces: map[string]infrastructure.Tablespace{ - "foo": { + postgresExpectations: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}). + AddRow("foo", "app") + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + stmt := fmt.Sprintf(expectedUpdateStmt, "foo", "new_user") + mock.ExpectExec(stmt). + WillReturnResult(sqlmock.NewResult(2, 1)) + }, + shouldRequeue: false, + expectedTablespaceStatus: []apiv1.TablespaceState{ + { Name: "foo", - Owner: "app", + Owner: "new_user", + State: "reconciled", }, }, - } - tbsInDatabase, err := tbsManager.List(ctx) - Expect(err).ShouldNot(HaveOccurred()) - tbsByAction := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec) - result := tablespaceReconciler.applySteps(ctx, &tbsManager, - mockTablespaceStorageManager{}, tbsByAction) - Expect(result).To(ConsistOf( - apiv1.TablespaceState{ - Name: "foo", - Owner: "new_user", - State: apiv1.TablespaceStatusReconciled, - Error: "", - }, - )) - Expect(tbsManager.callHistory).To(HaveLen(2)) - Expect(tbsManager.callHistory).To(ConsistOf("list", "update")) + }) }) - It("will create a tablespace in spec that is missing from DB", func(ctx context.Context) { - tablespacesSpec := []apiv1.TablespaceConfiguration{ - { - Name: "foo", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", + It("will create a tablespace in spec that is missing from DB if mount point exists", func(ctx context.Context) { + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { + Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + }, + { + Name: "bar", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + Owner: apiv1.DatabaseRoleRef{ + Name: "new_user", + }, + }, + }, + postgresExpectations: func(mock sqlmock.Sqlmock) { + // we expect the reconciler to list the tablespaces on DB, and to + // create a new tablespace + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}). + AddRow("foo", "") + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + stmt := fmt.Sprintf(expectedCreateStmt, "bar", "new_user", "/var/lib/postgresql/tablespaces/bar/data") + mock.ExpectExec(stmt). 
+ WillReturnResult(sqlmock.NewResult(2, 1)) + }, + shouldRequeue: false, + storageManager: mockTablespaceStorageManager{ + unavailableStorageLocations: []string{ + "/foo", }, }, - { - Name: "bar", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", + expectedTablespaceStatus: []apiv1.TablespaceState{ + { + Name: "foo", + Owner: "", + State: "reconciled", + }, + { + Name: "bar", + Owner: "new_user", + State: "reconciled", }, }, - } - tbsManager := mockTablespaceManager{ - tablespaces: map[string]infrastructure.Tablespace{ - "foo": { + }) + }) + + It("will mark tablespace status as pending with error when the DB CREATE fails", func(ctx context.Context) { + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + }, + { + Name: "bar", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + Owner: apiv1.DatabaseRoleRef{ + Name: "new_user", + }, + }, + }, + postgresExpectations: func(mock sqlmock.Sqlmock) { + // we expect the reconciler to list the tablespaces on DB, and to + // create a new tablespace + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}). + AddRow("foo", "") + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + // we simulate DB command failure + stmt := fmt.Sprintf(expectedCreateStmt, "bar", "new_user", "/var/lib/postgresql/tablespaces/bar/data") + mock.ExpectExec(stmt). + WillReturnError(errors.New("boom")) + }, + shouldRequeue: true, + storageManager: mockTablespaceStorageManager{ + unavailableStorageLocations: []string{ + "/foo", + }, + }, + expectedTablespaceStatus: []apiv1.TablespaceState{ + { + Name: "foo", + Owner: "", + State: "reconciled", + }, + { + Name: "bar", + Owner: "new_user", + State: "pending", + Error: "while creating tablespace bar: boom", }, }, - } - tbsInDatabase, err := tbsManager.List(ctx) - Expect(err).ShouldNot(HaveOccurred()) - tbsSteps := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec) - result := tablespaceReconciler.applySteps(ctx, &tbsManager, - mockTablespaceStorageManager{}, tbsSteps) - Expect(result).To(ConsistOf( - apiv1.TablespaceState{ - Name: "foo", - Owner: "", - State: apiv1.TablespaceStatusReconciled, - }, - apiv1.TablespaceState{ - Name: "bar", - Owner: "", - State: apiv1.TablespaceStatusReconciled, - }, - )) - Expect(tbsManager.callHistory).To(HaveLen(2)) - Expect(tbsManager.callHistory).To(ConsistOf("list", "create")) + }) }) It("will requeue the tablespace creation if the mount path doesn't exist", func(ctx context.Context) { - tablespacesSpec := []apiv1.TablespaceConfiguration{ - { - Name: "foo", - Storage: apiv1.StorageConfiguration{ - Size: "1Gi", - }, - }, - } - tbsManager := mockTablespaceManager{} - tbsInDatabase, err := tbsManager.List(ctx) - Expect(err).ShouldNot(HaveOccurred()) - tbsByAction := evaluateNextSteps(ctx, tbsInDatabase, tablespacesSpec) - result := tablespaceReconciler.applySteps(ctx, &tbsManager, - mockTablespaceStorageManager{ + assertTablespaceReconciled(ctx, tablespaceTest{ + tablespacesInSpec: []apiv1.TablespaceConfiguration{ + { + Name: "foo", + Storage: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + }, + }, + postgresExpectations: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows( + []string{"spcname", "rolname"}) + mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) + }, + shouldRequeue: true, + storageManager: mockTablespaceStorageManager{ unavailableStorageLocations: []string{ "/foo", }, - }, tbsByAction) - 
Expect(result).To(ConsistOf( - apiv1.TablespaceState{ - Name: "foo", - Owner: "", - State: apiv1.TablespaceStatusPendingReconciliation, - Error: "deferred until mount point is created", - }, - )) - Expect(tbsManager.callHistory).To(HaveLen(1)) - Expect(tbsManager.callHistory).To(ConsistOf("list")) + }, + expectedTablespaceStatus: []apiv1.TablespaceState{ + { + Name: "foo", + Owner: "", + State: "pending", + Error: "deferred until mount point is created", + }, + }, + }) }) }) }) diff --git a/internal/management/controller/tablespaces/infrastructure/contract.go b/internal/management/controller/tablespaces/infrastructure/contract.go index 398e277849..d9a3bd16a0 100644 --- a/internal/management/controller/tablespaces/infrastructure/contract.go +++ b/internal/management/controller/tablespaces/infrastructure/contract.go @@ -16,8 +16,6 @@ limitations under the License. package infrastructure -import "context" - // Tablespace represents the tablespace information read from / written to the Database type Tablespace struct { // Name is the name of the tablespace @@ -26,15 +24,3 @@ type Tablespace struct { // Owner is the owner of this tablespace Owner string `json:"owner"` } - -// TablespaceManager abstracts the functionality of reconciling with PostgreSQL tablespaces -type TablespaceManager interface { - // List the tablespace in the database - List(ctx context.Context) ([]Tablespace, error) - - // Create the tablespace in the database - Create(ctx context.Context, tablespace Tablespace) error - - // Update the tablespace in the database (change ownership) - Update(ctx context.Context, tablespace Tablespace) error -} diff --git a/internal/management/controller/tablespaces/infrastructure/postgres.go b/internal/management/controller/tablespaces/infrastructure/postgres.go index 6b01c8184a..16f6eb0ae7 100644 --- a/internal/management/controller/tablespaces/infrastructure/postgres.go +++ b/internal/management/controller/tablespaces/infrastructure/postgres.go @@ -28,31 +28,14 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) -// postgresTablespaceManager is a TablespaceManager for a database instance -type postgresTablespaceManager struct { - superUserDB *sql.DB -} - -// NewPostgresTablespaceManager returns an implementation of TablespaceManager for postgres -func NewPostgresTablespaceManager(superDB *sql.DB) TablespaceManager { - return newPostgresTablespaceManager(superDB) -} - -// NewPostgresTablespaceManager returns an implementation of TablespaceManager for postgres -func newPostgresTablespaceManager(superDB *sql.DB) postgresTablespaceManager { - return postgresTablespaceManager{ - superUserDB: superDB, - } -} - // List the tablespaces in the database // The content exclude pg_default and pg_global database -func (tbsMgr postgresTablespaceManager) List(ctx context.Context) ([]Tablespace, error) { +func List(ctx context.Context, db *sql.DB) ([]Tablespace, error) { logger := log.FromContext(ctx).WithName("tbs_reconciler_list") logger.Trace("Invoked list") wrapErr := func(err error) error { return fmt.Errorf("while listing DB tablespaces: %w", err) } - rows, err := tbsMgr.superUserDB.QueryContext( + rows, err := db.QueryContext( ctx, ` SELECT @@ -93,7 +76,7 @@ func (tbsMgr postgresTablespaceManager) List(ctx context.Context) ([]Tablespace, } // Create the tablespace in the database, if tablespace is temporary tablespace, need reload configure -func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespace) error { +func Create(ctx context.Context, db *sql.DB, tbs 
Tablespace) error { contextLog := log.FromContext(ctx).WithName("tbs_reconciler_create") tablespaceLocation := specs.LocationForTablespace(tbs.Name) @@ -104,7 +87,7 @@ func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespa return fmt.Errorf("while creating tablespace %s: %w", tbs.Name, err) } var err error - if _, err = tbsMgr.superUserDB.ExecContext( + if _, err = db.ExecContext( ctx, fmt.Sprintf( "CREATE TABLESPACE %s OWNER %s LOCATION '%s'", @@ -119,7 +102,7 @@ func (tbsMgr postgresTablespaceManager) Create(ctx context.Context, tbs Tablespa } // Update the tablespace in the database (change ownership) -func (tbsMgr postgresTablespaceManager) Update(ctx context.Context, tbs Tablespace) error { +func Update(ctx context.Context, db *sql.DB, tbs Tablespace) error { contextLog := log.FromContext(ctx).WithName("tbs_reconciler_update") tablespaceLocation := specs.LocationForTablespace(tbs.Name) @@ -130,7 +113,7 @@ func (tbsMgr postgresTablespaceManager) Update(ctx context.Context, tbs Tablespa return fmt.Errorf("while updating tablespace %s: %w", tbs.Name, err) } var err error - if _, err = tbsMgr.superUserDB.ExecContext( + if _, err = db.ExecContext( ctx, fmt.Sprintf( "ALTER TABLESPACE %s OWNER TO %s", diff --git a/internal/management/controller/tablespaces/infrastructure/postgres_test.go b/internal/management/controller/tablespaces/infrastructure/postgres_test.go index 51299e6c57..78b6e10f9a 100644 --- a/internal/management/controller/tablespaces/infrastructure/postgres_test.go +++ b/internal/management/controller/tablespaces/infrastructure/postgres_test.go @@ -36,30 +36,34 @@ var _ = Describe("Postgres tablespaces functions test", func() { ` expectedCreateStmt := "CREATE TABLESPACE \"%s\" OWNER \"%s\" " + "LOCATION '/var/lib/postgresql/tablespaces/atablespace/data'" + + expectedUpdateStmt := "ALTER TABLESPACE \"%s\" OWNER TO \"%s\"" + It("should send the expected query to list tablespaces and parse the return", func(ctx SpecContext) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) + tbsName := "atablespace" + anotherTbsName := "anothertablespace" + ownerName := "postgres" - tbsManager := newPostgresTablespaceManager(db) rows := sqlmock.NewRows( []string{"spcname", "rolname"}). - AddRow("atablespace", "postgres"). - AddRow("anothertablespace", "postgres") + AddRow(tbsName, ownerName). 
+ AddRow(anotherTbsName, ownerName) mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnRows(rows) - tbs, err := tbsManager.List(ctx) + tbs, err := List(ctx, db) Expect(err).ShouldNot(HaveOccurred()) Expect(tbs).To(HaveLen(2)) Expect(tbs).To(ConsistOf( - Tablespace{Name: "atablespace", Owner: "postgres"}, - Tablespace{Name: "anothertablespace", Owner: "postgres"})) + Tablespace{Name: tbsName, Owner: ownerName}, + Tablespace{Name: anotherTbsName, Owner: ownerName})) }) It("should detect error if the list query returns error", func(ctx SpecContext) { db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) Expect(err).ToNot(HaveOccurred()) - tbsManager := newPostgresTablespaceManager(db) mock.ExpectQuery(expectedListStmt).WithArgs("pg_").WillReturnError(fmt.Errorf("boom")) - tbs, err := tbsManager.List(ctx) + tbs, err := List(ctx, db) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("boom")) Expect(tbs).To(BeEmpty()) @@ -71,10 +75,9 @@ var _ = Describe("Postgres tablespaces functions test", func() { tbsName := "atablespace" ownerName := "postgres" stmt := fmt.Sprintf(expectedCreateStmt, tbsName, ownerName) - tbsManager := newPostgresTablespaceManager(db) mock.ExpectExec(stmt). WillReturnResult(sqlmock.NewResult(2, 1)) - err = tbsManager.Create(ctx, Tablespace{Name: tbsName, Owner: "postgres"}) + err = Create(ctx, db, Tablespace{Name: tbsName, Owner: ownerName}) Expect(err).ShouldNot(HaveOccurred()) Expect(mock.ExpectationsWereMet()).To(Succeed()) }) @@ -84,12 +87,23 @@ var _ = Describe("Postgres tablespaces functions test", func() { tbsName := "atablespace" ownerName := "postgres" stmt := fmt.Sprintf(expectedCreateStmt, tbsName, ownerName) - tbsManager := newPostgresTablespaceManager(db) mock.ExpectExec(stmt). WillReturnError(fmt.Errorf("boom")) - err = tbsManager.Create(ctx, Tablespace{Name: tbsName, Owner: "postgres"}) + err = Create(ctx, db, Tablespace{Name: tbsName, Owner: ownerName}) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("boom")) Expect(mock.ExpectationsWereMet()).To(Succeed()) }) + It("should issue the expected command to update a tablespace", func(ctx SpecContext) { + db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + tbsName := "atablespace" + ownerName := "postgres" + stmt := fmt.Sprintf(expectedUpdateStmt, tbsName, ownerName) + mock.ExpectExec(stmt). 
+ WillReturnResult(sqlmock.NewResult(2, 1)) + err = Update(ctx, db, Tablespace{Name: tbsName, Owner: ownerName}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(mock.ExpectationsWereMet()).To(Succeed()) + }) }) diff --git a/internal/management/controller/tablespaces/manager.go b/internal/management/controller/tablespaces/manager.go index 1b793189dc..6ba7490a6f 100644 --- a/internal/management/controller/tablespaces/manager.go +++ b/internal/management/controller/tablespaces/manager.go @@ -18,6 +18,7 @@ package tablespaces import ( "context" + "database/sql" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -27,18 +28,30 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" ) +// instanceInterface represents the behavior required for the reconciler for +// instance operations +type instanceInterface interface { + GetNamespaceName() string + GetClusterName() string + GetSuperUserDB() (*sql.DB, error) + IsPrimary() (bool, error) + CanCheckReadiness() bool +} + // TablespaceReconciler is a Kubernetes controller that ensures Tablespaces // are created in Postgres type TablespaceReconciler struct { - instance *postgres.Instance - client client.Client + instance instanceInterface + storageManager tablespaceStorageManager + client client.Client } // NewTablespaceReconciler creates a new TablespaceReconciler func NewTablespaceReconciler(instance *postgres.Instance, client client.Client) *TablespaceReconciler { controller := &TablespaceReconciler{ - instance: instance, - client: client, + instance: instance, + client: client, + storageManager: instanceTablespaceStorageManager{}, } return controller } @@ -54,7 +67,7 @@ func (r *TablespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { // GetCluster gets the managed cluster through the client func (r *TablespaceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) { var cluster apiv1.Cluster - err := r.GetClient().Get(ctx, + err := r.client.Get(ctx, types.NamespacedName{ Namespace: r.instance.GetNamespaceName(), Name: r.instance.GetClusterName(), @@ -66,13 +79,3 @@ func (r *TablespaceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, return &cluster, nil } - -// GetClient returns the dynamic client that is being used for a certain reconciler -func (r *TablespaceReconciler) GetClient() client.Client { - return r.client -} - -// Instance returns the PostgreSQL instance that this reconciler is working on -func (r *TablespaceReconciler) Instance() *postgres.Instance { - return r.instance -} diff --git a/internal/management/controller/tablespaces/reconciler.go b/internal/management/controller/tablespaces/reconciler.go index 2c1e70d79f..031ea0bfeb 100644 --- a/internal/management/controller/tablespaces/reconciler.go +++ b/internal/management/controller/tablespaces/reconciler.go @@ -18,6 +18,7 @@ package tablespaces import ( "context" + "database/sql" "fmt" "time" @@ -96,9 +97,7 @@ func (r *TablespaceReconciler) reconcile( return nil, fmt.Errorf("while reconcile tablespaces: %w", err) } - tbsManager := infrastructure.NewPostgresTablespaceManager(superUserDB) - tbsStorageManager := instanceTablespaceStorageManager{} - tbsInDatabase, err := tbsManager.List(ctx) + tbsInDatabase, err := infrastructure.List(ctx, superUserDB) if err != nil { return nil, fmt.Errorf("could not fetch tablespaces from database: %w", err) } @@ -106,15 +105,14 @@ func (r *TablespaceReconciler) reconcile( steps := evaluateNextSteps(ctx, tbsInDatabase, cluster.Spec.Tablespaces) result := r.applySteps( ctx, - 
tbsManager,
-		tbsStorageManager,
+		superUserDB,
 		steps,
 	)
 
 	// update the cluster status
 	updatedCluster := cluster.DeepCopy()
 	updatedCluster.Status.TablespacesStatus = result
-	if err := r.GetClient().Status().Patch(ctx, updatedCluster, client.MergeFrom(cluster)); err != nil {
+	if err := r.client.Status().Patch(ctx, updatedCluster, client.MergeFrom(cluster)); err != nil {
 		return nil, fmt.Errorf("while setting the tablespace reconciler status: %w", err)
 	}
 
@@ -132,14 +130,13 @@ func (r *TablespaceReconciler) reconcile(
 // if they arose when applying the steps
 func (r *TablespaceReconciler) applySteps(
 	ctx context.Context,
-	tbsManager infrastructure.TablespaceManager,
-	tbsStorageManager tablespaceStorageManager,
+	db *sql.DB,
 	actions []tablespaceReconcilerStep,
 ) []apiv1.TablespaceState {
 	result := make([]apiv1.TablespaceState, len(actions))
 
 	for idx, step := range actions {
-		result[idx] = step.execute(ctx, tbsManager, tbsStorageManager)
+		result[idx] = step.execute(ctx, db, r.storageManager)
 	}
 
 	return result
diff --git a/internal/management/controller/tablespaces/storage.go b/internal/management/controller/tablespaces/storage.go
index c9984305aa..d8ca019dff 100644
--- a/internal/management/controller/tablespaces/storage.go
+++ b/internal/management/controller/tablespaces/storage.go
@@ -22,6 +22,8 @@ import (
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
 )
 
+// tablespaceStorageManager represents the required behavior in terms of storage
+// for the tablespace reconciler
 type tablespaceStorageManager interface {
 	getStorageLocation(tbsName string) string
 	storageExists(tbsName string) (bool, error)
diff --git a/pkg/management/postgres/readiness/readiness.go b/pkg/management/postgres/readiness/readiness.go
index fc2d0f748b..ed1edb90dc 100644
--- a/pkg/management/postgres/readiness/readiness.go
+++ b/pkg/management/postgres/readiness/readiness.go
@@ -18,23 +18,28 @@ package readiness
 
 import (
 	"context"
+	"database/sql"
 	"errors"
-
-	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
 )
 
 // ErrStreamingReplicaNotConnected is raised for streaming replicas that never connected to its primary
 var ErrStreamingReplicaNotConnected = errors.New("streaming replica was never connected to the primary node")
 
+// instanceInterface represents the required behavior for use in the readiness probe
+type instanceInterface interface {
+	CanCheckReadiness() bool
+	GetSuperUserDB() (*sql.DB, error)
+}
+
 // Data is the readiness checker structure
 type Data struct {
-	instance *postgres.Instance
+	instance instanceInterface
 
 	streamingReplicaValidated bool
 }
 
 // ForInstance creates a readiness checker for a certain instance
-func ForInstance(instance *postgres.Instance) *Data {
+func ForInstance(instance instanceInterface) *Data {
 	return &Data{
 		instance: instance,
 	}

From c556f597345d386cf5cb8e6a0f4a68b162e6f4ad Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Mon, 18 Nov 2024 15:44:28 +0100
Subject: [PATCH 152/836] chore(perf): Use controller-runtime pprof server (#6000)

The controller-runtime provides a pprof server that we can just enable;
this change uses that capability and removes the pprof server we used to
have.

Closes #5999

Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- config/manager/env_override.yaml | 4 ++ internal/cmd/manager/controller/controller.go | 46 ++----------------- 2 files changed, 9 insertions(+), 41 deletions(-) diff --git a/config/manager/env_override.yaml b/config/manager/env_override.yaml index 867e3b7f83..633bff2e15 100644 --- a/config/manager/env_override.yaml +++ b/config/manager/env_override.yaml @@ -20,3 +20,7 @@ spec: - --webhook-port=9443 - --log-level=debug - --pprof-server=true + ports: + - containerPort: 6060 + name: pprof + protocol: TCP diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index b61f967844..fe8028545d 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ -19,10 +19,8 @@ package controller import ( "context" - "errors" "fmt" "net/http" - "net/http/pprof" "time" "github.com/cloudnative-pg/machinery/pkg/log" @@ -41,7 +39,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/controller" schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" "github.com/cloudnative-pg/cloudnative-pg/pkg/multicache" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" @@ -101,15 +98,10 @@ func RunController( conf *configuration.Data, ) error { ctx := context.Background() - setupLog.Info("Starting CloudNativePG Operator", "version", versions.Version, "build", versions.Info) - if pprofDebug { - startPprofDebugServer(ctx) - } - managerOptions := ctrl.Options{ Scheme: scheme, Metrics: server.Options{ @@ -123,6 +115,7 @@ func RunController( Port: port, CertDir: defaultWebhookCertDir, }), + PprofBindAddress: getPprofServerAddress(pprofDebug), // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily // when the Manager ends. This requires the binary to immediately end when the // Manager is stopped, otherwise, this setting is unsafe. 
Setting this significantly @@ -464,39 +457,10 @@ func readSecret( return data, nil } -// startPprofDebugServer exposes pprof debug server if the pprof-server env variable is set to true -func startPprofDebugServer(ctx context.Context) { - mux := http.NewServeMux() - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - - pprofServer := http.Server{ - Addr: "0.0.0.0:6060", - Handler: mux, - ReadTimeout: webserver.DefaultReadTimeout, - ReadHeaderTimeout: webserver.DefaultReadHeaderTimeout, +func getPprofServerAddress(enabled bool) string { + if enabled { + return "0.0.0.0:6060" } - setupLog.Info("Starting pprof HTTP server", "addr", pprofServer.Addr) - - go func() { - go func() { - <-ctx.Done() - - setupLog.Info("shutting down pprof HTTP server") - ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Second) - defer cancelFunc() - - if err := pprofServer.Shutdown(ctx); err != nil { - setupLog.Error(err, "Failed to shutdown pprof HTTP server") - } - }() - - if err := pprofServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { - setupLog.Error(err, "Failed to start pprof HTTP server") - } - }() + return "" } From 8714eaa8070d135f203f9fb0676d246deafc2721 Mon Sep 17 00:00:00 2001 From: Sander Rodenhuis <53382213+srodenhuis@users.noreply.github.com> Date: Wed, 20 Nov 2024 14:03:25 +0100 Subject: [PATCH 153/836] docs: add Akamai Technologies as an adopter (#6140) Add Akamai Technologies as an adopter. Akamai used cloudnative-pg for all managed databases in the Akamai App Platform (for LKE). See: https://github.com/linode/apl-core Signed-off-by: Sander Rodenhuis --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index c2038c9480..03a9c9841d 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -56,3 +56,4 @@ This list is sorted in chronological order, based on the submission date. | [Cambium](https://www.cambium.earth) | @Mmoncadaisla | 2024-09-25 | Cambium leverages CloudNativePG at its core to analyze and visualize geospatial data for carbon market applications, ranging from site selection to monitoring, reporting, and verification. | | [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. | | [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. | +| [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App PLatform](https://github.com/linode/apl-core) for all platform managed PostgreSQL databases. | From 2b0dc71181362b1a81a5b6e08c789fd00bec7f8f Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Wed, 20 Nov 2024 14:06:44 +0100 Subject: [PATCH 154/836] docs: add Novo Nordisk as a CloudNativePG adopter (#6142) We've been running CloudNativePG clusters as described for the last 9-10 months and are extremely happy with the operator. Glad to show our support this way! Signed-off-by: Casper Nielsen --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 03a9c9841d..7ce2afc859 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -57,3 +57,4 @@ This list is sorted in chronological order, based on the submission date. 
 | [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. |
 | [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. |
 | [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App PLatform](https://github.com/linode/apl-core) for all platform managed PostgreSQL databases. |
+| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-24 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. |

From 72e2f5c490fd17346f1f9ff539589a8d9f01690c Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Thu, 21 Nov 2024 11:01:44 +0100
Subject: [PATCH 155/836] feat(plugin): allow "plugin" backup method in kubectl
 cnpg backup (#6045)

This patch adds the following features to the `kubectl cnpg backup`
subcommand:

* add the `plugin` backup method.
* add a `plugin-name` option that allows the user to specify the plugin
  that should manage the backup.
* add a new `plugin-parameters` option that allows the user to specify a
  set of parameters to be passed to the plugin while taking a backup of a
  cluster.

Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Signed-off-by: wolfox
Co-authored-by: Armando Ruocco
Co-authored-by: wolfox
---
 internal/cmd/plugin/backup/cmd.go             | 55 ++++++++++++++++---
 internal/cmd/plugin/backup/parameters.go      | 53 ++++++++++++++++++
 internal/cmd/plugin/backup/parameters_test.go | 53 ++++++++++++++++++
 internal/cmd/plugin/backup/suite_test.go      | 30 ++++++++++
 4 files changed, 183 insertions(+), 8 deletions(-)
 create mode 100644 internal/cmd/plugin/backup/parameters.go
 create mode 100644 internal/cmd/plugin/backup/parameters_test.go
 create mode 100644 internal/cmd/plugin/backup/suite_test.go

diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go
index 6c29147f7f..17aceabe31 100644
--- a/internal/cmd/plugin/backup/cmd.go
+++ b/internal/cmd/plugin/backup/cmd.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"slices"
 	"strconv"
+	"strings"
 	"time"
 
 	pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time"
@@ -44,6 +45,8 @@ type backupCommandOptions struct {
 	online              *bool
 	immediateCheckpoint *bool
 	waitForArchive      *bool
+	pluginName          string
+	pluginParameters    pluginParameters
 }
 
 func (options backupCommandOptions) getOnlineConfiguration() *apiv1.OnlineConfiguration {
@@ -59,7 +62,14 @@ func (options backupCommandOptions) getOnlineConfig
 
 // NewCmd creates the new "backup" subcommand
 func NewCmd() *cobra.Command {
-	var backupName, backupTarget, backupMethod, online, immediateCheckpoint, waitForArchive string
+	var backupName, backupTarget, backupMethod, online, immediateCheckpoint, waitForArchive, pluginName string
+	var pluginParameters pluginParameters
+
+	backupMethods := []string{
+		string(apiv1.BackupMethodBarmanObjectStore),
+		string(apiv1.BackupMethodVolumeSnapshot),
+		string(apiv1.BackupMethodPlugin),
+	}
 
 	backupSubcommand := &cobra.Command{
 		Use:   "backup [cluster]",
@@ -91,15 +101,24 @@ func NewCmd() *cobra.Command {
 			}
 
 			// Check if the backup method is correct
-			allowedBackupMethods := []string{
-				"",
-				string(apiv1.BackupMethodBarmanObjectStore),
-				string(apiv1.BackupMethodVolumeSnapshot),
-			}
+			allowedBackupMethods := backupMethods
+
allowedBackupMethods = append(allowedBackupMethods, "") if !slices.Contains(allowedBackupMethods, backupMethod) { return fmt.Errorf("backup-method: %s is not supported by the backup command", backupMethod) } + if backupMethod != string(apiv1.BackupMethodPlugin) { + if len(pluginName) > 0 { + return fmt.Errorf("plugin-name is allowed only when backup method in %s", + apiv1.BackupMethodPlugin) + } + + if len(pluginParameters) > 0 { + return fmt.Errorf("plugin-parameters is allowed only when backup method in %s", + apiv1.BackupMethodPlugin) + } + } + var cluster apiv1.Cluster // check if the cluster exists err := plugin.Client.Get( @@ -137,6 +156,8 @@ func NewCmd() *cobra.Command { online: parsedOnline, immediateCheckpoint: parsedImmediateCheckpoint, waitForArchive: parsedWaitForArchive, + pluginName: pluginName, + pluginParameters: pluginParameters, }) }, } @@ -161,8 +182,8 @@ func NewCmd() *cobra.Command { "method", "m", "", - "If present, will override the backup method defined in backup resource, "+ - "valid values are volumeSnapshot and barmanObjectStore.", + fmt.Sprintf("If present, will override the backup method defined in backup resource, "+ + "valid values are: %s.", strings.Join(backupMethods, ", ")), ) const optionalAcceptedValues = "Optional. Accepted values: true|false|\"\"." @@ -188,6 +209,17 @@ func NewCmd() *cobra.Command { optionalAcceptedValues, ) + backupSubcommand.Flags().StringVar(&pluginName, "plugin-name", "", + "The name of the plugin that should take the backup. This option "+ + "is allowed only when the backup method is set to 'plugin'", + ) + + backupSubcommand.Flags().VarP(&pluginParameters, "plugin-parameters", "", + "The set of plugin parameters that should be passed to the backup plugin "+ + " i.e. param-one=value,param-two=value. This option "+ + "is allowed only when the backup method is set to 'plugin'", + ) + return backupSubcommand } @@ -210,6 +242,13 @@ func createBackup(ctx context.Context, options backupCommandOptions) error { } utils.LabelClusterName(&backup.ObjectMeta, options.clusterName) + if len(options.pluginName) > 0 { + backup.Spec.PluginConfiguration = &apiv1.BackupPluginConfiguration{ + Name: options.pluginName, + Parameters: options.pluginParameters, + } + } + err := plugin.Client.Create(ctx, &backup) if err == nil { fmt.Printf("backup/%v created\n", backup.Name) diff --git a/internal/cmd/plugin/backup/parameters.go b/internal/cmd/plugin/backup/parameters.go new file mode 100644 index 0000000000..126031a0e3 --- /dev/null +++ b/internal/cmd/plugin/backup/parameters.go @@ -0,0 +1,53 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backup + +import ( + "strings" + + "github.com/cloudnative-pg/machinery/pkg/stringset" +) + +// pluginParameters is a set of parameters to be passed +// to the plugin when taking a backup +type pluginParameters map[string]string + +// String implements the pflag.Value interface +func (e pluginParameters) String() string { + return strings.Join(stringset.FromKeys(e).ToList(), ",") +} + +// Type implements the pflag.Value interface +func (e pluginParameters) Type() string { + return "map[string]string" +} + +// Set implements the pflag.Value interface +func (e *pluginParameters) Set(val string) error { + entries := strings.Split(val, ",") + result := make(map[string]string, len(entries)) + for _, entry := range entries { + if len(entry) == 0 { + continue + } + + before, after, _ := strings.Cut(entry, "=") + result[before] = after + } + *e = result + return nil +} diff --git a/internal/cmd/plugin/backup/parameters_test.go b/internal/cmd/plugin/backup/parameters_test.go new file mode 100644 index 0000000000..4b4e44cc22 --- /dev/null +++ b/internal/cmd/plugin/backup/parameters_test.go @@ -0,0 +1,53 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("plugin parameters parsing", func() { + DescribeTable( + "plugin parameters and values table", + func(value string, expectedParams pluginParameters) { + var params pluginParameters + Expect(params.Set(value)).ToNot(HaveOccurred()) + Expect(params).To(HaveLen(len(expectedParams))) + for k, v := range expectedParams { + Expect(params).To(HaveKeyWithValue(k, v)) + } + }, + Entry("empty value", "", nil), + Entry("singleton", "a=b", map[string]string{ + "a": "b", + }), + Entry("singleton without value", "a", map[string]string{ + "a": "", + }), + Entry("set", "a=b,c=d", map[string]string{ + "a": "b", + "c": "d", + }), + Entry("set with elements without value", "a=b,c,d=,e=f", map[string]string{ + "a": "b", + "c": "", + "d": "", + "e": "f", + }), + ) +}) diff --git a/internal/cmd/plugin/backup/suite_test.go b/internal/cmd/plugin/backup/suite_test.go new file mode 100644 index 0000000000..2dd5a10241 --- /dev/null +++ b/internal/cmd/plugin/backup/suite_test.go @@ -0,0 +1,30 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestCerts(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "CNPG Backup subcommand tests") +} From 07008a8ca57518039c1a5ee0e84c8d76787fb849 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 22 Nov 2024 14:32:42 +0100 Subject: [PATCH 156/836] perf(wal-archive): remove typed client from cmd execution (#6066) Performance improvement: the `wal-archive` command in the instance manager now uses the local webserver `controller-runtime` client instead of creating a new one. Closes #5366 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- internal/cmd/manager/walarchive/cmd.go | 42 ++------- pkg/management/postgres/webserver/local.go | 93 +++++++++++++++---- .../postgres/webserver/local_client.go | 63 +++++++++++++ pkg/management/url/url.go | 3 + 4 files changed, 151 insertions(+), 50 deletions(-) create mode 100644 pkg/management/postgres/webserver/local_client.go diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index 7ff6adf958..fef861040f 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -31,7 +31,6 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" @@ -39,9 +38,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management" pgManagement "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -70,49 +68,27 @@ func NewCmd() *cobra.Command { return err } - typedClient, err := management.NewControllerRuntimeClient() - if err != nil { - contextLog.Error(err, "creating controller-runtine client") - return err - } - - cluster, err := cacheClient.GetCluster() - if err != nil { - return fmt.Errorf("failed to get cluster: %w", err) + cluster, errCluster := cacheClient.GetCluster() + if errCluster != nil { + return fmt.Errorf("failed to get cluster: %w", errCluster) } - err = run(ctx, podName, pgData, cluster, args) - if err != nil { + if err := run(ctx, podName, pgData, cluster, args); err != nil { if errors.Is(err, errSwitchoverInProgress) { contextLog.Warning("Refusing to archive WALs until the switchover is not completed", "err", err) } else { contextLog.Error(err, logErrorMessage) } - - condition := metav1.Condition{ - Type: string(apiv1.ConditionContinuousArchiving), - Status: metav1.ConditionFalse, - Reason: string(apiv1.ConditionReasonContinuousArchivingFailing), - Message: err.Error(), - } - if errCond := conditions.Patch(ctx, typedClient, cluster, &condition); errCond != nil { - contextLog.Error(errCond, "Error changing wal archiving condition (wal archiving failed)") + if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, err.Error()); err != nil { + contextLog.Error(reqErr, "while invoking the set wal archive condition 
endpoint") } return err } - // Update the condition if needed. - condition := metav1.Condition{ - Type: string(apiv1.ConditionContinuousArchiving), - Status: metav1.ConditionTrue, - Reason: string(apiv1.ConditionReasonContinuousArchivingSuccess), - Message: "Continuous archiving is working", - } - if errCond := conditions.Patch(ctx, typedClient, cluster, &condition); errCond != nil { - contextLog.Error(errCond, "Error changing wal archiving condition (wal archiving succeeded)") + if err := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, ""); err != nil { + contextLog.Error(err, "while invoking the set wal archive condition endpoint") } - return nil }, } diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go index 7981576c9f..0d15f851ca 100644 --- a/pkg/management/postgres/webserver/local.go +++ b/pkg/management/postgres/webserver/local.go @@ -27,11 +27,13 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" + "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -58,6 +60,7 @@ func NewLocalWebServer( serveMux := http.NewServeMux() serveMux.HandleFunc(url.PathCache, endpoints.serveCache) serveMux.HandleFunc(url.PathPgBackup, endpoints.requestBackup) + serveMux.HandleFunc(url.PathWALArchiveStatusCondition, endpoints.setWALArchiveStatusCondition) server := &http.Server{ Addr: fmt.Sprintf("localhost:%d", url.LocalPort), @@ -80,15 +83,7 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req var js []byte switch requestedObject { case cache.ClusterKey: - var cluster apiv1.Cluster - err := ws.typedClient.Get( - r.Context(), - client.ObjectKey{ - Name: ws.instance.GetClusterName(), - Namespace: ws.instance.GetNamespaceName(), - }, - &cluster, - ) + cluster, err := ws.getCluster(r.Context()) if apierrs.IsNotFound(err) { w.WriteHeader(http.StatusNotFound) return @@ -98,7 +93,7 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req return } - js, err = json.Marshal(&cluster) + js, err = json.Marshal(cluster) if err != nil { log.Error(err, "while marshalling the cluster") w.WriteHeader(http.StatusInternalServerError) @@ -133,7 +128,6 @@ func (ws *localWebserverEndpoints) serveCache(w http.ResponseWriter, r *http.Req // This function schedule a backup func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.Request) { - var cluster apiv1.Cluster var backup apiv1.Backup ctx := context.Background() @@ -144,10 +138,8 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. return } - if err := ws.typedClient.Get(ctx, client.ObjectKey{ - Namespace: ws.instance.GetNamespaceName(), - Name: ws.instance.GetClusterName(), - }, &cluster); err != nil { + cluster, err := ws.getCluster(ctx) + if err != nil { http.Error( w, fmt.Sprintf("error while getting cluster: %v", err.Error()), @@ -173,7 +165,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. 
return } - if err := ws.startBarmanBackup(ctx, &cluster, &backup); err != nil { + if err := ws.startBarmanBackup(ctx, cluster, &backup); err != nil { http.Error( w, fmt.Sprintf("error while requesting backup: %v", err.Error()), @@ -188,7 +180,7 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. return } - ws.startPluginBackup(ctx, &cluster, &backup) + ws.startPluginBackup(ctx, cluster, &backup) _, _ = fmt.Fprint(w, "OK") default: @@ -199,6 +191,17 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http. } } +func (ws *localWebserverEndpoints) getCluster(ctx context.Context) (*apiv1.Cluster, error) { + var cluster apiv1.Cluster + if err := ws.typedClient.Get(ctx, client.ObjectKey{ + Namespace: ws.instance.GetNamespaceName(), + Name: ws.instance.GetClusterName(), + }, &cluster); err != nil { + return nil, err + } + return &cluster, nil +} + func (ws *localWebserverEndpoints) startBarmanBackup( ctx context.Context, cluster *apiv1.Cluster, @@ -236,3 +239,59 @@ func (ws *localWebserverEndpoints) startPluginBackup( ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 100*time.Minute) NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder).Start(ctx) } + +// ArchiveStatusRequest is the request body for the archive status endpoint +type ArchiveStatusRequest struct { + Error string `json:"error,omitempty"` +} + +func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() *metav1.Condition { + if asr.Error != "" { + return &metav1.Condition{ + Type: string(apiv1.ConditionContinuousArchiving), + Status: metav1.ConditionFalse, + Reason: string(apiv1.ConditionReasonContinuousArchivingFailing), + Message: asr.Error, + } + } + + return &metav1.Condition{ + Type: string(apiv1.ConditionContinuousArchiving), + Status: metav1.ConditionTrue, + Reason: string(apiv1.ConditionReasonContinuousArchivingSuccess), + Message: "Continuous archiving is working", + } +} + +func (ws *localWebserverEndpoints) setWALArchiveStatusCondition(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + contextLogger := log.FromContext(ctx) + // decode body req + var asr ArchiveStatusRequest + if err := json.NewDecoder(r.Body).Decode(&asr); err != nil { + contextLogger.Error(err, "error while decoding request") + http.Error(w, fmt.Sprintf("error while decoding request: %v", err.Error()), http.StatusBadRequest) + return + } + + cluster, err := ws.getCluster(ctx) + if err != nil { + http.Error( + w, + fmt.Sprintf("error while getting cluster: %v", err.Error()), + http.StatusInternalServerError) + return + } + + if errCond := conditions.Patch(ctx, ws.typedClient, cluster, asr.getContinuousArchivingCondition()); errCond != nil { + contextLogger.Error(errCond, "Error changing wal archiving condition", + "condition", asr.getContinuousArchivingCondition()) + http.Error( + w, + fmt.Sprintf("error while updating wal archiving condition: %v", errCond.Error()), + http.StatusInternalServerError) + return + } + + _, _ = fmt.Fprint(w, "OK") +} diff --git a/pkg/management/postgres/webserver/local_client.go b/pkg/management/postgres/webserver/local_client.go new file mode 100644 index 0000000000..4f2ec068f5 --- /dev/null +++ b/pkg/management/postgres/webserver/local_client.go @@ -0,0 +1,63 @@ +package webserver + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "time" + + "github.com/cloudnative-pg/machinery/pkg/log" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" +) 
+ +// LocalClient is an entity capable of interacting with the local webserver endpoints +type LocalClient interface { + // SetWALArchiveStatusCondition sets the wal-archive status condition. + // An empty errMessage means that the archive process was successful. + // Returns any error encountered during the request. + SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error +} + +type localClient struct { + cli *http.Client +} + +// NewLocalClient returns a new instance of LocalClient +func NewLocalClient() LocalClient { + const connectionTimeout = 2 * time.Second + const requestTimeout = 30 * time.Second + + return &localClient{cli: resources.NewHTTPClient(connectionTimeout, requestTimeout)} +} + +func (c *localClient) SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error { + contextLogger := log.FromContext(ctx).WithValues("endpoint", url.PathWALArchiveStatusCondition) + + asr := ArchiveStatusRequest{ + Error: errMessage, + } + + encoded, err := json.Marshal(&asr) + if err != nil { + return err + } + + resp, err := http.Post( + url.Local(url.PathWALArchiveStatusCondition, url.LocalPort), + "application/json", + bytes.NewBuffer(encoded), + ) + if err != nil { + return err + } + defer func() { + if errClose := resp.Body.Close(); errClose != nil { + contextLogger.Error(err, "while closing response body") + } + }() + + return nil +} diff --git a/pkg/management/url/url.go b/pkg/management/url/url.go index b72e89d9d1..31e3b4cead 100644 --- a/pkg/management/url/url.go +++ b/pkg/management/url/url.go @@ -43,6 +43,9 @@ const ( // PathPgStatus is the URL path for PostgreSQL Status PathPgStatus string = "/pg/status" + // PathWALArchiveStatusCondition is the URL path for setting the wal-archive condition on the Cluster object + PathWALArchiveStatusCondition string = "/cluster/status/condition/wal/archive" + // PathPgBackup is the URL path for PostgreSQL Backup PathPgBackup string = "/pg/backup" From a03c9ffea127a2c96fd6b343e36fae54d60b8e33 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 22 Nov 2024 16:42:38 +0100 Subject: [PATCH 157/836] fix: avoid injecting plugin environment while restoring a backup (#6144) Signed-off-by: Leonardo Cecchi --- go.mod | 2 +- go.sum | 4 ++-- pkg/management/postgres/restore.go | 14 +++++++++++++- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 14918aad86..187b8fd06e 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 - github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836 + github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index d9cc553609..8c152fd74c 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 h1:HX5 github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA= github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= -github.com/cloudnative-pg/machinery 
v0.0.0-20241030141148-670a0f16f836 h1:Hhg+I2QcaPNN5XaSsYb7Xw3PbQlvCA9eDY+SvVf902Q= -github.com/cloudnative-pg/machinery v0.0.0-20241030141148-670a0f16f836/go.mod h1:+mUFdys1IX+qwQUrV+/i56Tey/mYh8ZzWZYttwivRns= +github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2 h1:Je4vgmwTN9JIyWzQ4utFw3eQ3eP5sah/d7aS+U9bUhU= +github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 1e43cb6996..dc72d03ee7 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -38,6 +38,7 @@ import ( barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials" barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer" restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" + "github.com/cloudnative-pg/machinery/pkg/envmap" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" @@ -267,7 +268,18 @@ func (info InitInfo) Restore(ctx context.Context) error { if res == nil { return errors.New("empty response from restoreViaPlugin, programmatic error") } - envs = res.Envs + + processEnvironment, err := envmap.ParseEnviron() + if err != nil { + return fmt.Errorf("error while parsing the process environment: %w", err) + } + + pluginEnvironment, err := envmap.Parse(res.Envs) + if err != nil { + return fmt.Errorf("error while parsing the plugin environment: %w", err) + } + + envs = envmap.Merge(processEnvironment, pluginEnvironment).StringSlice() config = res.RestoreConfig } else { // Before starting the restore we check if the archive destination is safe to use From 333d561ff5e4680337f2a07d879370cf9b4bf4c6 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Fri, 22 Nov 2024 17:07:16 +0100 Subject: [PATCH 158/836] docs: complete the database feature documentation (#5647) Closes #5587 Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 1 + api/v1/cluster_webhook.go | 2 +- docs/mkdocs.yml | 1 + docs/src/declarative_database_management.md | 205 +++++++++++++++--- docs/src/declarative_role_management.md | 2 +- docs/src/index.md | 1 + docs/src/operator_capability_levels.md | 12 +- .../cmd/manager/instance/pgbasebackup/cmd.go | 2 +- tests/e2e/asserts_test.go | 2 +- 9 files changed, 195 insertions(+), 33 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 51fefca5de..9bc6770ca5 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1128,6 +1128,7 @@ scheduledbackupspec scheduledbackupstatus schedulerName schemaOnly +schemas sdk searchAttribute searchFilter diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go index f3f8ee9d63..0d6da62c95 100644 --- a/api/v1/cluster_webhook.go +++ b/api/v1/cluster_webhook.go @@ -102,7 +102,7 @@ func (r *Cluster) setDefaults(preserveUserSettings bool) { r.Spec.Bootstrap = &BootstrapConfiguration{} } - // Defaulting initDB if no other boostrap method was passed + // Defaulting initDB if no other bootstrap method was passed switch { 
case r.Spec.Bootstrap.Recovery != nil: r.defaultRecovery() diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 471250cf7d..b9808c650d 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -39,6 +39,7 @@ nav: - service_management.md - postgresql_conf.md - declarative_role_management.md + - declarative_database_management.md - tablespaces.md - operator_conf.md - cluster_conf.md diff --git a/docs/src/declarative_database_management.md b/docs/src/declarative_database_management.md index b740292660..52e9b2c76e 100644 --- a/docs/src/declarative_database_management.md +++ b/docs/src/declarative_database_management.md @@ -1,21 +1,38 @@ -# Declarative Database Management +# PostgreSQL Database Management -Declarative database management enables users to control the lifecycle of -databases via a new Custom Resource Definition (CRD) called `Database`. +CloudNativePG simplifies PostgreSQL database provisioning by automatically +creating an application database named `app` by default. This default behavior +is explained in the ["Bootstrap an Empty Cluster"](bootstrap.md#bootstrap-an-empty-cluster-initdb) +section. -A `Database` object is managed by the instance manager of the cluster's -primary instance. This feature is not supported in replica clusters, -as replica clusters lack a primary instance to manage the `Database` object. +For more advanced use cases, CloudNativePG introduces **declarative database +management**, which empowers users to define and control the lifecycle of +PostgreSQL databases using the `Database` Custom Resource Definition (CRD). +This method seamlessly integrates with Kubernetes, providing a scalable, +automated, and consistent approach to managing PostgreSQL databases. -### Example: Simple Database Declaration +--- -Below is an example of a basic `Database` configuration: +## Key Concepts + +### Scope of Management + +!!! Important + CloudNativePG manages **global objects** in PostgreSQL clusters, such as + databases, roles, and tablespaces. However, it does **not** manage the content + of databases (e.g., schemas and tables). For database content, specialized + tools or the applications themselves should be used. + +### Declarative `Database` Manifest + +The following example demonstrates how a `Database` resource interacts with a +`Cluster`: ```yaml apiVersion: postgresql.cnpg.io/v1 kind: Database metadata: - name: db-one + name: cluster-example-one spec: name: one owner: app @@ -23,32 +40,91 @@ spec: name: cluster-example ``` -Once the reconciliation cycle is completed successfully, the `Database` -status will show a `applied` field set to `true` and an empty `message` field. +When applied, this manifest creates a `Database` object called +`cluster-example-one` requesting a database named `one`, owned by the `app` +role, in the `cluster-example` PostgreSQL cluster. + +!!! Info + Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-DatabaseSpec) + the full list of attributes you can define for each `Database` object. + +### Required Fields in the `Database` Manifest + +- `metadata.name`: Unique name of the Kubernetes object within its namespace. +- `spec.name`: Name of the database as it will appear in PostgreSQL. +- `spec.owner`: PostgreSQL role that owns the database. +- `spec.cluster.name`: Name of the target PostgreSQL cluster. + +The `Database` object must reference a specific `Cluster`, determining where +the database will be created. It is managed by the cluster's primary instance, +ensuring the database is created or updated as needed. 
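For illustration only (this aside is not part of the patch, and the manifest file name is hypothetical): the `Database` object defined above is applied like any other namespaced Kubernetes resource, with `kubectl` confirming creation in its usual form:

```console
$ kubectl apply -f cluster-example-one.yaml
database.postgresql.cnpg.io/cluster-example-one created
```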
-### Database Deletion and Reclaim Policies +!!! Info + The distinction between `metadata.name` and `spec.name` allows multiple + `Database` resources to reference databases with the same name across different + CloudNativePG clusters in the same Kubernetes namespace. -A finalizer named `cnpg.io/deleteDatabase` is automatically added -to each `Database` object to control its deletion process. +## Reserved Database Names -By default, the `databaseReclaimPolicy` is set to `retain`, which means -that if the `Database` object is deleted, the actual PostgreSQL database -is retained for manual management by an administrator. +PostgreSQL automatically creates databases such as `postgres`, `template0`, and +`template1`. These names are reserved and cannot be used for new `Database` +objects in CloudNativePG. -Alternatively, if the `databaseReclaimPolicy` is set to `delete`, -the PostgreSQL database will be automatically deleted when the `Database` -object is removed. +!!! Important + Creating a `Database` with `spec.name` set to `postgres`, `template0`, or + `template1` is not allowed. -### Example: Database with Delete Reclaim Policy +## Reconciliation and Status -The following example illustrates a `Database` object with a `delete` -reclaim policy: +Once a `Database` object is reconciled successfully: + +- `status.applied` will be set to `true`. +- `status.observedGeneration` will match the `metadata.generation` of the last + applied configuration. + +Example of a reconciled `Database` object: ```yaml apiVersion: postgresql.cnpg.io/v1 kind: Database metadata: - name: db-one-with-delete-reclaim-policy + generation: 1 + name: cluster-example-one +spec: + cluster: + name: cluster-example + name: one + owner: app +status: + observedGeneration: 1 + applied: true +``` + +If an error occurs during reconciliation, `status.applied` will be `false`, and +an error message will be included in the `status.message` field. + +## Deleting a Database + +CloudNativePG supports two methods for database deletion: + +1. Using the `delete` reclaim policy +2. Declaratively setting the database's `ensure` field to `absent` + +### Deleting via `delete` Reclaim Policy + +The `databaseReclaimPolicy` field determines the behavior when a `Database` +object is deleted: + +- `retain` (default): The database remains in PostgreSQL for manual management. +- `delete`: The database is automatically removed from PostgreSQL. + +Example: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: cluster-example-two spec: databaseReclaimPolicy: delete name: two @@ -57,4 +133,83 @@ spec: name: cluster-example ``` -In this case, when the `Database` object is deleted, the corresponding PostgreSQL database will also be removed automatically. +Deleting this `Database` object will automatically remove the `two` database +from the `cluster-example` cluster. + +### Declaratively Setting `ensure: absent` + +To remove a database, set the `ensure` field to `absent` like in the following +example:. + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: cluster-example-database-to-drop +spec: + cluster: + name: cluster-example + name: database-to-drop + owner: app + ensure: absent +``` + +This manifest ensures that the `database-to-drop` database is removed from the +`cluster-example` cluster. 
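Whichever of the two removal paths is used, the outcome can be verified through the `status` fields documented in the "Reconciliation and Status" section above; for example (an illustrative command, not part of the patch):

```console
$ kubectl get database cluster-example-database-to-drop \
    -o jsonpath='{.status.applied}{" "}{.status.message}{"\n"}'
```

A `true` value with an empty message indicates that the object, and therefore the requested drop, was reconciled successfully.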
+ +## Limitations and Caveats + +### Renaming a database + +While CloudNativePG adheres to PostgreSQL’s +[CREATE DATABASE](https://www.postgresql.org/docs/current/sql-createdatabase.html) and +[ALTER DATABASE](https://www.postgresql.org/docs/current/sql-alterdatabase.html) +commands, **renaming databases is not supported**. +Attempting to modify `spec.name` in an existing `Database` object will result +in rejection by Kubernetes. + +### Creating vs. Altering a Database + +- For new databases, CloudNativePG uses the `CREATE DATABASE` statement. +- For existing databases, `ALTER DATABASE` is used to apply changes. + +It is important to note that there are some differences between these two +Postgres commands: in particular, the options accepted by `ALTER` are a subset +of those accepted by `CREATE`. + +!!! Warning + Some fields, such as encoding and collation settings, are immutable in + PostgreSQL. Attempts to modify these fields on existing databases will be + ignored. + +### Replica Clusters + +Database objects declared on replica clusters cannot be enforced, as replicas +lack write privileges. These objects will remain in a pending state until the +replica is promoted. + +### Conflict Resolution + +If two `Database` objects in the same namespace manage the same PostgreSQL +database (i.e., identical `spec.name` and `spec.cluster.name`), the second +object will be rejected. + +Example status message: + +```yaml +status: + applied: false + message: 'reconciliation error: database "one" is already managed by Database object "cluster-example-one"' +``` + +### Postgres Version Differences + +CloudNativePG adheres to PostgreSQL's capabilities. For example, features like +`ICU_RULES` introduced in PostgreSQL 16 are unavailable in earlier versions. +Errors from PostgreSQL will be reflected in the `Database` object's `status`. + +### Manual Changes + +CloudNativePG does not overwrite manual changes to databases. Once reconciled, +a `Database` object will not be reapplied unless its `metadata.generation` +changes, giving flexibility for direct PostgreSQL modifications. diff --git a/docs/src/declarative_role_management.md b/docs/src/declarative_role_management.md index 04b328c977..2c0c109cbc 100644 --- a/docs/src/declarative_role_management.md +++ b/docs/src/declarative_role_management.md @@ -1,4 +1,4 @@ -# Database Role Management +# PostgreSQL Role Management From its inception, CloudNativePG has managed the creation of specific roles required in PostgreSQL instances: diff --git a/docs/src/index.md b/docs/src/index.md index 641c074d2f..815dc0af85 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -87,6 +87,7 @@ Additionally, the community provides images for the [PostGIS extension](postgis. Postgres extensions through the cluster `spec`: `pgaudit`, `auto_explain`, `pg_stat_statements`, and `pg_failover_slots` * Declarative management of Postgres roles, users and groups +* Declarative management of Postgres databases * Support for Local Persistent Volumes with PVC templates * Reuse of Persistent Volumes storage in Pods * Separate volumes for WAL files and tablespaces diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index ce7f31e8c0..3ef8b0f5d8 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -136,14 +136,18 @@ PostgreSQL outside Kubernetes. This is particularly useful for DBaaS purposes. ### Database configuration -The operator is designed to manage a PostgreSQL cluster with a single -database. 
The operator transparently manages access to the database through -three Kubernetes services provisioned and managed for read-write, +The operator is designed to bootstrap a PostgreSQL cluster with a single +database. The operator transparently manages network access to the cluster +through three Kubernetes services provisioned and managed for read-write, read, and read-only workloads. Using the convention-over-configuration approach, the operator creates a database called `app`, by default owned by a regular Postgres user with the same name. You can specify both the database name and the user name, if -required. +required, as part of the bootstrap. + +Additional databases can be created or managed via +[declarative database management](declarative_database_management.md) using +the `Database` CRD. Although no configuration is required to run the cluster, you can customize both PostgreSQL runtime configuration and PostgreSQL host-based diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go index 3f9d9719e1..57a92cd83c 100644 --- a/internal/cmd/manager/instance/pgbasebackup/cmd.go +++ b/internal/cmd/manager/instance/pgbasebackup/cmd.go @@ -77,7 +77,7 @@ func NewCmd() *cobra.Command { } if err = env.bootstrapUsingPgbasebackup(ctx); err != nil { - contextLogger.Error(err, "Unable to boostrap cluster") + contextLogger.Error(err, "Unable to bootstrap cluster") } return err }, diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 685630691f..37c0c0276d 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -975,7 +975,7 @@ func AssertReplicaModeCluster( // AssertDetachReplicaModeCluster verifies that a replica cluster can be detached from the // source cluster, and its target primary can be promoted. As such, new write operation // on the source cluster shouldn't be received anymore by the detached replica cluster. 
-// Also, make sure the boostrap fields database and owner of the replica cluster are +// Also, make sure the bootstrap fields database and owner of the replica cluster are // properly ignored func AssertDetachReplicaModeCluster( namespace, From fda635b550f3c05d81326b9ee1d9eb64f66b8954 Mon Sep 17 00:00:00 2001 From: Peggie Date: Sat, 23 Nov 2024 19:37:14 +0100 Subject: [PATCH 159/836] feat: Public Cloud K8S versions update (#6129) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: public-cloud-k8s-versions-check --- .github/eks_versions.json | 3 +-- .github/kind_versions.json | 8 ++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/eks_versions.json b/.github/eks_versions.json index 537d9f2b64..3121122733 100644 --- a/.github/eks_versions.json +++ b/.github/eks_versions.json @@ -2,6 +2,5 @@ "1.31", "1.30", "1.29", - "1.28", - "1.27" + "1.28" ] diff --git a/.github/kind_versions.json b/.github/kind_versions.json index d9bd1a1215..85547c7125 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,7 +1,7 @@ [ - "v1.31.1", - "v1.30.4", - "v1.29.8", - "v1.28.13", + "v1.31.2", + "v1.30.6", + "v1.29.10", + "v1.28.15", "v1.27.16" ] From 5e80915aff117dc8e92b2914ac3097b1c4fc090d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 23 Nov 2024 20:08:13 +0100 Subject: [PATCH 160/836] feat: update default PostgreSQL version to 17.2 (#6018) Update default PostgreSQL version from 17.0 to 17.2 Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 24 +++++++++++----------- docs/src/bootstrap.md | 10 ++++----- docs/src/declarative_hibernation.md | 2 +- docs/src/image_catalog.md | 4 ++-- docs/src/kubectl-plugin.md | 4 ++-- docs/src/monitoring.md | 2 +- docs/src/postgis.md | 2 +- docs/src/samples/cluster-example-full.yaml | 2 +- docs/src/scheduling.md | 2 +- docs/src/ssl_connections.md | 2 +- docs/src/troubleshooting.md | 4 ++-- pkg/versions/versions.go | 2 +- 12 files changed, 30 insertions(+), 30 deletions(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index fa6d5ae4ed..119882aec3 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,26 +1,26 @@ { "17": [ - "17.0", - "17.0-27" + "17.2", + "17.1" ], "16": [ - "16.4", - "16.3" + "16.6", + "16.5" ], "15": [ - "15.8", - "15.7" + "15.10", + "15.9" ], "14": [ - "14.13", - "14.12" + "14.15", + "14.14" ], "13": [ - "13.16", - "13.15" + "13.18", + "13.17" ], "12": [ - "12.20", - "12.19" + "12.22", + "12.21" ] } \ No newline at end of file diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 87525b4679..6aff83a8a6 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -519,7 +519,7 @@ file on the source PostgreSQL instance: host replication streaming_replica all md5 ``` -The following manifest creates a new PostgreSQL 17.0 cluster, +The following manifest creates a new PostgreSQL 17.2 cluster, called `target-db`, using the `pg_basebackup` bootstrap method to clone an external PostgreSQL cluster defined as `source-db` (in the `externalClusters` array). 
As you can see, the `source-db` @@ -534,7 +534,7 @@ metadata: name: target-db spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.0 + imageName: ghcr.io/cloudnative-pg/postgresql:17.2 bootstrap: pg_basebackup: @@ -554,7 +554,7 @@ spec: ``` All the requirements must be met for the clone operation to work, including -the same PostgreSQL version (in our case 17.0). +the same PostgreSQL version (in our case 17.2). #### TLS certificate authentication @@ -569,7 +569,7 @@ in the same Kubernetes cluster. This example can be easily adapted to cover an instance that resides outside the Kubernetes cluster. -The manifest defines a new PostgreSQL 17.0 cluster called `cluster-clone-tls`, +The manifest defines a new PostgreSQL 17.2 cluster called `cluster-clone-tls`, which is bootstrapped using the `pg_basebackup` method from the `cluster-example` external cluster. The host is identified by the read/write service in the same cluster, while the `streaming_replica` user is authenticated @@ -584,7 +584,7 @@ metadata: name: cluster-clone-tls spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.0 + imageName: ghcr.io/cloudnative-pg/postgresql:17.2 bootstrap: pg_basebackup: diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md index 1b7a64f7af..4df6e3403d 100644 --- a/docs/src/declarative_hibernation.md +++ b/docs/src/declarative_hibernation.md @@ -58,7 +58,7 @@ $ kubectl cnpg status Cluster Summary Name: cluster-example Namespace: default -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.2 Primary instance: cluster-example-2 Status: Cluster in healthy state Instances: 3 diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index a84890a480..6078124fa6 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -32,7 +32,7 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 - image: ghcr.io/cloudnative-pg/postgresql:17.0 + image: ghcr.io/cloudnative-pg/postgresql:17.2 ``` **Example of a Cluster-Wide Catalog using `ClusterImageCatalog` Resource:** @@ -47,7 +47,7 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 - image: ghcr.io/cloudnative-pg/postgresql:17.0 + image: ghcr.io/cloudnative-pg/postgresql:17.2 ``` A `Cluster` resource has the flexibility to reference either an `ImageCatalog` diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 793fd706ff..e066bbc082 100755 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -1006,7 +1006,7 @@ it from the actual pod. This means that you will be using the `postgres` user. ```console $ kubectl cnpg psql cluster-example -psql (17.0 (Debian 17.0-1.pgdg110+1)) +psql (17.2 (Debian 17.2-1.pgdg110+1)) Type "help" for help. postgres=# @@ -1018,7 +1018,7 @@ select to work against a replica by using the `--replica` option: ```console $ kubectl cnpg psql --replica cluster-example -psql (17.0 (Debian 17.0-1.pgdg110+1)) +psql (17.2 (Debian 17.2-1.pgdg110+1)) Type "help" for help. 
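Tying the `image_catalog.md` hunk above to actual usage: a `Cluster` selects an entry from an `ImageCatalog` (or `ClusterImageCatalog`) through `imageCatalogRef`. A minimal sketch follows, assuming a catalog named `postgresql` (any name matching an existing catalog in the same namespace would do) that defines an entry for major 16, as in the snippets updated above:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  imageCatalogRef:
    apiGroup: postgresql.cnpg.io
    kind: ImageCatalog
    name: postgresql   # assumed to match an existing ImageCatalog
    major: 16
  storage:
    size: 1Gi
```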
diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md index 7814949e06..3fa83cb2f6 100644 --- a/docs/src/monitoring.md +++ b/docs/src/monitoring.md @@ -217,7 +217,7 @@ cnpg_collector_up{cluster="cluster-example"} 1 # HELP cnpg_collector_postgres_version Postgres version # TYPE cnpg_collector_postgres_version gauge -cnpg_collector_postgres_version{cluster="cluster-example",full="17.0"} 17.0 +cnpg_collector_postgres_version{cluster="cluster-example",full="17.2"} 17.2 # HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp # TYPE cnpg_collector_last_failed_backup_timestamp gauge diff --git a/docs/src/postgis.md b/docs/src/postgis.md index a31fb607c7..cd139ac5d7 100644 --- a/docs/src/postgis.md +++ b/docs/src/postgis.md @@ -100,7 +100,7 @@ values from the ones in this document): ```console $ kubectl exec -ti postgis-example-1 -- psql app Defaulted container "postgres" out of: postgres, bootstrap-controller (init) -psql (17.0 (Debian 17.0-1.pgdg110+1)) +psql (17.2 (Debian 17.2-1.pgdg110+1)) Type "help" for help. app=# SELECT * FROM pg_available_extensions WHERE name ~ '^postgis' ORDER BY 1; diff --git a/docs/src/samples/cluster-example-full.yaml b/docs/src/samples/cluster-example-full.yaml index f0fa1fe10e..321e94a2fe 100644 --- a/docs/src/samples/cluster-example-full.yaml +++ b/docs/src/samples/cluster-example-full.yaml @@ -35,7 +35,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: ghcr.io/cloudnative-pg/postgresql:17.0 + imageName: ghcr.io/cloudnative-pg/postgresql:17.2 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/docs/src/scheduling.md b/docs/src/scheduling.md index 57eb71b69b..a681f412a6 100644 --- a/docs/src/scheduling.md +++ b/docs/src/scheduling.md @@ -40,7 +40,7 @@ metadata: name: cluster-example spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.0 + imageName: ghcr.io/cloudnative-pg/postgresql:17.2 affinity: enablePodAntiAffinity: true # Default value diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index b11ea1a620..3762ab95d8 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -173,7 +173,7 @@ Output: version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 17.0 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 17.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index fc6f59c39f..aa67c8be17 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -220,7 +220,7 @@ Cluster in healthy state Name: cluster-example Namespace: default System ID: 7044925089871458324 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0-3 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.2-3 Primary instance: cluster-example-1 Instances: 3 Ready instances: 3 @@ -288,7 +288,7 @@ kubectl describe cluster -n | grep "Image Name" Output: ```shell - Image Name: ghcr.io/cloudnative-pg/postgresql:17.0-3 + Image Name: ghcr.io/cloudnative-pg/postgresql:17.2-3 ``` !!! 
Note diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 9493ffe2a3..6189bdad1f 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,7 +23,7 @@ const ( Version = "1.24.1" // DefaultImageName is the default image used by the operator to create pods - DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.0" + DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.2" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1" From c390c1dccce7ab8b5b817624f42dc3198db2128e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gr=C3=A9goire=20Bellon-Gervais?= Date: Sun, 24 Nov 2024 10:01:38 +0100 Subject: [PATCH 161/836] docs: add Docaposte to ADOPTERS.md (#6145) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. Signed-off-by: Grégoire Bellon-Gervais --- ADOPTERS.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 7ce2afc859..44eb525966 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -57,4 +57,5 @@ This list is sorted in chronological order, based on the submission date. | [MIND Informatica srl](https://mind-informatica.com) | @simonerocchi | 2024-09-25 | We use CloudNativePG to run PostgreSQL clusters for our web applications. | | [Walkbase](https://walkbase.com/) | @LinAnt | 2024-10-24 | CloudNativePG currently manages all our Postgres instances on Kubernetes via GitOps. | | [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App PLatform](https://github.com/linode/apl-core) for all platform managed PostgreSQL databases. | -| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-24 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. | +| [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-20 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. | +| [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. | From 9eb3cbde1e62c489edb474a952f41a6bd9add8d2 Mon Sep 17 00:00:00 2001 From: Klavs Klavsen Date: Mon, 25 Nov 2024 14:53:23 +0100 Subject: [PATCH 162/836] docs: add Obmondo to `ADOPTERS.md` (#6162) At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called KubeAid to easily manage all PostgreSQL databases across clusters from a centralized interface. Signed-off-by: Klavs Klavsen --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 44eb525966..4a4601f02f 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -59,3 +59,4 @@ This list is sorted in chronological order, based on the submission date. 
| [Akamai Technologies](https://www.akamai.com/) | @srodenhuis | 2024-11-20 | CloudNativePG is used in the [Akamai App PLatform](https://github.com/linode/apl-core) for all platform managed PostgreSQL databases. | | [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-20 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. | | [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. | +| [Obmondo](https://obmondo.com) | @Obmondo | 2024-11-25 | At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called [KubeAid](https://kubeaid.io/) to easily manage all PostgreSQL databases across clusters from a centralized interface. | From a3b0fdb67384215fa2bdd727cbc391e15481e957 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 26 Nov 2024 19:04:56 +0100 Subject: [PATCH 163/836] fix: ensure former primary WALs are flushed before resyncing (#6141) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes an issue where the former primary node may fail to archive its WAL files before resynchronizing as a replica after a switchover event. This gap in the WAL stream could prevent point-in-time recovery (PITR) to positions between the current primary node’s promotion and the next backup. The fix ensures the former primary flushes and archives all pending WAL files before rejoining the cluster, maintaining a complete and consistent WAL stream. Closes #5959 Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- go.mod | 2 +- go.sum | 4 +- internal/cmd/manager/walarchive/cmd.go | 214 +----------- .../controller/instance_controller.go | 2 +- .../management/controller/instance_startup.go | 6 + pkg/management/postgres/archiver/archiver.go | 325 ++++++++++++++++++ pkg/management/postgres/archiver/doc.go | 18 + .../postgres/constants/constants.go | 5 + pkg/management/postgres/initdb.go | 9 +- pkg/management/postgres/restore.go | 2 +- 10 files changed, 362 insertions(+), 225 deletions(-) create mode 100644 pkg/management/postgres/archiver/archiver.go create mode 100644 pkg/management/postgres/archiver/doc.go diff --git a/go.mod b/go.mod index 187b8fd06e..2920230af2 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 - github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2 + github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index 8c152fd74c..e372961fb6 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 h1:HX5 github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA= github.com/cloudnative-pg/cnpg-i 
v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= -github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2 h1:Je4vgmwTN9JIyWzQ4utFw3eQ3eP5sah/d7aS+U9bUhU= -github.com/cloudnative-pg/machinery v0.0.0-20241120125931-257ab8d1e6a2/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU= +github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs= +github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index fef861040f..23756b5cdd 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -18,30 +18,16 @@ limitations under the License. package walarchive import ( - "context" "errors" "fmt" "os" - "path" - "path/filepath" - "time" - barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver" - "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/spf13/cobra" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" - "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" - pgManagement "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" - "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // errSwitchoverInProgress is raised when there is a switchover in progress @@ -73,7 +59,7 @@ func NewCmd() *cobra.Command { return fmt.Errorf("failed to get cluster: %w", errCluster) } - if err := run(ctx, podName, pgData, cluster, args); err != nil { + if err := archiver.Run(ctx, podName, pgData, cluster, args[0]); err != nil { if errors.Is(err, errSwitchoverInProgress) { contextLog.Warning("Refusing to archive WALs until the switchover is not completed", "err", err) @@ -98,199 +84,3 @@ func NewCmd() *cobra.Command { return &cmd } - -func run( - ctx context.Context, - podName, pgData string, - cluster *apiv1.Cluster, - args []string, -) error { - startTime := time.Now() - contextLog := log.FromContext(ctx) - walName := args[0] - - if cluster.IsReplica() { - if podName != cluster.Status.CurrentPrimary && podName != cluster.Status.TargetPrimary { - contextLog.Debug("WAL archiving on a replica cluster, "+ - "but this node is not the target primary nor the current one. 
"+ - "Skipping WAL archiving", - "walName", walName, - "currentPrimary", cluster.Status.CurrentPrimary, - "targetPrimary", cluster.Status.TargetPrimary, - ) - return nil - } - } - - if cluster.Status.CurrentPrimary != podName { - contextLog.Info("Refusing to archive WAL when there is a switchover in progress", - "currentPrimary", cluster.Status.CurrentPrimary, - "targetPrimary", cluster.Status.TargetPrimary, - "podName", podName) - return errSwitchoverInProgress - } - - // Request the plugins to archive this WAL - if err := archiveWALViaPlugins(ctx, cluster, path.Join(pgData, walName)); err != nil { - return err - } - - // Request Barman Cloud to archive this WAL - if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { - // Backup not configured, skipping WAL - contextLog.Debug("Backup not configured, skip WAL archiving via Barman Cloud", - "walName", walName, - "currentPrimary", cluster.Status.CurrentPrimary, - "targetPrimary", cluster.Status.TargetPrimary, - ) - return nil - } - - // Get environment from cache - env, err := cacheClient.GetEnv(cache.WALArchiveKey) - if err != nil { - return fmt.Errorf("failed to get envs: %w", err) - } - - maxParallel := 1 - if cluster.Spec.Backup.BarmanObjectStore.Wal != nil { - maxParallel = cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel - } - - // Create the archiver - var walArchiver *barmanArchiver.WALArchiver - if walArchiver, err = barmanArchiver.New( - ctx, - env, - postgres.SpoolDirectory, - pgData, - path.Join(pgData, pgManagement.CheckEmptyWalArchiveFile)); err != nil { - return fmt.Errorf("while creating the archiver: %w", err) - } - - // Step 1: Check if the archive location is safe to perform archiving - if utils.IsEmptyWalArchiveCheckEnabled(&cluster.ObjectMeta) { - if err := checkWalArchive(ctx, cluster, walArchiver, pgData); err != nil { - return err - } - } - - // Step 2: check if this WAL file has not been already archived - var isDeletedFromSpool bool - isDeletedFromSpool, err = walArchiver.DeleteFromSpool(walName) - if err != nil { - return fmt.Errorf("while testing the existence of the WAL file in the spool directory: %w", err) - } - if isDeletedFromSpool { - contextLog.Info("Archived WAL file (parallel)", - "walName", walName, - "currentPrimary", cluster.Status.CurrentPrimary, - "targetPrimary", cluster.Status.TargetPrimary) - return nil - } - - // Step 3: gather the WAL files names to archive - walFilesList := walArchiver.GatherWALFilesToArchive(ctx, walName, maxParallel) - - options, err := walArchiver.BarmanCloudWalArchiveOptions( - ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) - if err != nil { - return err - } - - // Step 5: archive the WAL files in parallel - uploadStartTime := time.Now() - walStatus := walArchiver.ArchiveList(ctx, walFilesList, options) - if len(walStatus) > 1 { - contextLog.Info("Completed archive command (parallel)", - "walsCount", len(walStatus), - "startTime", startTime, - "uploadStartTime", uploadStartTime, - "uploadTotalTime", time.Since(uploadStartTime), - "totalTime", time.Since(startTime)) - } - - // We return only the first error to PostgreSQL, because the first error - // is the one raised by the file that PostgreSQL has requested to archive. - // The other errors are related to WAL files that were pre-archived as - // a performance optimization and are just logged - return walStatus[0].Err -} - -// archiveWALViaPlugins requests every capable plugin to archive the passed -// WAL file, and returns an error if a configured plugin fails to do so. 
-// It will not return an error if there's no plugin capable of WAL archiving -func archiveWALViaPlugins( - ctx context.Context, - cluster *apiv1.Cluster, - walName string, -) error { - contextLogger := log.FromContext(ctx) - - plugins := repository.New() - availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) - if err != nil { - contextLogger.Error(err, "Error while loading local plugins") - } - defer plugins.Close() - - availablePluginNamesSet := stringset.From(availablePluginNames) - enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames()) - - client, err := pluginClient.WithPlugins( - ctx, - plugins, - availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()..., - ) - if err != nil { - contextLogger.Error(err, "Error while loading required plugins") - return err - } - defer client.Close(ctx) - - return client.ArchiveWAL(ctx, cluster, walName) -} - -// isCheckWalArchiveFlagFilePresent returns true if the file CheckEmptyWalArchiveFile is present in the PGDATA directory -func isCheckWalArchiveFlagFilePresent(ctx context.Context, pgDataDirectory string) bool { - contextLogger := log.FromContext(ctx) - filePath := filepath.Join(pgDataDirectory, pgManagement.CheckEmptyWalArchiveFile) - - exists, err := fileutils.FileExists(filePath) - if err != nil { - contextLogger.Error(err, "error while checking for the existence of the CheckEmptyWalArchiveFile") - } - // If the check empty wal archive file doesn't exist this it's a no-op - if !exists { - contextLogger.Debug("WAL check flag file not found, skipping check") - return false - } - - return exists -} - -func checkWalArchive( - ctx context.Context, - cluster *apiv1.Cluster, - walArchiver *barmanArchiver.WALArchiver, - pgData string, -) error { - contextLogger := log.FromContext(ctx) - checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions( - ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) - if err != nil { - contextLogger.Error(err, "while getting barman-cloud-wal-archive options") - return err - } - - if !isCheckWalArchiveFlagFilePresent(ctx, pgData) { - return nil - } - - if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil { - contextLogger.Error(err, "while barman-cloud-check-wal-archive") - return err - } - - return nil -} diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 9a48a4c849..dda32b0920 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -1009,7 +1009,7 @@ func (r *InstanceReconciler) reconcilePostgreSQLAutoConfFilePermissions(ctx cont // The file is created immediately after initdb and removed after the // first WAL is archived func (r *InstanceReconciler) reconcileCheckWalArchiveFile(cluster *apiv1.Cluster) error { - filePath := filepath.Join(r.instance.PgData, postgresManagement.CheckEmptyWalArchiveFile) + filePath := filepath.Join(r.instance.PgData, constants.CheckEmptyWalArchiveFile) for _, condition := range cluster.Status.Conditions { // If our current condition is archiving we can delete the file if condition.Type == string(apiv1.ConditionContinuousArchiving) && condition.Status == metav1.ConditionTrue { diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index caeeda9c55..ee81ed483c 100644 --- a/internal/management/controller/instance_startup.go +++ 
b/internal/management/controller/instance_startup.go
@@ -32,6 +32,7 @@ import (
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/internal/controller"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver"
 	postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
 )
@@ -256,6 +257,11 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context
 				err, "Error while changing mode of the postgresql.auto.conf file before pg_rewind, skipped")
 		}
 
+		// We archive every WAL that has not been archived since the latest postmaster invocation.
+		if err := archiver.ArchiveAllReadyWALs(ctx, cluster, r.instance.PgData); err != nil {
+			return fmt.Errorf("while ensuring all WAL files are archived: %w", err)
+		}
+
 		// pg_rewind could require a clean shutdown of the old primary to
 		// work. Unfortunately, if the old primary is already clean starting
 		// it up may make it advance in respect to the new one.
diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go
new file mode 100644
index 0000000000..251ca52f5a
--- /dev/null
+++ b/pkg/management/postgres/archiver/archiver.go
@@ -0,0 +1,325 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package archiver
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"path"
+	"path/filepath"
+	"time"
+
+	barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver"
+	"github.com/cloudnative-pg/machinery/pkg/fileutils"
+	walUtils "github.com/cloudnative-pg/machinery/pkg/fileutils/wals"
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"github.com/cloudnative-pg/machinery/pkg/stringset"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
+	"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
+	"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+	"github.com/cloudnative-pg/cloudnative-pg/internal/management/cache"
+	cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// errSwitchoverInProgress is raised when there is a switchover in progress
+// and the new primary has not completed the promotion
+var errSwitchoverInProgress = fmt.Errorf("switchover in progress, refusing archiving")
+
+// ArchiveAllReadyWALs ensures that all WAL files that are in the "ready"
+// queue have been archived.
+// This is used to ensure that a former primary will archive the WAL files in
+// its queue even in case of an unclean shutdown.
+func ArchiveAllReadyWALs( + ctx context.Context, + cluster *apiv1.Cluster, + pgData string, +) error { + contextLog := log.FromContext(ctx) + + noWALLeft := errors.New("no wal files to archive") + + iterator := func() error { + walList := walUtils.GatherReadyWALFiles( + ctx, walUtils.GatherReadyWALFilesConfig{ + MaxResults: math.MaxInt32 - 1, + PgDataPath: pgData, + }, + ) + + if len(walList.Ready) > 0 { + contextLog.Info( + "Detected ready WAL files in a former primary, triggering WAL archiving", + "readyWALCount", len(walList.Ready), + ) + contextLog.Debug( + "List of ready WALs", + "readyWALs", walList.Ready, + ) + } + + for _, wal := range walList.ReadyItemsToSlice() { + if err := internalRun(ctx, pgData, cluster, wal); err != nil { + return err + } + + if err := walList.MarkAsDone(ctx, wal); err != nil { + return err + } + } + + if !walList.HasMoreResults { + return noWALLeft + } + + return nil + } + + for { + if err := iterator(); err != nil { + if errors.Is(err, noWALLeft) { + return nil + } + return err + } + } +} + +// Run implements the WAL archiving process given the current cluster definition +// and the current Pod Name. +func Run( + ctx context.Context, + podName, pgData string, + cluster *apiv1.Cluster, + walName string, +) error { + contextLog := log.FromContext(ctx) + + if cluster.IsReplica() { + if podName != cluster.Status.CurrentPrimary && podName != cluster.Status.TargetPrimary { + contextLog.Debug("WAL archiving on a replica cluster, "+ + "but this node is not the target primary nor the current one. "+ + "Skipping WAL archiving", + "walName", walName, + "currentPrimary", cluster.Status.CurrentPrimary, + "targetPrimary", cluster.Status.TargetPrimary, + ) + return nil + } + } + + if cluster.Status.CurrentPrimary != podName { + contextLog.Info("Refusing to archive WAL when there is a switchover in progress", + "currentPrimary", cluster.Status.CurrentPrimary, + "targetPrimary", cluster.Status.TargetPrimary, + "podName", podName) + return errSwitchoverInProgress + } + + return internalRun(ctx, pgData, cluster, walName) +} + +func internalRun( + ctx context.Context, + pgData string, + cluster *apiv1.Cluster, + walName string, +) error { + contextLog := log.FromContext(ctx) + startTime := time.Now() + + // Request the plugins to archive this WAL + if err := archiveWALViaPlugins(ctx, cluster, path.Join(pgData, walName)); err != nil { + return err + } + + // Request Barman Cloud to archive this WAL + if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { + // Backup not configured, skipping WAL + contextLog.Debug("Backup not configured, skip WAL archiving via Barman Cloud", + "walName", walName, + "currentPrimary", cluster.Status.CurrentPrimary, + "targetPrimary", cluster.Status.TargetPrimary, + ) + return nil + } + + // Get environment from cache + env, err := cacheClient.GetEnv(cache.WALArchiveKey) + if err != nil { + return fmt.Errorf("failed to get envs: %w", err) + } + + maxParallel := 1 + if cluster.Spec.Backup.BarmanObjectStore.Wal != nil { + maxParallel = cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel + } + + // Create the archiver + var walArchiver *barmanArchiver.WALArchiver + if walArchiver, err = barmanArchiver.New( + ctx, + env, + postgres.SpoolDirectory, + pgData, + path.Join(pgData, constants.CheckEmptyWalArchiveFile)); err != nil { + return fmt.Errorf("while creating the archiver: %w", err) + } + + // Step 1: Check if the archive location is safe to perform archiving + if utils.IsEmptyWalArchiveCheckEnabled(&cluster.ObjectMeta) 
{
+		if err := checkWalArchive(ctx, cluster, walArchiver, pgData); err != nil {
+			return err
+		}
+	}
+
+	// Step 2: check if this WAL file has not been already archived
+	var isDeletedFromSpool bool
+	isDeletedFromSpool, err = walArchiver.DeleteFromSpool(walName)
+	if err != nil {
+		return fmt.Errorf("while testing the existence of the WAL file in the spool directory: %w", err)
+	}
+	if isDeletedFromSpool {
+		contextLog.Info("Archived WAL file (parallel)",
+			"walName", walName,
+			"currentPrimary", cluster.Status.CurrentPrimary,
+			"targetPrimary", cluster.Status.TargetPrimary)
+		return nil
+	}
+
+	// Step 3: gather the WAL file names to archive
+	walFilesList := walUtils.GatherReadyWALFiles(
+		ctx,
+		walUtils.GatherReadyWALFilesConfig{
+			MaxResults: maxParallel,
+			SkipWALs:   []string{walName},
+			PgDataPath: pgData,
+		},
+	)
+
+	// Ensure the requested WAL file is always the first one being
+	// archived
+	walFilesList.Ready = append([]string{walName}, walFilesList.Ready...)
+
+	options, err := walArchiver.BarmanCloudWalArchiveOptions(
+		ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name)
+	if err != nil {
+		return err
+	}
+
+	// Step 4: archive the WAL files in parallel
+	uploadStartTime := time.Now()
+	walStatus := walArchiver.ArchiveList(ctx, walFilesList.ReadyItemsToSlice(), options)
+	if len(walStatus) > 1 {
+		contextLog.Info("Completed archive command (parallel)",
+			"walsCount", len(walStatus),
+			"startTime", startTime,
+			"uploadStartTime", uploadStartTime,
+			"uploadTotalTime", time.Since(uploadStartTime),
+			"totalTime", time.Since(startTime))
+	}
+
+	// We return only the first error to PostgreSQL, because the first error
+	// is the one raised by the file that PostgreSQL has requested to archive.
+	// The other errors are related to WAL files that were pre-archived as
+	// a performance optimization and are just logged
+	return walStatus[0].Err
+}
+
+// archiveWALViaPlugins requests every capable plugin to archive the passed
+// WAL file, and returns an error if a configured plugin fails to do so.
+// It will not return an error if there's no plugin capable of WAL archiving
+func archiveWALViaPlugins(
+	ctx context.Context,
+	cluster *apiv1.Cluster,
+	walName string,
+) error {
+	contextLogger := log.FromContext(ctx)
+
+	plugins := repository.New()
+	availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir)
+	if err != nil {
+		contextLogger.Error(err, "Error while loading local plugins")
+	}
+	defer plugins.Close()
+
+	availablePluginNamesSet := stringset.From(availablePluginNames)
+	enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames())
+
+	client, err := pluginClient.WithPlugins(
+		ctx,
+		plugins,
+		availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()...,
+	)
+	if err != nil {
+		contextLogger.Error(err, "Error while loading required plugins")
+		return err
+	}
+	defer client.Close(ctx)
+
+	return client.ArchiveWAL(ctx, cluster, walName)
+}
+
+// isCheckWalArchiveFlagFilePresent returns true if the file CheckEmptyWalArchiveFile is present in the PGDATA directory
+func isCheckWalArchiveFlagFilePresent(ctx context.Context, pgDataDirectory string) bool {
+	contextLogger := log.FromContext(ctx)
+	filePath := filepath.Join(pgDataDirectory, constants.CheckEmptyWalArchiveFile)
+
+	exists, err := fileutils.FileExists(filePath)
+	if err != nil {
+		contextLogger.Error(err, "error while checking for the existence of the CheckEmptyWalArchiveFile")
+	}
+	// If the check empty WAL archive file doesn't exist, this is a no-op
+	if !exists {
+		contextLogger.Debug("WAL check flag file not found, skipping check")
+		return false
+	}
+
+	return exists
+}
+
+func checkWalArchive(
+	ctx context.Context,
+	cluster *apiv1.Cluster,
+	walArchiver *barmanArchiver.WALArchiver,
+	pgData string,
+) error {
+	contextLogger := log.FromContext(ctx)
+	checkWalOptions, err := walArchiver.BarmanCloudCheckWalArchiveOptions(
+		ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name)
+	if err != nil {
+		contextLogger.Error(err, "while getting barman-cloud-wal-archive options")
+		return err
+	}
+
+	if !isCheckWalArchiveFlagFilePresent(ctx, pgData) {
+		return nil
+	}
+
+	if err := walArchiver.CheckWalArchiveDestination(ctx, checkWalOptions); err != nil {
+		contextLogger.Error(err, "while barman-cloud-check-wal-archive")
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/management/postgres/archiver/doc.go b/pkg/management/postgres/archiver/doc.go
new file mode 100644
index 0000000000..e29ef4fe9d
--- /dev/null
+++ b/pkg/management/postgres/archiver/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +// Package archiver contains the logic of the CloudNativePG WAL archiver +package archiver diff --git a/pkg/management/postgres/constants/constants.go b/pkg/management/postgres/constants/constants.go index 51d73ac1f7..5f26ed85f6 100644 --- a/pkg/management/postgres/constants/constants.go +++ b/pkg/management/postgres/constants/constants.go @@ -51,4 +51,9 @@ const ( // Startup is the name of a file that is created once during the first reconcile of an instance Startup = "cnpg_initialized" + + // CheckEmptyWalArchiveFile is the name of the file in the PGDATA that, + // if present, requires the WAL archiver to check that the backup object + // store is empty. + CheckEmptyWalArchiveFile = ".check-empty-wal-archive" ) diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index ee3b1d9ada..cc338d5201 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -47,13 +47,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/system" ) -const ( - // CheckEmptyWalArchiveFile is the name of the file in the PGDATA that, - // if present, requires the WAL archiver to check that the backup object - // store is empty. - CheckEmptyWalArchiveFile = ".check-empty-wal-archive" -) - // InitInfo contains all the info needed to bootstrap a new PostgreSQL instance type InitInfo struct { // The data directory where to generate the new cluster @@ -339,7 +332,7 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error { return fmt.Errorf("could not execute post init application SQL refs: %w", err) } - filePath := filepath.Join(info.PgData, CheckEmptyWalArchiveFile) + filePath := filepath.Join(info.PgData, constants.CheckEmptyWalArchiveFile) // We create the check empty wal archive file to tell that we should check if the // destination path it is empty if err := fileutils.CreateEmptyFile(filepath.Clean(filePath)); err != nil { diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index dc72d03ee7..c1652107c8 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -1005,7 +1005,7 @@ func (info *InitInfo) checkBackupDestination( env, postgresSpec.SpoolDirectory, info.PgData, - path.Join(info.PgData, CheckEmptyWalArchiveFile)) + path.Join(info.PgData, constants.CheckEmptyWalArchiveFile)) if err != nil { return fmt.Errorf("while creating the archiver: %w", err) } From 9444ebc5e1863155009c2461173b45d3c766953e Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Tue, 26 Nov 2024 20:42:52 +0100 Subject: [PATCH 164/836] fix: remove spurious log line on walarchive failure (#6169) The implementation in #6066 introduced a regression where, in the event of a WAL archive failure, an additional incorrect error message was always logged: "Error while invoking the set WAL archive condition endpoint." This patch ensures the error messages are logged correctly without misleading additional errors. 
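The underlying issue is a classic Go pitfall: in an `if` statement with a
short variable declaration, the condition tested the outer `err` (always
non-nil on this code path) instead of the freshly assigned `reqErr`. A
minimal, runnable sketch of the pattern, using hypothetical names rather
than the actual walarchive code:

    package main

    import (
        "errors"
        "fmt"
    )

    // notify stands in for the status-condition endpoint call.
    func notify(msg string) error {
        return nil // pretend the call succeeded
    }

    func main() {
        err := errors.New("archiving failed") // outer error, non-nil here

        // Buggy: the condition checks the outer err, not reqErr, so this
        // branch runs even though notify() succeeded and reqErr is nil.
        if reqErr := notify(err.Error()); err != nil {
            fmt.Println("spurious log:", reqErr)
        }

        // Fixed: check the variable that was just assigned.
        if reqErr := notify(err.Error()); reqErr != nil {
            fmt.Println("real failure:", reqErr)
        }
    }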
Signed-off-by: Marco Nenciarini --- internal/cmd/manager/walarchive/cmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index 23756b5cdd..2442f32715 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -66,7 +66,7 @@ func NewCmd() *cobra.Command { } else { contextLog.Error(err, logErrorMessage) } - if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, err.Error()); err != nil { + if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, err.Error()); reqErr != nil { contextLog.Error(reqErr, "while invoking the set wal archive condition endpoint") } return err From 965393641d75d1bda37eac83e62c1fc56becaaf2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 15:54:03 +0100 Subject: [PATCH 165/836] fix(deps): update all non-major go dependencies (main) (#6131) https://github.com/Masterminds/semver `v3.3.0` -> `v3.3.1` https://github.com/goreleaser/goreleaser `v2.4.5` -> `v2.4.8` https://github.com/onsi/ginkgo `v2.21.0` -> `v2.22.0` https://github.com/onsi/gomega `v1.35.1` -> `v1.36.0` Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- Makefile | 2 +- go.mod | 6 +++--- go.sum | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index b535b35bb4..170f87025f 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.5 -GORELEASER_VERSION ?= v2.4.5 +GORELEASER_VERSION ?= v2.4.8 SPELLCHECK_VERSION ?= 0.45.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 diff --git a/go.mod b/go.mod index 2920230af2..f276419c5c 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.23.3 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/Masterminds/semver/v3 v3.3.0 + github.com/Masterminds/semver/v3 v3.3.1 github.com/avast/retry-go/v4 v4.6.0 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 @@ -25,8 +25,8 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.21.0 - github.com/onsi/gomega v1.35.1 + github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega v1.36.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 diff --git a/go.sum b/go.sum index e372961fb6..317da64330 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= -github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= 
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= @@ -146,10 +146,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y= +github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= From b656983d9cb50a7a47352bbd5368f570e3a642f4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 16:33:21 +0100 Subject: [PATCH 166/836] chore(deps): update cloudnative-pg/ciclops action to v1.3.1 (main) (#6177) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 996673da01..3132f662be 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -2133,7 +2133,7 @@ jobs: - name: Compute the E2E test summary id: generate-summary - uses: cloudnative-pg/ciclops@v1.3.0 + uses: cloudnative-pg/ciclops@v1.3.1 with: artifact_directory: test-artifacts/data From 061752274a0327fddd33cb0783b9a8eb6cb22dd0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 17:48:13 +0100 Subject: [PATCH 167/836] chore(deps): update dependency rook/rook to v1.15.6 (main) (#6158) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 3132f662be..3f7d561e15 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ env: GOLANG_VERSION: "1.23.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.25.0" - ROOK_VERSION: "v1.15.5" + ROOK_VERSION: "v1.15.6" EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" From e8fb1e742e04ff5808df4eb5dc55d2596459c11c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" 
<29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 18:13:49 +0100 Subject: [PATCH 168/836] chore(deps): update operator framework to v1.38.0 (main) (#6186) --- Makefile | 2 +- config/olm-scorecard/patches/basic.config.yaml | 2 +- config/olm-scorecard/patches/olm.config.yaml | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 170f87025f..bf85b798eb 100644 --- a/Makefile +++ b/Makefile @@ -46,7 +46,7 @@ CONTROLLER_TOOLS_VERSION ?= v0.16.5 GORELEASER_VERSION ?= v2.4.8 SPELLCHECK_VERSION ?= 0.45.0 WOKE_VERSION ?= 0.19.0 -OPERATOR_SDK_VERSION ?= v1.37.0 +OPERATOR_SDK_VERSION ?= v1.38.0 OPM_VERSION ?= v1.48.0 PREFLIGHT_VERSION ?= 1.10.2 OPENSHIFT_VERSIONS ?= v4.12-v4.17 diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml index fd6200ae97..84683cf8d7 100644 --- a/config/olm-scorecard/patches/basic.config.yaml +++ b/config/olm-scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.38.0 labels: suite: basic test: basic-check-spec-test diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml index a547ce213d..43f40a8b3f 100644 --- a/config/olm-scorecard/patches/olm.config.yaml +++ b/config/olm-scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.38.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.38.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.38.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.38.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.37.0 + image: quay.io/operator-framework/scorecard-test:v1.38.0 labels: suite: olm test: olm-status-descriptors-test From b3b411633fd733f8baec7ea1acf543a29633645c Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 27 Nov 2024 18:25:38 +0100 Subject: [PATCH 169/836] feat: add declarative management of PostgreSQL logical replication (#5329) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This pull request adds a feature for managing PostgreSQL logical replication through a declarative method by introducing two new Kubernetes CRDs: Publications and Subscriptions. 
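As an illustration of the declarative flow, here is a minimal pair of
manifests; the resource, database, and cluster names are invented for this
sketch (the shipped examples live under docs/src/samples/ and
tests/e2e/fixtures/declarative_pub_sub/), and the subscriber is assumed to
define a matching entry in its Cluster's externalClusters list:

    apiVersion: postgresql.cnpg.io/v1
    kind: Publication
    metadata:
      name: pub-example
    spec:
      cluster:
        name: source-cluster       # the "publisher" cluster
      name: pub                    # publication name inside PostgreSQL
      dbname: app
      target:
        allTables: true
    ---
    apiVersion: postgresql.cnpg.io/v1
    kind: Subscription
    metadata:
      name: sub-example
    spec:
      cluster:
        name: destination-cluster  # the "subscriber" cluster
      name: sub                    # subscription name inside PostgreSQL
      dbname: app
      publicationName: pub         # must match the Publication above
      externalClusterName: source-cluster

The instance manager reconciles each object against its local PostgreSQL
instance, and whether the underlying publication or subscription is also
dropped when the Kubernetes object is deleted is governed by the
retain/delete reclaim policies introduced below.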
Closes #5567 Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Quaresima Signed-off-by: Niccolò Fei Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Quaresima Co-authored-by: Niccolò Fei Co-authored-by: Jaime Silvela Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 18 + PROJECT | 18 + api/v1/database_funcs.go | 24 + api/v1/publication_funcs.go | 46 ++ api/v1/publication_types.go | 162 +++++++ api/v1/subscription_funcs.go | 46 ++ api/v1/subscription_types.go | 121 +++++ api/v1/zz_generated.deepcopy.go | 267 +++++++++++ .../postgresql.cnpg.io_publications.yaml | 195 ++++++++ .../postgresql.cnpg.io_subscriptions.yaml | 146 ++++++ config/crd/kustomization.yaml | 14 + .../cloudnative-pg.clusterserviceversion.yaml | 49 +- config/olm-samples/kustomization.yaml | 2 + .../postgresql_v1_publication.yaml | 11 + .../postgresql_v1_subscription.yaml | 11 + config/rbac/kustomization.yaml | 4 + config/rbac/publication_editor_role.yaml | 27 ++ config/rbac/publication_viewer_role.yaml | 23 + config/rbac/role.yaml | 4 + config/rbac/subscription_editor_role.yaml | 27 ++ config/rbac/subscription_viewer_role.yaml | 23 + contribute/e2e_testing_environment/README.md | 1 + docs/mkdocs.yml | 1 + docs/src/cloudnative-pg.v1.md | 430 +++++++++++++++++ docs/src/database_import.md | 18 + docs/src/e2e.md | 1 + docs/src/index.md | 7 +- docs/src/logical_replication.md | 444 ++++++++++++++++++ docs/src/operator_capability_levels.md | 9 + docs/src/replication.md | 8 +- .../cluster-example-logical-destination.yaml | 20 +- .../cluster-example-logical-source.yaml | 28 +- .../samples/publication-example-objects.yaml | 16 + docs/src/samples/publication-example.yaml | 11 + docs/src/samples/subscription-example.yaml | 11 + internal/cmd/manager/instance/run/cmd.go | 24 + .../logical/publication/create/publication.go | 16 +- .../publication/create/publication_test.go | 8 +- internal/controller/cluster_controller.go | 4 +- internal/controller/finalizers_delete.go | 84 +++- internal/controller/finalizers_delete_test.go | 169 ++++++- internal/management/controller/common.go | 111 +++++ .../controller/database_controller.go | 14 +- internal/management/controller/finalizers.go | 49 ++ internal/management/controller/manager.go | 13 +- .../controller/publication_controller.go | 178 +++++++ .../controller/publication_controller_sql.go | 193 ++++++++ .../publication_controller_sql_test.go | 225 +++++++++ .../controller/subscription_controller.go | 195 ++++++++ .../controller/subscription_controller_sql.go | 150 ++++++ .../subscription_controller_sql_test.go | 169 +++++++ .../subscription_controller_test.go | 32 ++ pkg/specs/roles.go | 56 +++ pkg/specs/roles_test.go | 2 +- pkg/utils/finalizers.go | 8 + .../destination-cluster.yaml.template | 48 ++ .../destination-database.yaml | 9 + .../e2e/fixtures/declarative_pub_sub/pub.yaml | 11 + .../source-cluster.yaml.template | 48 ++ .../declarative_pub_sub/source-database.yaml | 9 + .../e2e/fixtures/declarative_pub_sub/sub.yaml | 11 + tests/e2e/publication_subscription_test.go | 236 ++++++++++ tests/labels.go | 3 + 63 files changed, 4235 insertions(+), 83 deletions(-) create mode 100644 api/v1/database_funcs.go create mode 100644 api/v1/publication_funcs.go create mode 100644 api/v1/publication_types.go create mode 100644 api/v1/subscription_funcs.go create mode 100644 api/v1/subscription_types.go create mode 100644 
config/crd/bases/postgresql.cnpg.io_publications.yaml create mode 100644 config/crd/bases/postgresql.cnpg.io_subscriptions.yaml create mode 100644 config/olm-samples/postgresql_v1_publication.yaml create mode 100644 config/olm-samples/postgresql_v1_subscription.yaml create mode 100644 config/rbac/publication_editor_role.yaml create mode 100644 config/rbac/publication_viewer_role.yaml create mode 100644 config/rbac/subscription_editor_role.yaml create mode 100644 config/rbac/subscription_viewer_role.yaml create mode 100644 docs/src/logical_replication.md create mode 100644 docs/src/samples/publication-example-objects.yaml create mode 100644 docs/src/samples/publication-example.yaml create mode 100644 docs/src/samples/subscription-example.yaml create mode 100644 internal/management/controller/common.go create mode 100644 internal/management/controller/finalizers.go create mode 100644 internal/management/controller/publication_controller.go create mode 100644 internal/management/controller/publication_controller_sql.go create mode 100644 internal/management/controller/publication_controller_sql_test.go create mode 100644 internal/management/controller/subscription_controller.go create mode 100644 internal/management/controller/subscription_controller_sql.go create mode 100644 internal/management/controller/subscription_controller_sql_test.go create mode 100644 internal/management/controller/subscription_controller_test.go create mode 100644 tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template create mode 100644 tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml create mode 100644 tests/e2e/fixtures/declarative_pub_sub/pub.yaml create mode 100644 tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template create mode 100644 tests/e2e/fixtures/declarative_pub_sub/source-database.yaml create mode 100644 tests/e2e/fixtures/declarative_pub_sub/sub.yaml create mode 100644 tests/e2e/publication_subscription_test.go diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 9bc6770ca5..c72a9994a2 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -340,6 +340,13 @@ PrimaryUpdateStrategy PriorityClass PriorityClassName ProjectedVolumeSource +PublicationReclaimPolicy +PublicationSpec +PublicationStatus +PublicationTarget +PublicationTargetAllTables +PublicationTargetObject +PublicationTargetTable PullPolicy QoS Quaresima @@ -425,6 +432,9 @@ StatefulSets StorageClass StorageConfiguration Storages +SubscriptionReclaimPolicy +SubscriptionSpec +SubscriptionStatus SuccessfullyExtracted SwitchReplicaClusterStatus SyncReplicaElectionConstraints @@ -494,6 +504,7 @@ addons affinityconfiguration aks albert +allTables allnamespaces alloc allocator @@ -741,6 +752,7 @@ executables expirations extensibility externalCluster +externalClusterName externalClusterSecretVersion externalClusters externalclusters @@ -1063,6 +1075,9 @@ promotionTimeout promotionToken provisioner psql +publicationDBName +publicationName +publicationReclaimPolicy pv pvc pvcCount @@ -1208,6 +1223,7 @@ subcommand subcommands subdirectory subresource +subscriptionReclaimPolicy substatement successfullyExtracted sudo @@ -1226,6 +1242,8 @@ syslog systemd sysv tAc +tableExpression +tablesInSchema tablespace tablespaceClassName tablespaceMapFile diff --git a/PROJECT b/PROJECT index 59c5113ca1..27b49f0be3 100644 --- a/PROJECT +++ b/PROJECT @@ -66,3 +66,21 @@ resources: kind: Database path: github.com/cloudnative-pg/cloudnative-pg/api/v1 version: v1 +- api: + crdVersion: v1 + 
namespaced: true
+    controller: true
+    domain: cnpg.io
+    group: postgresql
+    kind: Publication
+    path: github.com/cloudnative-pg/cloudnative-pg/api/v1
+    version: v1
+- api:
+    crdVersion: v1
+    namespaced: true
+    controller: true
+    domain: cnpg.io
+    group: postgresql
+    kind: Subscription
+    path: github.com/cloudnative-pg/cloudnative-pg/api/v1
+    version: v1
diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go
new file mode 100644
index 0000000000..879d97490c
--- /dev/null
+++ b/api/v1/database_funcs.go
@@ -0,0 +1,24 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import corev1 "k8s.io/api/core/v1"
+
+// GetClusterRef returns the cluster reference of the database
+func (db *Database) GetClusterRef() corev1.LocalObjectReference {
+	return db.Spec.ClusterRef
+}
diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go
new file mode 100644
index 0000000000..e67255b68c
--- /dev/null
+++ b/api/v1/publication_funcs.go
@@ -0,0 +1,46 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/utils/ptr"
+)
+
+// SetAsFailed sets the publication as failed with the given error
+func (pub *Publication) SetAsFailed(err error) {
+	pub.Status.Applied = ptr.To(false)
+	pub.Status.Message = err.Error()
+}
+
+// SetAsUnknown sets the publication as unknown with the given error
+func (pub *Publication) SetAsUnknown(err error) {
+	pub.Status.Applied = nil
+	pub.Status.Message = err.Error()
+}
+
+// SetAsReady sets the publication as working correctly
+func (pub *Publication) SetAsReady() {
+	pub.Status.Applied = ptr.To(true)
+	pub.Status.Message = ""
+	pub.Status.ObservedGeneration = pub.Generation
+}
+
+// GetClusterRef returns the cluster reference of the publication
+func (pub *Publication) GetClusterRef() corev1.LocalObjectReference {
+	return pub.Spec.ClusterRef
+}
diff --git a/api/v1/publication_types.go b/api/v1/publication_types.go
new file mode 100644
index 0000000000..39be47ef63
--- /dev/null
+++ b/api/v1/publication_types.go
@@ -0,0 +1,162 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PublicationReclaimPolicy defines a policy for end-of-life maintenance of Publications. +// +enum +type PublicationReclaimPolicy string + +const ( + // PublicationReclaimDelete means the publication will be deleted from Kubernetes on release + // from its claim. + PublicationReclaimDelete PublicationReclaimPolicy = "delete" + + // PublicationReclaimRetain means the publication will be left in its current phase for manual + // reclamation by the administrator. The default policy is Retain. + PublicationReclaimRetain PublicationReclaimPolicy = "retain" +) + +// PublicationSpec defines the desired state of Publication +type PublicationSpec struct { + // The name of the PostgreSQL cluster that identifies the "publisher" + ClusterRef corev1.LocalObjectReference `json:"cluster"` + + // The name of the publication inside PostgreSQL + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" + Name string `json:"name"` + + // The name of the database where the publication will be installed in + // the "publisher" cluster + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable" + DBName string `json:"dbname"` + + // Publication parameters part of the `WITH` clause as expected by + // PostgreSQL `CREATE PUBLICATION` command + // +optional + Parameters map[string]string `json:"parameters,omitempty"` + + // Target of the publication as expected by PostgreSQL `CREATE PUBLICATION` command + Target PublicationTarget `json:"target"` + + // The policy for end-of-life maintenance of this publication + // +kubebuilder:validation:Enum=delete;retain + // +kubebuilder:default:=retain + // +optional + ReclaimPolicy PublicationReclaimPolicy `json:"publicationReclaimPolicy,omitempty"` +} + +// PublicationTarget is what this publication should publish +// +kubebuilder:validation:XValidation:rule="(has(self.allTables) && !has(self.objects)) || (!has(self.allTables) && has(self.objects))",message="allTables and objects are mutually exclusive" +type PublicationTarget struct { + // Marks the publication as one that replicates changes for all tables + // in the database, including tables created in the future. + // Corresponding to `FOR ALL TABLES` in PostgreSQL. 
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="allTables is immutable" + // +optional + AllTables bool `json:"allTables,omitempty"` + + // Just the following schema objects + // +kubebuilder:validation:XValidation:rule="!(self.exists(o, has(o.table) && has(o.table.columns)) && self.exists(o, has(o.tablesInSchema)))",message="specifying a column list when the publication also publishes tablesInSchema is not supported" + // +kubebuilder:validation:MaxItems=100000 + // +optional + Objects []PublicationTargetObject `json:"objects,omitempty"` +} + +// PublicationTargetObject is an object to publish +// +kubebuilder:validation:XValidation:rule="(has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) && has(self.table))",message="tablesInSchema and table are mutually exclusive" +type PublicationTargetObject struct { + // Marks the publication as one that replicates changes for all tables + // in the specified list of schemas, including tables created in the + // future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. + // +optional + TablesInSchema string `json:"tablesInSchema,omitempty"` + + // Specifies a list of tables to add to the publication. Corresponding + // to `FOR TABLE` in PostgreSQL. + // +optional + Table *PublicationTargetTable `json:"table,omitempty"` +} + +// PublicationTargetTable is a table to publish +type PublicationTargetTable struct { + // Whether to limit to the table only or include all its descendants + // +optional + Only bool `json:"only,omitempty"` + + // The table name + Name string `json:"name"` + + // The schema name + // +optional + Schema string `json:"schema,omitempty"` + + // The columns to publish + // +optional + Columns []string `json:"columns,omitempty"` +} + +// PublicationStatus defines the observed state of Publication +type PublicationStatus struct { + // A sequence number representing the latest + // desired state that was synchronized + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Applied is true if the publication was reconciled correctly + // +optional + Applied *bool `json:"applied,omitempty"` + + // Message is the reconciliation output message + // +optional + Message string `json:"message,omitempty"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name" +// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name" +// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message" + +// Publication is the Schema for the publications API +type Publication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec PublicationSpec `json:"spec"` + Status PublicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PublicationList contains a list of Publication +type PublicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Publication `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Publication{}, &PublicationList{}) +} diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go new file mode 100644 index 0000000000..49a418bdae --- /dev/null +++ 
b/api/v1/subscription_funcs.go @@ -0,0 +1,46 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" +) + +// SetAsFailed sets the subscription as failed with the given error +func (sub *Subscription) SetAsFailed(err error) { + sub.Status.Applied = ptr.To(false) + sub.Status.Message = err.Error() +} + +// SetAsUnknown sets the subscription as unknown with the given error +func (sub *Subscription) SetAsUnknown(err error) { + sub.Status.Applied = nil + sub.Status.Message = err.Error() +} + +// SetAsReady sets the subscription as working correctly +func (sub *Subscription) SetAsReady() { + sub.Status.Applied = ptr.To(true) + sub.Status.Message = "" + sub.Status.ObservedGeneration = sub.Generation +} + +// GetClusterRef returns the cluster reference of the subscription +func (sub *Subscription) GetClusterRef() corev1.LocalObjectReference { + return sub.Spec.ClusterRef +} diff --git a/api/v1/subscription_types.go b/api/v1/subscription_types.go new file mode 100644 index 0000000000..628ec8a4da --- /dev/null +++ b/api/v1/subscription_types.go @@ -0,0 +1,121 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SubscriptionReclaimPolicy describes a policy for end-of-life maintenance of Subscriptions. +// +enum +type SubscriptionReclaimPolicy string + +const ( + // SubscriptionReclaimDelete means the subscription will be deleted from Kubernetes on release + // from its claim. + SubscriptionReclaimDelete SubscriptionReclaimPolicy = "delete" + + // SubscriptionReclaimRetain means the subscription will be left in its current phase for manual + // reclamation by the administrator. The default policy is Retain. 
+	SubscriptionReclaimRetain SubscriptionReclaimPolicy = "retain"
+)
+
+// SubscriptionSpec defines the desired state of Subscription
+type SubscriptionSpec struct {
+	// The name of the PostgreSQL cluster that identifies the "subscriber"
+	ClusterRef corev1.LocalObjectReference `json:"cluster"`
+
+	// The name of the subscription inside PostgreSQL
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable"
+	Name string `json:"name"`
+
+	// The name of the database where the subscription will be installed in
+	// the "subscriber" cluster
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable"
+	DBName string `json:"dbname"`
+
+	// Subscription parameters part of the `WITH` clause as expected by
+	// PostgreSQL `CREATE SUBSCRIPTION` command
+	// +optional
+	Parameters map[string]string `json:"parameters,omitempty"`
+
+	// The name of the publication inside the PostgreSQL database in the
+	// "publisher"
+	PublicationName string `json:"publicationName"`
+
+	// The name of the database containing the publication on the external
+	// cluster. Defaults to the one in the external cluster definition.
+	// +optional
+	PublicationDBName string `json:"publicationDBName,omitempty"`
+
+	// The name of the external cluster with the publication ("publisher")
+	ExternalClusterName string `json:"externalClusterName"`
+
+	// The policy for end-of-life maintenance of this subscription
+	// +kubebuilder:validation:Enum=delete;retain
+	// +kubebuilder:default:=retain
+	// +optional
+	ReclaimPolicy SubscriptionReclaimPolicy `json:"subscriptionReclaimPolicy,omitempty"`
+}
+
+// SubscriptionStatus defines the observed state of Subscription
+type SubscriptionStatus struct {
+	// A sequence number representing the latest
+	// desired state that was synchronized
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// Applied is true if the subscription was reconciled correctly
+	// +optional
+	Applied *bool `json:"applied,omitempty"`
+
+	// Message is the reconciliation output message
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// +genclient
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".spec.cluster.name"
+// +kubebuilder:printcolumn:name="PG Name",type="string",JSONPath=".spec.name"
+// +kubebuilder:printcolumn:name="Applied",type="boolean",JSONPath=".status.applied"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Latest reconciliation message"
+
+// Subscription is the Schema for the subscriptions API
+type Subscription struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec   SubscriptionSpec   `json:"spec"`
+	Status SubscriptionStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SubscriptionList contains a list of Subscription
+type SubscriptionList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Subscription `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Subscription{}, &SubscriptionList{})
+}
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index 47e5083fc9..0c367fc416 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -2201,6 +2201,171 @@ func (in *PostgresConfiguration)
DeepCopy() *PostgresConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Publication) DeepCopyInto(out *Publication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Publication. +func (in *Publication) DeepCopy() *Publication { + if in == nil { + return nil + } + out := new(Publication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Publication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationList) DeepCopyInto(out *PublicationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Publication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationList. +func (in *PublicationList) DeepCopy() *PublicationList { + if in == nil { + return nil + } + out := new(PublicationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PublicationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationSpec) DeepCopyInto(out *PublicationSpec) { + *out = *in + out.ClusterRef = in.ClusterRef + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Target.DeepCopyInto(&out.Target) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationSpec. +func (in *PublicationSpec) DeepCopy() *PublicationSpec { + if in == nil { + return nil + } + out := new(PublicationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationStatus) DeepCopyInto(out *PublicationStatus) { + *out = *in + if in.Applied != nil { + in, out := &in.Applied, &out.Applied + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationStatus. +func (in *PublicationStatus) DeepCopy() *PublicationStatus { + if in == nil { + return nil + } + out := new(PublicationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PublicationTarget) DeepCopyInto(out *PublicationTarget) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]PublicationTargetObject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTarget. +func (in *PublicationTarget) DeepCopy() *PublicationTarget { + if in == nil { + return nil + } + out := new(PublicationTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationTargetObject) DeepCopyInto(out *PublicationTargetObject) { + *out = *in + if in.Table != nil { + in, out := &in.Table, &out.Table + *out = new(PublicationTargetTable) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTargetObject. +func (in *PublicationTargetObject) DeepCopy() *PublicationTargetObject { + if in == nil { + return nil + } + out := new(PublicationTargetObject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicationTargetTable) DeepCopyInto(out *PublicationTargetTable) { + *out = *in + if in.Columns != nil { + in, out := &in.Columns, &out.Columns + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicationTargetTable. +func (in *PublicationTargetTable) DeepCopy() *PublicationTargetTable { + if in == nil { + return nil + } + out := new(PublicationTargetTable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RecoveryTarget) DeepCopyInto(out *RecoveryTarget) { *out = *in @@ -2612,6 +2777,108 @@ func (in *StorageConfiguration) DeepCopy() *StorageConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subscription) DeepCopyInto(out *Subscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription. +func (in *Subscription) DeepCopy() *Subscription { + if in == nil { + return nil + } + out := new(Subscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Subscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Subscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList. 
+func (in *SubscriptionList) DeepCopy() *SubscriptionList { + if in == nil { + return nil + } + out := new(SubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { + *out = *in + out.ClusterRef = in.ClusterRef + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. +func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec { + if in == nil { + return nil + } + out := new(SubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { + *out = *in + if in.Applied != nil { + in, out := &in.Applied, &out.Applied + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus. +func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus { + if in == nil { + return nil + } + out := new(SubscriptionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SwitchReplicaClusterStatus) DeepCopyInto(out *SwitchReplicaClusterStatus) { *out = *in diff --git a/config/crd/bases/postgresql.cnpg.io_publications.yaml b/config/crd/bases/postgresql.cnpg.io_publications.yaml new file mode 100644 index 0000000000..2e0fdaf0e9 --- /dev/null +++ b/config/crd/bases/postgresql.cnpg.io_publications.yaml @@ -0,0 +1,195 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml new file mode 100644 index 0000000000..24a9ff12a1 --- /dev/null +++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml @@ -0,0 +1,146 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 5e4757d42c..6100960f12 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,6 +11,9 @@ resources: - bases/postgresql.cnpg.io_imagecatalogs.yaml - bases/postgresql.cnpg.io_clusterimagecatalogs.yaml - bases/postgresql.cnpg.io_databases.yaml +- bases/postgresql.cnpg.io_publications.yaml +- bases/postgresql.cnpg.io_subscriptions.yaml + # +kubebuilder:scaffold:crdkustomizeresource patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. @@ -52,6 +55,17 @@ patches: # kind: CustomResourceDefinition # name: poolers.postgresql.cnpg.io #- path: patches/cainjection_in_databases.yaml +# target: +# kind: CustomResourceDefinition +# name: databases.postgresql.cnpg.io +#- path: patches/cainjection_in_publications.yaml +# target: +# kind: CustomResourceDefinition +# name: publications.postgresql.cnpg.io +#- path: patches/cainjection_in_subscriptions.yaml +# target: +# kind: CustomResourceDefinition +# name: subscriptions.postgresql.cnpg.io # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index 6f7a2108f5..0bf3485944 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -688,7 +688,7 @@ spec: specDescriptors: - path: databaseReclaimPolicy displayName: Database reclaim policy - description: Database reclame policy + description: Database reclaim policy - path: cluster displayName: Cluster requested to create the database description: Cluster requested to create the database @@ -698,3 +698,50 @@ spec: - path: owner displayName: Database Owner description: Database Owner + - kind: Publication + name: publications.postgresql.cnpg.io + displayName: Publication + description: Declarative publication + version: v1 + resources: + - kind: Cluster + name: '' + version: v1 + specDescriptors: + - path: name + displayName: Publication name + description: Publication name + - path: dbname + displayName: Database name + description: Database name + - path: cluster + displayName: Cluster requested to create the publication + description: Cluster requested to create the publication + - path: target + displayName: Publication target + description: Publication target + - kind: Subscription + name: subscriptions.postgresql.cnpg.io + displayName: Subscription + description: Declarative subscription + version: v1 + resources: + - kind: Cluster + name: '' + version: v1 + specDescriptors: + - path: name + displayName: Subscription name + description: Subscription name + - path: dbname + displayName: Database name + description: Database name + - path: publicationName + displayName: Publication name + description: Publication name + - path: cluster + displayName: Cluster requested to create the subscription + description: Cluster requested to create the subscription + - path: externalClusterName + displayName: Name of the external cluster with publication + description: Name of the external cluster with publication diff --git a/config/olm-samples/kustomization.yaml b/config/olm-samples/kustomization.yaml index 205a50a544..6bb494f569 100644 --- a/config/olm-samples/kustomization.yaml +++ b/config/olm-samples/kustomization.yaml @@ -6,3 +6,5 @@ resources: - postgresql_v1_imagecatalog.yaml - postgresql_v1_clusterimagecatalog.yaml - postgresql_v1_database.yaml +- postgresql_v1_publication.yaml +- postgresql_v1_subscription.yaml diff --git a/config/olm-samples/postgresql_v1_publication.yaml b/config/olm-samples/postgresql_v1_publication.yaml new file mode 100644 index 0000000000..598c02a2bb --- /dev/null +++ b/config/olm-samples/postgresql_v1_publication.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: publication-sample +spec: + name: pub + dbname: app + cluster: + name: cluster-sample + target: + allTables: true diff --git a/config/olm-samples/postgresql_v1_subscription.yaml b/config/olm-samples/postgresql_v1_subscription.yaml new file mode 100644 index 0000000000..ecc016619b --- /dev/null +++ b/config/olm-samples/postgresql_v1_subscription.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: subscription-sample +spec: + name: sub + dbname: app + publicationName: pub + cluster: + name: cluster-sample-dest + externalClusterName: cluster-sample diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 99493b37c4..a561c73dc9 100644 --- 
a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -14,6 +14,10 @@ resources: # default, aiding admins in cluster management. Those roles are # not used by the Project itself. You can comment the following lines # if you do not want those helpers be installed with your Project. +- subscription_editor_role.yaml +- subscription_viewer_role.yaml +- publication_editor_role.yaml +- publication_viewer_role.yaml - database_editor_role.yaml - database_viewer_role.yaml diff --git a/config/rbac/publication_editor_role.yaml b/config/rbac/publication_editor_role.yaml new file mode 100644 index 0000000000..f741900fa3 --- /dev/null +++ b/config/rbac/publication_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit publications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + app.kubernetes.io/managed-by: kustomize + name: publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get diff --git a/config/rbac/publication_viewer_role.yaml b/config/rbac/publication_viewer_role.yaml new file mode 100644 index 0000000000..32e84f531f --- /dev/null +++ b/config/rbac/publication_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view publications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + app.kubernetes.io/managed-by: kustomize + name: publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index ce1e7ded88..f47a568f0d 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -141,7 +141,9 @@ rules: - clusters - databases - poolers + - publications - scheduledbackups + - subscriptions verbs: - create - delete @@ -155,7 +157,9 @@ rules: resources: - backups/status - databases/status + - publications/status - scheduledbackups/status + - subscriptions/status verbs: - get - patch diff --git a/config/rbac/subscription_editor_role.yaml b/config/rbac/subscription_editor_role.yaml new file mode 100644 index 0000000000..066b1c494d --- /dev/null +++ b/config/rbac/subscription_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit subscriptions. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + app.kubernetes.io/managed-by: kustomize + name: subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get diff --git a/config/rbac/subscription_viewer_role.yaml b/config/rbac/subscription_viewer_role.yaml new file mode 100644 index 0000000000..4cf8ff0d06 --- /dev/null +++ b/config/rbac/subscription_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view subscriptions. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + app.kubernetes.io/managed-by: kustomize + name: subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md index 30a41ddaf4..dd956ab464 100644 --- a/contribute/e2e_testing_environment/README.md +++ b/contribute/e2e_testing_environment/README.md @@ -206,6 +206,7 @@ exported, it will select all medium test cases from the feature type provided. | `security` | | `maintenance` | | `tablespaces` | +| `publication-subscription` | | `declarative-databases` | ex: diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index b9808c650d..2d50e97023 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -31,6 +31,7 @@ nav: - failure_modes.md - rolling_update.md - replication.md + - logical_replication.md - backup.md - backup_barmanobjectstore.md - wal_archiving.md diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 0868475df7..c323ced382 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -12,7 +12,9 @@ - [Database](#postgresql-cnpg-io-v1-Database) - [ImageCatalog](#postgresql-cnpg-io-v1-ImageCatalog) - [Pooler](#postgresql-cnpg-io-v1-Pooler) +- [Publication](#postgresql-cnpg-io-v1-Publication) - [ScheduledBackup](#postgresql-cnpg-io-v1-ScheduledBackup) +- [Subscription](#postgresql-cnpg-io-v1-Subscription) ## Backup {#postgresql-cnpg-io-v1-Backup} @@ -224,6 +226,42 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- +## Publication {#postgresql-cnpg-io-v1-Publication} + + +**Appears in:** + + + +

Publication is the Schema for the publications API

+ + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.cnpg.io/v1
kind [Required]
string
Publication
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+PublicationSpec +
+ No description provided.
status [Required]
+PublicationStatus +
+ No description provided.
+ ## ScheduledBackup {#postgresql-cnpg-io-v1-ScheduledBackup} @@ -262,6 +300,42 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- +## Subscription {#postgresql-cnpg-io-v1-Subscription} + + +**Appears in:** + + + +

Subscription is the Schema for the subscriptions API

+ + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion [Required]
string
postgresql.cnpg.io/v1
kind [Required]
string
Subscription
metadata [Required]
+meta/v1.ObjectMeta +
+ No description provided.Refer to the Kubernetes API documentation for the fields of the metadata field.
spec [Required]
+SubscriptionSpec +
+ No description provided.
status [Required]
+SubscriptionStatus +
+ No description provided.
+ ## AffinityConfiguration {#postgresql-cnpg-io-v1-AffinityConfiguration} @@ -3955,6 +4029,232 @@ the primary server of the cluster as part of rolling updates

+## PublicationReclaimPolicy {#postgresql-cnpg-io-v1-PublicationReclaimPolicy} + +(Alias of `string`) + +**Appears in:** + +- [PublicationSpec](#postgresql-cnpg-io-v1-PublicationSpec) + + +

PublicationReclaimPolicy defines a policy for end-of-life maintenance of Publications.

+ + + + +## PublicationSpec {#postgresql-cnpg-io-v1-PublicationSpec} + + +**Appears in:** + +- [Publication](#postgresql-cnpg-io-v1-Publication) + + +

PublicationSpec defines the desired state of Publication

+ + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+core/v1.LocalObjectReference +
+

The name of the PostgreSQL cluster that identifies the "publisher"

+
name [Required]
+string +
+

The name of the publication inside PostgreSQL

+
dbname [Required]
+string +
+

The name of the database where the publication will be installed in +the "publisher" cluster

+
parameters
+map[string]string +
+

Publication parameters part of the WITH clause as expected by +PostgreSQL CREATE PUBLICATION command

+
target [Required]
+PublicationTarget +
+

Target of the publication as expected by PostgreSQL CREATE PUBLICATION command

+
publicationReclaimPolicy
+PublicationReclaimPolicy +
+

The policy for end-of-life maintenance of this publication

+
+ +## PublicationStatus {#postgresql-cnpg-io-v1-PublicationStatus} + + +**Appears in:** + +- [Publication](#postgresql-cnpg-io-v1-Publication) + + +

PublicationStatus defines the observed state of Publication

+ + + + + + + + + + + + + + + +
FieldDescription
observedGeneration
+int64 +
+

A sequence number representing the latest +desired state that was synchronized

+
applied
+bool +
+

Applied is true if the publication was reconciled correctly

+
message
+string +
+

Message is the reconciliation output message

+
+ +## PublicationTarget {#postgresql-cnpg-io-v1-PublicationTarget} + + +**Appears in:** + +- [PublicationSpec](#postgresql-cnpg-io-v1-PublicationSpec) + + +

PublicationTarget is what this publication should publish

+ + + + + + + + + + + + +
FieldDescription
allTables
+bool +
+

Marks the publication as one that replicates changes for all tables +in the database, including tables created in the future. +Corresponding to FOR ALL TABLES in PostgreSQL.

+
objects
+[]PublicationTargetObject +
+

Just the following schema objects

+
+ +## PublicationTargetObject {#postgresql-cnpg-io-v1-PublicationTargetObject} + + +**Appears in:** + +- [PublicationTarget](#postgresql-cnpg-io-v1-PublicationTarget) + + +

PublicationTargetObject is an object to publish

+ + + + + + + + + + + + +
FieldDescription
tablesInSchema
+string +
+

Marks the publication as one that replicates changes for all tables +in the specified list of schemas, including tables created in the +future. Corresponding to FOR TABLES IN SCHEMA in PostgreSQL.

+
table
+PublicationTargetTable +
+

Specifies a list of tables to add to the publication. Corresponding +to FOR TABLE in PostgreSQL.

+
+ +## PublicationTargetTable {#postgresql-cnpg-io-v1-PublicationTargetTable} + + +**Appears in:** + +- [PublicationTargetObject](#postgresql-cnpg-io-v1-PublicationTargetObject) + + +

PublicationTargetTable is a table to publish

+ + + + + + + + + + + + + + + + + + +
FieldDescription
only
+bool +
+

Whether to limit to the table only or include all its descendants

+
name [Required]
+string +
+

The table name

+
schema
+string +
+

The schema name

+
columns
+[]string +
+

The columns to publish

+
+ ## RecoveryTarget {#postgresql-cnpg-io-v1-RecoveryTarget} @@ -4817,6 +5117,136 @@ Size cannot be decreased.

+## SubscriptionReclaimPolicy {#postgresql-cnpg-io-v1-SubscriptionReclaimPolicy} + +(Alias of `string`) + +**Appears in:** + +- [SubscriptionSpec](#postgresql-cnpg-io-v1-SubscriptionSpec) + + +

SubscriptionReclaimPolicy describes a policy for end-of-life maintenance of Subscriptions.

+ + + + +## SubscriptionSpec {#postgresql-cnpg-io-v1-SubscriptionSpec} + + +**Appears in:** + +- [Subscription](#postgresql-cnpg-io-v1-Subscription) + + +

SubscriptionSpec defines the desired state of Subscription

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
cluster [Required]
+core/v1.LocalObjectReference +
+

The name of the PostgreSQL cluster that identifies the "subscriber"

+
name [Required]
+string +
+

The name of the subscription inside PostgreSQL

+
dbname [Required]
+string +
+

The name of the database where the subscription will be installed in
+the "subscriber" cluster

+
parameters
+map[string]string +
+

Subscription parameters part of the WITH clause as expected by +PostgreSQL CREATE SUBSCRIPTION command

+
publicationName [Required]
+string +
+

The name of the publication inside the PostgreSQL database in the +"publisher"

+
publicationDBName
+string +
+

The name of the database containing the publication on the external +cluster. Defaults to the one in the external cluster definition.

+
externalClusterName [Required]
+string +
+

The name of the external cluster with the publication ("publisher")

+
subscriptionReclaimPolicy
+SubscriptionReclaimPolicy +
+

The policy for end-of-life maintenance of this subscription

+
+ +## SubscriptionStatus {#postgresql-cnpg-io-v1-SubscriptionStatus} + + +**Appears in:** + +- [Subscription](#postgresql-cnpg-io-v1-Subscription) + + +

SubscriptionStatus defines the observed state of Subscription

+ + + + + + + + + + + + + + + +
FieldDescription
observedGeneration
+int64 +
+

A sequence number representing the latest +desired state that was synchronized

+
applied
+bool +
+

Applied is true if the subscription was reconciled correctly

+
message
+string +
+

Message is the reconciliation output message

+
+ ## SwitchReplicaClusterStatus {#postgresql-cnpg-io-v1-SwitchReplicaClusterStatus} diff --git a/docs/src/database_import.md b/docs/src/database_import.md index f8bba32c4a..3308b5f6f1 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -267,3 +267,21 @@ topic is beyond the scope of CloudNativePG, we recommend that you reduce unnecessary writes in the checkpoint area by tuning Postgres GUCs like `shared_buffers`, `max_wal_size`, `checkpoint_timeout` directly in the `Cluster` configuration. + +## Online Import and Upgrades + +Logical replication offers a powerful way to import any PostgreSQL database +accessible over the network using the following approach: + +- **Import Bootstrap with Schema-Only Option**: Initialize the schema in the + target database before replication begins. +- **`Subscription` Resource**: Set up continuous replication to synchronize + data changes. + +This technique can also be leveraged for performing major PostgreSQL upgrades +with minimal downtime, making it ideal for seamless migrations and system +upgrades. + +For more details, including limitations and best practices, refer to the +[Logical Replication](logical_replication.md) section in the documentation. + diff --git a/docs/src/e2e.md b/docs/src/e2e.md index e796db13b6..de06101da5 100644 --- a/docs/src/e2e.md +++ b/docs/src/e2e.md @@ -60,6 +60,7 @@ and the following suite of E2E tests are performed on that cluster: * Replication Slots * Synchronous replication * Scale-up and scale-down of a Cluster + * Logical replication via declarative Publication / Subscription * **Replica clusters** * Bootstrapping a replica cluster from backup diff --git a/docs/src/index.md b/docs/src/index.md index 815dc0af85..06dec9712e 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -96,12 +96,15 @@ Additionally, the community provides images for the [PostGIS extension](postgis. 
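+
+As a minimal sketch, such tuning can be expressed directly in the `Cluster`
+resource through the `postgresql.parameters` stanza. The excerpt below is
+illustrative only: the values are placeholders and must be sized for your
+workload and available resources:
+
+```yaml
+postgresql:
+  parameters:
+    # Illustrative values only: reduce checkpoint pressure during the import
+    shared_buffers: "512MB"
+    max_wal_size: "8GB"
+    checkpoint_timeout: "15min"
+```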
* In-place or rolling updates for operator upgrades * TLS connections and client certificate authentication * Support for custom TLS certificates (including integration with cert-manager) -* Continuous WAL archiving to an object store (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) +* Continuous WAL archiving to an object store (AWS S3 and S3-compatible, Azure + Blob Storage, and Google Cloud Storage) * Backups on volume snapshots (where supported by the underlying storage classes) * Backups on object stores (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) * Full recovery and Point-In-Time recovery from an existing backup on volume snapshots or object stores * Offline import of existing PostgreSQL databases, including major upgrades of PostgreSQL -* Online import of existing PostgreSQL databases, including major upgrades of PostgreSQL, through PostgreSQL native logical replication (imperative, via the `cnpg` plugin) +* Online import of existing PostgreSQL databases, including major upgrades of + PostgreSQL, through PostgreSQL native logical replication (declarative, via + the `Subscription` resource) * Fencing of an entire PostgreSQL cluster, or a subset of the instances in a declarative way * Hibernation of a PostgreSQL cluster in a declarative way * Support for quorum-based and priority-based Synchronous Replication diff --git a/docs/src/logical_replication.md b/docs/src/logical_replication.md new file mode 100644 index 0000000000..345dfe0cae --- /dev/null +++ b/docs/src/logical_replication.md @@ -0,0 +1,444 @@ +# Logical Replication + +PostgreSQL extends its replication capabilities beyond physical replication, +which operates at the level of exact block addresses and byte-by-byte copying, +by offering [logical replication](https://www.postgresql.org/docs/current/logical-replication.html). +Logical replication replicates data objects and their changes based on a +defined replication identity, typically the primary key. + +Logical replication uses a publish-and-subscribe model, where subscribers +connect to publications on a publisher node. Subscribers pull data changes from +these publications and can re-publish them, enabling cascading replication and +complex topologies. + +This flexible model is particularly useful for: + +- Online data migrations +- Live PostgreSQL version upgrades +- Data distribution across systems +- Real-time analytics +- Integration with external applications + +!!! Info + For more details, examples, and limitations, please refer to the + [official PostgreSQL documentation on Logical Replication](https://www.postgresql.org/docs/current/logical-replication.html). + +**CloudNativePG** enhances this capability by providing declarative support for +key PostgreSQL logical replication objects: + +- **Publications** via the `Publication` resource +- **Subscriptions** via the `Subscription` resource + +## Publications + +In PostgreSQL's publish-and-subscribe replication model, a +[**publication**](https://www.postgresql.org/docs/current/logical-replication-publication.html) +is the source of data changes. It acts as a logical container for the change +sets (also known as *replication sets*) generated from one or more tables within +a database. Publications can be defined on any PostgreSQL 10+ instance acting +as the *publisher*, including instances managed by popular DBaaS solutions in the +public cloud. 
Each publication is tied to a single database and provides
+fine-grained control over which tables and changes are replicated.
+
+For publishers outside Kubernetes, you can [create publications using SQL](https://www.postgresql.org/docs/current/sql-createpublication.html)
+or leverage the [`cnpg publication create` plugin command](kubectl-plugin.md#logical-replication-publications).
+
+When managing `Cluster` objects with **CloudNativePG**, PostgreSQL publications
+can be defined declaratively through the `Publication` resource.
+
+!!! Info
+    Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Publication)
+    for the full list of attributes you can define for each `Publication` object.
+
+Suppose you have a cluster named `freddie` and want to replicate all tables in
+the `app` database. Here's a `Publication` manifest:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+  name: freddie-publisher
+spec:
+  cluster:
+    name: freddie
+  dbname: app
+  name: publisher
+  target:
+    allTables: true
+```
+
+In the above example:
+
+- The publication object is named `freddie-publisher` (`metadata.name`).
+- The publication is created via the primary of the `freddie` cluster
+  (`spec.cluster.name`) with name `publisher` (`spec.name`).
+- It includes all tables (`spec.target.allTables: true`) from the `app`
+  database (`spec.dbname`).
+
+!!! Important
+    While `allTables` simplifies configuration, PostgreSQL offers fine-grained
+    control for replicating specific tables or targeted data changes. For advanced
+    configurations, consult the [PostgreSQL documentation](https://www.postgresql.org/docs/current/logical-replication.html).
+    Additionally, refer to the [CloudNativePG API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-PublicationTarget)
+    for details on declaratively customizing replication targets.
+
+### Required Fields in the `Publication` Manifest
+
+The following fields are required for a `Publication` object:
+
+- `metadata.name`: Unique name for the Kubernetes `Publication` object.
+- `spec.cluster.name`: Name of the PostgreSQL cluster.
+- `spec.dbname`: Database name where the publication is created.
+- `spec.name`: Publication name in PostgreSQL.
+- `spec.target`: Specifies the tables or changes to include in the publication.
+
+The `Publication` object must reference a specific `Cluster`, determining where
+the publication will be created. It is managed by the cluster's primary instance,
+ensuring the publication is created or updated as needed.
+
+### Reconciliation and Status
+
+After creating a `Publication`, CloudNativePG manages it on the primary
+instance of the specified cluster. Following a successful reconciliation cycle,
+the `Publication` status will reflect the following:
+
+- `applied: true` indicates that the configuration has been successfully
+  applied.
+- `observedGeneration` matches `metadata.generation`, confirming the applied
+  configuration corresponds to the most recent changes.
+
+If an error occurs during reconciliation, `status.applied` will be `false`, and
+an error message will be included in the `status.message` field.
+
+### Removing a publication
+
+The `publicationReclaimPolicy` field controls the behavior when deleting a
+`Publication` object:
+
+- `retain` (default): Leaves the publication in PostgreSQL for manual
+  management.
+- `delete`: Automatically removes the publication from PostgreSQL.
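+
+With `delete`, the operator drops the publication from the target database
+when the Kubernetes object is removed, which is conceptually equivalent to
+running a statement like the following yourself. The command below is shown
+only for illustration, using the `cnpg` plugin against the `freddie` cluster
+defined above:
+
+```console
+kubectl cnpg psql freddie -- app -c 'DROP PUBLICATION publisher'
+```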
+
+Consider the following example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Publication
+metadata:
+  name: freddie-publisher
+spec:
+  cluster:
+    name: freddie
+  dbname: app
+  name: publisher
+  target:
+    allTables: true
+  publicationReclaimPolicy: delete
+```
+
+In this case, deleting the `Publication` object also removes the `publisher`
+publication from the `app` database of the `freddie` cluster.
+
+## Subscriptions
+
+In PostgreSQL's publish-and-subscribe replication model, a
+[**subscription**](https://www.postgresql.org/docs/current/logical-replication-subscription.html)
+represents the downstream component that consumes data changes.
+A subscription establishes the connection to a publisher's database and
+specifies the set of publications (one or more) it subscribes to. Subscriptions
+can be created on any supported PostgreSQL instance acting as the *subscriber*.
+
+!!! Important
+    Since schema definitions are not replicated, the subscriber must have the
+    corresponding tables already defined before data replication begins.
+
+CloudNativePG simplifies subscription management by enabling you to define them
+declaratively using the `Subscription` resource.
+
+!!! Info
+    Please refer to the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Subscription)
+    for the full list of attributes you can define for each `Subscription` object.
+
+Suppose you want to replicate changes from the `publisher` publication on the
+`app` database of the `freddie` cluster (*publisher*) to the `app` database of
+the `king` cluster (*subscriber*). Here's an example of a `Subscription`
+manifest:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+  name: freddie-to-king-subscription
+spec:
+  cluster:
+    name: king
+  dbname: app
+  name: subscriber
+  externalClusterName: freddie
+  publicationName: publisher
+```
+
+In the above example:
+
+- The subscription object is named `freddie-to-king-subscription` (`metadata.name`).
+- The subscription is created in the `app` database (`spec.dbname`) of the
+  `king` cluster (`spec.cluster.name`), with name `subscriber` (`spec.name`).
+- It connects to the `publisher` publication in the external `freddie` cluster,
+  referenced by `spec.externalClusterName`.
+
+To facilitate this setup, the `freddie` external cluster must be defined in the
+`king` cluster's configuration. Below is an example excerpt showing how to
+define the external cluster in the `king` manifest:
+
+```yaml
+externalClusters:
+  - name: freddie
+    connectionParameters:
+      host: freddie-rw.default.svc
+      user: postgres
+      dbname: app
+```
+
+!!! Info
+    For more details on configuring the `externalClusters` section, see the
+    ["Bootstrap" section](bootstrap.md#the-externalclusters-section) of the
+    documentation.
+
+As you can see, a subscription can connect to any PostgreSQL database
+accessible over the network. This flexibility allows you to seamlessly migrate
+your data into Kubernetes with nearly zero downtime. It’s an excellent option
+for transitioning from various environments, including popular cloud-based
+Database-as-a-Service (DBaaS) platforms.
+
+### Required Fields in the `Subscription` Manifest
+
+The following fields are mandatory for defining a `Subscription` object:
+
+- `metadata.name`: A unique name for the Kubernetes `Subscription` object
+  within its namespace.
+- `spec.cluster.name`: The name of the PostgreSQL cluster where the
+  subscription will be created.
+- `spec.dbname`: The name of the database in which the subscription will be
+  created.
+- `spec.name`: The name of the subscription as it will appear in PostgreSQL.
+- `spec.externalClusterName`: The name of the external cluster, as defined in
+  the `spec.cluster.name` cluster's configuration. This references the
+  publisher database.
+- `spec.publicationName`: The name of the publication in the publisher database
+  to which the subscription will connect.
+
+The `Subscription` object must reference a specific `Cluster`, determining
+where the subscription will be managed. CloudNativePG ensures that the
+subscription is created or updated on the primary instance of the specified
+cluster.
+
+### Reconciliation and Status
+
+After creating a `Subscription`, CloudNativePG manages it on the primary
+instance of the specified cluster. Following a successful reconciliation cycle,
+the `Subscription` status will reflect the following:
+
+- `applied: true` indicates that the configuration has been successfully
+  applied.
+- `observedGeneration` matches `metadata.generation`, confirming the applied
+  configuration corresponds to the most recent changes.
+
+If an error occurs during reconciliation, `status.applied` will be `false`, and
+an error message will be included in the `status.message` field.
+
+### Removing a subscription
+
+The `subscriptionReclaimPolicy` field controls the behavior when deleting a
+`Subscription` object:
+
+- `retain` (default): Leaves the subscription in PostgreSQL for manual
+  management.
+- `delete`: Automatically removes the subscription from PostgreSQL.
+
+Consider the following example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Subscription
+metadata:
+  name: freddie-to-king-subscription
+spec:
+  cluster:
+    name: king
+  dbname: app
+  name: subscriber
+  externalClusterName: freddie
+  publicationName: publisher
+  subscriptionReclaimPolicy: delete
+```
+
+In this case, deleting the `Subscription` object also removes the `subscriber`
+subscription from the `app` database of the `king` cluster.
+
+## Limitations
+
+Logical replication in PostgreSQL has some inherent limitations, as outlined in
+the [official documentation](https://www.postgresql.org/docs/current/logical-replication-restrictions.html).
+Notably, the following objects are not replicated:
+
+- **Database schema and DDL commands**
+- **Sequence data**
+- **Large objects**
+
+### Addressing Schema Replication
+
+The first limitation, related to schema replication, can be easily addressed
+using CloudNativePG's capabilities. For instance, you can leverage the `import`
+bootstrap feature to copy the schema of the tables you need to replicate.
+Alternatively, you can manually create the schema as you would for any
+PostgreSQL database.
+
+### Handling Sequences
+
+While sequences are not automatically kept in sync through logical replication,
+CloudNativePG provides a practical workaround for live migrations: the
+[`cnpg` plugin](kubectl-plugin.md#synchronizing-sequences) can synchronize
+sequence values, ensuring consistency between the publisher and subscriber
+databases.
+
+## Example of live migration and major Postgres upgrade with logical replication
+
+To highlight the powerful capabilities of logical replication, this example
+demonstrates how to replicate data from a publisher database (`freddie`)
+running PostgreSQL 16 to a subscriber database (`king`) running the latest
+PostgreSQL version. This setup can be deployed in your Kubernetes cluster for
+evaluation and hands-on learning.
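+
+As you apply the manifests in the steps below, you can follow the
+reconciliation of both objects from `kubectl`, thanks to the printer columns
+defined by the `Publication` and `Subscription` CRDs. The output below is only
+a sketch of what to expect; fully qualified resource names avoid clashes with
+other `Subscription` kinds that may exist on the cluster, and the `MESSAGE`
+column (empty on success) is omitted:
+
+```console
+kubectl get publications.postgresql.cnpg.io,subscriptions.postgresql.cnpg.io
+NAME                                               AGE   CLUSTER   PG NAME     APPLIED
+publication.postgresql.cnpg.io/freddie-publisher   2m    freddie   publisher   true
+
+NAME                                                           AGE   CLUSTER   PG NAME      APPLIED
+subscription.postgresql.cnpg.io/freddie-to-king-subscription   1m    king      subscriber   true
+```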
+ +This example illustrates how logical replication facilitates live migrations +and upgrades between PostgreSQL versions while ensuring data consistency. By +combining logical replication with CloudNativePG, you can easily set up, +manage, and evaluate such scenarios in a Kubernetes environment. + +### Step 1: Setting Up the Publisher (`freddie`) + +The first step involves creating a `freddie` PostgreSQL cluster with version 16. +The cluster contains a single instance and includes an `app` database +initialized with a table, `n`, storing 10,000 numbers. A logical replication +publication named `publisher` is also configured to include all tables in the +database. + +Here’s the manifest for setting up the `freddie` cluster and its publication +resource: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: freddie +spec: + instances: 1 + + imageName: ghcr.io/cloudnative-pg/postgresql:16 + + storage: + size: 1Gi + + bootstrap: + initdb: + postInitApplicationSQL: + - CREATE TABLE n (i SERIAL PRIMARY KEY, m INTEGER) + - INSERT INTO n (m) (SELECT generate_series(1, 10000)) + - ALTER TABLE n OWNER TO app + + managed: + roles: + - name: app + login: true + replication: true +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: freddie-publisher +spec: + cluster: + name: freddie + dbname: app + name: publisher + target: + allTables: true +``` + +### Step 2: Setting Up the Subscriber (`king`) + +Next, create the `king` PostgreSQL cluster, running the latest version of +PostgreSQL. This cluster initializes by importing the schema from the `app` +database on the `freddie` cluster using the external cluster configuration. A +`Subscription` resource, `freddie-to-king-subscription`, is then configured to +consume changes published by the `publisher` on `freddie`. + +Below is the manifest for setting up the `king` cluster and its subscription: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: king +spec: + instances: 1 + + storage: + size: 1Gi + + bootstrap: + initdb: + import: + type: microservice + schemaOnly: true + databases: + - app + source: + externalCluster: freddie + + externalClusters: + - name: freddie + connectionParameters: + host: freddie-rw.default.svc + user: app + dbname: app + password: + name: freddie-app + key: password +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: freddie-to-king-subscription +spec: + cluster: + name: king + dbname: app + name: subscriber + externalClusterName: freddie + publicationName: publisher +``` + +Once the `king` cluster is running, you can verify that the replication is +working by connecting to the `app` database and counting the records in the `n` +table. The following example uses the `psql` command provided by the `cnpg` +plugin for simplicity: + +```console +kubectl cnpg psql king -- app -qAt -c 'SELECT count(*) FROM n' +10000 +``` + +This command should return `10000`, confirming that the data from the `freddie` +cluster has been successfully replicated to the `king` cluster. + +Using the `cnpg` plugin, you can also synchronize existing sequences to ensure +consistency between the publisher and subscriber. 
The example below +demonstrates how to synchronize a sequence for the `king` cluster: + +```console +kubectl cnpg subscription sync-sequences king --subscription=subscriber +SELECT setval('"public"."n_i_seq"', 10000); + +10000 +``` + +This command updates the sequence `n_i_seq` in the `king` cluster to match the +current value, ensuring it is in sync with the source database. diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index 3ef8b0f5d8..5008d33b8c 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -590,6 +590,15 @@ and makes the use of the underlying PostgreSQL resources more efficient. Instead of connecting directly to a PostgreSQL service, applications can now connect to the PgBouncer service and start reusing any existing connection. +### Logical Replication + +CloudNativePG supports PostgreSQL's logical replication in a declarative manner +using `Publication` and `Subscription` custom resource definitions. + +Logical replication is particularly useful together with the import facility +for online data migrations (even from public DBaaS solutions) and major +PostgreSQL upgrades. + ## Level 4: Deep insights Capability level 4 is about *observability*: monitoring, diff --git a/docs/src/replication.md b/docs/src/replication.md index fac1db21c6..ff9f0f1f41 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -37,10 +37,10 @@ recovery. PostgreSQL 9.0 (2010) introduced WAL streaming and read-only replicas through *hot standby*. In 2011, PostgreSQL 9.1 brought synchronous replication at the transaction level, supporting RPO=0 clusters. Cascading replication was added in -PostgreSQL 9.2 (2012). The foundations for logical replication were established -in PostgreSQL 9.4 (2014), and version 10 (2017) introduced native support for -the publisher/subscriber pattern to replicate data from an origin to a -destination. The table below summarizes these milestones. +PostgreSQL 9.2 (2012). The foundations for [logical replication](logical_replication.md) +were established in PostgreSQL 9.4 (2014), and version 10 (2017) introduced +native support for the publisher/subscriber pattern to replicate data from an +origin to a destination. The table below summarizes these milestones. | Version | Year | Feature | |:-------:|:----:|-----------------------------------------------------------------------| diff --git a/docs/src/samples/cluster-example-logical-destination.yaml b/docs/src/samples/cluster-example-logical-destination.yaml index 75cb3f2af2..e8a2f574f9 100644 --- a/docs/src/samples/cluster-example-logical-destination.yaml +++ b/docs/src/samples/cluster-example-logical-destination.yaml @@ -22,12 +22,20 @@ spec: - name: cluster-example connectionParameters: host: cluster-example-rw.default.svc - # We're using the superuser to allow the publication to be - # created directly when connected to the target server. - # See cluster-example-logical-source.yaml for more information - # about this. 
- user: postgres + user: app dbname: app password: - name: cluster-example-superuser + name: cluster-example-app key: password +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: cluster-example-dest-sub +spec: + cluster: + name: cluster-example-dest + name: sub + dbname: app + publicationName: pub + externalClusterName: cluster-example diff --git a/docs/src/samples/cluster-example-logical-source.yaml b/docs/src/samples/cluster-example-logical-source.yaml index ad9f888353..95bac8cd82 100644 --- a/docs/src/samples/cluster-example-logical-source.yaml +++ b/docs/src/samples/cluster-example-logical-source.yaml @@ -5,7 +5,7 @@ metadata: spec: instances: 1 - imageName: ghcr.io/cloudnative-pg/postgresql:13 + imageName: ghcr.io/cloudnative-pg/postgresql:16 storage: size: 1Gi @@ -15,18 +15,30 @@ spec: postInitApplicationSQL: - CREATE TABLE numbers (i SERIAL PRIMARY KEY, m INTEGER) - INSERT INTO numbers (m) (SELECT generate_series(1,10000)) - - ALTER TABLE numbers OWNER TO app; + - ALTER TABLE numbers OWNER TO app - CREATE TABLE numbers_two (i SERIAL PRIMARY KEY, m INTEGER) - INSERT INTO numbers_two (m) (SELECT generate_series(1,10000)) - - ALTER TABLE numbers_two OWNER TO app; - - CREATE TABLE numbers_three (i SERIAL PRIMARY KEY, m INTEGER) - - INSERT INTO numbers_three (m) (SELECT generate_series(1,10000)) - - ALTER TABLE numbers_three OWNER TO app; - - enableSuperuserAccess: true + - ALTER TABLE numbers_two OWNER TO app + - CREATE SCHEMA another_schema + - ALTER SCHEMA another_schema OWNER TO app + - CREATE TABLE another_schema.numbers_three (i SERIAL PRIMARY KEY, m INTEGER) + - INSERT INTO another_schema.numbers_three (m) (SELECT generate_series(1,10000)) + - ALTER TABLE another_schema.numbers_three OWNER TO app managed: roles: - name: app login: true replication: true +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: cluster-example-pub +spec: + name: pub + dbname: app + cluster: + name: cluster-example + target: + allTables: true diff --git a/docs/src/samples/publication-example-objects.yaml b/docs/src/samples/publication-example-objects.yaml new file mode 100644 index 0000000000..2cc68a5296 --- /dev/null +++ b/docs/src/samples/publication-example-objects.yaml @@ -0,0 +1,16 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: publication-example-objects +spec: + cluster: + name: cluster-example + name: pub-objects + dbname: app + target: + objects: + - tablesInSchema: public + - table: + schema: another_schema + name: numbers_three + only: true diff --git a/docs/src/samples/publication-example.yaml b/docs/src/samples/publication-example.yaml new file mode 100644 index 0000000000..d2df4bc3f2 --- /dev/null +++ b/docs/src/samples/publication-example.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: publication-example +spec: + cluster: + name: cluster-example + name: pub-all + dbname: app + target: + allTables: true diff --git a/docs/src/samples/subscription-example.yaml b/docs/src/samples/subscription-example.yaml new file mode 100644 index 0000000000..6392d71830 --- /dev/null +++ b/docs/src/samples/subscription-example.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: subscription-sample +spec: + name: sub + dbname: app + publicationName: pub-all + cluster: + name: cluster-example-dest + externalClusterName: cluster-example diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index 
066360ddaa..e02d06d55c 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -165,6 +165,16 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { instance.GetNamespaceName(): {}, }, }, + &apiv1.Publication{}: { + Namespaces: map[string]cache.Config{ + instance.GetNamespaceName(): {}, + }, + }, + &apiv1.Subscription{}: { + Namespaces: map[string]cache.Config{ + instance.GetNamespaceName(): {}, + }, + }, }, }, // We don't need a cache for secrets and configmap, as all reloads @@ -215,6 +225,20 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { return err } + // database publication reconciler + publicationReconciler := controller.NewPublicationReconciler(mgr, instance) + if err := publicationReconciler.SetupWithManager(mgr); err != nil { + contextLogger.Error(err, "unable to create publication controller") + return err + } + + // database subscription reconciler + subscriptionReconciler := controller.NewSubscriptionReconciler(mgr, instance) + if err := subscriptionReconciler.SetupWithManager(mgr); err != nil { + contextLogger.Error(err, "unable to create subscription controller") + return err + } + // postgres CSV logs handler (PGAudit too) postgresLogPipe := logpipe.NewLogPipe() if err := mgr.Add(postgresLogPipe); err != nil { diff --git a/internal/cmd/plugin/logical/publication/create/publication.go b/internal/cmd/plugin/logical/publication/create/publication.go index 9a6be06362..964f35c578 100644 --- a/internal/cmd/plugin/logical/publication/create/publication.go +++ b/internal/cmd/plugin/logical/publication/create/publication.go @@ -56,7 +56,7 @@ type PublicationTarget interface { ToPublicationTargetSQL() string } -// PublicationTargetALLTables will publicate all tables +// PublicationTargetALLTables will publish all tables type PublicationTargetALLTables struct{} // ToPublicationTargetSQL implements the PublicationTarget interface @@ -64,7 +64,7 @@ func (PublicationTargetALLTables) ToPublicationTargetSQL() string { return "FOR ALL TABLES" } -// PublicationTargetPublicationObjects publicates multiple publication objects +// PublicationTargetPublicationObjects publishes multiple publication objects type PublicationTargetPublicationObjects struct { PublicationObjects []PublicationObject } @@ -85,15 +85,15 @@ func (objs *PublicationTargetPublicationObjects) ToPublicationTargetSQL() string return result } -// PublicationObject represent an object to publicate +// PublicationObject represent an object to publish type PublicationObject interface { - // Create the SQL statement to publicate this object + // ToPublicationObjectSQL creates the SQL statement to publish this object ToPublicationObjectSQL() string } -// PublicationObjectSchema will publicate all the tables in a certain schema +// PublicationObjectSchema will publish all the tables in a certain schema type PublicationObjectSchema struct { - // The schema to publicate + // The schema to publish SchemaName string } @@ -102,9 +102,9 @@ func (obj PublicationObjectSchema) ToPublicationObjectSQL() string { return fmt.Sprintf("TABLES IN SCHEMA %s", pgx.Identifier{obj.SchemaName}.Sanitize()) } -// PublicationObjectTableExpression will publicate the passed table expression +// PublicationObjectTableExpression will publish the passed table expression type PublicationObjectTableExpression struct { - // The table expression to publicate + // The table expression to publish TableExpressions []string } diff --git 
a/internal/cmd/plugin/logical/publication/create/publication_test.go b/internal/cmd/plugin/logical/publication/create/publication_test.go index 60081a0b0e..1aafaa1772 100644 --- a/internal/cmd/plugin/logical/publication/create/publication_test.go +++ b/internal/cmd/plugin/logical/publication/create/publication_test.go @@ -22,14 +22,14 @@ import ( ) var _ = Describe("create publication SQL generator", func() { - It("can publicate all tables", func() { + It("can publish all tables", func() { Expect(PublicationCmdBuilder{ PublicationName: "app", PublicationTarget: PublicationTargetALLTables{}, }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR ALL TABLES`)) }) - It("can publicate all tables with custom parameters", func() { + It("can publish all tables with custom parameters", func() { Expect(PublicationCmdBuilder{ PublicationName: "app", PublicationTarget: PublicationTargetALLTables{}, @@ -37,7 +37,7 @@ var _ = Describe("create publication SQL generator", func() { }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR ALL TABLES WITH (publish='insert')`)) }) - It("can publicate a list of tables via multiple publication objects", func() { + It("can publish a list of tables via multiple publication objects", func() { // This is supported from PG 15 Expect(PublicationCmdBuilder{ PublicationName: "app", @@ -54,7 +54,7 @@ var _ = Describe("create publication SQL generator", func() { }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR TABLE a, TABLE b`)) }) - It("can publicate a list of tables via multiple table expressions", func() { + It("can publish a list of tables via multiple table expressions", func() { // This is supported in PG < 15 Expect(PublicationCmdBuilder{ PublicationName: "app", diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index f433ab5d69..2cdd503e82 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -160,10 +160,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct "namespace", req.Namespace, ) } - if err := r.deleteDatabaseFinalizers(ctx, req.NamespacedName); err != nil { + if err := r.deleteFinalizers(ctx, req.NamespacedName); err != nil { contextLogger.Error( err, - "error while deleting finalizers of Databases on the cluster", + "error while deleting finalizers of objects on the cluster", "clusterName", req.Name, "namespace", req.Namespace, ) diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go index 6e88bf429a..0bee4928df 100644 --- a/internal/controller/finalizers_delete.go +++ b/internal/controller/finalizers_delete.go @@ -20,6 +20,8 @@ import ( "context" "github.com/cloudnative-pg/machinery/pkg/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -28,36 +30,80 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) -// deleteDatabaseFinalizers deletes Database object finalizers when the cluster they were in has been deleted -func (r *ClusterReconciler) deleteDatabaseFinalizers(ctx context.Context, namespacedName types.NamespacedName) error { - contextLogger := log.FromContext(ctx) +// ClusterReferrer is an object containing a cluster reference +type ClusterReferrer interface { + GetClusterRef() corev1.LocalObjectReference + client.Object +} - databases := apiv1.DatabaseList{} - if err := r.List(ctx, - &databases, - 
client.InNamespace(namespacedName.Namespace), +// deleteFinalizers deletes object finalizers when the cluster they were in has been deleted +func (r *ClusterReconciler) deleteFinalizers(ctx context.Context, namespacedName types.NamespacedName) error { + if err := r.deleteFinalizersForResource( + ctx, + namespacedName, + &apiv1.DatabaseList{}, + utils.DatabaseFinalizerName, ); err != nil { return err } - for idx := range databases.Items { - database := &databases.Items[idx] + if err := r.deleteFinalizersForResource( + ctx, + namespacedName, + &apiv1.PublicationList{}, + utils.PublicationFinalizerName, + ); err != nil { + return err + } + + return r.deleteFinalizersForResource( + ctx, + namespacedName, + &apiv1.SubscriptionList{}, + utils.SubscriptionFinalizerName, + ) +} + +// deleteFinalizersForResource deletes finalizers for a given resource type +func (r *ClusterReconciler) deleteFinalizersForResource( + ctx context.Context, + namespacedName types.NamespacedName, + list client.ObjectList, + finalizerName string, +) error { + contextLogger := log.FromContext(ctx) + + if err := r.List(ctx, list, client.InNamespace(namespacedName.Namespace)); err != nil { + return err + } + + items, err := meta.ExtractList(list) + if err != nil { + return err + } + + for _, item := range items { + obj, ok := item.(ClusterReferrer) + if !ok { + continue + } - if database.Spec.ClusterRef.Name != namespacedName.Name { + if obj.GetClusterRef().Name != namespacedName.Name { continue } - origDatabase := database.DeepCopy() - if controllerutil.RemoveFinalizer(database, utils.DatabaseFinalizerName) { - contextLogger.Debug("Removing finalizer from database", - "finalizer", utils.DatabaseFinalizerName, "database", database.Name) - if err := r.Patch(ctx, database, client.MergeFrom(origDatabase)); err != nil { + origObj := obj.DeepCopyObject().(ClusterReferrer) + if controllerutil.RemoveFinalizer(obj, finalizerName) { + contextLogger.Debug("Removing finalizer from resource", + "finalizer", finalizerName, "resource", obj.GetName()) + if err := r.Patch(ctx, obj, client.MergeFrom(origObj)); err != nil { contextLogger.Error( err, - "error while removing finalizer from database", - "database", database.Name, - "oldFinalizerList", origDatabase.ObjectMeta.Finalizers, - "newFinalizerList", database.ObjectMeta.Finalizers, + "error while removing finalizer from resource", + "resource", obj.GetName(), + "kind", obj.GetObjectKind().GroupVersionKind().Kind, + "oldFinalizerList", origObj.GetFinalizers(), + "newFinalizerList", obj.GetFinalizers(), ) return err } diff --git a/internal/controller/finalizers_delete_test.go b/internal/controller/finalizers_delete_test.go index 7354f68d83..cc6c0d5651 100644 --- a/internal/controller/finalizers_delete_test.go +++ b/internal/controller/finalizers_delete_test.go @@ -32,7 +32,8 @@ import ( . 
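The refactor above leans on two small pieces: meta.ExtractList, which flattens any client.ObjectList into a []runtime.Object, and the ClusterReferrer interface, which is the only contract a type must satisfy to join the generic cleanup. Assuming the api/v1 types expose their spec's cluster reference (the real accessors live in the API package; this sketch is illustrative), an implementation amounts to:

// GetClusterRef returns the reference to the owning Cluster,
// satisfying the ClusterReferrer interface.
func (pub *Publication) GetClusterRef() corev1.LocalObjectReference {
	return pub.Spec.ClusterRef
}

With that in place, Database, Publication, and Subscription objects can all be filtered by owning cluster inside deleteFinalizersForResource without any per-type code.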
"github.com/onsi/gomega" ) -var _ = Describe("Database CRD finalizers", func() { +// nolint: dupl +var _ = Describe("CRD finalizers", func() { var ( r ClusterReconciler scheme *runtime.Scheme @@ -88,7 +89,7 @@ var _ = Describe("Database CRD finalizers", func() { cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build() r.Client = cli - err := r.deleteDatabaseFinalizers(ctx, namespacedName) + err := r.deleteFinalizers(ctx, namespacedName) Expect(err).ToNot(HaveOccurred()) for _, db := range databaseList.Items { @@ -123,7 +124,7 @@ var _ = Describe("Database CRD finalizers", func() { cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build() r.Client = cli - err := r.deleteDatabaseFinalizers(ctx, namespacedName) + err := r.deleteFinalizers(ctx, namespacedName) Expect(err).ToNot(HaveOccurred()) database := &apiv1.Database{} @@ -131,4 +132,166 @@ var _ = Describe("Database CRD finalizers", func() { Expect(err).ToNot(HaveOccurred()) Expect(database.Finalizers).To(BeEquivalentTo([]string{utils.DatabaseFinalizerName})) }) + + It("should delete publication finalizers for publications on the cluster", func(ctx SpecContext) { + publicationList := &apiv1.PublicationList{ + Items: []apiv1.Publication{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.PublicationFinalizerName, + }, + Name: "pub-1", + Namespace: "test", + }, + Spec: apiv1.PublicationSpec{ + Name: "pub-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.PublicationFinalizerName, + }, + Name: "pub-2", + Namespace: "test", + }, + Spec: apiv1.PublicationSpec{ + Name: "pub-test-2", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build() + r.Client = cli + err := r.deleteFinalizers(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + for _, pub := range publicationList.Items { + publication := &apiv1.Publication{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&pub), publication) + Expect(err).ToNot(HaveOccurred()) + Expect(publication.Finalizers).To(BeZero()) + } + }) + + It("should not delete publication finalizers for publications in another cluster", func(ctx SpecContext) { + publicationList := &apiv1.PublicationList{ + Items: []apiv1.Publication{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.PublicationFinalizerName, + }, + Name: "pub-1", + Namespace: "test", + }, + Spec: apiv1.PublicationSpec{ + Name: "pub-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "another-cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build() + r.Client = cli + err := r.deleteFinalizers(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + publication := &apiv1.Publication{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&publicationList.Items[0]), publication) + Expect(err).ToNot(HaveOccurred()) + Expect(publication.Finalizers).To(BeEquivalentTo([]string{utils.PublicationFinalizerName})) + }) + + It("should delete subscription finalizers for subscriptions on the cluster", func(ctx SpecContext) { + subscriptionList := &apiv1.SubscriptionList{ + Items: []apiv1.Subscription{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.SubscriptionFinalizerName, + }, + Name: "sub-1", + Namespace: "test", + }, + Spec: apiv1.SubscriptionSpec{ + Name: "sub-test", + 
ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.SubscriptionFinalizerName, + }, + Name: "sub-2", + Namespace: "test", + }, + Spec: apiv1.SubscriptionSpec{ + Name: "sub-test-2", + ClusterRef: corev1.LocalObjectReference{ + Name: "cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build() + r.Client = cli + err := r.deleteFinalizers(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + for _, sub := range subscriptionList.Items { + subscription := &apiv1.Subscription{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&sub), subscription) + Expect(err).ToNot(HaveOccurred()) + Expect(subscription.Finalizers).To(BeZero()) + } + }) + + It("should not delete subscription finalizers for subscriptions in another cluster", func(ctx SpecContext) { + subscriptionList := &apiv1.SubscriptionList{ + Items: []apiv1.Subscription{ + { + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{ + utils.SubscriptionFinalizerName, + }, + Name: "sub-1", + Namespace: "test", + }, + Spec: apiv1.SubscriptionSpec{ + Name: "sub-test", + ClusterRef: corev1.LocalObjectReference{ + Name: "another-cluster", + }, + }, + }, + }, + } + + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build() + r.Client = cli + err := r.deleteFinalizers(ctx, namespacedName) + Expect(err).ToNot(HaveOccurred()) + + subscription := &apiv1.Subscription{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&subscriptionList.Items[0]), subscription) + Expect(err).ToNot(HaveOccurred()) + Expect(subscription.Finalizers).To(BeEquivalentTo([]string{utils.SubscriptionFinalizerName})) + }) }) diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go new file mode 100644 index 0000000000..c0d87aeb97 --- /dev/null +++ b/internal/management/controller/common.go @@ -0,0 +1,111 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
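The finalizer tests above all follow one compact pattern: pre-load a fake client with WithLists, run deleteFinalizers, then re-fetch each object and assert on its finalizers. Distilled into a sketch (the resource and variable names are illustrative):

cli := fake.NewClientBuilder().
	WithScheme(scheme).
	WithLists(publicationList).
	Build()
r.Client = cli
Expect(r.deleteFinalizers(ctx, namespacedName)).To(Succeed())

publication := &apiv1.Publication{}
Expect(cli.Get(ctx, client.ObjectKeyFromObject(&publicationList.Items[0]), publication)).To(Succeed())
Expect(publication.Finalizers).To(BeEmpty())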
+*/
+
+package controller
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"maps"
+	"slices"
+
+	"github.com/jackc/pgx/v5"
+	"github.com/lib/pq"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+)
+
+type markableAsFailed interface {
+	client.Object
+	SetAsFailed(err error)
+}
+
+// markAsFailed marks the reconciliation as failed, recording the
+// corresponding error in the resource status
+func markAsFailed(
+	ctx context.Context,
+	cli client.Client,
+	resource markableAsFailed,
+	err error,
+) error {
+	oldResource := resource.DeepCopyObject().(markableAsFailed)
+	resource.SetAsFailed(err)
+	return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource))
+}
+
+type markableAsUnknown interface {
+	client.Object
+	SetAsUnknown(err error)
+}
+
+// markAsUnknown marks the reconciliation outcome as unknown, recording the
+// corresponding error in the resource status
+func markAsUnknown(
+	ctx context.Context,
+	cli client.Client,
+	resource markableAsUnknown,
+	err error,
+) error {
+	oldResource := resource.DeepCopyObject().(markableAsUnknown)
+	resource.SetAsUnknown(err)
+	return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource))
+}
+
+type markableAsReady interface {
+	client.Object
+	SetAsReady()
+}
+
+// markAsReady marks the reconciliation as succeeded inside the resource
+func markAsReady(
+	ctx context.Context,
+	cli client.Client,
+	resource markableAsReady,
+) error {
+	oldResource := resource.DeepCopyObject().(markableAsReady)
+	resource.SetAsReady()
+
+	return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource))
+}
+
+func getClusterFromInstance(
+	ctx context.Context,
+	cli client.Client,
+	instance instanceInterface,
+) (*apiv1.Cluster, error) {
+	var cluster apiv1.Cluster
+	err := cli.Get(ctx, types.NamespacedName{
+		Name:      instance.GetClusterName(),
+		Namespace: instance.GetNamespaceName(),
+	}, &cluster)
+	return &cluster, err
+}
+
+func toPostgresParameters(parameters map[string]string) string {
+	if len(parameters) == 0 {
+		return ""
+	}
+
+	b := new(bytes.Buffer)
+	for _, key := range slices.Sorted(maps.Keys(parameters)) {
+		// TODO(armru): any alternative to pq.QuoteLiteral?
+		_, _ = fmt.Fprintf(b, "%s = %s, ", pgx.Identifier{key}.Sanitize(), pq.QuoteLiteral(parameters[key]))
+	}
+
+	// prune the trailing `, ` separator
+	return b.String()[:len(b.String())-2]
+}
diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go
index 7fb0b2a6f4..a82f8ce5a1 100644
--- a/internal/management/controller/database_controller.go
+++ b/internal/management/controller/database_controller.go
@@ -26,7 +26,6 @@ import (
 	"github.com/cloudnative-pg/machinery/pkg/log"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -308,18 +307,7 @@ func (r *DatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error {
 
 // GetCluster gets the managed cluster through the client
 func (r *DatabaseReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) {
-	var cluster apiv1.Cluster
-	err := r.Client.Get(ctx,
-		types.NamespacedName{
-			Namespace: r.instance.GetNamespaceName(),
-			Name:      r.instance.GetClusterName(),
-		},
-		&cluster)
-	if err != nil {
-		return nil, err
-	}
-
-	return &cluster, nil
+	return getClusterFromInstance(ctx, r.Client, r.instance)
 }
 
 func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.Database) error {
diff --git a/internal/management/controller/finalizers.go b/internal/management/controller/finalizers.go
new file mode 100644
index 0000000000..ed334d16fb
--- /dev/null
+++ b/internal/management/controller/finalizers.go
@@ -0,0 +1,49 @@
+package controller
+
+import (
+	"context"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+type finalizerReconciler[T client.Object] struct {
+	cli           client.Client
+	finalizerName string
+	onRemoveFunc  func(ctx context.Context, resource T) error
+}
+
+func newFinalizerReconciler[T client.Object](
+	cli client.Client,
+	finalizerName string,
+	onRemoveFunc func(ctx context.Context, resource T) error,
+) *finalizerReconciler[T] {
+	return &finalizerReconciler[T]{
+		cli:           cli,
+		finalizerName: finalizerName,
+		onRemoveFunc:  onRemoveFunc,
+	}
+}
+
+func (f finalizerReconciler[T]) reconcile(ctx context.Context, resource T) error {
+	// add the finalizer to non-deleted resources if not already present
+	if resource.GetDeletionTimestamp().IsZero() {
+		if !controllerutil.AddFinalizer(resource, f.finalizerName) {
+			return nil
+		}
+		return f.cli.Update(ctx, resource)
+	}
+
+	// the resource is being deleted, but no finalizer is present: nothing to do
+	if !controllerutil.ContainsFinalizer(resource, f.finalizerName) {
+		return nil
+	}
+
+	if err := f.onRemoveFunc(ctx, resource); err != nil {
+		return err
+	}
+
+	// remove our finalizer from the list and update it.
+	controllerutil.RemoveFinalizer(resource, f.finalizerName)
+	return f.cli.Update(ctx, resource)
+}
diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go
index b1c01130d7..426f85fd14 100644
--- a/internal/management/controller/manager.go
+++ b/internal/management/controller/manager.go
@@ -82,18 +82,7 @@ func (r *InstanceReconciler) Instance() *postgres.Instance {
 
 // GetCluster gets the managed cluster through the client
 func (r *InstanceReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) {
-	var cluster apiv1.Cluster
-	err := r.GetClient().Get(ctx,
-		types.NamespacedName{
-			Namespace: r.instance.GetNamespaceName(),
-			Name:      r.instance.GetClusterName(),
-		},
-		&cluster)
-	if err != nil {
-		return nil, err
-	}
-
-	return &cluster, nil
+	return getClusterFromInstance(ctx, r.client, r.instance)
 }
 
 // GetSecret will get a named secret in the instance namespace
diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go
new file mode 100644
index 0000000000..f9d1bc8bd9
--- /dev/null
+++ b/internal/management/controller/publication_controller.go
@@ -0,0 +1,178 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// PublicationReconciler reconciles a Publication object
+type PublicationReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+
+	instance            *postgres.Instance
+	finalizerReconciler *finalizerReconciler[*apiv1.Publication]
+}
+
+// publicationReconciliationInterval is the time between two
+// consecutive reconciliation attempts of a publication
+const publicationReconciliationInterval = 30 * time.Second
+
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=publications,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=publications/status,verbs=get;update;patch
+
+// Reconcile is part of the main kubernetes reconciliation loop, which aims to
+// move the current state of the cluster closer to the desired state. It
+// compares the state specified by the Publication object against the actual
+// state of the publication in PostgreSQL, and performs the operations needed
+// to align them.
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile +func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + contextLogger.Debug("Reconciliation loop start") + defer func() { + contextLogger.Debug("Reconciliation loop end") + }() + + // Get the publication object + var publication apiv1.Publication + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: req.Namespace, + Name: req.Name, + }, &publication); err != nil { + contextLogger.Trace("Could not fetch Publication", "error", err) + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // This is not for me! + if publication.Spec.ClusterRef.Name != r.instance.GetClusterName() { + contextLogger.Trace("Publication is not for this cluster", + "cluster", publication.Spec.ClusterRef.Name, + "expected", r.instance.GetClusterName(), + ) + return ctrl.Result{}, nil + } + + // If everything is reconciled, we're done here + if publication.Generation == publication.Status.ObservedGeneration { + return ctrl.Result{}, nil + } + + // Fetch the Cluster from the cache + cluster, err := r.GetCluster(ctx) + if err != nil { + return ctrl.Result{}, markAsFailed(ctx, r.Client, &publication, fmt.Errorf("while fetching the cluster: %w", err)) + } + + // Still not for me, we're waiting for a switchover + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil + } + + // This is not for me, at least now + if cluster.Status.CurrentPrimary != r.instance.GetPodName() { + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil + } + + // Cannot do anything on a replica cluster + if cluster.IsReplica() { + if err := markAsUnknown(ctx, r.Client, &publication, errClusterIsReplica); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil + } + + if err := r.finalizerReconciler.reconcile(ctx, &publication); err != nil { + return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err) + } + if !publication.GetDeletionTimestamp().IsZero() { + return ctrl.Result{}, nil + } + + if err := r.alignPublication(ctx, &publication); err != nil { + if err := markAsFailed(ctx, r.Client, &publication, err); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil + } + + if err := markAsReady(ctx, r.Client, &publication); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil +} + +func (r *PublicationReconciler) evaluateDropPublication(ctx context.Context, pub *apiv1.Publication) error { + if pub.Spec.ReclaimPolicy != apiv1.PublicationReclaimDelete { + return nil + } + db, err := r.instance.ConnectionPool().Connection(pub.Spec.DBName) + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) + } + + return executeDropPublication(ctx, db, pub.Spec.Name) +} + +// NewPublicationReconciler creates a new publication reconciler +func NewPublicationReconciler( + mgr manager.Manager, + instance *postgres.Instance, +) *PublicationReconciler { + pr := &PublicationReconciler{ + Client: mgr.GetClient(), + instance: instance, + } + + pr.finalizerReconciler = newFinalizerReconciler( + mgr.GetClient(), + utils.PublicationFinalizerName, + pr.evaluateDropPublication, + ) + + return pr +} + 
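Note how all finalizer bookkeeping is delegated to the generic finalizerReconciler: the controller only supplies a finalizer name and an onRemove hook that runs before the finalizer is dropped. A minimal sketch of wiring the same helper to another resource, mirroring what NewSubscriptionReconciler does further down:

fr := newFinalizerReconciler(
	mgr.GetClient(),
	utils.SubscriptionFinalizerName,
	func(ctx context.Context, sub *apiv1.Subscription) error {
		// Clean up the PostgreSQL-side object here, before the
		// finalizer is removed and the resource disappears.
		return nil
	},
)
// Then, inside Reconcile:
//   if err := fr.reconcile(ctx, &subscription); err != nil { ... }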
+// SetupWithManager sets up the controller with the Manager. +func (r *PublicationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&apiv1.Publication{}). + Named("instance-publication"). + Complete(r) +} + +// GetCluster gets the managed cluster through the client +func (r *PublicationReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) { + return getClusterFromInstance(ctx, r.Client, r.instance) +} diff --git a/internal/management/controller/publication_controller_sql.go b/internal/management/controller/publication_controller_sql.go new file mode 100644 index 0000000000..e179e71bf2 --- /dev/null +++ b/internal/management/controller/publication_controller_sql.go @@ -0,0 +1,193 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/jackc/pgx/v5" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +func (r *PublicationReconciler) alignPublication(ctx context.Context, obj *apiv1.Publication) error { + db, err := r.instance.ConnectionPool().Connection(obj.Spec.DBName) + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) + } + + row := db.QueryRowContext( + ctx, + ` + SELECT count(*) + FROM pg_publication + WHERE pubname = $1 + `, + obj.Spec.Name) + if row.Err() != nil { + return fmt.Errorf("while getting publication status: %w", row.Err()) + } + + var count int + if err := row.Scan(&count); err != nil { + return fmt.Errorf("while getting publication status (scan): %w", err) + } + + if count > 0 { + if err := r.patchPublication(ctx, db, obj); err != nil { + return fmt.Errorf("while patching publication: %w", err) + } + return nil + } + + if err := r.createPublication(ctx, db, obj); err != nil { + return fmt.Errorf("while creating publication: %w", err) + } + + return nil +} + +func (r *PublicationReconciler) patchPublication( + ctx context.Context, + db *sql.DB, + obj *apiv1.Publication, +) error { + sqls := toPublicationAlterSQL(obj) + for _, sqlQuery := range sqls { + if _, err := db.ExecContext(ctx, sqlQuery); err != nil { + return err + } + } + + return nil +} + +func (r *PublicationReconciler) createPublication( + ctx context.Context, + db *sql.DB, + obj *apiv1.Publication, +) error { + sqlQuery := toPublicationCreateSQL(obj) + _, err := db.ExecContext(ctx, sqlQuery) + return err +} + +func toPublicationCreateSQL(obj *apiv1.Publication) string { + createQuery := fmt.Sprintf( + "CREATE PUBLICATION %s %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + toPublicationTargetSQL(&obj.Spec.Target), + ) + if len(obj.Spec.Parameters) > 0 { + createQuery = fmt.Sprintf("%s WITH (%s)", createQuery, toPostgresParameters(obj.Spec.Parameters)) + } + + return createQuery +} + +func toPublicationAlterSQL(obj *apiv1.Publication) []string { + result := make([]string, 0, 2) + + if len(obj.Spec.Target.Objects) > 0 { + result = append(result, + fmt.Sprintf( + "ALTER PUBLICATION %s SET 
%s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + toPublicationTargetObjectsSQL(&obj.Spec.Target), + ), + ) + } + + if len(obj.Spec.Parameters) > 0 { + result = append(result, + fmt.Sprintf( + "ALTER PUBLICATION %s SET (%s)", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + toPostgresParameters(obj.Spec.Parameters), + ), + ) + } + + return result +} + +func executeDropPublication(ctx context.Context, db *sql.DB, name string) error { + if _, err := db.ExecContext( + ctx, + fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{name}.Sanitize()), + ); err != nil { + return fmt.Errorf("while dropping publication: %w", err) + } + + return nil +} + +func toPublicationTargetSQL(obj *apiv1.PublicationTarget) string { + if obj.AllTables { + return "FOR ALL TABLES" + } + + result := toPublicationTargetObjectsSQL(obj) + if len(result) > 0 { + result = fmt.Sprintf("FOR %s", result) + } + return result +} + +func toPublicationTargetObjectsSQL(obj *apiv1.PublicationTarget) string { + result := "" + for _, object := range obj.Objects { + if len(result) > 0 { + result += ", " + } + result += toPublicationObjectSQL(&object) + } + + return result +} + +func toPublicationObjectSQL(obj *apiv1.PublicationTargetObject) string { + if len(obj.TablesInSchema) > 0 { + return fmt.Sprintf("TABLES IN SCHEMA %s", pgx.Identifier{obj.TablesInSchema}.Sanitize()) + } + + result := strings.Builder{} + result.WriteString("TABLE ") + + if obj.Table.Only { + result.WriteString("ONLY ") + } + + if len(obj.Table.Schema) > 0 { + result.WriteString(fmt.Sprintf("%s.", pgx.Identifier{obj.Table.Schema}.Sanitize())) + } + + result.WriteString(pgx.Identifier{obj.Table.Name}.Sanitize()) + + if len(obj.Table.Columns) > 0 { + sanitizedColumns := make([]string, 0, len(obj.Table.Columns)) + for _, column := range obj.Table.Columns { + sanitizedColumns = append(sanitizedColumns, pgx.Identifier{column}.Sanitize()) + } + result.WriteString(fmt.Sprintf(" (%s)", strings.Join(sanitizedColumns, ", "))) + } + + return result.String() +} diff --git a/internal/management/controller/publication_controller_sql_test.go b/internal/management/controller/publication_controller_sql_test.go new file mode 100644 index 0000000000..b993b93576 --- /dev/null +++ b/internal/management/controller/publication_controller_sql_test.go @@ -0,0 +1,225 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// nolint: dupl +package controller + +import ( + "database/sql" + "fmt" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/jackc/pgx/v5" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("publication sql", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + ) + + BeforeEach(func() { + var err error + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + It("drops the publication successfully", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{"publication_name"}.Sanitize())). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := executeDropPublication(ctx, db, "publication_name") + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns an error when dropping the publication fails", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", + pgx.Identifier{"publication_name"}.Sanitize())). + WillReturnError(fmt.Errorf("drop publication error")) + + err := executeDropPublication(ctx, db, "publication_name") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("while dropping publication: drop publication error")) + }) + + It("sanitizes the publication name correctly", func(ctx SpecContext) { + dbMock.ExpectExec( + fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pgx.Identifier{"sanitized_name"}.Sanitize())). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := executeDropPublication(ctx, db, "sanitized_name") + Expect(err).ToNot(HaveOccurred()) + }) + + It("generates correct SQL for altering publication with target objects", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Target: apiv1.PublicationTarget{ + Objects: []apiv1.PublicationTargetObject{ + {TablesInSchema: "public"}, + }, + }, + }, + } + + sqls := toPublicationAlterSQL(obj) + Expect(sqls).To(ContainElement(`ALTER PUBLICATION "test_pub" SET TABLES IN SCHEMA "public"`)) + }) + + It("generates correct SQL for altering publication with parameters", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Parameters: map[string]string{ + "param1": "value1", + "param2": "value2", + }, + }, + } + + sqls := toPublicationAlterSQL(obj) + Expect(sqls).To(ContainElement(`ALTER PUBLICATION "test_pub" SET ("param1" = 'value1', "param2" = 'value2')`)) + }) + + It("returns empty SQL list when no alterations are needed", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + }, + } + + sqls := toPublicationAlterSQL(obj) + Expect(sqls).To(BeEmpty()) + }) + + It("generates correct SQL for creating publication with target schema", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Target: apiv1.PublicationTarget{ + Objects: []apiv1.PublicationTargetObject{ + {TablesInSchema: "public"}, + }, + }, + }, + } + + sql := toPublicationCreateSQL(obj) + Expect(sql).To(Equal(`CREATE PUBLICATION "test_pub" FOR TABLES IN SCHEMA "public"`)) + }) + + It("generates correct SQL for creating publication with target table", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Target: apiv1.PublicationTarget{ + Objects: []apiv1.PublicationTargetObject{ + {Table: &apiv1.PublicationTargetTable{Name: "table", Schema: "test", Columns: []string{"a", "b"}}}, + }, + }, + }, + } + + sql := toPublicationCreateSQL(obj) + Expect(sql).To(Equal(`CREATE PUBLICATION "test_pub" FOR TABLE "test"."table" ("a", "b")`)) + }) + + It("generates correct SQL for creating 
publication with parameters", func() { + obj := &apiv1.Publication{ + Spec: apiv1.PublicationSpec{ + Name: "test_pub", + Parameters: map[string]string{ + "param1": "value1", + "param2": "value2", + }, + Target: apiv1.PublicationTarget{ + Objects: []apiv1.PublicationTargetObject{ + {TablesInSchema: "public"}, + }, + }, + }, + } + + sql := toPublicationCreateSQL(obj) + Expect(sql).To(Equal( + `CREATE PUBLICATION "test_pub" FOR TABLES IN SCHEMA "public" WITH ("param1" = 'value1', "param2" = 'value2')`, + )) + }) +}) + +var _ = Describe("toPublicationObjectSQL", func() { + It("returns correct SQL for tables in schema", func() { + obj := &apiv1.PublicationTargetObject{ + TablesInSchema: "public", + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLES IN SCHEMA "public"`)) + }) + + It("returns correct SQL for table with schema and columns", func() { + obj := &apiv1.PublicationTargetObject{ + Table: &apiv1.PublicationTargetTable{ + Name: "table", + Schema: "test", + Columns: []string{"a", "b"}, + }, + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLE "test"."table" ("a", "b")`)) + }) + + It("returns correct SQL for table with only clause", func() { + obj := &apiv1.PublicationTargetObject{ + Table: &apiv1.PublicationTargetTable{ + Name: "table", + Only: true, + }, + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLE ONLY "table"`)) + }) + + It("returns correct SQL for table without schema and columns", func() { + obj := &apiv1.PublicationTargetObject{ + Table: &apiv1.PublicationTargetTable{ + Name: "table", + }, + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLE "table"`)) + }) + + It("returns correct SQL for table with schema but without columns", func() { + obj := &apiv1.PublicationTargetObject{ + Table: &apiv1.PublicationTargetTable{ + Name: "table", + Schema: "test", + }, + } + result := toPublicationObjectSQL(obj) + Expect(result).To(Equal(`TABLE "test"."table"`)) + }) +}) diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go new file mode 100644 index 0000000000..f1a3af65bf --- /dev/null +++ b/internal/management/controller/subscription_controller.go @@ -0,0 +1,195 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/external"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// SubscriptionReconciler reconciles a Subscription object
+type SubscriptionReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+
+	instance            *postgres.Instance
+	finalizerReconciler *finalizerReconciler[*apiv1.Subscription]
+}
+
+// subscriptionReconciliationInterval is the time between two
+// consecutive reconciliation attempts of a subscription
+const subscriptionReconciliationInterval = 30 * time.Second
+
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=subscriptions,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=subscriptions/status,verbs=get;update;patch
+
+// Reconcile is the subscription reconciliation loop
+func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	contextLogger := log.FromContext(ctx)
+
+	contextLogger.Debug("Reconciliation loop start")
+	defer func() {
+		contextLogger.Debug("Reconciliation loop end")
+	}()
+
+	// Get the subscription object
+	var subscription apiv1.Subscription
+	if err := r.Client.Get(ctx, client.ObjectKey{
+		Namespace: req.Namespace,
+		Name:      req.Name,
+	}, &subscription); err != nil {
+		contextLogger.Trace("Could not fetch Subscription", "error", err)
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	// This is not for me!
+ if subscription.Spec.ClusterRef.Name != r.instance.GetClusterName() { + contextLogger.Trace("Subscription is not for this cluster", + "cluster", subscription.Spec.ClusterRef.Name, + "expected", r.instance.GetClusterName(), + ) + return ctrl.Result{}, nil + } + + // If everything is reconciled, we're done here + if subscription.Generation == subscription.Status.ObservedGeneration { + return ctrl.Result{}, nil + } + + // Fetch the Cluster from the cache + cluster, err := r.GetCluster(ctx) + if err != nil { + return ctrl.Result{}, markAsFailed(ctx, r.Client, &subscription, fmt.Errorf("while fetching the cluster: %w", err)) + } + + // Still not for me, we're waiting for a switchover + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil + } + + // This is not for me, at least now + if cluster.Status.CurrentPrimary != r.instance.GetPodName() { + return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil + } + + // Cannot do anything on a replica cluster + if cluster.IsReplica() { + if err := markAsUnknown(ctx, r.Client, &subscription, errClusterIsReplica); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil + } + + if err := r.finalizerReconciler.reconcile(ctx, &subscription); err != nil { + return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err) + } + if !subscription.GetDeletionTimestamp().IsZero() { + return ctrl.Result{}, nil + } + + // Let's get the connection string + connString, err := getSubscriptionConnectionString( + cluster, + subscription.Spec.ExternalClusterName, + subscription.Spec.PublicationDBName, + ) + if err != nil { + if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil + } + + if err := r.alignSubscription(ctx, &subscription, connString); err != nil { + if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil + } + + if err := markAsReady(ctx, r.Client, &subscription); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil +} + +func (r *SubscriptionReconciler) evaluateDropSubscription(ctx context.Context, sub *apiv1.Subscription) error { + if sub.Spec.ReclaimPolicy != apiv1.SubscriptionReclaimDelete { + return nil + } + + db, err := r.instance.ConnectionPool().Connection(sub.Spec.DBName) + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) + } + return executeDropSubscription(ctx, db, sub.Spec.Name) +} + +// NewSubscriptionReconciler creates a new subscription reconciler +func NewSubscriptionReconciler( + mgr manager.Manager, + instance *postgres.Instance, +) *SubscriptionReconciler { + sr := &SubscriptionReconciler{Client: mgr.GetClient(), instance: instance} + sr.finalizerReconciler = newFinalizerReconciler( + mgr.GetClient(), + utils.SubscriptionFinalizerName, + sr.evaluateDropSubscription, + ) + + return sr +} + +// SetupWithManager sets up the controller with the Manager +func (r *SubscriptionReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&apiv1.Subscription{}). + Named("instance-subscription"). 
+ Complete(r) +} + +// GetCluster gets the managed cluster through the client +func (r *SubscriptionReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, error) { + return getClusterFromInstance(ctx, r.Client, r.instance) +} + +// getSubscriptionConnectionString gets the connection string to be used to connect to +// the specified external cluster, while connected to a pod of the specified cluster +func getSubscriptionConnectionString( + cluster *apiv1.Cluster, + externalClusterName string, + databaseName string, +) (string, error) { + externalCluster, ok := cluster.ExternalCluster(externalClusterName) + if !ok { + return "", fmt.Errorf("externalCluster '%s' not declared in cluster %s", externalClusterName, cluster.Name) + } + + return external.GetServerConnectionString(&externalCluster, databaseName), nil +} diff --git a/internal/management/controller/subscription_controller_sql.go b/internal/management/controller/subscription_controller_sql.go new file mode 100644 index 0000000000..47f9f945df --- /dev/null +++ b/internal/management/controller/subscription_controller_sql.go @@ -0,0 +1,150 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + + "github.com/jackc/pgx/v5" + "github.com/lib/pq" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +func (r *SubscriptionReconciler) alignSubscription( + ctx context.Context, + obj *apiv1.Subscription, + connString string, +) error { + db, err := r.instance.ConnectionPool().Connection(obj.Spec.DBName) + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) + } + + row := db.QueryRowContext( + ctx, + ` + SELECT count(*) + FROM pg_subscription + WHERE subname = $1 + `, + obj.Spec.Name) + if row.Err() != nil { + return fmt.Errorf("while getting subscription status: %w", row.Err()) + } + + var count int + if err := row.Scan(&count); err != nil { + return fmt.Errorf("while getting subscription status (scan): %w", err) + } + + if count > 0 { + if err := r.patchSubscription(ctx, db, obj, connString); err != nil { + return fmt.Errorf("while patching subscription: %w", err) + } + return nil + } + + if err := r.createSubscription(ctx, db, obj, connString); err != nil { + return fmt.Errorf("while creating subscription: %w", err) + } + + return nil +} + +func (r *SubscriptionReconciler) patchSubscription( + ctx context.Context, + db *sql.DB, + obj *apiv1.Subscription, + connString string, +) error { + sqls := toSubscriptionAlterSQL(obj, connString) + for _, sqlQuery := range sqls { + if _, err := db.ExecContext(ctx, sqlQuery); err != nil { + return err + } + } + + return nil +} + +func (r *SubscriptionReconciler) createSubscription( + ctx context.Context, + db *sql.DB, + obj *apiv1.Subscription, + connString string, +) error { + sqlQuery := toSubscriptionCreateSQL(obj, connString) + _, err := db.ExecContext(ctx, sqlQuery) + return err +} + +func toSubscriptionCreateSQL(obj *apiv1.Subscription, connString string) string { + 
createQuery := fmt.Sprintf( + "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + pgx.Identifier{obj.Spec.PublicationName}.Sanitize(), + ) + if len(obj.Spec.Parameters) > 0 { + createQuery = fmt.Sprintf("%s WITH (%s)", createQuery, toPostgresParameters(obj.Spec.Parameters)) + } + + return createQuery +} + +func toSubscriptionAlterSQL(obj *apiv1.Subscription, connString string) []string { + result := make([]string, 0, 3) + + setPublicationSQL := fmt.Sprintf( + "ALTER SUBSCRIPTION %s SET PUBLICATION %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + pgx.Identifier{obj.Spec.PublicationName}.Sanitize(), + ) + + setConnStringSQL := fmt.Sprintf( + "ALTER SUBSCRIPTION %s CONNECTION %s", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + ) + result = append(result, setPublicationSQL, setConnStringSQL) + + if len(obj.Spec.Parameters) > 0 { + result = append(result, + fmt.Sprintf( + "ALTER SUBSCRIPTION %s SET (%s)", + pgx.Identifier{obj.Spec.Name}.Sanitize(), + toPostgresParameters(obj.Spec.Parameters), + ), + ) + } + + return result +} + +func executeDropSubscription(ctx context.Context, db *sql.DB, name string) error { + if _, err := db.ExecContext( + ctx, + fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{name}.Sanitize()), + ); err != nil { + return fmt.Errorf("while dropping subscription: %w", err) + } + + return nil +} diff --git a/internal/management/controller/subscription_controller_sql_test.go b/internal/management/controller/subscription_controller_sql_test.go new file mode 100644 index 0000000000..8afe3019f6 --- /dev/null +++ b/internal/management/controller/subscription_controller_sql_test.go @@ -0,0 +1,169 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// nolint: dupl +package controller + +import ( + "database/sql" + "fmt" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/jackc/pgx/v5" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// nolint: dupl +var _ = Describe("subscription sql", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + ) + + BeforeEach(func() { + var err error + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + It("drops the subscription successfully", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"subscription_name"}.Sanitize())). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := executeDropSubscription(ctx, db, "subscription_name") + Expect(err).ToNot(HaveOccurred()) + }) + + It("returns an error when dropping the subscription fails", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"subscription_name"}.Sanitize())). 
+ WillReturnError(fmt.Errorf("drop subscription error")) + + err := executeDropSubscription(ctx, db, "subscription_name") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("while dropping subscription: drop subscription error")) + }) + + It("sanitizes the subscription name correctly", func(ctx SpecContext) { + dbMock.ExpectExec(fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", pgx.Identifier{"sanitized_name"}.Sanitize())). + WillReturnResult(sqlmock.NewResult(1, 1)) + + err := executeDropSubscription(ctx, db, "sanitized_name") + Expect(err).ToNot(HaveOccurred()) + }) + + It("generates correct SQL for creating subscription with publication and connection string", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + }, + } + connString := "host=localhost user=test dbname=test" + + sql := toSubscriptionCreateSQL(obj, connString) + Expect(sql).To(Equal( + `CREATE SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test' PUBLICATION "test_pub"`)) + }) + + It("generates correct SQL for creating subscription with parameters", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + Parameters: map[string]string{ + "param1": "value1", + "param2": "value2", + }, + }, + } + connString := "host=localhost user=test dbname=test" + + sql := toSubscriptionCreateSQL(obj, connString) + expectedElement := `CREATE SUBSCRIPTION "test_sub" ` + + `CONNECTION 'host=localhost user=test dbname=test' ` + + `PUBLICATION "test_pub" WITH ("param1" = 'value1', "param2" = 'value2')` + Expect(sql).To(Equal(expectedElement)) + }) + + It("returns correct SQL for creating subscription with no owner or parameters", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + }, + } + connString := "host=localhost user=test dbname=test" + + sql := toSubscriptionCreateSQL(obj, connString) + Expect(sql).To(Equal( + `CREATE SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test' PUBLICATION "test_pub"`)) + }) + + It("generates correct SQL for altering subscription with publication and connection string", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + }, + } + connString := "host=localhost user=test dbname=test" + + sqls := toSubscriptionAlterSQL(obj, connString) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) + }) + + It("generates correct SQL for altering subscription with parameters", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + Parameters: map[string]string{ + "param1": "value1", + "param2": "value2", + }, + }, + } + connString := "host=localhost user=test dbname=test" + + sqls := toSubscriptionAlterSQL(obj, connString) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET ("param1" = 'value1', "param2" = 'value2')`)) + }) + + It("returns correct SQL for altering subscription with no owner or parameters", func() { + obj := 
&apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + }, + } + connString := "host=localhost user=test dbname=test" + + sqls := toSubscriptionAlterSQL(obj, connString) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) + }) +}) diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go new file mode 100644 index 0000000000..901ab93f61 --- /dev/null +++ b/internal/management/controller/subscription_controller_test.go @@ -0,0 +1,32 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Conversion of PG parameters from map to string of key/value pairs", func() { + It("returns expected well-formed list", func() { + m := map[string]string{ + "a": "1", "b": "2", + } + res := toPostgresParameters(m) + Expect(res).To(Equal(`"a" = '1', "b" = '2'`)) + }) +}) diff --git a/pkg/specs/roles.go b/pkg/specs/roles.go index f0d9bf4cb1..ac328cc66e 100644 --- a/pkg/specs/roles.go +++ b/pkg/specs/roles.go @@ -154,6 +154,62 @@ func CreateRole(cluster apiv1.Cluster, backupOrigin *apiv1.Backup) rbacv1.Role { "update", }, }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "publications", + }, + Verbs: []string{ + "get", + "update", + "list", + "watch", + }, + ResourceNames: []string{}, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "publications/status", + }, + Verbs: []string{ + "get", + "patch", + "update", + }, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "subscriptions", + }, + Verbs: []string{ + "get", + "update", + "list", + "watch", + }, + ResourceNames: []string{}, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "subscriptions/status", + }, + Verbs: []string{ + "get", + "patch", + "update", + }, + }, } return rbacv1.Role{ diff --git a/pkg/specs/roles_test.go b/pkg/specs/roles_test.go index 3753a66154..0d3df97d28 100644 --- a/pkg/specs/roles_test.go +++ b/pkg/specs/roles_test.go @@ -165,7 +165,7 @@ var _ = Describe("Roles", func() { serviceAccount := CreateRole(cluster, nil) Expect(serviceAccount.Name).To(Equal(cluster.Name)) Expect(serviceAccount.Namespace).To(Equal(cluster.Namespace)) - Expect(serviceAccount.Rules).To(HaveLen(9)) + Expect(serviceAccount.Rules).To(HaveLen(13)) }) It("should contain every secret of the origin backup and backup configuration of every external cluster", func() { diff --git a/pkg/utils/finalizers.go b/pkg/utils/finalizers.go index 81d958df6d..ba9ed64f16 100644 --- a/pkg/utils/finalizers.go +++ b/pkg/utils/finalizers.go @@ -20,4 +20,12 @@ const ( // DatabaseFinalizerName is the name of the finalizer // 
triggering the deletion of the database DatabaseFinalizerName = MetadataNamespace + "/deleteDatabase" + + // PublicationFinalizerName is the name of the finalizer + // triggering the deletion of the publication + PublicationFinalizerName = MetadataNamespace + "/deletePublication" + + // SubscriptionFinalizerName is the name of the finalizer + // triggering the deletion of the subscription + SubscriptionFinalizerName = MetadataNamespace + "/deleteSubscription" ) diff --git a/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template b/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template new file mode 100644 index 0000000000..1597981714 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/destination-cluster.yaml.template @@ -0,0 +1,48 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: destination-cluster +spec: + instances: 1 + externalClusters: + - name: source-cluster + connectionParameters: + host: source-cluster-rw + user: app + dbname: declarative + port: "5432" + password: + name: source-cluster-app + key: password + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + # Example of rolling update strategy: + # - unsupervised: automated update of the primary once all + # replicas have been upgraded (default) + # - supervised: requires manual supervision to perform + # the switchover of the primary + primaryUpdateStrategy: unsupervised + primaryUpdateMethod: switchover + + bootstrap: + initdb: + database: app + owner: app + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml new file mode 100644 index 0000000000..2a6e122647 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml @@ -0,0 +1,9 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: destination-db-declarative +spec: + name: declarative + owner: app + cluster: + name: destination-cluster diff --git a/tests/e2e/fixtures/declarative_pub_sub/pub.yaml b/tests/e2e/fixtures/declarative_pub_sub/pub.yaml new file mode 100644 index 0000000000..bd09d64014 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/pub.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Publication +metadata: + name: publication-declarative +spec: + name: pub + dbname: declarative + cluster: + name: source-cluster + target: + allTables: true diff --git a/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template b/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template new file mode 100644 index 0000000000..398a6613c8 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/source-cluster.yaml.template @@ -0,0 +1,48 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: source-cluster +spec: + instances: 1 + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + pg_hba: + - hostssl replication app all scram-sha-256 + + managed: + 
roles: + - name: app + ensure: present + login: true + replication: true + + + # Example of rolling update strategy: + # - unsupervised: automated update of the primary once all + # replicas have been upgraded (default) + # - supervised: requires manual supervision to perform + # the switchover of the primary + primaryUpdateStrategy: unsupervised + primaryUpdateMethod: switchover + + bootstrap: + initdb: + database: app + owner: app + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml new file mode 100644 index 0000000000..80d5a4cf27 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml @@ -0,0 +1,9 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: source-db-declarative +spec: + name: declarative + owner: app + cluster: + name: source-cluster diff --git a/tests/e2e/fixtures/declarative_pub_sub/sub.yaml b/tests/e2e/fixtures/declarative_pub_sub/sub.yaml new file mode 100644 index 0000000000..8eb5aabdc4 --- /dev/null +++ b/tests/e2e/fixtures/declarative_pub_sub/sub.yaml @@ -0,0 +1,11 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +metadata: + name: subscription-declarative +spec: + name: sub + dbname: declarative + publicationName: pub + cluster: + name: destination-cluster + externalClusterName: source-cluster diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go new file mode 100644 index 0000000000..3133bd3ef7 --- /dev/null +++ b/tests/e2e/publication_subscription_test.go @@ -0,0 +1,236 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/types" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// - spinning up a cluster, apply a declarative publication/subscription on it + +// Set of tests in which we use the declarative publication and subscription CRDs on an existing cluster +var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePubSub), func() { + const ( + sourceClusterManifest = fixturesDir + "/declarative_pub_sub/source-cluster.yaml.template" + destinationClusterManifest = fixturesDir + "/declarative_pub_sub/destination-cluster.yaml.template" + sourceDatabaseManifest = fixturesDir + "/declarative_pub_sub/source-database.yaml" + destinationDatabaseManifest = fixturesDir + "/declarative_pub_sub/destination-database.yaml" + pubManifest = fixturesDir + "/declarative_pub_sub/pub.yaml" + subManifest = fixturesDir + "/declarative_pub_sub/sub.yaml" + level = tests.Medium + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + Context("in a plain vanilla cluster", Ordered, func() { + const ( + namespacePrefix = "declarative-pub-sub" + dbname = "declarative" + tableName = "test" + ) + var ( + sourceClusterName, destinationClusterName, namespace string + databaseObjectName, pubObjectName, subObjectName string + pub *apiv1.Publication + sub *apiv1.Subscription + err error + ) + + BeforeAll(func() { + // Create a cluster in a namespace we'll delete after the test + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + sourceClusterName, err = env.GetResourceNameFromYAML(sourceClusterManifest) + Expect(err).ToNot(HaveOccurred()) + + destinationClusterName, err = env.GetResourceNameFromYAML(destinationClusterManifest) + Expect(err).ToNot(HaveOccurred()) + + By("setting up source cluster", func() { + AssertCreateCluster(namespace, sourceClusterName, sourceClusterManifest, env) + }) + + By("setting up destination cluster", func() { + AssertCreateCluster(namespace, destinationClusterName, destinationClusterManifest, env) + }) + }) + + assertCreateDatabase := func(namespace, clusterName, databaseManifest, databaseName string) { + databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest) + Expect(err).NotTo(HaveOccurred()) + + By(fmt.Sprintf("applying the %s Database CRD manifest", databaseObjectName), func() { + CreateResourceFromFile(namespace, databaseManifest) + }) + + By(fmt.Sprintf("ensuring the %s Database CRD succeeded reconciliation", databaseObjectName), func() { + databaseObject := &apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseObjectName, + } + + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, databaseNamespacedName, databaseObject) + Expect(err).ToNot(HaveOccurred()) + g.Expect(databaseObject.Status.Applied).Should(HaveValue(BeTrue())) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By(fmt.Sprintf("verifying the %s database has been created", databaseName), func() { + primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + AssertDatabaseExists(primaryPodInfo, databaseName, true) + }) + } + + assertPublicationExists := func(namespace, primaryPod string, pub *apiv1.Publication) { + query := fmt.Sprintf("select count(*) from pg_publication where pubname = '%s'", + pub.Spec.Name) + Eventually(func(g Gomega) { + stdout, _, err := env.ExecQueryInInstancePod( + testUtils.PodLocator{ + Namespace: namespace, + PodName: primaryPod, + }, + 
dbname, + query) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(stdout).Should(ContainSubstring("1"), "expected publication not found") + }, 30).Should(Succeed()) + } + + assertSubscriptionExists := func(namespace, primaryPod string, sub *apiv1.Subscription) { + query := fmt.Sprintf("select count(*) from pg_subscription where subname = '%s'", + sub.Spec.Name) + Eventually(func(g Gomega) { + stdout, _, err := env.ExecQueryInInstancePod( + testUtils.PodLocator{ + Namespace: namespace, + PodName: primaryPod, + }, + dbname, + query) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(stdout).Should(ContainSubstring("1"), "expected subscription not found") + }, 30).Should(Succeed()) + } + + It("can perform logical replication", func() { + assertCreateDatabase(namespace, sourceClusterName, sourceDatabaseManifest, dbname) + + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: sourceClusterName, + DatabaseName: dbname, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + assertCreateDatabase(namespace, destinationClusterName, destinationDatabaseManifest, dbname) + + By("creating an empty table inside the destination database", func() { + query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName) + _, err = testUtils.RunExecOverForward(env, namespace, destinationClusterName, dbname, + apiv1.ApplicationUserSecretSuffix, query) + Expect(err).ToNot(HaveOccurred()) + }) + + By("applying Publication CRD manifest", func() { + CreateResourceFromFile(namespace, pubManifest) + pubObjectName, err = env.GetResourceNameFromYAML(pubManifest) + Expect(err).NotTo(HaveOccurred()) + }) + + By("ensuring the Publication CRD succeeded reconciliation", func() { + // get publication object + pub = &apiv1.Publication{} + pubNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: pubObjectName, + } + + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, pubNamespacedName, pub) + Expect(err).ToNot(HaveOccurred()) + g.Expect(pub.Status.Applied).Should(HaveValue(BeTrue())) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By("verifying new publication has been created", func() { + primaryPodInfo, err := env.GetClusterPrimary(namespace, sourceClusterName) + Expect(err).ToNot(HaveOccurred()) + + assertPublicationExists(namespace, primaryPodInfo.Name, pub) + }) + + By("applying Subscription CRD manifest", func() { + CreateResourceFromFile(namespace, subManifest) + subObjectName, err = env.GetResourceNameFromYAML(subManifest) + Expect(err).NotTo(HaveOccurred()) + }) + + By("ensuring the Subscription CRD succeeded reconciliation", func() { + // get subscription object + sub = &apiv1.Subscription{} + pubNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: subObjectName, + } + + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, pubNamespacedName, sub) + Expect(err).ToNot(HaveOccurred()) + g.Expect(sub.Status.Applied).Should(HaveValue(BeTrue())) + }, 300).WithPolling(10 * time.Second).Should(Succeed()) + }) + + By("verifying new subscription has been created", func() { + primaryPodInfo, err := env.GetClusterPrimary(namespace, destinationClusterName) + Expect(err).ToNot(HaveOccurred()) + + assertSubscriptionExists(namespace, primaryPodInfo.Name, sub) + }) + + By("checking that the data is present inside the destination cluster database", func() { + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: destinationClusterName, + DatabaseName: dbname, + TableName: tableName, + } + 
AssertDataExpectedCount(env, tableLocator, 2) + }) + }) + }) +}) diff --git a/tests/labels.go b/tests/labels.go index 25b2b858b5..98649f2be2 100644 --- a/tests/labels.go +++ b/tests/labels.go @@ -32,6 +32,9 @@ const ( // LabelDeclarativeDatabases is a label for selecting the declarative databases test LabelDeclarativeDatabases = "declarative-databases" + // LabelDeclarativePubSub is a label for selecting the publication / subscription test + LabelDeclarativePubSub = "publication-subscription" + // LabelDisruptive is the string for labelling disruptive tests LabelDisruptive = "disruptive" From 79b10310abc39ca5eb6f93358e8aa6805ba473d9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 19:04:54 +0100 Subject: [PATCH 170/836] chore(deps): update xt0rted/pull-request-comment-branch action to v3 (main) (#6190) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 3f7d561e15..04c3cd31ef 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -145,7 +145,7 @@ jobs: echo "LOG_LEVEL=${LOG_LEVEL}" >> $GITHUB_ENV - name: Resolve Git reference - uses: xt0rted/pull-request-comment-branch@v2 + uses: xt0rted/pull-request-comment-branch@v3 id: refs - name: Create comment From 56a3916a17a3b5957b5034fa1bccc9f836ee74ed Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 28 Nov 2024 10:25:41 +0100 Subject: [PATCH 171/836] perf(restore): add informers and local webserver cache (#6147) Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- internal/cmd/manager/instance/restore/cmd.go | 147 +++++++++++------- internal/cmd/manager/instance/restore/doc.go | 18 +++ .../cmd/manager/instance/restore/restore.go | 105 +++++++++++++ internal/cmd/manager/walrestore/cmd.go | 10 +- internal/cnpi/plugin/client/contracts.go | 5 +- internal/cnpi/plugin/client/wal.go | 8 +- pkg/management/client.go | 9 +- pkg/management/postgres/restore.go | 15 +- 8 files changed, 240 insertions(+), 77 deletions(-) create mode 100644 internal/cmd/manager/instance/restore/doc.go create mode 100644 internal/cmd/manager/instance/restore/restore.go diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go index 26dd30a4d1..120dfef278 100644 --- a/internal/cmd/manager/instance/restore/cmd.go +++ b/internal/cmd/manager/instance/restore/cmd.go @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package restore implements the "instance restore" subcommand of the operator package restore import ( @@ -22,16 +21,22 @@ import ( "errors" "os" - barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" - "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/spf13/cobra" - ctrl "sigs.k8s.io/controller-runtime/pkg/client" - + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" ) // NewCmd creates the "restore" subcommand @@ -44,24 +49,62 @@ func NewCmd() *cobra.Command { cmd := &cobra.Command{ Use: "restore [flags]", SilenceErrors: true, - PreRunE: func(cmd *cobra.Command, _ []string) error { - return management.WaitForGetCluster(cmd.Context(), ctrl.ObjectKey{ - Name: clusterName, - Namespace: namespace, - }) - }, RunE: func(cmd *cobra.Command, _ []string) error { - ctx := cmd.Context() + contextLogger := log.FromContext(cmd.Context()) + + // Canceling this context + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + // Step 1: build the manager + mgr, err := buildManager(clusterName, namespace) + if err != nil { + contextLogger.Error(err, "while building the manager") + return err + } + + // Step 1.1: add the local webserver to the manager + localSrv, err := webserver.NewLocalWebServer( + postgres.NewInstance().WithClusterName(clusterName).WithNamespace(namespace), + mgr.GetClient(), + mgr.GetEventRecorderFor("local-webserver"), + ) + if err != nil { + return err + } + if err = mgr.Add(localSrv); err != nil { + contextLogger.Error(err, "unable to add local webserver runnable") + return err + } + + // Step 2: add the restore process to the manager + restoreProcess := restoreRunnable{ + cli: mgr.GetClient(), + clusterName: clusterName, + namespace: namespace, + pgData: pgData, + pgWal: pgWal, + cancel: cancel, + } + if mgr.Add(&restoreProcess) != nil { + contextLogger.Error(err, "while building the restore process") + return err + } + + // Step 3: start everything + if err := mgr.Start(ctx); err != nil { + contextLogger.Error(err, "restore error") + return err + } - info := postgres.InitInfo{ - ClusterName: clusterName, - Namespace: namespace, - PgData: pgData, - PgWal: pgWal, + if !errors.Is(ctx.Err(), context.Canceled) { + contextLogger.Error(err, "error while recovering backup") + return err } - return restoreSubCommand(ctx, info) + return nil }, + PostRunE: func(cmd *cobra.Command, _ []string) error { if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil { return err @@ -81,42 +124,32 @@ func NewCmd() *cobra.Command { return cmd } -func restoreSubCommand(ctx context.Context, info postgres.InitInfo) error { - contextLogger := log.FromContext(ctx) - err := info.CheckTargetDataDirectory(ctx) - if err != nil { - return err - } - - err = info.Restore(ctx) - if err != 
nil { - contextLogger.Error(err, "Error while restoring a backup") - cleanupDataDirectoryIfNeeded(ctx, err, info.PgData) - return err - } - - contextLogger.Info("restore command execution completed without errors") - - return nil -} - -func cleanupDataDirectoryIfNeeded(ctx context.Context, restoreError error, dataDirectory string) { - contextLogger := log.FromContext(ctx) - - var barmanError *barmanCommand.CloudRestoreError - if !errors.As(restoreError, &barmanError) { - return - } - - if !barmanError.IsRetriable() { - return - } - - contextLogger.Info("Cleaning up data directory", "directory", dataDirectory) - if err := fileutils.RemoveDirectory(dataDirectory); err != nil && !os.IsNotExist(err) { - contextLogger.Error( - err, - "error occurred cleaning up data directory", - "directory", dataDirectory) - } +func buildManager(clusterName string, namespace string) (manager.Manager, error) { + return controllerruntime.NewManager(controllerruntime.GetConfigOrDie(), controllerruntime.Options{ + Scheme: scheme.BuildWithAllKnownScheme(), + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &apiv1.Cluster{}: { + Field: fields.OneTermEqualSelector("metadata.name", clusterName), + Namespaces: map[string]cache.Config{ + namespace: {}, + }, + }, + }, + }, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.Secret{}, + &corev1.ConfigMap{}, + // todo(armru): we should remove the backup endpoints from the local webserver + &apiv1.Backup{}, + }, + }, + }, + LeaderElection: false, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + }) } diff --git a/internal/cmd/manager/instance/restore/doc.go b/internal/cmd/manager/instance/restore/doc.go new file mode 100644 index 0000000000..edb70590d7 --- /dev/null +++ b/internal/cmd/manager/instance/restore/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package restore implements the "instance restore" subcommand of the operator +package restore diff --git a/internal/cmd/manager/instance/restore/restore.go b/internal/cmd/manager/instance/restore/restore.go new file mode 100644 index 0000000000..7c05ea097b --- /dev/null +++ b/internal/cmd/manager/instance/restore/restore.go @@ -0,0 +1,105 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package restore + +import ( + "context" + "errors" + "fmt" + "os" + + barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" + "github.com/cloudnative-pg/machinery/pkg/fileutils" + "github.com/cloudnative-pg/machinery/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +type restoreRunnable struct { + cli client.Client + clusterName string + namespace string + pgData string + pgWal string + cancel context.CancelFunc +} + +func (r *restoreRunnable) Start(ctx context.Context) error { + // we will wait this way for the mgr and informers to be online + if err := management.WaitForGetClusterWithClient(ctx, r.cli, client.ObjectKey{ + Name: r.clusterName, + Namespace: r.namespace, + }); err != nil { + return fmt.Errorf("while waiting for API server connectivity: %w", err) + } + + info := postgres.InitInfo{ + ClusterName: r.clusterName, + Namespace: r.namespace, + PgData: r.pgData, + PgWal: r.pgWal, + } + + if err := restoreSubCommand(ctx, info, r.cli); err != nil { + return fmt.Errorf("while restoring cluster: %s", err) + } + + // the backup was restored correctly and we now ask + // the manager to quit + r.cancel() + return nil +} + +func restoreSubCommand(ctx context.Context, info postgres.InitInfo, cli client.Client) error { + contextLogger := log.FromContext(ctx) + if err := info.CheckTargetDataDirectory(ctx); err != nil { + return err + } + + if err := info.Restore(ctx, cli); err != nil { + contextLogger.Error(err, "Error while restoring a backup") + cleanupDataDirectoryIfNeeded(ctx, err, info.PgData) + return err + } + + contextLogger.Info("restore command execution completed without errors") + + return nil +} + +func cleanupDataDirectoryIfNeeded(ctx context.Context, restoreError error, dataDirectory string) { + contextLogger := log.FromContext(ctx) + + var barmanError *barmanCommand.CloudRestoreError + if !errors.As(restoreError, &barmanError) { + return + } + + if !barmanError.IsRetriable() { + return + } + + contextLogger.Info("Cleaning up data directory", "directory", dataDirectory) + if err := fileutils.RemoveDirectory(dataDirectory); err != nil && !os.IsNotExist(err) { + contextLogger.Error( + err, + "error occurred cleaning up data directory", + "directory", dataDirectory) + } +} diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index 56bcdf447d..db88cb2725 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -118,9 +118,13 @@ func run(ctx context.Context, pgData string, podName string, args []string) erro return fmt.Errorf("failed to get cluster: %w", err) } - if err := restoreWALViaPlugins(ctx, cluster, walName, path.Join(pgData, destinationPath)); err != nil { + walFound, err := restoreWALViaPlugins(ctx, cluster, walName, path.Join(pgData, destinationPath)) + if err != nil { return err } + if walFound { + return nil + } recoverClusterName, recoverEnv, barmanConfiguration, err := GetRecoverConfiguration(cluster, podName) if errors.Is(err, ErrNoBackupConfigured) { @@ -244,7 +248,7 @@ func restoreWALViaPlugins( cluster *apiv1.Cluster, walName string, destinationPathName string, -) error { +) (bool, error) { contextLogger := log.FromContext(ctx) plugins := repository.New() @@ -267,7 +271,7 @@ func restoreWALViaPlugins( ) if err != nil { contextLogger.Error(err, "Error while loading required plugins") - return err + return false, err } defer 
client.Close(ctx) diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go index 7ecf00960e..d136a71d08 100644 --- a/internal/cnpi/plugin/client/contracts.go +++ b/internal/cnpi/plugin/client/contracts.go @@ -126,13 +126,14 @@ type WalCapabilities interface { ) error // RestoreWAL calls the loaded plugins to archive a WAL file. - // This call is a no-op if there's no plugin implementing WAL archiving + // This call returns a boolean indicating if the WAL was restored + // by a plugin and the occurred error. RestoreWAL( ctx context.Context, cluster client.Object, sourceWALName string, destinationFileName string, - ) error + ) (bool, error) } // BackupCapabilities describes a set of behaviour needed to backup diff --git a/internal/cnpi/plugin/client/wal.go b/internal/cnpi/plugin/client/wal.go index c4e1bbcede..81e0234848 100644 --- a/internal/cnpi/plugin/client/wal.go +++ b/internal/cnpi/plugin/client/wal.go @@ -76,14 +76,14 @@ func (data *data) RestoreWAL( cluster client.Object, sourceWALName string, destinationFileName string, -) error { +) (bool, error) { var errorCollector error contextLogger := log.FromContext(ctx) serializedCluster, err := json.Marshal(cluster) if err != nil { - return fmt.Errorf("while serializing %s %s/%s to JSON: %w", + return false, fmt.Errorf("while serializing %s %s/%s to JSON: %w", cluster.GetObjectKind().GroupVersionKind().Kind, cluster.GetNamespace(), cluster.GetName(), err, @@ -114,9 +114,9 @@ func (data *data) RestoreWAL( pluginLogger.Trace("WAL restore via plugin failed, trying next one", "err", err) errorCollector = multierr.Append(errorCollector, err) } else { - return nil + return true, nil } } - return errorCollector + return false, errorCollector } diff --git a/pkg/management/client.go b/pkg/management/client.go index cd35e60485..95105530f6 100644 --- a/pkg/management/client.go +++ b/pkg/management/client.go @@ -140,7 +140,14 @@ func WaitForGetCluster(ctx context.Context, clusterObjectKey client.ObjectKey) e return err } - err = retry.OnError(readinessCheckRetry, resources.RetryAlways, func() error { + return WaitForGetClusterWithClient(ctx, cli, clusterObjectKey) +} + +// WaitForGetClusterWithClient will wait for a successful get cluster to be executed +func WaitForGetClusterWithClient(ctx context.Context, cli client.Client, clusterObjectKey client.ObjectKey) error { + logger := log.FromContext(ctx).WithName("wait-for-get-cluster") + + err := retry.OnError(readinessCheckRetry, resources.RetryAlways, func() error { if err := cli.Get(ctx, clusterObjectKey, &apiv1.Cluster{}); err != nil { logger.Warning("Encountered an error while executing get cluster. 
Will wait and retry", "error", err.Error()) return err diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index c1652107c8..2cdad8c8ea 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -53,7 +53,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" @@ -233,14 +232,10 @@ func (info InitInfo) createBackupObjectForSnapshotRestore( } // Restore restores a PostgreSQL cluster from a backup into the object storage -func (info InitInfo) Restore(ctx context.Context) error { +func (info InitInfo) Restore(ctx context.Context, cli client.Client) error { contextLogger := log.FromContext(ctx) - typedClient, err := management.NewControllerRuntimeClient() - if err != nil { - return err - } - cluster, err := info.loadCluster(ctx, typedClient) + cluster, err := info.loadCluster(ctx, cli) if err != nil { return err } @@ -284,13 +279,13 @@ func (info InitInfo) Restore(ctx context.Context) error { } else { // Before starting the restore we check if the archive destination is safe to use // otherwise, we stop creating the cluster - err = info.checkBackupDestination(ctx, typedClient, cluster) + err = info.checkBackupDestination(ctx, cli, cluster) if err != nil { return err } // If we need to download data from a backup, we do it - backup, env, err := info.loadBackup(ctx, typedClient, cluster) + backup, env, err := info.loadBackup(ctx, cli, cluster) if err != nil { return err } @@ -332,7 +327,7 @@ func (info InitInfo) Restore(ctx context.Context) error { } connectionString, err := external.ConfigureConnectionToServer( - ctx, typedClient, info.Namespace, &server) + ctx, cli, info.Namespace, &server) if err != nil { return err } From e8320138d5338f75ac146f56c41b0b5cbf58daec Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 28 Nov 2024 13:26:46 +0100 Subject: [PATCH 172/836] fix(linter): prepare code for golangci-lint 1.62.2 (#6198) The new linter found new stuff that should have fix before merging the new version into the checks. One of the main changes is ginkgo-linter that now recommends use Succeed() instead of BeNil() Signed-off-by: Jonathan Gonzalez V. 
--- tests/e2e/asserts_test.go | 12 ++++++------ tests/e2e/certificates_test.go | 6 +++--- tests/e2e/logs_test.go | 4 ++-- tests/e2e/managed_services_test.go | 6 +++--- tests/e2e/replica_mode_cluster_test.go | 4 ++-- tests/e2e/rolling_update_test.go | 4 ++-- tests/e2e/storage_expansion_test.go | 2 +- tests/e2e/syncreplicas_test.go | 8 ++++---- tests/utils/backup.go | 2 +- tests/utils/environment.go | 2 +- tests/utils/pod.go | 7 ++----- 11 files changed, 27 insertions(+), 30 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 37c0c0276d..5bdf763c9a 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -916,7 +916,7 @@ func AssertReplicaModeCluster( Eventually(func() error { primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName) return err - }, 30, 3).Should(BeNil()) + }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(primaryReplicaCluster, true) }) @@ -1704,7 +1704,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) scheduledBackupNamespacedName := types.NamespacedName{ Namespace: namespace, Name: scheduledBackupName, @@ -1752,7 +1752,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) scheduledBackupNamespacedName := types.NamespacedName{ Namespace: namespace, Name: scheduledBackupName, @@ -2203,7 +2203,7 @@ func OnlineResizePVC(namespace, clusterName string) { Eventually(func() error { _, _, err := testsUtils.RunUnchecked(cmd) return err - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) } }) By("verifying Cluster storage is expanded", func() { @@ -2259,7 +2259,7 @@ func OfflineResizePVC(namespace, clusterName string, timeout int) { Eventually(func() error { _, _, err := testsUtils.RunUnchecked(cmd) return err - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) } }) By("deleting Pod and PVCs, first replicas then the primary", func() { @@ -2484,7 +2484,7 @@ func CreateResourcesFromFileWithError(namespace, sampleFilePath string) error { func CreateResourceFromFile(namespace, sampleFilePath string) { Eventually(func() error { return CreateResourcesFromFileWithError(namespace, sampleFilePath) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) } // GetYAMLContent opens a .yaml of .template file and returns its content diff --git a/tests/e2e/certificates_test.go b/tests/e2e/certificates_test.go index 5c419acb8a..5cd0f173d7 100644 --- a/tests/e2e/certificates_test.go +++ b/tests/e2e/certificates_test.go @@ -135,7 +135,7 @@ var _ = Describe("Certificates", func() { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) Eventually(func() (bool, error) { certUpdateStatus := false @@ -176,7 +176,7 @@ var _ = Describe("Certificates", func() { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) Eventually(func() (bool, error) { cluster, err := env.GetCluster(namespace, clusterName) @@ -212,7 +212,7 @@ var _ = Describe("Certificates", func() { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) Eventually(func() (bool, error) { cluster, err := env.GetCluster(namespace, clusterName) diff --git a/tests/e2e/logs_test.go b/tests/e2e/logs_test.go index 59a2f11337..7bd3c44259 100644 --- a/tests/e2e/logs_test.go +++ b/tests/e2e/logs_test.go @@ -86,7 +86,7 @@ var _ = 
Describe("JSON log output", Label(tests.LabelObservability), func() { specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", errorTestQuery) return queryError - }, RetryTimeout, PollingTime).ShouldNot(BeNil()) + }, RetryTimeout, PollingTime).ShouldNot(Succeed()) // Eventually the error log line will be logged Eventually(func(g Gomega) bool { @@ -118,7 +118,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { *primaryPod, specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "app", "-tAc", errorTestQuery) return queryError - }, RetryTimeout, PollingTime).ShouldNot(BeNil()) + }, RetryTimeout, PollingTime).ShouldNot(Succeed()) // Expect the query to be eventually logged on the primary Eventually(func() (bool, error) { diff --git a/tests/e2e/managed_services_test.go b/tests/e2e/managed_services_test.go index 68131aded9..ee139aa744 100644 --- a/tests/e2e/managed_services_test.go +++ b/tests/e2e/managed_services_test.go @@ -82,7 +82,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{} return env.Client.Update(ctx, cluster) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env) Eventually(func(g Gomega) { @@ -128,7 +128,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{} return env.Client.Update(ctx, cluster) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env) @@ -189,7 +189,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.Additional[0].ServiceTemplate.ObjectMeta.Labels["new-label"] = "new" return env.Client.Update(ctx, cluster) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) }) By("expecting the service to be recreated", func() { diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index a22f4b98ad..97b94c9781 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -188,7 +188,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Eventually(func() error { clusterOnePrimary, err = env.GetClusterPrimary(namespace, clusterOneName) return err - }, 30, 3).Should(BeNil()) + }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(clusterOnePrimary, true) }) @@ -206,7 +206,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Eventually(func() error { clusterTwoPrimary, err = env.GetClusterPrimary(namespace, clusterTwoName) return err - }, 30, 3).Should(BeNil()) + }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(clusterTwoPrimary, false) }) diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go index ec50ba5119..7b3bde3ae9 100644 --- a/tests/e2e/rolling_update_test.go +++ b/tests/e2e/rolling_update_test.go @@ -118,7 +118,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun cluster.Spec.ImageName = updatedImageName return env.Client.Update(env.Ctx, 
cluster) - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) // All the postgres containers should have the updated image AssertPodsRunOnImage(namespace, clusterName, updatedImageName, cluster.Spec.Instances, timeout) @@ -599,7 +599,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // Wait until we really deleted it Eventually(func() error { return env.Client.Get(env.Ctx, ctrl.ObjectKey{Name: catalog.Name}, catalog) - }, 30).Should(MatchError(apierrs.IsNotFound, metav1.StatusReasonNotFound)) + }, 30).Should(MatchError(apierrs.IsNotFound, string(metav1.StatusReasonNotFound))) }) Context("Three Instances", func() { const ( diff --git a/tests/e2e/storage_expansion_test.go b/tests/e2e/storage_expansion_test.go index 7b27204806..4713dde4c3 100644 --- a/tests/e2e/storage_expansion_test.go +++ b/tests/e2e/storage_expansion_test.go @@ -98,7 +98,7 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { return err } return nil - }, 60, 5).Should(BeNil()) + }, 60, 5).Should(Succeed()) }) OfflineResizePVC(namespace, clusterName, 600) }) diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go index 1b9f47c94b..fcd321874e 100644 --- a/tests/e2e/syncreplicas_test.go +++ b/tests/e2e/syncreplicas_test.go @@ -109,7 +109,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { cluster.Spec.MaxSyncReplicas = 1 return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, 5).Should(BeNil()) + }, RetryTimeout, 5).Should(Succeed()) // Scale the cluster down to 2 pods _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace, @@ -200,7 +200,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = ptr.To(1) cluster.Spec.PostgresConfiguration.Synchronous.Number = 1 return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, 5).Should(BeNil()) + }, RetryTimeout, 5).Should(Succeed()) getSyncReplicationCount(namespace, clusterName, "quorum", 1) compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") @@ -212,7 +212,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.Method = apiv1.SynchronousReplicaConfigurationMethodFirst return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, 5).Should(BeNil()) + }, RetryTimeout, 5).Should(Succeed()) getSyncReplicationCount(namespace, clusterName, "sync", 1) compareSynchronousStandbyNames(namespace, clusterName, "FIRST 1") @@ -226,7 +226,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPre = []string{"preSyncReplica"} cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPost = []string{"postSyncReplica"} return env.Client.Update(env.Ctx, cluster) - }, RetryTimeout, 5).Should(BeNil()) + }, RetryTimeout, 5).Should(Succeed()) compareSynchronousStandbyNames(namespace, clusterName, "FIRST 1 (\"preSyncReplica\"") compareSynchronousStandbyNames(namespace, clusterName, "\"postSyncReplica\")") }) diff --git a/tests/utils/backup.go b/tests/utils/backup.go index e07f20d2a5..9ef2aadd9c 100644 --- a/tests/utils/backup.go +++ b/tests/utils/backup.go @@ -46,7 +46,7 @@ func ExecuteBackup( return fmt.Errorf("could not create backup.\nStdErr: %v\nError: %v", stderr, err) } return nil - 
}, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) backupNamespacedName := types.NamespacedName{ Namespace: namespace, Name: backupName, diff --git a/tests/utils/environment.go b/tests/utils/environment.go index 2596f87186..b93ea595d8 100644 --- a/tests/utils/environment.go +++ b/tests/utils/environment.go @@ -186,7 +186,7 @@ func (env TestingEnvironment) EventuallyExecCommand( return err } return nil - }, RetryTimeout, PollingTime).Should(BeNil()) + }, RetryTimeout, PollingTime).Should(Succeed()) return stdOut, stdErr, err } diff --git a/tests/utils/pod.go b/tests/utils/pod.go index 25841da6a1..e439d0e00f 100644 --- a/tests/utils/pod.go +++ b/tests/utils/pod.go @@ -266,11 +266,8 @@ func (env TestingEnvironment) EventuallyExecQueryInInstancePod( Namespace: podLocator.Namespace, PodName: podLocator.PodName, }, dbname, query) - if err != nil { - return err - } - return nil - }, retryTimeout, pollingTime).Should(BeNil()) + return err + }, retryTimeout, pollingTime).Should(Succeed()) return stdOut, stdErr, err } From fb1554712f23f34c6fd5d3532a1898098f6e3886 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 28 Nov 2024 15:44:14 +0100 Subject: [PATCH 173/836] chore(deps): update dependency golangci/golangci-lint to v1.62.2 (main) (#6182) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 592525397c..db7bbb243e 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -17,7 +17,7 @@ on: # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.23.x" - GOLANGCI_LINT_VERSION: "v1.61.0" + GOLANGCI_LINT_VERSION: "v1.62.2" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.25.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" From 18f1c062b4166556ddbbdfd0684a22c22fd8b822 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 29 Nov 2024 06:53:25 +0100 Subject: [PATCH 174/836] docs: clarify support for PostgreSQL 17 (#6202) Closes #4685 Signed-off-by: Gabriele Bartolini --- docs/src/supported_releases.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index c1bfbd9128..9717ee5cc0 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -79,13 +79,14 @@ Git tags for versions are prefixed with `v`. 
## Support status of CloudNativePG releases -| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | -|-----------------|----------------------|-------------------|---------------------|-------------------------------|---------------------------|-----------------------------| -| 1.24.x | Yes | August 22, 2024 | ~ February, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 121 - 17 | -| 1.23.x | Yes | April 24, 2024 | November 24, 2024 | 1.27, 1.28, 1.29 | 1.30, 1.31 | 121 - 17 | -| main | No, development only | | | | | 121 - 17 | + +| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | +|-----------------|----------------------|---------------------|---------------------|-------------------------------|---------------------------|-----------------------------| +| 1.25.x | Yes | December XX, 2024 | ~ February, 2025 | 1.29, 1.30, 1.31, 1.32 (??) | 1.27, 1.28 | 13 - 17 | +| 1.24.x | Yes | August 22, 2024 | February XX, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 | +| main | No, development only | | | | | 13 - 17 | -1 _PostgreSQL 12 will be supported until November 14, 2024._ + The list of supported Kubernetes versions in the table depends on what the CloudNativePG maintainers think is reasonable to support and to test. @@ -139,6 +140,7 @@ version of PostgreSQL, we might not be able to help you. | Version | Release date | End of life | Compatible Kubernetes versions | |-----------------|-------------------|---------------------|--------------------------------| +| 1.23.x | April 24, 2024 | November 24, 2024 | 1.27, 1.28, 1.29 | | 1.22.x | December 21, 2023 | July 24, 2024 | 1.26, 1.27, 1.28 | | 1.21.x | October 12, 2023 | Jun 12, 2024 | 1.25, 1.26, 1.27, 1.28 | | 1.20.x | April 27, 2023 | January 21, 2024 | 1.24, 1.25, 1.26, 1.27 | From 8b7bd438d747336e027fb281b60ece76a86c4ee7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 29 Nov 2024 15:21:37 +0100 Subject: [PATCH 175/836] fix(deps): update kubernetes patches (main) (#6181) https://github.com/prometheus-operator/prometheus-operator `v0.78.1` -> `v0.78.2` https://github.com/kubernetes/api `v0.31.2` -> `v0.31.3` https://github.com/kubernetes/apiextensions-apiserver `v0.31.2` -> `v0.31.3` https://github.com/kubernetes/apimachinery `v0.31.2` -> `v0.31.3` https://github.com/kubernetes/cli-runtime `v0.31.2` -> `v0.31.3` https://github.com/kubernetes/client-go `v0.31.2` -> `v0.31.3` https://github.com/kubernetes-sigs/controller-runtime `v0.19.1` -> `v0.19.2` --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index f276419c5c..eaa13af67f 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.0 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 @@ -40,13 +40,13 @@ require ( golang.org/x/term v0.26.0 google.golang.org/grpc v1.68.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.31.2 - k8s.io/apiextensions-apiserver v0.31.2 - k8s.io/apimachinery v0.31.2 - k8s.io/cli-runtime v0.31.2 - 
k8s.io/client-go v0.31.2 + k8s.io/api v0.31.3 + k8s.io/apiextensions-apiserver v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/cli-runtime v0.31.3 + k8s.io/client-go v0.31.3 k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 - sigs.k8s.io/controller-runtime v0.19.1 + sigs.k8s.io/controller-runtime v0.19.2 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 317da64330..cf4ed929c2 100644 --- a/go.sum +++ b/go.sum @@ -157,8 +157,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1 h1:Fm9Z+FabnB+6EoGq15j+pyLmaK6hYrYOpBlTzOLTQ+E= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.1/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 h1:SyoVBXD/r0PntR1rprb90ClI32FSUNOCWqqTatnipHM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -278,24 +278,24 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= -k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= -k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= -k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= -k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= -k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/cli-runtime v0.31.2 h1:7FQt4C4Xnqx8V1GJqymInK0FFsoC+fAZtbLqgXYVOLQ= -k8s.io/cli-runtime v0.31.2/go.mod h1:XROyicf+G7rQ6FQJMbeDV9jqxzkWXTYD6Uxd15noe0Q= -k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= -k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE= +k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.31.3 h1:fEQD9Xokir78y7pVK/fCJN090/iYNrLHpFbGU4ul9TI= +k8s.io/cli-runtime v0.31.3/go.mod h1:Q2jkyTpl+f6AtodQvgDI8io3jrfr+Z0LyQBPJJ2Btq8= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go 
v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 h1:jGnCPejIetjiy2gqaJ5V0NLwTpF4wbQ6cZIItJCSHno= k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk= -sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8= +sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= From 28bcc2cc18736705713660e0f711d55d91a89c2c Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 29 Nov 2024 16:01:32 +0100 Subject: [PATCH 176/836] docs: update join slack link (#6217) Closes #6218 Signed-off-by: Jonathan Gonzalez V. --- .github/ISSUE_TEMPLATE/config.yml | 2 +- CONTRIBUTING.md | 2 +- README.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f522006c44..17f1e503e7 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -4,5 +4,5 @@ contact_links: url: https://github.com/cloudnative-pg/cloudnative-pg/discussions about: Please ask and answer questions here. - name: Slack chat - url: https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g + url: https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A about: Please join the slack channel and interact with our community diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c3a18b3d1d..e6b74b5db6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,7 +40,7 @@ For development contributions, please refer to the separate section called ## Ask for Help The best way to reach us with a question when contributing is to drop a line in -our [Slack channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g), or +our [Slack channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A), or start a new Github discussion. ## Raising Issues diff --git a/README.md b/README.md index 43b055e250..8a8e8df0d6 100644 --- a/README.md +++ b/README.md @@ -113,7 +113,7 @@ MariaDB cluster). 
## Communications -- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2ij5hagfo-B04EQ9DUlGFzD6GEHDqE0g) +- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A) - [Github Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions) - [Twitter](https://twitter.com/CloudNativePg) From 6c4dfc933173b70a101aef5c93714439c032dd6c Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Mon, 2 Dec 2024 14:23:58 +0100 Subject: [PATCH 177/836] chore(docs): template for release notes for a new minor (#6229) Signed-off-by: Gabriele Bartolini --- contribute/release-notes-template.md | 70 ++++++++++++++++++++++++++++ contribute/release_procedure.md | 2 + 2 files changed, 72 insertions(+) create mode 100644 contribute/release-notes-template.md diff --git a/contribute/release-notes-template.md b/contribute/release-notes-template.md new file mode 100644 index 0000000000..53501c9492 --- /dev/null +++ b/contribute/release-notes-template.md @@ -0,0 +1,70 @@ + +# Release notes for CloudNativePG 1.XX + +History of user-visible changes in the 1.XX minor release of CloudNativePG. + +For a complete list of changes, please refer to the +[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.XXX +on the release branch in GitHub. + +## Version 1.XX.0-rc1 + +**Release date:** Mon DD, 20YY + +### Important changes: + +- OPTIONAL +- OPTIONAL + +### Features: + +- **MAIN FEATURE #1**: short description +- **MAIN FEATURE #2**: short description + +### Enhancements: + +- Add ... +- Introduce ... +- Allow ... +- Enhance ... +- `cnpg` plugin updates: + - Enhance ... + - Add ... + +### Security: + +- Add ... +- Improve ... + +### Fixes: + +- Enhance ... +- Disable ... +- Gracefully handle ... +- Wait ... +- Fix ... +- Address ... +- `cnpg` plugin: + - ... + - ... + +### Supported versions + +- Kubernetes 1.31, 1.30, and 1.29 +- PostgreSQL 17, 16, 15, 14, and 13 + - PostgreSQL 17.X is the default image + - PostgreSQL 13 support ends on November 12, 2025 diff --git a/contribute/release_procedure.md b/contribute/release_procedure.md index 0e708302e2..15aa72df43 100644 --- a/contribute/release_procedure.md +++ b/contribute/release_procedure.md @@ -71,6 +71,8 @@ activities: update [`docs/src/release_notes.md`](../docs/src/release_notes.md) and [`.github/ISSUE_TEMPLATE/bug.yml`](../.github/ISSUE_TEMPLATE/bug.yml). These changes should go in a PR against `main`, and get maintainer approval. + Look at the template file to get an idea of how to start a new minor release + version document. - **Capabilities page:** in case of a new minor release, ensure that the operator capability levels page in From cfda74bfbc10da77a57b2ebb2685c26d77afdfd8 Mon Sep 17 00:00:00 2001 From: Timo Adler <44780691+Eykha@users.noreply.github.com> Date: Mon, 2 Dec 2024 14:27:38 +0100 Subject: [PATCH 178/836] fix(docs): use correct value for `cnpg.io/skipWalArchiving` (#4848) Signed-off-by: Timo Adler <44780691+Eykha@users.noreply.github.com> --- docs/src/labels_annotations.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 74b9d247ae..8da4514584 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -190,7 +190,7 @@ These predefined annotations are managed by CloudNativePG. risk. `cnpg.io/skipWalArchiving` -: When set to `true` on a `Cluster` resource, the operator disables WAL archiving. 
+: When set to `enabled` on a `Cluster` resource, the operator disables WAL archiving. This will set `archive_mode` to `off` and require a restart of all PostgreSQL instances. Use at your own risk. From fa663f2b74974352bca24c831e93735fbbc21ce8 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Mon, 2 Dec 2024 14:33:15 +0100 Subject: [PATCH 179/836] feat(import): add support for extra `pg_dump` and `pg_restore` options (#6214) This commit introduces two optional parameters in the `.spec.initdb.import` stanza: - `pgDumpExtraOptions` - `pgRestoreExtraOptions` These parameters allow users to specify additional options for the underlying `pg_dump` and `pg_restore` commands, providing greater flexibility when using the database import feature. Additionally, the default file format has been changed from `custom` (`-Fc`) to `directory` (`-Fd`) for improved compatibility and performance (e.g. `--jobs` requires the directory format). Closes #5832 Signed-off-by: Ben Healey Signed-off-by: Gabriele Bartolini Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Ben Healey Co-authored-by: Jonathan Gonzalez V. --- .wordlist-en-custom.txt | 2 + api/v1/cluster_types.go | 14 +++++++ api/v1/zz_generated.deepcopy.go | 10 +++++ .../bases/postgresql.cnpg.io_clusters.yaml | 18 +++++++++ docs/src/cloudnative-pg.v1.md | 20 ++++++++++ docs/src/database_import.md | 39 +++++++++++++++++-- .../cluster-import-snapshot-basicauth.yaml | 4 ++ .../postgres/logicalimport/database.go | 14 ++++++- .../postgres/logicalimport/microservice.go | 30 ++++++++++---- .../postgres/logicalimport/monolith.go | 14 ++++++- tests/utils/import_db.go | 2 + 11 files changed, 152 insertions(+), 15 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index c72a9994a2..fc6ba07aae 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1008,6 +1008,8 @@ pgAdmin pgBouncer pgBouncerIntegration pgBouncerSecrets +pgDumpExtraOptions +pgRestoreExtraOptions pgSQL pgadmin pgaudit diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index c701345216..c841a04954 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1566,6 +1566,20 @@ type Import struct { // `pg_restore` are invoked, avoiding data import. Default: `false`. // +optional SchemaOnly bool `json:"schemaOnly,omitempty"` + + // List of custom options to pass to the `pg_dump` command. IMPORTANT: + // Use these options with caution and at your own risk, as the operator + // does not validate their content. Be aware that certain options may + // conflict with the operator's intended functionality or design. + // +optional + PgDumpExtraOptions []string `json:"pgDumpExtraOptions,omitempty"` + + // List of custom options to pass to the `pg_restore` command. IMPORTANT: + // Use these options with caution and at your own risk, as the operator + // does not validate their content. Be aware that certain options may + // conflict with the operator's intended functionality or design. 
+ // +optional + PgRestoreExtraOptions []string `json:"pgRestoreExtraOptions,omitempty"` } // ImportSource describes the source for the logical snapshot diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 0c367fc416..a0b80fcd9c 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1387,6 +1387,16 @@ func (in *Import) DeepCopyInto(out *Import) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.PgDumpExtraOptions != nil { + in, out := &in.PgDumpExtraOptions, &out.PgDumpExtraOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PgRestoreExtraOptions != nil { + in, out := &in.PgRestoreExtraOptions, &out.PgRestoreExtraOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Import. diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 53bd6f571e..eca57c182e 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -1518,6 +1518,24 @@ spec: items: type: string type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array postImportApplicationSQL: description: |- List of SQL queries to be executed as a superuser in the application diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index c323ced382..2ef5d831f4 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2747,6 +2747,26 @@ database right after is imported - to be used with extreme care pg_restore are invoked, avoiding data import. Default: false.

+pgDumpExtraOptions
+[]string
+List of custom options to pass to the pg_dump command. IMPORTANT:
+Use these options with caution and at your own risk, as the operator
+does not validate their content. Be aware that certain options may
+conflict with the operator's intended functionality or design.
+pgRestoreExtraOptions
+[]string
+List of custom options to pass to the pg_restore command. IMPORTANT:
+Use these options with caution and at your own risk, as the operator
+does not validate their content. Be aware that certain options may
+conflict with the operator's intended functionality or design.
+ + diff --git a/docs/src/database_import.md b/docs/src/database_import.md index 3308b5f6f1..2fc3b4500e 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -73,7 +73,7 @@ performed in 4 steps: - `initdb` bootstrap of the new cluster - export of the selected database (in `initdb.import.databases`) using - `pg_dump -Fc` + `pg_dump -Fd` - import of the database using `pg_restore --no-acl --no-owner` into the `initdb.database` (application database) owned by the `initdb.owner` user - cleanup of the database dump file @@ -145,7 +145,7 @@ There are a few things you need to be aware of when using the `microservice` typ `externalCluster` during the operation - Connection to the source database must be granted with the specified user that needs to run `pg_dump` and read roles information (*superuser* is OK) -- Currently, the `pg_dump -Fc` result is stored temporarily inside the `dumps` +- Currently, the `pg_dump -Fd` result is stored temporarily inside the `dumps` folder in the `PGDATA` volume, so there should be enough available space to temporarily contain the dump result on the assigned node, as well as the restored data and indexes. Once the import operation is completed, this @@ -162,7 +162,7 @@ The operation is performed in the following steps: - `initdb` bootstrap of the new cluster - export and import of the selected roles - export of the selected databases (in `initdb.import.databases`), one at a time, - using `pg_dump -Fc` + using `pg_dump -Fd` - create each of the selected databases and import data using `pg_restore` - run `ANALYZE` on each imported database - cleanup of the database dump files @@ -222,7 +222,7 @@ There are a few things you need to be aware of when using the `monolith` type: - Connection to the source database must be granted with the specified user that needs to run `pg_dump` and retrieve roles information (*superuser* is OK) -- Currently, the `pg_dump -Fc` result is stored temporarily inside the `dumps` +- Currently, the `pg_dump -Fd` result is stored temporarily inside the `dumps` folder in the `PGDATA` volume, so there should be enough available space to temporarily contain the dump result on the assigned node, as well as the restored data and indexes. Once the import operation is completed, this @@ -268,6 +268,37 @@ unnecessary writes in the checkpoint area by tuning Postgres GUCs like `shared_buffers`, `max_wal_size`, `checkpoint_timeout` directly in the `Cluster` configuration. +## Customizing `pg_dump` and `pg_restore` Behavior + +You can customize the behavior of `pg_dump` and `pg_restore` by specifying +additional options using the `pgDumpExtraOptions` and `pgRestoreExtraOptions` +parameters. For instance, you can enable parallel jobs to speed up data +import/export processes, as shown in the following example: + +```yaml + # + bootstrap: + initdb: + import: + type: microservice + databases: + - app + source: + externalCluster: cluster-example + pgDumpExtraOptions: + - '--jobs=2' + pgRestoreExtraOptions: + - '--jobs=2' + # +``` + +!!! Warning + Use the `pgDumpExtraOptions` and `pgRestoreExtraOptions` fields with + caution and at your own risk. These options are not validated or verified by + the operator, and some configurations may conflict with its intended + functionality or behavior. Always test thoroughly in a safe and controlled + environment before applying them in production. 
+ ## Online Import and Upgrades Logical replication offers a powerful way to import any PostgreSQL database diff --git a/docs/src/samples/cluster-import-snapshot-basicauth.yaml b/docs/src/samples/cluster-import-snapshot-basicauth.yaml index 967f23adba..5f6cf6e76b 100644 --- a/docs/src/samples/cluster-import-snapshot-basicauth.yaml +++ b/docs/src/samples/cluster-import-snapshot-basicauth.yaml @@ -13,6 +13,10 @@ spec: - app source: externalCluster: cluster-example + pgDumpExtraOptions: + - '--jobs=2' + pgRestoreExtraOptions: + - '--jobs=2' storage: size: 1Gi externalClusters: diff --git a/pkg/management/postgres/logicalimport/database.go b/pkg/management/postgres/logicalimport/database.go index 82e87ed089..e09adf3e4a 100644 --- a/pkg/management/postgres/logicalimport/database.go +++ b/pkg/management/postgres/logicalimport/database.go @@ -88,6 +88,7 @@ func (ds *databaseSnapshotter) exportDatabases( ctx context.Context, target pool.Pooler, databases []string, + extraOptions []string, ) error { contextLogger := log.FromContext(ctx) sectionsToExport := []string{} @@ -100,12 +101,13 @@ func (ds *databaseSnapshotter) exportDatabases( contextLogger.Info("exporting database", "databaseName", database) dsn := target.GetDsn(database) options := []string{ - "-Fc", + "-Fd", "-f", generateFileNameForDatabase(database), "-d", dsn, "-v", } options = append(options, sectionsToExport...) + options = append(options, extraOptions...) contextLogger.Info("Running pg_dump", "cmd", pgDump, "options", options) @@ -123,6 +125,7 @@ func (ds *databaseSnapshotter) importDatabases( ctx context.Context, target pool.Pooler, databases []string, + extraOptions []string, ) error { contextLogger := log.FromContext(ctx) @@ -156,6 +159,7 @@ func (ds *databaseSnapshotter) importDatabases( generateFileNameForDatabase(database), } + options = append(options, extraOptions...) options = append(options, alwaysPresentOptions...) contextLogger.Info("Running pg_restore", @@ -179,6 +183,7 @@ func (ds *databaseSnapshotter) importDatabaseContent( database string, targetDatabase string, owner string, + extraOptions []string, ) error { contextLogger := log.FromContext(ctx) @@ -204,7 +209,9 @@ func (ds *databaseSnapshotter) importDatabaseContent( "section", section, ) - options := []string{ + var options []string + + alwaysPresentOptions := []string{ "-U", "postgres", "--no-owner", "--no-privileges", @@ -214,6 +221,9 @@ func (ds *databaseSnapshotter) importDatabaseContent( generateFileNameForDatabase(database), } + options = append(options, extraOptions...) + options = append(options, alwaysPresentOptions...) 
+ contextLogger.Info("Running pg_restore", "cmd", pgRestore, "options", options) diff --git a/pkg/management/postgres/logicalimport/microservice.go b/pkg/management/postgres/logicalimport/microservice.go index e84a28d843..a34593e563 100644 --- a/pkg/management/postgres/logicalimport/microservice.go +++ b/pkg/management/postgres/logicalimport/microservice.go @@ -34,18 +34,29 @@ func Microservice( ) error { contextLogger := log.FromContext(ctx) ds := databaseSnapshotter{cluster: cluster} - databases := cluster.Spec.Bootstrap.InitDB.Import.Databases + initDB := cluster.Spec.Bootstrap.InitDB + databases := initDB.Import.Databases + contextLogger.Info("starting microservice clone process") if err := createDumpsDirectory(); err != nil { return nil } - if err := ds.exportDatabases(ctx, origin, databases); err != nil { + if err := ds.exportDatabases( + ctx, + origin, + databases, + initDB.Import.PgDumpExtraOptions, + ); err != nil { return err } - if err := ds.dropExtensionsFromDatabase(ctx, destination, cluster.Spec.Bootstrap.InitDB.Database); err != nil { + if err := ds.dropExtensionsFromDatabase( + ctx, + destination, + initDB.Database, + ); err != nil { return err } @@ -53,8 +64,9 @@ func Microservice( ctx, destination, databases[0], - cluster.Spec.Bootstrap.InitDB.Database, - cluster.Spec.Bootstrap.InitDB.Owner, + initDB.Database, + initDB.Owner, + initDB.Import.PgRestoreExtraOptions, ); err != nil { return err } @@ -63,9 +75,13 @@ func Microservice( return err } - if err := ds.executePostImportQueries(ctx, destination, cluster.Spec.Bootstrap.InitDB.Database); err != nil { + if err := ds.executePostImportQueries( + ctx, + destination, + initDB.Database, + ); err != nil { return err } - return ds.analyze(ctx, destination, []string{cluster.Spec.Bootstrap.InitDB.Database}) + return ds.analyze(ctx, destination, []string{initDB.Database}) } diff --git a/pkg/management/postgres/logicalimport/monolith.go b/pkg/management/postgres/logicalimport/monolith.go index f65b0260c7..c63d787e91 100644 --- a/pkg/management/postgres/logicalimport/monolith.go +++ b/pkg/management/postgres/logicalimport/monolith.go @@ -53,11 +53,21 @@ func Monolith( return err } - if err := ds.exportDatabases(ctx, origin, databases); err != nil { + if err := ds.exportDatabases( + ctx, + origin, + databases, + cluster.Spec.Bootstrap.InitDB.Import.PgDumpExtraOptions, + ); err != nil { return err } - if err := ds.importDatabases(ctx, destination, databases); err != nil { + if err := ds.importDatabases( + ctx, + destination, + databases, + cluster.Spec.Bootstrap.InitDB.Import.PgRestoreExtraOptions, + ); err != nil { return err } diff --git a/tests/utils/import_db.go b/tests/utils/import_db.go index c3c7412f8d..ccb5e62175 100644 --- a/tests/utils/import_db.go +++ b/tests/utils/import_db.go @@ -70,6 +70,8 @@ func ImportDatabaseMicroservice( Source: apiv1.ImportSource{ ExternalCluster: sourceClusterName, }, + PgDumpExtraOptions: []string{"--jobs=2"}, + PgRestoreExtraOptions: []string{"--jobs=2"}, PostImportApplicationSQL: []string{"SELECT 1"}, }, }, From 15252a126054d6d289e00c01e50fce48d44cb424 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 2 Dec 2024 14:44:39 +0100 Subject: [PATCH 180/836] fix(initdb): ensure `primary_slot_name` is empty on a primary (#6219) Although harmless, remove the `primary_slot_name` definition from the `override.conf` file on a primary. 
Closes #6199

Signed-off-by: Armando Ruocco
---
 pkg/management/postgres/configuration.go | 9 +++++++--
 pkg/management/postgres/initdb.go        | 5 ++---
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go
index 3dedfe9da5..1cc414ea92 100644
--- a/pkg/management/postgres/configuration.go
+++ b/pkg/management/postgres/configuration.go
@@ -252,7 +252,9 @@ func UpdateReplicaConfiguration(pgData, primaryConnInfo, slotName string) (chang
 }

 // configurePostgresOverrideConfFile writes the content of override.conf file, including
-// replication information
+// replication information. The `primary_slot_name` parameter will be generated only when the parameter slotName is not
+// empty.
+// Returns a boolean indicating if any changes were done and any errors encountered
 func configurePostgresOverrideConfFile(pgData, primaryConnInfo, slotName string) (changed bool, err error) {
 	targetFile := path.Join(pgData, constants.PostgresqlOverrideConfigurationFile)
 	options := map[string]string{
@@ -260,10 +262,13 @@ func configurePostgresOverrideConfFile(pgData, primaryConnInfo, slotName string)
 			"/controller/manager wal-restore --log-destination %s/%s.json %%f %%p",
 			postgres.LogPath, postgres.LogFileName),
 		"recovery_target_timeline": "latest",
-		"primary_slot_name":        slotName,
 		"primary_conninfo":         primaryConnInfo,
 	}

+	if len(slotName) > 0 {
+		options["primary_slot_name"] = slotName
+	}
+
 	// Ensure that override.conf file contains just the above options
 	changed, err = configfile.WritePostgresConfiguration(targetFile, options)
 	if err != nil {
diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go
index cc338d5201..e78d434dca 100644
--- a/pkg/management/postgres/initdb.go
+++ b/pkg/management/postgres/initdb.go
@@ -430,7 +430,6 @@ func (info InitInfo) Bootstrap(ctx context.Context) error {

 	// Prepare the managed configuration file (override.conf)
 	primaryConnInfo := info.GetPrimaryConnInfo()
-	slotName := cluster.GetSlotNameFromInstanceName(info.PodName)

 	if isImportBootstrap {
 		// Write a special configuration for the import phase
@@ -439,7 +438,7 @@ func (info InitInfo) Bootstrap(ctx context.Context) error {
 		}
 	} else {
 		// Write standard replication configuration
-		if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, slotName); err != nil {
+		if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, ""); err != nil {
 			return fmt.Errorf("while configuring Postgres for replication: %w", err)
 		}
 	}
@@ -466,7 +465,7 @@ func (info InitInfo) Bootstrap(ctx context.Context) error {
 	// In case of import bootstrap, we restore the standard configuration file content
 	if isImportBootstrap {
 		/// Write standard replication configuration
-		if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, slotName); err != nil {
+		if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, ""); err != nil {
 			return fmt.Errorf("while configuring Postgres for replication: %w", err)
 		}

From 14627d56ffe0531f40bdb9f508356855e9706f0b Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Mon, 2 Dec 2024 14:48:49 +0100
Subject: [PATCH 181/836] chore: add `lint-fix` to the Makefile commands (#6226)

After running `make lint`, it is usually necessary to type a different
command to automatically fix the reported issues. This change solves that
problem by keeping everything under the same make command prefix.

Signed-off-by: Jonathan Gonzalez V.
--- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index bf85b798eb..2b93fba0ea 100644 --- a/Makefile +++ b/Makefile @@ -233,6 +233,9 @@ vet: ## Run go vet against code. lint: ## Run the linter. golangci-lint run +lint-fix: ## Run the linter with --fix. + golangci-lint run --fix + shellcheck: ## Shellcheck for the hack directory. @{ \ set -e ;\ From 2f29cf19b36e44e60de2f866f728764d783486b2 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 2 Dec 2024 15:20:09 +0100 Subject: [PATCH 182/836] fix: add `PhaseUnrecoverable` when no PVCs exist (#6170) Set the cluster phase to `Unrecoverable` when all previously generated `PersistentVolumeClaims` are no longer present. Closes #5912 Closes #3819 Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- api/v1/cluster_types.go | 2 +- internal/controller/cluster_create.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index c841a04954..b81585b961 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -523,7 +523,7 @@ const ( PhaseImageCatalogError = "Cluster has incomplete or invalid image catalog" // PhaseUnrecoverable for an unrecoverable cluster - PhaseUnrecoverable = "Cluster is in an unrecoverable state, needs manual intervention" + PhaseUnrecoverable = "Cluster is unrecoverable and needs manual intervention" // PhaseArchitectureBinaryMissing is the error phase describing a missing architecture PhaseArchitectureBinaryMissing = "Cluster cannot execute instance online upgrade due to missing architecture binary" diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index acda6f82fa..2aa3a51d47 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1062,6 +1062,14 @@ func (r *ClusterReconciler) createPrimaryInstance( // reconciliation loop is started by the informers. contextLogger.Info("refusing to create the primary instance while the latest generated serial is not zero", "latestGeneratedNode", cluster.Status.LatestGeneratedNode) + + if err := r.RegisterPhase(ctx, cluster, + apiv1.PhaseUnrecoverable, + "One or more instances were previously created, but no PersistentVolumeClaims (PVCs) exist. "+ + "The cluster is in an unrecoverable state. To resolve this, restore the cluster from a recent backup.", + ); err != nil { + return ctrl.Result{}, fmt.Errorf("while registering the unrecoverable phase: %w", err) + } return ctrl.Result{}, nil } From 72a0796382a3518279a4fc65ace30b29f7547e2c Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 2 Dec 2024 15:25:34 +0100 Subject: [PATCH 183/836] fix(plugin): add `restoreJobHookCapabilities` in the `PluginStatus` (#6225) This patch adds the `restoreJobHookCapabilities` field to the `pluginStatus`, which is currently missing. 
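For illustration, a cluster using a plugin could then report a status excerpt
shaped like the following sketch (the plugin name and capability strings are
hypothetical placeholders, not values defined by this patch):

```yaml
status:
  pluginStatus:
    - name: example-plugin.cloudnative-pg.io  # hypothetical plugin name
      version: 0.1.0
      backupCapabilities:
        - BACKUP                              # illustrative capability string
      restoreJobHookCapabilities:
        - RESTORE_JOB_HOOK                    # illustrative capability string
```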
Signed-off-by: Armando Ruocco --- .wordlist-en-custom.txt | 3 +++ api/v1/cluster_types.go | 5 +++++ api/v1/zz_generated.deepcopy.go | 5 +++++ .../crd/bases/postgresql.cnpg.io_clusters.yaml | 7 +++++++ docs/src/cloudnative-pg.v1.md | 8 ++++++++ internal/cnpi/plugin/connection/connection.go | 17 +++++++++++------ internal/cnpi/plugin/connection/metadata.go | 13 +++++++------ internal/controller/cluster_plugins.go | 1 + 8 files changed, 47 insertions(+), 12 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index fc6ba07aae..4d811ab647 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -370,6 +370,8 @@ ReplicationTLSSecret ResizingPVC ResourceRequirements ResourceVersion +RestoreJobHook +RestoreJobHookCapabilities RetentionPolicy RoleBinding RoleConfiguration @@ -1121,6 +1123,7 @@ resizingPVC resourceVersion resourcerequirements restoreAdditionalCommandArgs +restoreJobHookCapabilities resync retentionPolicy reusePVC diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index b81585b961..bb66dac8f2 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -2141,6 +2141,11 @@ type PluginStatus struct { // +optional BackupCapabilities []string `json:"backupCapabilities,omitempty"` + // RestoreJobHookCapabilities are the list of capabilities of the + // plugin regarding the RestoreJobHook management + // +optional + RestoreJobHookCapabilities []string `json:"restoreJobHookCapabilities,omitempty"` + // Status contain the status reported by the plugin through the SetStatusInCluster interface // +optional Status string `json:"status,omitempty"` diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index a0b80fcd9c..014362a084 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1920,6 +1920,11 @@ func (in *PluginStatus) DeepCopyInto(out *PluginStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RestoreJobHookCapabilities != nil { + in, out := &in.RestoreJobHookCapabilities, &out.RestoreJobHookCapabilities + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginStatus. diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index eca57c182e..51914f02d2 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -6024,6 +6024,13 @@ spec: items: type: string type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array status: description: Status contain the status reported by the plugin through the SetStatusInCluster interface diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 2ef5d831f4..662b01af57 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -3637,6 +3637,14 @@ plugin regarding the WAL management

plugin regarding the Backup management
+restoreJobHookCapabilities
+[]string
+RestoreJobHookCapabilities are the list of capabilities of the
+plugin regarding the RestoreJobHook management
status
string diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go index 0e9826d530..34e9574d05 100644 --- a/internal/cnpi/plugin/connection/connection.go +++ b/internal/cnpi/plugin/connection/connection.go @@ -264,12 +264,13 @@ func (pluginData *data) loadRestoreJobHooksCapabilities(ctx context.Context) err // the internal metadata func (pluginData *data) Metadata() Metadata { result := Metadata{ - Name: pluginData.name, - Version: pluginData.version, - Capabilities: make([]string, len(pluginData.capabilities)), - OperatorCapabilities: make([]string, len(pluginData.operatorCapabilities)), - WALCapabilities: make([]string, len(pluginData.walCapabilities)), - BackupCapabilities: make([]string, len(pluginData.backupCapabilities)), + Name: pluginData.name, + Version: pluginData.version, + Capabilities: make([]string, len(pluginData.capabilities)), + OperatorCapabilities: make([]string, len(pluginData.operatorCapabilities)), + WALCapabilities: make([]string, len(pluginData.walCapabilities)), + BackupCapabilities: make([]string, len(pluginData.backupCapabilities)), + RestoreJobHookCapabilities: make([]string, len(pluginData.restoreJobHooksCapabilities)), } for i := range pluginData.capabilities { @@ -288,6 +289,10 @@ func (pluginData *data) Metadata() Metadata { result.BackupCapabilities[i] = pluginData.backupCapabilities[i].String() } + for i := range pluginData.restoreJobHooksCapabilities { + result.RestoreJobHookCapabilities[i] = pluginData.restoreJobHooksCapabilities[i].String() + } + return result } diff --git a/internal/cnpi/plugin/connection/metadata.go b/internal/cnpi/plugin/connection/metadata.go index 21f28652c3..a17e4d9ae0 100644 --- a/internal/cnpi/plugin/connection/metadata.go +++ b/internal/cnpi/plugin/connection/metadata.go @@ -19,10 +19,11 @@ package connection // Metadata expose the metadata as discovered // from a plugin type Metadata struct { - Name string - Version string - Capabilities []string - OperatorCapabilities []string - WALCapabilities []string - BackupCapabilities []string + Name string + Version string + Capabilities []string + OperatorCapabilities []string + WALCapabilities []string + BackupCapabilities []string + RestoreJobHookCapabilities []string } diff --git a/internal/controller/cluster_plugins.go b/internal/controller/cluster_plugins.go index 2b6b31f4a0..845af5e6f7 100644 --- a/internal/controller/cluster_plugins.go +++ b/internal/controller/cluster_plugins.go @@ -43,6 +43,7 @@ func (r *ClusterReconciler) updatePluginsStatus(ctx context.Context, cluster *ap cluster.Status.PluginStatus[i].OperatorCapabilities = entry.OperatorCapabilities cluster.Status.PluginStatus[i].WALCapabilities = entry.WALCapabilities cluster.Status.PluginStatus[i].BackupCapabilities = entry.BackupCapabilities + cluster.Status.PluginStatus[i].RestoreJobHookCapabilities = entry.RestoreJobHookCapabilities } // If nothing changes, there's no need to hit the API server From 1b924a370a65f5a9fe3b710f0f2d6f276c991dc6 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Mon, 2 Dec 2024 21:36:48 +0100 Subject: [PATCH 184/836] doc: clarify usage of endpointCA field with object store (#5537) Closes #5308 Signed-off-by: Jaime Silvela --- docs/src/appendixes/object_stores.md | 37 ++++++++++++++++++++++------ docs/src/certificates.md | 4 +-- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/docs/src/appendixes/object_stores.md b/docs/src/appendixes/object_stores.md index 991b8fa14e..2fc32452ca 100644 --- 
a/docs/src/appendixes/object_stores.md
+++ b/docs/src/appendixes/object_stores.md
@@ -129,6 +129,7 @@ spec:
 In case you're using **Digital Ocean Spaces**, you will have to use the
 Path-style syntax. In this example, it will use the `bucket` from **Digital
 Ocean Spaces** in the region `SFO3`.
+
 ```yaml
 apiVersion: postgresql.cnpg.io/v1
 kind: Cluster
@@ -142,10 +143,31 @@ spec:
   [...]
 ```

-!!! Important
-    Suppose you configure an Object Storage provider which uses a certificate signed with a private CA,
-    like when using MinIO via HTTPS. In that case, you need to set the option `endpointCA`
-    referring to a secret containing the CA bundle so that Barman can verify the certificate correctly.
+### Using Object Storage with a private CA
+
+Suppose you configure an Object Storage provider which uses a certificate
+signed with a private CA, for example when using MinIO via HTTPS. In that case,
+you need to set the option `endpointCA` inside `barmanObjectStore` referring
+to a secret containing the CA bundle, so that Barman can verify the certificate
+correctly.
+You can find instructions on creating a secret using your cert files in the
+[certificates](../certificates.md#example) document.
+Once you have created the secret, you can populate the `endpointCA` as in the
+following example:
+
+``` yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+[...]
+spec:
+  [...]
+  backup:
+    barmanObjectStore:
+      endpointURL:
+      endpointCA:
+        name: my-ca-secret
+        key: ca.crt
+```

 !!! Note
     If you want ConfigMaps and Secrets to be **automatically** reloaded by instances, you can
@@ -186,7 +208,7 @@ On the other side, using both **Storage account access key** or **Storage accoun
 the credentials need to be stored inside a Kubernetes Secret, adding data entries only when
 needed. The following command performs that:

-```
+``` sh
 kubectl create secret generic azure-creds \
   --from-literal=AZURE_STORAGE_ACCOUNT= \
   --from-literal=AZURE_STORAGE_KEY= \
@@ -226,7 +248,7 @@ spec:
 When using the Azure Blob Storage, the `destinationPath` fulfills the following
 structure:

-```
+``` sh
 ://..core.windows.net/
 ```

@@ -238,7 +260,7 @@ which is also called **storage account name**, is included in the used host nam
 If you are using a different implementation of the Azure Blob Storage APIs, the
 `destinationPath` will have the following structure:

-```
+``` sh
 ://://
 ```

@@ -266,7 +288,6 @@ without having to set any credentials. In particular, you need to:

 Please use the following example as a reference:

-
 ```yaml
 apiVersion: postgresql.cnpg.io/v1
 kind: Cluster
diff --git a/docs/src/certificates.md b/docs/src/certificates.md
index b5e2d1f49f..c9cc2eb95c 100644
--- a/docs/src/certificates.md
+++ b/docs/src/certificates.md
@@ -129,14 +129,14 @@ Given the following files:

 Create a secret containing the CA certificate:

-```
+``` sh
 kubectl create secret generic my-postgresql-server-ca \
   --from-file=ca.crt=./server-ca.crt
 ```

 Create a secret with the TLS certificate:

-```
+``` sh
 kubectl create secret tls my-postgresql-server \
   --cert=./server.crt --key=./server.key
 ```

From 255262a795254e4e1aaca6f0a2a1b28293e8913f Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Tue, 3 Dec 2024 09:29:32 +0100
Subject: [PATCH 185/836] chore: remove unknown fields and improve CSV specs (#6107)

Some of the fields were not required or needed, and others could be
displayed better on the OpenShift web interface.

Closes #5966

Signed-off-by: Jonathan Gonzalez V.
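For reference, one of the descriptors touched by this patch ends up with the
following shape after the cleanup (a sketch of the YAML structure; see the
diff below for the complete list of changes):

```yaml
specDescriptors:
  - path: resources
    displayName: PostgreSQL Resources requirement
    description: Resources requirement for the PostgreSQL instances
    x-descriptors:
      - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements'
```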
--- .spellcheck.yaml | 3 + .wordlist-en-custom.txt | 30 ++++ api/v1/cluster_types.go | 1 - .../bases/postgresql.cnpg.io_clusters.yaml | 13 +- .../cloudnative-pg.clusterserviceversion.yaml | 140 +++++++++++++++--- 5 files changed, 160 insertions(+), 27 deletions(-) diff --git a/.spellcheck.yaml b/.spellcheck.yaml index 540285ffcd..d60d3bf957 100644 --- a/.spellcheck.yaml +++ b/.spellcheck.yaml @@ -3,6 +3,7 @@ matrix: sources: - 'docs/src/*.md' - 'docs/src/*/*.md' + - 'config/olm-manifests/bases/*.yaml' default_encoding: utf-8 aspell: lang: en @@ -25,4 +26,6 @@ matrix: close: '(?P=open)' - open: '(?P)' + - open: '.*base64data.*' + close: "$" - pyspelling.filters.url: diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 4d811ab647..912dc3d759 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1,3 +1,4 @@ + AES API's APIs @@ -9,6 +10,7 @@ AdditionalCommandArgs AdditionalPodAffinity AdditionalPodAntiAffinity AffinityConfiguration +AllNamespaces AntiAffinity AppArmor AppArmorProfile @@ -244,6 +246,7 @@ Milsted MinIO Minikube MonitoringConfiguration +MultiNamespace NFS NGINX NOBYPASSRLS @@ -276,6 +279,7 @@ Openshift OperatorCapabilities OperatorGroup OperatorHub +OwnNamespace PDB PDBs PGAudit @@ -403,6 +407,7 @@ ScheduledBackupSpec ScheduledBackupStatus ScheduledBackups Scorsolini +Seccomp SeccompProfile SecretKeySelector SecretRefs @@ -424,6 +429,7 @@ ServiceUpdateStrategy SetStatusInCluster ShutdownCheckpointToken Silvela +SingleNamespace Slonik SnapshotOwnerReference SnapshotType @@ -438,6 +444,7 @@ SubscriptionReclaimPolicy SubscriptionSpec SubscriptionStatus SuccessfullyExtracted +SuperUserSecret SwitchReplicaClusterStatus SyncReplicaElectionConstraints SynchronizeReplicas @@ -513,6 +520,7 @@ allocator allowConnections allowPrivilegeEscalation allowVolumeExpansion +alm amd angus anonymization @@ -524,6 +532,7 @@ apidoc apimachinery apis apiserver +apiservicedefinitions apparmor appdb applicationCredentials @@ -586,6 +595,7 @@ bindPassword bindSearchAuth bitmask bool +booleanSwitch bootstrapconfiguration bootstrapinitdb bootstraprecovery @@ -653,6 +663,7 @@ connectionLimit connectionParameters connectionString conninfo +containerImage containerPort controldata coredump @@ -665,6 +676,7 @@ cpu crc crds crdview +createdAt createdb createrole createuser @@ -684,6 +696,7 @@ currentPrimaryTimestamp customQueriesConfigMap customQueriesSecret customizable +customresourcedefinitions cutover cyber dT @@ -716,6 +729,8 @@ dir disableDefaultQueries disablePassword disabledDefaultServices +displayName +displayName distro distroless distros @@ -773,6 +788,7 @@ filesystem finalizer findstr fio +fips firstRecoverabilityPoint firstRecoverabilityPointByMethod freddie @@ -781,6 +797,7 @@ gapped gc gcc gce +gcp gcs gcsCredentials geocoder @@ -837,6 +854,7 @@ initdb initialise initializingPVC inplace +installModes installplans instanceID instanceName @@ -925,6 +943,7 @@ maxSyncReplicas maxwait mcache md +mediatype mem memstats metav @@ -934,6 +953,7 @@ microservice microservices microsoft minApplyDelay +minKubeVersion minSyncReplicas minikube minio @@ -1034,10 +1054,12 @@ png podAffinityTerm podAntiAffinity podAntiAffinityType +podCount podMetricsEndpoints podMonitorMetricRelabelings podMonitorRelabelings podName +podStatuses podmonitor podtemplates poolMode @@ -1120,6 +1142,7 @@ req requiredDuringSchedulingIgnoredDuringExecution resizeInUseVolumes resizingPVC +resourceRequirements resourceVersion resourcerequirements restoreAdditionalCommandArgs @@ -1184,6 +1207,7 @@ 
shutdownCheckpointToken sig sigs singlenamespace +skipRange slotPrefix smartShutdownTimeout snapshotBackupStatus @@ -1191,6 +1215,7 @@ snapshotOwnerReference snapshotted snapshotting sourceNamespace +specDescriptors specificities sql src @@ -1210,6 +1235,7 @@ standbyNamesPre startDelay startedAt stateful +statusDescriptors stderr stdout stedolan @@ -1236,6 +1262,7 @@ superuserSecret superuserSecretVersion sv svc +svg switchReplicaClusterStatus switchoverDelay switchovers @@ -1288,6 +1315,7 @@ transactional transactionid tx ubi +ui uid ul un @@ -1305,6 +1333,7 @@ updateStrategy upgradable uptime uri +url usename usernamepassword usr @@ -1341,5 +1370,6 @@ wsl www xact xlog +xml yaml yml diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index bb66dac8f2..9bb1edef5c 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -2073,7 +2073,6 @@ type ManagedServices struct { type ManagedService struct { // SelectorType specifies the type of selectors that the service will have. // Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. - // +kubebuilder:validation:Enum=rw;r;ro SelectorType ServiceSelectorType `json:"selectorType"` // UpdateStrategy describes how the service differences should be reconciled diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 51914f02d2..4316b14329 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3232,18 +3232,13 @@ spec: It includes the type of service and its associated template specification. properties: selectorType: - allOf: - - enum: - - rw - - r - - ro - - enum: - - rw - - r - - ro description: |- SelectorType specifies the type of selectors that the service will have. Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro type: string serviceTemplate: description: ServiceTemplate is the template specification diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index 0bf3485944..a8d0c40f05 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -143,7 +143,7 @@ spec: - 'urn:alm:descriptor:io.kubernetes:Clusters' statusDescriptors: - displayName: Phase - description: Current backupphase + description: Current backup phase path: phase x-descriptors: - 'urn:alm:descriptor:io.kubernetes.phase' @@ -174,6 +174,18 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:text' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Image section + - path: imagePullSecrets + displayName: Image Pull Secret + description: List of secrets to use for pulling the images + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: imagePullSecrets[0].name + displayName: Image Pull Secret + description: Secret for pulling the image. 
If empty, no secret will be used + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:Secret' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' - path: imageName displayName: Image Name description: Name of the PostgreSQL container image @@ -198,6 +210,13 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:hidden' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: imageCatalogRef + displayName: Image Catalog + description: The name of the image catalog to use + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Instances section - path: instances displayName: Instances description: Number of instances required in the cluster @@ -215,6 +234,34 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:number' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: enablePDB + displayName: Enable Pod Disruption Budget + description: Boolean to enable or disable the Pod Disruption Budget + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: priorityClassName + displayName: Priority Class Name + description: The name of the Priority Class to use in every generated Pod + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes:PriorityClass' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: env + displayName: Environment Variables + description: Environment variables to set in the pods created in the cluster + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: envFrom + displayName: Environment Variables from ConfigMap + description: ConfigMap to use as environment variables in the pods created in the cluster + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: seccompProfile + displayName: Seccomp Profile applied to every pod in the cluster + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # PostgreSQL Configuration section - path: postgresql displayName: PostgreSQL Configuration description: Options for postgresql.conf @@ -226,11 +273,8 @@ spec: - path: postgresql.pg_hba[0] displayName: pg_hba rules description: PostgreSQL Host Based Authentication rules - - path: postgresql.epas.audit - displayName: EPAS Configuration - description: Boolean to enable edb_audit logging - path: postgresql.promotionTimeout - displayName: pgctl Timeout + displayName: pg_ctl Timeout description: maximum number of seconds to wait when promoting an instance to primary - path: postgresql.shared_preload_libraries[0] displayName: Preload Libraries @@ -259,6 +303,22 @@ spec: description: Boolean to enable TLS x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - path: tablespaces + displayName: Tablespaces + description: Configuration of the tablespaces + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: managed + displayName: Managed service + description: Resources managed by the operator + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: replicationSlots + displayName: Replication Slots Configuration + description: Configuration of the replication slots + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Bootstrap section - path: bootstrap displayName: Bootstrap Configuration description: Instructions to bootstrap the cluster @@ -269,12 +329,14 @@ spec: description: The name of the Bootstrap secret x-descriptors: - 
'urn:alm:descriptor:io.kubernetes:Secret' + # Replica cluster section - path: replica displayName: Replica description: Replica cluster configuration x-descriptors: - 'urn:alm:descriptor:io.kubernetes:Secret' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Superuser section - path: superuserSecret displayName: Superuser Secret description: | @@ -284,7 +346,7 @@ spec: - 'urn:alm:descriptor:com.tectonic.ui:advanced' - path: superuserSecret.name displayName: Superuser Secret - description: Name of hte Superuser Secret + description: Name of the Superuser Secret x-descriptors: - 'urn:alm:descriptor:io.kubernetes:Secret' - path: enableSuperuserAccess @@ -293,16 +355,13 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Certificates section - path: certificates displayName: Certificates description: The configuration for the CA and related certificates x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:advanced' - - path: imagePullSecrets[0].name - displayName: Image Pull Secret - description: Secret for pulling the image. If empty, no secret will be used - x-descriptors: - - 'urn:alm:descriptor:io.kubernetes:Secret' + # Storage section - path: storage displayName: Storage description: Configuration of the storage of the instances @@ -336,6 +395,7 @@ spec: path: storage.pvcTemplate x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Delay and timeout section - path: startDelay displayName: Maximum Start Delay description: The time in seconds that is allowed for a PostgreSQL instance @@ -350,6 +410,33 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:number' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: smartShutdownTimeout + displayName: Smart Shutdown Timeout + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:number' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: switchoverDelay + displayName: Switchover Delay + description: The time in seconds that is allowed for a PostgreSQL instance + to gracefully shutdown during a switchover + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:number' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: failoverDelay + displayName: Failover Delay + description: The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected to be unhealthy + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:number' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: livenessProbeTimeout + displayName: Liveness Probe Timeout + description: The time in seconds that is allowed for the liveness probe to + complete + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:number' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Affinity section - path: affinity displayName: Pod Affinity description: Affinity/Anti-affinity rules for Pods @@ -365,11 +452,13 @@ spec: description: Key value pair of which nodes the pods can run x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:selector:core:v1:Node' + # Resources section - path: resources - display: Resources - description: + displayName: PostgreSQL Resources requirement + description: Resources requirement for the PostgreSQL instances x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + # Update strategy section - path: primaryUpdateStrategy displayName: Primary Update Strategy x-descriptors: @@ -378,6 +467,10 @@ spec: displayName: Primary Update Method 
x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Backup section + - path: backup + displayName: Backup Configuration + description: Configuration to be used for backups - path: backup.barmanObjectStore.endpointURL displayName: Object Storage Endpoint description: S3-compatible object storage Endpoint. If empty the S3 default is used @@ -410,7 +503,7 @@ spec: - 'urn:alm:descriptor:io.kubernetes:text' - path: backup.barmanObjectStore.wal.encryption displayName: WAL encryption - description: WAL encryprion algorithm + description: WAL encryption algorithm x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - path: backup.barmanObjectStore.data.compression @@ -420,7 +513,7 @@ spec: - 'urn:alm:descriptor:io.kubernetes:text' - path: backup.barmanObjectStore.data.encryption displayName: Data encryption - description: Data encryprion algorithm + description: Data encryption algorithm x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - path: backup.barmanObjectStore.data.immediateCheckpoint @@ -431,6 +524,10 @@ spec: displayName: Jobs x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:number' + # Maintenance Window section + - path: nodeMaintenanceWindow + displayName: Node Maintenance Window + description: The configuration of the maintenance window for Kubernetes nodes - path: nodeMaintenanceWindow.inProgress displayName: In Progress description: Maintenance window for Kubernetes node upgrades is in progress @@ -442,6 +539,7 @@ spec: description: Should the existing PVCs be reused during Kubernetes upgrades? x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + # Monitoring section - path: monitoring displayName: Monitoring description: The configuration of the monitoring infrastructure of this cluster @@ -467,17 +565,25 @@ spec: displayName: Enable PodMonitor resource x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + # External Clusters section - path: externalClusters displayName: External Clusters description: List of external clusters which are used in the configuration x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Log Level section - path: logLevel displayName: Log Level description: One of error, info (default), debug or trace x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Plugins section + - path: plugins + displayName: Plugins + description: List of plugins to be installed + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' statusDescriptors: - displayName: Working Pods description: Status Pods @@ -551,7 +657,7 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:text' - path: type - description: Service type of the cluster to connect to ('rw' or 'rw') + description: Service type of the cluster to connect to ('rw' or 'ro') x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - path: instances @@ -570,7 +676,7 @@ spec: - 'urn:alm:descriptor:com.tectonic.ui:advanced' - path: pgbouncer.poolMode displayName: PgBouncer PoolMode - description: The poolmode to use. One of 'session' or 'transaction'. + description: The pool mode to use. One of 'session' or 'transaction'. 
x-descriptors: - 'urn:alm:descriptor:io.kubernetes:text' - path: pgbouncer.authQuerySecret From 248276cca2d113417c6db6a53d71af29ffe523ad Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Tue, 3 Dec 2024 11:04:59 +0100 Subject: [PATCH 186/836] fix(docs): add default rule for PgBouncer in `pg_hba` (#6175) Closes #6174 --- docs/src/postgresql_conf.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index b3e76b6c74..fdd3fc4fe3 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -331,6 +331,7 @@ local all all peer hostssl postgres streaming_replica all cert hostssl replication streaming_replica all cert +hostssl all cnpg_pooler_pgbouncer all cert ``` Default rules: From af56bb29ef230a59bd3ef90d8be34e5af161a466 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 3 Dec 2024 13:41:59 +0100 Subject: [PATCH 187/836] chore: make cnpg plugin commands compatible with OLM (#6213) Improve the plugins to run the commands fio, publication, subscription and psql smoothly on OLM environments. Closes #5824 Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- cmd/kubectl-cnpg/main.go | 4 +- internal/cmd/plugin/fio/cmd.go | 2 +- internal/cmd/plugin/fio/fio.go | 63 +++++++++++++++++---------- internal/cmd/plugin/logical/psql.go | 2 + internal/cmd/plugin/plugin.go | 2 +- internal/cmd/plugin/psql/psql.go | 8 +++- internal/cmd/plugin/psql/psql_test.go | 4 ++ 7 files changed, 57 insertions(+), 28 deletions(-) diff --git a/cmd/kubectl-cnpg/main.go b/cmd/kubectl-cnpg/main.go index 128f6ea232..4a8157d057 100644 --- a/cmd/kubectl-cnpg/main.go +++ b/cmd/kubectl-cnpg/main.go @@ -66,8 +66,6 @@ func main() { PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { logFlags.ConfigureLogging() - plugin.ConfigureColor(cmd) - // If we're invoking the completion command we shouldn't try to create // a Kubernetes client and we just let the Cobra flow to continue if cmd.Name() == "completion" || cmd.Name() == "version" || @@ -75,6 +73,8 @@ func main() { return nil } + plugin.ConfigureColor(cmd) + return plugin.SetupKubernetesClient(configFlags) }, } diff --git a/internal/cmd/plugin/fio/cmd.go b/internal/cmd/plugin/fio/cmd.go index 50d574fcba..fdb7b721d1 100644 --- a/internal/cmd/plugin/fio/cmd.go +++ b/internal/cmd/plugin/fio/cmd.go @@ -64,7 +64,7 @@ func NewCmd() *cobra.Command { fmt.Printf("To remove this test you need to delete the Deployment, ConfigMap "+ "and PVC with the name %v\n\nThe most simple way to do this is to re-run the command that was run"+ "to generate the deployment with the --dry-run flag and pipe that output to kubectl delete, e.g.:\n\n"+ - "kubectl cnpg fio --dry-run | kubectl delete -f -", deploymentName) + "kubectl cnpg fio --dry-run | kubectl delete -f -\n", deploymentName) } }, } diff --git a/internal/cmd/plugin/fio/fio.go b/internal/cmd/plugin/fio/fio.go index c06141c690..8aa6088c67 100644 --- a/internal/cmd/plugin/fio/fio.go +++ b/internal/cmd/plugin/fio/fio.go @@ -29,6 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) type fioCommand struct { @@ -156,10 +157,44 @@ func (cmd *fioCommand) generateConfigMapObject() *corev1.ConfigMap { return result } +func getSecurityContext() *corev1.SecurityContext { + runAs := int64(10001) + sc := &corev1.SecurityContext{ + 
AllowPrivilegeEscalation: ptr.To(false), + RunAsNonRoot: ptr.To(true), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{ + "ALL", + }, + }, + ReadOnlyRootFilesystem: ptr.To(true), + } + if utils.HaveSecurityContextConstraints() { + return sc + } + + sc.RunAsUser = &runAs + sc.RunAsGroup = &runAs + sc.SeccompProfile = &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + } + + return sc +} + +func getPodSecurityContext() *corev1.PodSecurityContext { + if utils.HaveSecurityContextConstraints() { + return &corev1.PodSecurityContext{} + } + runAs := int64(10001) + return &corev1.PodSecurityContext{ + FSGroup: &runAs, + } +} + // createFioDeployment creates spec of deployment. func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Deployment { - runAs := int64(10001) - fioDeployment := &appsv1.Deployment{ + return &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ APIVersion: "apps/v1", Kind: "Deployment", @@ -229,22 +264,7 @@ func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Depl InitialDelaySeconds: 60, PeriodSeconds: 10, }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - RunAsGroup: &runAs, - RunAsNonRoot: ptr.To(true), - RunAsUser: &runAs, - - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - "ALL", - }, - }, - ReadOnlyRootFilesystem: ptr.To(true), - }, + SecurityContext: getSecurityContext(), Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ "memory": resource.MustParse("100M"), @@ -303,13 +323,10 @@ func (cmd *fioCommand) generateFioDeployment(deploymentName string) *appsv1.Depl }, }, }, - NodeSelector: map[string]string{}, - SecurityContext: &corev1.PodSecurityContext{ - FSGroup: &runAs, - }, + NodeSelector: map[string]string{}, + SecurityContext: getPodSecurityContext(), }, }, }, } - return fioDeployment } diff --git a/internal/cmd/plugin/logical/psql.go b/internal/cmd/plugin/logical/psql.go index 3c4b1c1670..a13d527235 100644 --- a/internal/cmd/plugin/logical/psql.go +++ b/internal/cmd/plugin/logical/psql.go @@ -64,6 +64,8 @@ func getSQLCommand( ) (*psql.Command, error) { psqlArgs := []string{ connectionString, + "-U", + "postgres", "-c", sqlCommand, } diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go index afaa602e1a..b9af618b84 100644 --- a/internal/cmd/plugin/plugin.go +++ b/internal/cmd/plugin/plugin.go @@ -98,7 +98,7 @@ func SetupKubernetesClient(configFlags *genericclioptions.ConfigFlags) error { ClientInterface = kubernetes.NewForConfigOrDie(Config) - return nil + return utils.DetectSecurityContextConstraints(ClientInterface.Discovery()) } func createClient(cfg *rest.Config) error { diff --git a/internal/cmd/plugin/psql/psql.go b/internal/cmd/plugin/psql/psql.go index 758a9f97b6..f20d5ce727 100644 --- a/internal/cmd/plugin/psql/psql.go +++ b/internal/cmd/plugin/psql/psql.go @@ -24,6 +24,7 @@ import ( "syscall" corev1 "k8s.io/api/core/v1" + "k8s.io/utils/strings/slices" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" @@ -102,7 +103,7 @@ func NewCommand( // getKubectlInvocation gets the kubectl command to be executed func (psql *Command) getKubectlInvocation() ([]string, error) { - result := make([]string, 0, 11+len(psql.Args)) + result := make([]string, 0, 13+len(psql.Args)) result = append(result, "kubectl", "exec") if psql.AllocateTTY { @@ 
-121,6 +122,11 @@ func (psql *Command) getKubectlInvocation() ([]string, error) { return nil, err } + // Default to `postgres` if no-user has been specified + if !slices.Contains(psql.Args, "-U") { + psql.Args = append([]string{"-U", "postgres"}, psql.Args...) + } + result = append(result, podName) result = append(result, "--", "psql") result = append(result, psql.Args...) diff --git a/internal/cmd/plugin/psql/psql_test.go b/internal/cmd/plugin/psql/psql_test.go index a01dd7cbb8..682705e76b 100644 --- a/internal/cmd/plugin/psql/psql_test.go +++ b/internal/cmd/plugin/psql/psql_test.go @@ -95,6 +95,8 @@ var _ = Describe("psql launcher", func() { "cluster-example-1", "--", "psql", + "-U", + "postgres", )) }) @@ -120,6 +122,8 @@ var _ = Describe("psql launcher", func() { "cluster-example-1", "--", "psql", + "-U", + "postgres", "-c", "select 1", )) From ddd90b9e3f4cc824e8494d3424947e84c13e5e5a Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 3 Dec 2024 19:50:10 +0100 Subject: [PATCH 188/836] fix(pooler): set libpq environment variables in PgBouncer pods (#6247) This patch configures the following environment variables in PgBouncer pods: - `PGUSER` - `PGDATABASE` - `PGHOST` - `PSQL_HISTORY` These variables enable seamless access to the PgBouncer administrative interface by allowing `psql` to connect directly from within the Pod without requiring additional command-line options. Fixes: #6242 Signed-off-by: Leonardo Cecchi --- internal/cmd/manager/pgbouncer/cmd.go | 5 ++++ pkg/specs/pgbouncer/deployments.go | 9 +++++++ tests/e2e/pgbouncer_test.go | 35 +++++++++++++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/internal/cmd/manager/pgbouncer/cmd.go b/internal/cmd/manager/pgbouncer/cmd.go index 138963dcfb..a511619112 100644 --- a/internal/cmd/manager/pgbouncer/cmd.go +++ b/internal/cmd/manager/pgbouncer/cmd.go @@ -19,10 +19,12 @@ package pgbouncer import ( "fmt" + "os" "github.com/spf13/cobra" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/pgbouncer/run" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) // NewCmd creates the "instance" command @@ -31,6 +33,9 @@ func NewCmd() *cobra.Command { Use: "pgbouncer", Short: "pgbouncer management subfeatures", SilenceErrors: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + return os.MkdirAll(postgres.TemporaryDirectory, 0o1777) //nolint:gosec + }, RunE: func(_ *cobra.Command, _ []string) error { return fmt.Errorf("missing subcommand") }, diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go index 12fe9e0931..2f5d639502 100644 --- a/pkg/specs/pgbouncer/deployments.go +++ b/pkg/specs/pgbouncer/deployments.go @@ -19,6 +19,8 @@ limitations under the License. package pgbouncer import ( + "path" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -108,6 +110,13 @@ func Deployment(pooler *apiv1.Pooler, cluster *apiv1.Cluster) (*appsv1.Deploymen }, true). WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "NAMESPACE", Value: pooler.Namespace}, true). WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "POOLER_NAME", Value: pooler.Name}, true). + WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGUSER", Value: "pgbouncer"}, false). + WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGDATABASE", Value: "pgbouncer"}, false). + WithContainerEnv("pgbouncer", corev1.EnvVar{Name: "PGHOST", Value: "/controller/run"}, false). 
+ WithContainerEnv("pgbouncer", corev1.EnvVar{ + Name: "PSQL_HISTORY", + Value: path.Join(postgres.TemporaryDirectory, ".psql_history"), + }, false). WithContainerSecurityContext("pgbouncer", specs.CreateContainerSecurityContext(cluster.GetSeccompProfile()), true). WithServiceAccountName(pooler.Name, true). WithReadinessProbe("pgbouncer", &corev1.Probe{ diff --git a/tests/e2e/pgbouncer_test.go b/tests/e2e/pgbouncer_test.go index 3e7f9542f1..d07734c106 100644 --- a/tests/e2e/pgbouncer_test.go +++ b/tests/e2e/pgbouncer_test.go @@ -17,6 +17,10 @@ limitations under the License. package e2e import ( + corev1 "k8s.io/api/core/v1" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" . "github.com/onsi/ginkgo/v2" @@ -75,6 +79,16 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), assertReadWriteConnectionUsingPgBouncerService(namespace, clusterName, poolerBasicAuthROSampleFile, false) }) + + By("executing psql within the pgbouncer pod", func() { + pod, err := getPgbouncerPod(poolerBasicAuthRWSampleFile) + Expect(err).ToNot(HaveOccurred()) + + GinkgoWriter.Println(pod.Name) + + err = runShowHelpInPod(pod) + Expect(err).ToNot(HaveOccurred()) + }) }) It("can connect to Postgres via pgbouncer service using tls certificates", func() { @@ -176,3 +190,24 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), }) }) }) + +func getPgbouncerPod(sampleFile string) (*corev1.Pod, error) { + poolerKey, err := env.GetResourceNamespacedNameFromYAML(sampleFile) + if err != nil { + return nil, err + } + + Expect(err).ToNot(HaveOccurred()) + + var podList corev1.PodList + err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(poolerKey.Namespace), + ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerKey.Name}) + Expect(err).ToNot(HaveOccurred()) + Expect(len(podList.Items)).Should(BeEquivalentTo(1)) + return &podList.Items[0], nil +} + +func runShowHelpInPod(pod *corev1.Pod) error { + _, _, err := env.ExecCommand(env.Ctx, *pod, "pgbouncer", nil, "psql", "-c", "SHOW HELP") + return err +} From 840920dafad32c083dffe6031880e34bce59feb7 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 4 Dec 2024 11:17:54 +0100 Subject: [PATCH 189/836] feat(initdb): add support for ICU and built-in locale providers (#6220) This patch enhances the PostgreSQL database initialization process (`initdb`) by introducing support for the ICU locale provider (available since PostgreSQL 16) and the built-in locale provider (available since PostgreSQL 17). Users can now specify the desired locale provider when initializing a new PostgreSQL cluster, offering improved localization flexibility. 
Closes #5386 Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Signed-off-by: Tao Li Co-authored-by: Gabriele Bartolini Co-authored-by: Tao Li --- .wordlist-en-custom.txt | 4 + api/v1/cluster_types.go | 30 ++++++++ .../bases/postgresql.cnpg.io_clusters.yaml | 37 +++++++++ docs/src/bootstrap.md | 75 ++++++++++++++++--- docs/src/cloudnative-pg.v1.md | 42 +++++++++++ .../samples/cluster-example-initdb-icu.yaml | 19 +++++ pkg/specs/jobs.go | 15 ++++ pkg/specs/jobs_test.go | 41 ++++++++-- 8 files changed, 244 insertions(+), 19 deletions(-) create mode 100644 docs/src/samples/cluster-example-initdb-icu.yaml diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 912dc3d759..8cf809cd19 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -601,6 +601,7 @@ bootstrapinitdb bootstraprecovery br bs +builtinLocale bw byStatus bypassrls @@ -833,6 +834,8 @@ httpGet https hugepages icu +icuLocale +icuRules ident imageCatalogRef imageName @@ -921,6 +924,7 @@ livenessProbeTimeout lm localeCType localeCollate +localeProvider localhost localobjectreference locktype diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 9bb1edef5c..affbfe0e60 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1428,6 +1428,9 @@ type CertificatesStatus struct { // BootstrapInitDB is the configuration of the bootstrap process when // initdb is used // Refer to the Bootstrap page of the documentation for more information. +// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`" +// +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`" +// +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`" type BootstrapInitDB struct { // Name of the database used by the application. Default: `app`. // +optional @@ -1468,6 +1471,33 @@ type BootstrapInitDB struct { // +optional LocaleCType string `json:"localeCType,omitempty"` + // Sets the default collation order and character classification in the new database. + // +optional + Locale string `json:"locale,omitempty"` + + // This option sets the locale provider for databases created in the new cluster. + // Available from PostgreSQL 16. + // +optional + LocaleProvider string `json:"localeProvider,omitempty"` + + // Specifies the ICU locale when the ICU provider is used. + // This option requires `localeProvider` to be set to `icu`. + // Available from PostgreSQL 15. + // +optional + IcuLocale string `json:"icuLocale,omitempty"` + + // Specifies additional collation rules to customize the behavior of the default collation. + // This option requires `localeProvider` to be set to `icu`. + // Available from PostgreSQL 16. + // +optional + IcuRules string `json:"icuRules,omitempty"` + + // Specifies the locale name when the builtin provider is used. + // This option requires `localeProvider` to be set to `builtin`. + // Available from PostgreSQL 17. 
+ // +optional + BuiltinLocale string `json:"builtinLocale,omitempty"` + // The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` // option for initdb (default: empty, resulting in PostgreSQL default: 16MB) // +kubebuilder:validation:Minimum=1 diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 4316b14329..17242a5ec7 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -1495,6 +1495,12 @@ spec: initdb: description: Bootstrap the cluster via initdb properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string dataChecksums: description: |- Whether the `-k` option should be passed to initdb, @@ -1508,6 +1514,18 @@ spec: description: The value to be passed as option `--encoding` for initdb (default:`UTF8`) type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. + type: string import: description: |- Bootstraps the new cluster by importing data from an existing PostgreSQL @@ -1576,6 +1594,10 @@ spec: - source - type type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string localeCType: description: The value to be passed as option `--lc-ctype` for initdb (default:`C`) @@ -1584,6 +1606,11 @@ spec: description: The value to be passed as option `--lc-collate` for initdb (default:`C`) type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string options: description: |- The list of options that must be passed to initdb when creating the cluster. @@ -1789,6 +1816,16 @@ spec: minimum: 1 type: integer type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' pg_basebackup: description: |- Bootstrap the cluster taking a physical backup of another compatible diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 6aff83a8a6..2a0518f67c 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -24,7 +24,7 @@ For more detailed information about this feature, please refer to the CloudNativePG requires both the `postgres` user and database to always exists. Using the local Unix Domain Socket, it needs to connect as `postgres` user to the `postgres` database via `peer` authentication in - order to perform administrative tasks on the cluster. + order to perform administrative tasks on the cluster. **DO NOT DELETE** the `postgres` user or the `postgres` database!!! !!! 
Info @@ -204,36 +204,87 @@ The user that owns the database defaults to the database name instead. The application user is not used internally by the operator, which instead relies on the superuser to reconcile the cluster with the desired status. -### Passing options to `initdb` +### Passing Options to `initdb` -The actual PostgreSQL data directory is created via an invocation of the -`initdb` PostgreSQL command. If you need to add custom options to that command -(i.e., to change the `locale` used for the template databases or to add data -checksums), you can use the following parameters: +The PostgreSQL data directory is initialized using the +[`initdb` PostgreSQL command](https://www.postgresql.org/docs/current/app-initdb.html). + +CloudNativePG enables you to customize the behavior of `initdb` to modify +settings such as default locale configurations and data checksums. + +!!! Warning + CloudNativePG acts only as a direct proxy to `initdb` for locale-related + options, due to the ongoing and significant enhancements in PostgreSQL's locale + support. It is your responsibility to ensure that the correct options are + provided, following the PostgreSQL documentation, and to verify that the + bootstrap process completes successfully. + +To include custom options in the `initdb` command, you can use the following +parameters: + +builtinLocale +: When `builtinLocale` is set to a value, CloudNativePG passes it to the + `--builtin-locale` option in `initdb`. This option controls the builtin locale, as + defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) + from the PostgreSQL documentation (default: empty). Note that this option requires + `localeProvider` to be set to `builtin`. Available from PostgreSQL 17. dataChecksums -: When `dataChecksums` is set to `true`, CNPG invokes the `-k` option in +: When `dataChecksums` is set to `true`, CloudNativePG invokes the `-k` option in `initdb` to enable checksums on data pages and help detect corruption by the I/O system - that would otherwise be silent (default: `false`). encoding -: When `encoding` set to a value, CNPG passes it to the `--encoding` option in `initdb`, - which selects the encoding of the template database (default: `UTF8`). +: When `encoding` set to a value, CloudNativePG passes it to the `--encoding` + option in `initdb`, which selects the encoding of the template database + (default: `UTF8`). + +icuLocale +: When `icuLocale` is set to a value, CloudNativePG passes it to the + `--icu-locale` option in `initdb`. This option controls the ICU locale, as + defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) + from the PostgreSQL documentation (default: empty). + Note that this option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + +icuRules +: When `icuRules` is set to a value, CloudNativePG passes it to the + `--icu-rules` option in `initdb`. This option controls the ICU locale, as + defined in ["Locale + Support"](https://www.postgresql.org/docs/current/locale.html) from the + PostgreSQL documentation (default: empty). Note that this option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + +locale +: When `locale` is set to a value, CloudNativePG passes it to the `--locale` + option in `initdb`. This option controls the locale, as defined in + ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from + the PostgreSQL documentation. By default, the locale parameter is empty. 
In + this case, environment variables such as `LANG` are used to determine the + locale. Be aware that these variables can vary between container images, + potentially leading to inconsistent behavior. localeCollate -: When `localeCollate` is set to a value, CNPG passes it to the `--lc-collate` +: When `localeCollate` is set to a value, CloudNativePG passes it to the `--lc-collate` option in `initdb`. This option controls the collation order (`LC_COLLATE` subcategory), as defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from the PostgreSQL documentation (default: `C`). localeCType -: When `localeCType` is set to a value, CNPG passes it to the `--lc-ctype` option in +: When `localeCType` is set to a value, CloudNativePG passes it to the `--lc-ctype` option in `initdb`. This option controls the collation order (`LC_CTYPE` subcategory), as defined in ["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from the PostgreSQL documentation (default: `C`). +localeProvider +: When `localeProvider` is set to a value, CloudNativePG passes it to the `--locale-provider` +option in `initdb`. This option controls the locale provider, as defined in +["Locale Support"](https://www.postgresql.org/docs/current/locale.html) from the +PostgreSQL documentation (default: empty, which means `libc` for PostgreSQL). +Available from PostgreSQL 15. + walSegmentSize -: When `walSegmentSize` is set to a value, CNPG passes it to the `--wal-segsize` +: When `walSegmentSize` is set to a value, CloudNativePG passes it to the `--wal-segsize` option in `initdb` (default: not set - defined by PostgreSQL as 16 megabytes). !!! Note diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 662b01af57..ea07a0bed8 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -1079,6 +1079,48 @@ enabling checksums on data pages (default: false)

 <p>The value to be passed as option <code>--lc-ctype</code> for <code>initdb</code> (default:<code>C</code>)</p>
 </td>
 </tr>
+<tr><td><code>locale</code><br/>
+<i>string</i>
+</td>
+<td>
+   <p>Sets the default collation order and character classification in the new database.</p>
+</td>
+</tr>
+<tr><td><code>localeProvider</code><br/>
+<i>string</i>
+</td>
+<td>
+   <p>This option sets the locale provider for databases created in the new cluster.
+Available from PostgreSQL 16.</p>
+</td>
+</tr>
+<tr><td><code>icuLocale</code><br/>
+<i>string</i>
+</td>
+<td>
+   <p>Specifies the ICU locale when the ICU provider is used.
+This option requires <code>localeProvider</code> to be set to <code>icu</code>.
+Available from PostgreSQL 15.</p>
+</td>
+</tr>
+<tr><td><code>icuRules</code><br/>
+<i>string</i>
+</td>
+<td>
+   <p>Specifies additional collation rules to customize the behavior of the default collation.
+This option requires <code>localeProvider</code> to be set to <code>icu</code>.
+Available from PostgreSQL 16.</p>
+</td>
+</tr>
+<tr><td><code>builtinLocale</code><br/>
+<i>string</i>
+</td>
+<td>
+   <p>Specifies the locale name when the builtin provider is used.
+This option requires <code>localeProvider</code> to be set to <code>builtin</code>.
+Available from PostgreSQL 17.</p>
+</td>
+</tr>
 <tr><td><code>walSegmentSize</code><br/>
int diff --git a/docs/src/samples/cluster-example-initdb-icu.yaml b/docs/src/samples/cluster-example-initdb-icu.yaml new file mode 100644 index 0000000000..3e9747effe --- /dev/null +++ b/docs/src/samples/cluster-example-initdb-icu.yaml @@ -0,0 +1,19 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example-initdb-icu +spec: + instances: 3 + + bootstrap: + initdb: + encoding: UTF8 + localeCollate: en_US.UTF8 + localeCType: en_US.UTF8 + localeProvider: icu + icuLocale: en-US + # we want to order g and G after A (and before b) + icuRules: '&A < g <<< G' + + storage: + size: 1Gi diff --git a/pkg/specs/jobs.go b/pkg/specs/jobs.go index 3ccdd8c0d0..bbd5cd936c 100644 --- a/pkg/specs/jobs.go +++ b/pkg/specs/jobs.go @@ -142,6 +142,21 @@ func buildInitDBFlags(cluster apiv1.Cluster) (initCommand []string) { if localeCType := config.LocaleCType; localeCType != "" { options = append(options, fmt.Sprintf("--lc-ctype=%s", localeCType)) } + if locale := config.Locale; locale != "" { + options = append(options, fmt.Sprintf("--locale=%s", locale)) + } + if localeProvider := config.LocaleProvider; localeProvider != "" { + options = append(options, fmt.Sprintf("--locale-provider=%s", localeProvider)) + } + if icuLocale := config.IcuLocale; icuLocale != "" { + options = append(options, fmt.Sprintf("--icu-locale=%s", icuLocale)) + } + if icuRules := config.IcuRules; icuRules != "" { + options = append(options, fmt.Sprintf("--icu-rules=%s", icuRules)) + } + if builtinLocale := config.BuiltinLocale; builtinLocale != "" { + options = append(options, fmt.Sprintf("--builtin-locale=%s", builtinLocale)) + } if walSegmentSize := config.WalSegmentSize; walSegmentSize != 0 && utils.IsPowerOfTwo(walSegmentSize) { options = append(options, fmt.Sprintf("--wal-segsize=%v", walSegmentSize)) } diff --git a/pkg/specs/jobs_test.go b/pkg/specs/jobs_test.go index 5d9c7eab32..378f1ae813 100644 --- a/pkg/specs/jobs_test.go +++ b/pkg/specs/jobs_test.go @@ -17,7 +17,9 @@ limitations under the License. 
package specs

 import (
-	v1 "k8s.io/api/batch/v1"
+	"slices"
+
+	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -37,8 +39,8 @@ var _ = Describe("Barman endpoint CA", func() {
 			},
 		}

-		job := v1.Job{
-			Spec: v1.JobSpec{
+		job := batchv1.Job{
+			Spec: batchv1.JobSpec{
 				Template: corev1.PodTemplateSpec{
 					Spec: corev1.PodSpec{
 						Containers: []corev1.Container{{}},
@@ -80,8 +82,8 @@ var _ = Describe("Barman endpoint CA", func() {
 			},
 		}

-		job := v1.Job{
-			Spec: v1.JobSpec{
+		job := batchv1.Job{
+			Spec: batchv1.JobSpec{
 				Template: corev1.PodTemplateSpec{
 					Spec: corev1.PodSpec{
 						Containers: []corev1.Container{
@@ -118,8 +120,8 @@ var _ = Describe("Barman endpoint CA", func() {
 		},
 	}}

-	job := v1.Job{
-		Spec: v1.JobSpec{
+	job := batchv1.Job{
+		Spec: batchv1.JobSpec{
 			Template: corev1.PodTemplateSpec{
 				Spec: corev1.PodSpec{
 					Containers: []corev1.Container{
@@ -165,4 +167,29 @@ var _ = Describe("Job created via InitDB", func() {
 		Expect(job.Spec.Template.Spec.Containers[0].Command).Should(ContainElement(
 			postInitApplicationSQLRefsFolder.toString()))
 	})
+
+	It("contains icu configuration", func() {
+		cluster := apiv1.Cluster{
+			Spec: apiv1.ClusterSpec{
+				Bootstrap: &apiv1.BootstrapConfiguration{
+					InitDB: &apiv1.BootstrapInitDB{
+						Encoding:       "UTF-8",
+						LocaleProvider: "icu",
+						IcuLocale:      "und",
+						IcuRules:       "&A < z <<< Z",
+					},
+				},
+			},
+		}
+		job := CreatePrimaryJobViaInitdb(cluster, 0)
+
+		jobCommand := job.Spec.Template.Spec.Containers[0].Command
+		Expect(jobCommand).Should(ContainElement("--initdb-flags"))
+		initdbFlags := jobCommand[slices.Index(jobCommand, "--initdb-flags")+1]
+		Expect(initdbFlags).Should(ContainSubstring("--encoding=UTF-8"))
+		Expect(initdbFlags).Should(ContainSubstring("--locale-provider=icu"))
+		Expect(initdbFlags).Should(ContainSubstring("--icu-locale=und"))
+		Expect(initdbFlags).ShouldNot(ContainSubstring("--locale="))
+		Expect(initdbFlags).Should(ContainSubstring("'--icu-rules=&A < z <<< Z'"))
+	})
 })

From 0cdb7268c113bcf0e9a30079df9d445e6f9a172b Mon Sep 17 00:00:00 2001
From: Jack Langston <13301098+fullykubed@users.noreply.github.com>
Date: Wed, 4 Dec 2024 07:15:30 -0500
Subject: [PATCH 190/836] feat(pooler): expand configurable options for
 PgBouncer (#6216)

This commit extends the `Pooler` API by supporting additional PgBouncer
configuration parameters. The newly supported parameters are:

- `cancel_wait_timeout`
- `dns_max_ttl`
- `dns_nxdomain_ttl`
- `listen_backlog`
- `max_packet_size`
- `pkt_buf`
- `sbuf_loopcnt`
- `server_tls_ciphers`
- `server_tls_protocols`
- `suspend_timeout`
- `tcp_defer_accept`
- `tcp_socket_buffer`

These additions provide greater flexibility and control over
PgBouncer's behavior, catering to a broader range of use cases and
deployment scenarios.
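As an illustrative sketch (the resource names and parameter values
below are arbitrary examples, not recommendations), these settings can
now be provided declaratively through the `Pooler` spec:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw
spec:
  cluster:
    name: cluster-example
  instances: 1
  type: rw
  pgbouncer:
    poolMode: session
    parameters:
      cancel_wait_timeout: "10"
      server_tls_protocols: tlsv1.3
      tcp_defer_accept: "45"
```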
Closes #5276 Signed-off-by: Jack Langston Signed-off-by: Gabriele Bartolini Co-authored-by: Jack Langston Co-authored-by: Gabriele Bartolini --- api/v1/pooler_webhook.go | 13 +++++++++++++ docs/src/connection_pooling.md | 15 ++++++++++++++- docs/src/samples/pooler-tls.yaml | 2 ++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/api/v1/pooler_webhook.go b/api/v1/pooler_webhook.go index 24241a836a..b86ac2622b 100644 --- a/api/v1/pooler_webhook.go +++ b/api/v1/pooler_webhook.go @@ -38,25 +38,32 @@ var ( AllowedPgbouncerGenericConfigurationParameters = stringset.From([]string{ "application_name_add_host", "autodb_idle_timeout", + "cancel_wait_timeout", "client_idle_timeout", "client_login_timeout", "default_pool_size", "disable_pqexec", + "dns_max_ttl", + "dns_nxdomain_ttl", "idle_transaction_timeout", "ignore_startup_parameters", + "listen_backlog", "log_connections", "log_disconnections", "log_pooler_errors", "log_stats", "max_client_conn", "max_db_connections", + "max_packet_size", "max_prepared_statements", "max_user_connections", "min_pool_size", + "pkt_buf", "query_timeout", "query_wait_timeout", "reserve_pool_size", "reserve_pool_timeout", + "sbuf_loopcnt", "server_check_delay", "server_check_query", "server_connect_timeout", @@ -67,12 +74,18 @@ var ( "server_reset_query", "server_reset_query_always", "server_round_robin", + "server_tls_ciphers", + "server_tls_protocols", "stats_period", + "suspend_timeout", + "tcp_defer_accept", + "tcp_socket_buffer", "tcp_keepalive", "tcp_keepcnt", "tcp_keepidle", "tcp_keepintvl", "tcp_user_timeout", + "track_extra_parameters", "verbose", }) ) diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md index cf6b9bc310..0ac9d50bfe 100644 --- a/docs/src/connection_pooling.md +++ b/docs/src/connection_pooling.md @@ -331,13 +331,17 @@ are the ones directly set by PgBouncer. - [`application_name_add_host`](https://www.pgbouncer.org/config.html#application_name_add_host) - [`autodb_idle_timeout`](https://www.pgbouncer.org/config.html#autodb_idle_timeout) +- [`cancel_wait_timeout`](https://www.pgbouncer.org/config.html#cancel_wait_timeout) - [`client_idle_timeout`](https://www.pgbouncer.org/config.html#client_idle_timeout) - [`client_login_timeout`](https://www.pgbouncer.org/config.html#client_login_timeout) - [`default_pool_size`](https://www.pgbouncer.org/config.html#default_pool_size) - [`disable_pqexec`](https://www.pgbouncer.org/config.html#disable_pqexec) +- [`dns_max_ttl`](https://www.pgbouncer.org/config.html#dns_max_ttl) +- [`dns_nxdomain_ttl`](https://www.pgbouncer.org/config.html#dns_nxdomain_ttl) - [`idle_transaction_timeout`](https://www.pgbouncer.org/config.html#idle_transaction_timeout) - [`ignore_startup_parameters`](https://www.pgbouncer.org/config.html#ignore_startup_parameters): - to be appended to `extra_float_digits,options` - required by CNP + to be appended to `extra_float_digits,options` - required by CloudNativePG +- [`listen_backlog`](https://www.pgbouncer.org/config.html#listen_backlog) - [`log_connections`](https://www.pgbouncer.org/config.html#log_connections) - [`log_disconnections`](https://www.pgbouncer.org/config.html#log_disconnections) - [`log_pooler_errors`](https://www.pgbouncer.org/config.html#log_pooler_errors) @@ -346,13 +350,16 @@ are the ones directly set by PgBouncer. 
  export as described in the ["Monitoring"](#monitoring) section below
- [`max_client_conn`](https://www.pgbouncer.org/config.html#max_client_conn)
- [`max_db_connections`](https://www.pgbouncer.org/config.html#max_db_connections)
+- [`max_packet_size`](https://www.pgbouncer.org/config.html#max_packet_size)
- [`max_prepared_statements`](https://www.pgbouncer.org/config.html#max_prepared_statements)
- [`max_user_connections`](https://www.pgbouncer.org/config.html#max_user_connections)
- [`min_pool_size`](https://www.pgbouncer.org/config.html#min_pool_size)
+- [`pkt_buf`](https://www.pgbouncer.org/config.html#pkt_buf)
- [`query_timeout`](https://www.pgbouncer.org/config.html#query_timeout)
- [`query_wait_timeout`](https://www.pgbouncer.org/config.html#query_wait_timeout)
- [`reserve_pool_size`](https://www.pgbouncer.org/config.html#reserve_pool_size)
- [`reserve_pool_timeout`](https://www.pgbouncer.org/config.html#reserve_pool_timeout)
+- [`sbuf_loopcnt`](https://www.pgbouncer.org/config.html#sbuf_loopcnt)
- [`server_check_delay`](https://www.pgbouncer.org/config.html#server_check_delay)
- [`server_check_query`](https://www.pgbouncer.org/config.html#server_check_query)
- [`server_connect_timeout`](https://www.pgbouncer.org/config.html#server_connect_timeout)
@@ -363,12 +370,18 @@ are the ones directly set by PgBouncer.
- [`server_reset_query`](https://www.pgbouncer.org/config.html#server_reset_query)
- [`server_reset_query_always`](https://www.pgbouncer.org/config.html#server_reset_query_always)
- [`server_round_robin`](https://www.pgbouncer.org/config.html#server_round_robin)
+- [`server_tls_ciphers`](https://www.pgbouncer.org/config.html#server_tls_ciphers)
+- [`server_tls_protocols`](https://www.pgbouncer.org/config.html#server_tls_protocols)
- [`stats_period`](https://www.pgbouncer.org/config.html#stats_period)
+- [`suspend_timeout`](https://www.pgbouncer.org/config.html#suspend_timeout)
+- [`tcp_defer_accept`](https://www.pgbouncer.org/config.html#tcp_defer_accept)
- [`tcp_keepalive`](https://www.pgbouncer.org/config.html#tcp_keepalive)
- [`tcp_keepcnt`](https://www.pgbouncer.org/config.html#tcp_keepcnt)
- [`tcp_keepidle`](https://www.pgbouncer.org/config.html#tcp_keepidle)
- [`tcp_keepintvl`](https://www.pgbouncer.org/config.html#tcp_keepintvl)
- [`tcp_user_timeout`](https://www.pgbouncer.org/config.html#tcp_user_timeout)
+- [`tcp_socket_buffer`](https://www.pgbouncer.org/config.html#tcp_socket_buffer)
+- [`track_extra_parameters`](https://www.pgbouncer.org/config.html#track_extra_parameters)
- [`verbose`](https://www.pgbouncer.org/config.html#verbose)

Customizations of the PgBouncer configuration are written declaratively in the
diff --git a/docs/src/samples/pooler-tls.yaml b/docs/src/samples/pooler-tls.yaml
index 9b58b2d364..20bffa1115 100644
--- a/docs/src/samples/pooler-tls.yaml
+++ b/docs/src/samples/pooler-tls.yaml
@@ -10,3 +10,5 @@ spec:
     type: rw
   pgbouncer:
     poolMode: session
+    parameters:
+      server_tls_protocols: tlsv1.3

From 0f153716c403805e2530c0e43585a526c59ce68b Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Wed, 4 Dec 2024 13:27:53 +0100
Subject: [PATCH 191/836] fix

Signed-off-by: Gabriele Bartolini
---
 internal/cmd/plugin/backup/cmd.go       | 4 ++--
 internal/cmd/plugin/destroy/cmd.go      | 4 ++--
 internal/cmd/plugin/fence/cmd.go        | 8 ++++----
 internal/cmd/plugin/hibernate/cmd.go    | 12 ++++++------
 internal/cmd/plugin/maintenance/cmd.go  | 4 ++--
 internal/cmd/plugin/pgbench/cmd.go      | 2 +-
 internal/cmd/plugin/pgbench/cmd_test.go | 2 +-
 internal/cmd/plugin/promote/cmd.go
| 4 ++-- internal/cmd/plugin/psql/cmd.go | 2 +- internal/cmd/plugin/reload/cmd.go | 2 +- internal/cmd/plugin/status/cmd.go | 2 +- 11 files changed, 23 insertions(+), 23 deletions(-) diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go index 17aceabe31..af8bbf8b3d 100644 --- a/internal/cmd/plugin/backup/cmd.go +++ b/internal/cmd/plugin/backup/cmd.go @@ -72,7 +72,7 @@ func NewCmd() *cobra.Command { } backupSubcommand := &cobra.Command{ - Use: "backup [cluster]", + Use: "backup CLUSTER", Short: "Request an on-demand backup for a PostgreSQL Cluster", GroupID: plugin.GroupIDDatabase, Args: plugin.RequiresArguments(1), @@ -167,7 +167,7 @@ func NewCmd() *cobra.Command { "backup-name", "", "The name of the Backup resource that will be created, "+ - "defaults to \"[cluster]-[current_timestamp]\"", + "defaults to \"CLUSTER-CURRENT_TIMESTAMP\"", ) backupSubcommand.Flags().StringVarP( &backupTarget, diff --git a/internal/cmd/plugin/destroy/cmd.go b/internal/cmd/plugin/destroy/cmd.go index c3f4e7c944..a1bf87b665 100644 --- a/internal/cmd/plugin/destroy/cmd.go +++ b/internal/cmd/plugin/destroy/cmd.go @@ -29,8 +29,8 @@ import ( // NewCmd create the new "destroy" subcommand func NewCmd() *cobra.Command { destroyCmd := &cobra.Command{ - Use: "destroy [cluster] [node]", - Short: "Destroy the instance named [cluster]-[node] or [node] with the associated PVC", + Use: "destroy CLUSTER INSTANCE", + Short: "Destroy the instance named CLUSTER-INSTANCE with the associated PVC", GroupID: plugin.GroupIDCluster, Args: plugin.RequiresArguments(2), RunE: func(cmd *cobra.Command, args []string) error { diff --git a/internal/cmd/plugin/fence/cmd.go b/internal/cmd/plugin/fence/cmd.go index 8b3e719936..ab7bd6b8f7 100644 --- a/internal/cmd/plugin/fence/cmd.go +++ b/internal/cmd/plugin/fence/cmd.go @@ -27,8 +27,8 @@ import ( var ( fenceOnCmd = &cobra.Command{ - Use: "on [cluster] [node]", - Short: `Fence an instance named [cluster]-[node] or [node]`, + Use: "on CLUSTER INSTANCE", + Short: `Fence an instance named CLUSTER-INSTANCE`, Args: plugin.RequiresArguments(2), RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] @@ -42,8 +42,8 @@ var ( } fenceOffCmd = &cobra.Command{ - Use: "off [cluster] [node]", - Short: `Remove fence for an instance named [cluster]-[node] or [node]`, + Use: "off CLUSTER INSTANCE", + Short: `Remove fence for an instance named CLUSTER-INSTANCE`, Args: plugin.RequiresArguments(2), RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] diff --git a/internal/cmd/plugin/hibernate/cmd.go b/internal/cmd/plugin/hibernate/cmd.go index 44f6c32a4c..134c5412ff 100644 --- a/internal/cmd/plugin/hibernate/cmd.go +++ b/internal/cmd/plugin/hibernate/cmd.go @@ -26,8 +26,8 @@ import ( var ( hibernateOnCmd = &cobra.Command{ - Use: "on [cluster]", - Short: "Hibernates the cluster named [cluster]", + Use: "on CLUSTER", + Short: "Hibernates the cluster named CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp @@ -49,8 +49,8 @@ var ( } hibernateOffCmd = &cobra.Command{ - Use: "off [cluster]", - Short: "Bring the cluster named [cluster] back from hibernation", + Use: "off CLUSTER", + Short: "Bring the cluster named CLUSTER back from hibernation", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, 
toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp @@ -63,8 +63,8 @@ var ( } hibernateStatusCmd = &cobra.Command{ - Use: "status [cluster]", - Short: "Prints the hibernation status for the [cluster]", + Use: "status CLUSTER", + Short: "Prints the hibernation status for the CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp diff --git a/internal/cmd/plugin/maintenance/cmd.go b/internal/cmd/plugin/maintenance/cmd.go index 39064884d6..39a9d735da 100644 --- a/internal/cmd/plugin/maintenance/cmd.go +++ b/internal/cmd/plugin/maintenance/cmd.go @@ -37,7 +37,7 @@ func NewCmd() *cobra.Command { } maintenanceCmd.AddCommand(&cobra.Command{ - Use: "set [cluster]", + Use: "set CLUSTER", Short: "Sets maintenance mode", Long: "This command will set maintenance mode on a single cluster or on all clusters " + "in the current namespace if not specified differently through flags", @@ -58,7 +58,7 @@ func NewCmd() *cobra.Command { }) maintenanceCmd.AddCommand(&cobra.Command{ - Use: "unset [cluster]", + Use: "unset CLUSTER", Short: "Removes maintenance mode", Long: "This command will unset maintenance mode on a single cluster or on all clusters " + "in the current namespace if not specified differently through flags", diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go index 7f79ed4c4a..394b238546 100644 --- a/internal/cmd/plugin/pgbench/cmd.go +++ b/internal/cmd/plugin/pgbench/cmd.go @@ -29,7 +29,7 @@ func NewCmd() *cobra.Command { run := &pgBenchRun{} pgBenchCmd := &cobra.Command{ - Use: "pgbench [cluster] [-- pgBenchCommandArgs...]", + Use: "pgbench CLUSTER [-- PGBENCH_COMMAND_ARGS...]", Short: "Creates a pgbench job", Args: validateCommandArgs, Long: "Creates a pgbench job to run against the specified Postgres Cluster.", diff --git a/internal/cmd/plugin/pgbench/cmd_test.go b/internal/cmd/plugin/pgbench/cmd_test.go index 68ec5ebf6f..b1e539d24f 100644 --- a/internal/cmd/plugin/pgbench/cmd_test.go +++ b/internal/cmd/plugin/pgbench/cmd_test.go @@ -27,7 +27,7 @@ var _ = Describe("NewCmd", func() { It("should create a cobra.Command with correct defaults", func() { cmd := NewCmd() - Expect(cmd.Use).To(Equal("pgbench [cluster] [-- pgBenchCommandArgs...]")) + Expect(cmd.Use).To(Equal("pgbench CLUSTER [-- PGBENCH_COMMAND_ARGS...]")) Expect(cmd.Short).To(Equal("Creates a pgbench job")) Expect(cmd.Long).To(Equal("Creates a pgbench job to run against the specified Postgres Cluster.")) Expect(cmd.Example).To(Equal(jobExample)) diff --git a/internal/cmd/plugin/promote/cmd.go b/internal/cmd/plugin/promote/cmd.go index f4f3c95d88..401e75d949 100644 --- a/internal/cmd/plugin/promote/cmd.go +++ b/internal/cmd/plugin/promote/cmd.go @@ -29,8 +29,8 @@ import ( // NewCmd create the new "promote" subcommand func NewCmd() *cobra.Command { promoteCmd := &cobra.Command{ - Use: "promote [cluster] [node]", - Short: "Promote the pod named [cluster]-[node] or [node] to primary", + Use: "promote CLUSTER INSTANCE", + Short: "Promote the pod named CLUSTER-INSTANCE to primary", GroupID: plugin.GroupIDCluster, Args: plugin.RequiresArguments(2), RunE: func(_ *cobra.Command, args []string) error { diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go index 
8cae04a0ea..6a2bfb6cf1 100644 --- a/internal/cmd/plugin/psql/cmd.go +++ b/internal/cmd/plugin/psql/cmd.go @@ -31,7 +31,7 @@ func NewCmd() *cobra.Command { var passStdin bool cmd := &cobra.Command{ - Use: "psql [cluster] [-- psqlArgs...]", + Use: "psql CLUSTER [-- PSQL_ARGS...]", Short: "Start a psql session targeting a CloudNativePG cluster", Args: validatePsqlArgs, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { diff --git a/internal/cmd/plugin/reload/cmd.go b/internal/cmd/plugin/reload/cmd.go index f31aa3ddef..833e1eae2b 100644 --- a/internal/cmd/plugin/reload/cmd.go +++ b/internal/cmd/plugin/reload/cmd.go @@ -27,7 +27,7 @@ import ( // NewCmd creates the new "reset" command func NewCmd() *cobra.Command { restartCmd := &cobra.Command{ - Use: "reload [clusterName]", + Use: "reload CLUSTER", Short: `Reload a cluster`, Long: `Triggers a reconciliation loop for all the cluster's instances, rolling out new configurations if present.`, GroupID: plugin.GroupIDCluster, diff --git a/internal/cmd/plugin/status/cmd.go b/internal/cmd/plugin/status/cmd.go index 4ddbc5db02..385f50e66f 100644 --- a/internal/cmd/plugin/status/cmd.go +++ b/internal/cmd/plugin/status/cmd.go @@ -28,7 +28,7 @@ import ( // NewCmd create the new "status" subcommand func NewCmd() *cobra.Command { statusCmd := &cobra.Command{ - Use: "status [cluster]", + Use: "status CLUSTER", Short: "Get the status of a PostgreSQL cluster", Args: plugin.RequiresArguments(1), GroupID: plugin.GroupIDDatabase, From 2e634fbdc4326a7fae1a8c64a46ac42d333cb249 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Wed, 4 Dec 2024 13:30:12 +0100 Subject: [PATCH 192/836] docs(plugin): standardize the `CLUSTER` argument in plugin commands (#6253) This update ensures consistent usage of the `CLUSTER` argument across all plugin commands and their associated documentation. Closes #5848 Signed-off-by: Pierrick Signed-off-by: Zekiye Aydemir Signed-off-by: Gabriele Bartolini Co-authored-by: Zekiye Aydemir Co-authored-by: Gabriele Bartolini --- docs/src/kubectl-plugin.md | 102 +++++++++--------- .../plugin/logical/publication/create/cmd.go | 2 +- .../plugin/logical/publication/drop/cmd.go | 2 +- .../plugin/logical/subscription/create/cmd.go | 2 +- .../plugin/logical/subscription/drop/cmd.go | 2 +- .../logical/subscription/syncsequences/cmd.go | 2 +- internal/cmd/plugin/logs/cluster.go | 2 +- internal/cmd/plugin/pgbench/cmd.go | 6 +- internal/cmd/plugin/pgbench/pgbench.go | 8 +- internal/cmd/plugin/promote/cmd.go | 2 +- internal/cmd/plugin/report/cluster.go | 2 +- internal/cmd/plugin/restart/cmd.go | 2 +- internal/cmd/plugin/snapshot/cmd.go | 2 +- 13 files changed, 68 insertions(+), 68 deletions(-) mode change 100755 => 100644 docs/src/kubectl-plugin.md diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md old mode 100755 new mode 100644 index e066bbc082..2b430dd9e0 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -169,7 +169,7 @@ sudo mv kubectl_complete-cnpg /usr/local/bin Once the plugin is installed and deployed, you can start using it like this: ```sh -kubectl cnpg +kubectl cnpg COMMAND [ARGS...] ``` !!! Note @@ -346,16 +346,16 @@ The command also supports output in `yaml` and `json` format. 
### Promote The meaning of this command is to `promote` a pod in the cluster to primary, so you -can start with maintenance work or test a switch-over situation in your cluster +can start with maintenance work or test a switch-over situation in your cluster: ```sh -kubectl cnpg promote cluster-example cluster-example-2 +kubectl cnpg promote CLUSTER CLUSTER-INSTANCE ``` -Or you can use the instance node number to promote +Or you can use the instance node number to promote: ```sh -kubectl cnpg promote cluster-example 2 +kubectl cnpg promote CLUSTER INSTANCE ``` ### Certificates @@ -364,13 +364,13 @@ Clusters created using the CloudNativePG operator work with a CA to sign a TLS authentication certificate. To get a certificate, you need to provide a name for the secret to store -the credentials, the cluster name, and a user for this certificate +the credentials, the cluster name, and a user for this certificate: ```sh -kubectl cnpg certificate cluster-cert --cnpg-cluster cluster-example --cnpg-user appuser +kubectl cnpg certificate cluster-cert --cnpg-cluster CLUSTER --cnpg-user USER ``` -After the secret it's created, you can get it using `kubectl` +After the secret it's created, you can get it using `kubectl`: ```sh kubectl get secret cluster-cert @@ -388,7 +388,7 @@ The `kubectl cnpg restart` command can be used in two cases: - requesting the operator to orchestrate a rollout restart for a certain cluster. This is useful to apply - configuration changes to cluster dependent objects, such as ConfigMaps + configuration changes to cluster dependent objects, such as `ConfigMaps` containing custom monitoring queries. - request a single instance restart, either in-place if the instance is @@ -397,10 +397,10 @@ The `kubectl cnpg restart` command can be used in two cases: ```sh # this command will restart a whole cluster in a rollout fashion -kubectl cnpg restart [clusterName] +kubectl cnpg restart CLUSTER # this command will restart a single instance, according to the policy above -kubectl cnpg restart [clusterName] [pod] +kubectl cnpg restart CLUSTER INSTANCE ``` If the in-place restart is requested but the change cannot be applied without @@ -420,7 +420,7 @@ to cluster dependent objects, such as ConfigMaps containing custom monitoring qu The following command will reload all configurations for a given cluster: ```sh -kubectl cnpg reload [cluster_name] +kubectl cnpg reload CLUSTER ``` ### Maintenance @@ -503,7 +503,7 @@ default time-stamped filename is created for the zip file. E.g. the default installation namespace is cnpg-system ```sh -kubectl cnpg report operator -n +kubectl cnpg report operator -n cnpg-system ``` results in @@ -515,7 +515,7 @@ Successfully written report to "report_operator_.zip" (format: "yaml" With the `-f` flag set: ```sh -kubectl cnpg report operator -n -f reportRedacted.zip +kubectl cnpg report operator -n cnpg-system -f reportRedacted.zip ``` Unzipping the file will produce a time-stamped top-level folder to keep the @@ -592,7 +592,7 @@ metadata: With the `-S` (`--stopRedaction`) option activated, secrets are shown: ```sh -kubectl cnpg report operator -n -f reportNonRedacted.zip -S +kubectl cnpg report operator -n cnpg-system -f reportNonRedacted.zip -S ``` You'll get a reminder that you're about to view confidential information: @@ -641,7 +641,7 @@ so the `-S` is disabled. 
Usage: ```sh -kubectl cnpg report cluster [flags] +kubectl cnpg report cluster CLUSTER [flags] ``` Note that, unlike the `operator` sub-command, for the `cluster` sub-command you @@ -649,7 +649,7 @@ need to provide the cluster name, and very likely the namespace, unless the clus is in the default one. ```sh -kubectl cnpg report cluster example -f report.zip -n example_namespace +kubectl cnpg report cluster CLUSTER -f report.zip [-n NAMESPACE] ``` and then: @@ -671,7 +671,7 @@ Archive: report.zip Remember that you can use the `--logs` flag to add the pod and job logs to the ZIP. ```sh -kubectl cnpg report cluster example -n example_namespace --logs +kubectl cnpg report cluster CLUSTER [-n NAMESPACE] --logs ``` will result in: @@ -751,20 +751,20 @@ which takes `-f` to mean the logs should be followed. Usage: ```sh -kubectl cnpg logs cluster [flags] +kubectl cnpg logs cluster CLUSTER [flags] ``` Using the `-f` option to follow: ```sh -kubectl cnpg report cluster cluster-example -f +kubectl cnpg report cluster CLUSTER -f ``` Using `--tail` option to display 3 lines from each pod and the `-f` option to follow: ```sh -kubectl cnpg report cluster cluster-example -f --tail 3 +kubectl cnpg report cluster CLUSTER -f --tail 3 ``` ```output @@ -777,7 +777,7 @@ kubectl cnpg report cluster cluster-example -f --tail 3 With the `-o` option omitted, and with `--output` specified: ```console -$ kubectl cnpg logs cluster cluster-example --output my-cluster.log +$ kubectl cnpg logs cluster CLUSTER --output my-cluster.log Successfully written logs to "my-cluster.log" ``` @@ -869,7 +869,7 @@ detached PVCs. Usage: ```sh -kubectl cnpg destroy [CLUSTER_NAME] [INSTANCE_ID] +kubectl cnpg destroy CLUSTER INSTANCE ``` The following example removes the `cluster-example-2` pod and the associated @@ -895,7 +895,7 @@ instance. You can hibernate a cluster with: ```sh -kubectl cnpg hibernate on +kubectl cnpg hibernate on CLUSTER ``` This will: @@ -918,13 +918,13 @@ In case of error the operator will not be able to revert the procedure. You can still force the operation with: ```sh -kubectl cnpg hibernate on cluster-example --force +kubectl cnpg hibernate on CLUSTER --force ``` A hibernated cluster can be resumed with: ```sh -kubectl cnpg hibernate off +kubectl cnpg hibernate off CLUSTER ``` Once the cluster has been hibernated, it's possible to show the last @@ -932,7 +932,7 @@ configuration and the status that PostgreSQL had after it was shut down. That can be done with: ```sh -kubectl cnpg hibernate status +kubectl cnpg hibernate status CLUSTER ``` ### Benchmarking the database with pgbench @@ -941,7 +941,7 @@ Pgbench can be run against an existing PostgreSQL cluster with following command: ```sh -kubectl cnpg pgbench -- --time 30 --client 1 --jobs 1 +kubectl cnpg pgbench CLUSTER -- --time 30 --client 1 --jobs 1 ``` Refer to the [Benchmarking pgbench section](benchmarking.md#pgbench) for more @@ -949,10 +949,10 @@ details. ### Benchmarking the storage with fio -fio can be run on an existing storage class with following command: +`fio` can be run on an existing storage class with following command: ```sh -kubectl cnpg fio -n +kubectl cnpg fio FIO_JOB_NAME [-n NAMESPACE] ``` Refer to the [Benchmarking fio section](benchmarking.md#fio) for more details. @@ -965,13 +965,13 @@ an existing Postgres cluster by creating a new `Backup` resource. 
The following example requests an on-demand backup for a given cluster: ```sh -kubectl cnpg backup [cluster_name] +kubectl cnpg backup CLUSTER ``` or, if using volume snapshots: ```sh -kubectl cnpg backup [cluster_name] -m volumeSnapshot +kubectl cnpg backup CLUSTER -m volumeSnapshot ``` The created backup will be named after the request time: @@ -995,7 +995,7 @@ the configuration settings. ### Launching psql -The `kubectl cnpg psql` command starts a new PostgreSQL interactive front-end +The `kubectl cnpg psql CLUSTER` command starts a new PostgreSQL interactive front-end process (psql) connected to an existing Postgres cluster, as if you were running it from the actual pod. This means that you will be using the `postgres` user. @@ -1136,20 +1136,20 @@ command. The basic structure of this command is as follows: ```sh kubectl cnpg publication create \ - --publication \ - [--external-cluster ] - [options] + --publication PUBLICATION_NAME \ + [--external-cluster EXTERNAL_CLUSTER] + LOCAL_CLUSTER [options] ``` There are two primary use cases: - With `--external-cluster`: Use this option to create a publication on an external cluster (i.e. defined in the `externalClusters` stanza). The commands - will be issued from the ``, but the publication will be for the - data in ``. + will be issued from the `LOCAL_CLUSTER`, but the publication will be for the + data in `EXTERNAL_CLUSTER`. - Without `--external-cluster`: Use this option to create a publication in the - `` PostgreSQL `Cluster` (by default, the `app` database). + `LOCAL_CLUSTER` PostgreSQL `Cluster` (by default, the `app` database). !!! Warning When connecting to an external cluster, ensure that the specified user has @@ -1215,9 +1215,9 @@ following command structure: ```sh kubectl cnpg publication drop \ - --publication \ - [--external-cluster ] - [options] + --publication PUBLICATION_NAME \ + [--external-cluster EXTERNAL_CLUSTER] + LOCAL_CLUSTER [options] ``` To access further details and precise instructions, use the following command: @@ -1253,15 +1253,15 @@ command. The basic structure of this command is as follows: ```sh kubectl cnpg subscription create \ - --subscription \ - --publication \ - --external-cluster \ - [options] + --subscription SUBSCRIPTION_NAME \ + --publication PUBLICATION_NAME \ + --external-cluster EXTERNAL_CLUSTER \ + LOCAL_CLUSTER [options] ``` This command configures a subscription directed towards the specified publication in the designated external cluster, as defined in the -`externalClusters` stanza of the ``. +`externalClusters` stanza of the `LOCAL_CLUSTER`. 
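For instance, a hypothetical invocation (the cluster, publication, and
subscription names below are placeholders) could look like:

```sh
kubectl cnpg subscription create \
  --subscription migration \
  --publication migration \
  --external-cluster cluster-pub \
  cluster-dest
```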
For additional information and detailed instructions, type the following command: @@ -1303,8 +1303,8 @@ You can drop a `SUBSCRIPTION` with the following command structure: ```sh kubectl cnpg subcription drop \ - --subscription \ - [options] + --subscription SUBSCRIPTION_NAME \ + LOCAL_CLUSTER [options] ``` To access further details and precise instructions, use the following command: @@ -1332,8 +1332,8 @@ You can use the command as shown below: ```sh kubectl cnpg subscription sync-sequences \ - --subscription \ - + --subscription SUBSCRIPTION_NAME \ + LOCAL_CLUSTER ``` For comprehensive details and specific instructions, utilize the following diff --git a/internal/cmd/plugin/logical/publication/create/cmd.go b/internal/cmd/plugin/logical/publication/create/cmd.go index 966500911b..3e7847e682 100644 --- a/internal/cmd/plugin/logical/publication/create/cmd.go +++ b/internal/cmd/plugin/logical/publication/create/cmd.go @@ -38,7 +38,7 @@ func NewCmd() *cobra.Command { var dryRun bool publicationCreateCmd := &cobra.Command{ - Use: "create cluster_name", + Use: "create CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp diff --git a/internal/cmd/plugin/logical/publication/drop/cmd.go b/internal/cmd/plugin/logical/publication/drop/cmd.go index 6d27c7a955..b7d1166c5c 100644 --- a/internal/cmd/plugin/logical/publication/drop/cmd.go +++ b/internal/cmd/plugin/logical/publication/drop/cmd.go @@ -35,7 +35,7 @@ func NewCmd() *cobra.Command { var dryRun bool publicationDropCmd := &cobra.Command{ - Use: "drop cluster_name", + Use: "drop CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp diff --git a/internal/cmd/plugin/logical/subscription/create/cmd.go b/internal/cmd/plugin/logical/subscription/create/cmd.go index 9ca7508f9e..9c234d8ddc 100644 --- a/internal/cmd/plugin/logical/subscription/create/cmd.go +++ b/internal/cmd/plugin/logical/subscription/create/cmd.go @@ -37,7 +37,7 @@ func NewCmd() *cobra.Command { var dryRun bool subscriptionCreateCmd := &cobra.Command{ - Use: "create cluster_name", + Use: "create CLUSTER", Short: "create a logical replication subscription", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { diff --git a/internal/cmd/plugin/logical/subscription/drop/cmd.go b/internal/cmd/plugin/logical/subscription/drop/cmd.go index bba02c68bc..1ec557ad5f 100644 --- a/internal/cmd/plugin/logical/subscription/drop/cmd.go +++ b/internal/cmd/plugin/logical/subscription/drop/cmd.go @@ -34,7 +34,7 @@ func NewCmd() *cobra.Command { var dryRun bool subscriptionDropCmd := &cobra.Command{ - Use: "drop cluster_name", + Use: "drop CLUSTER", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go index fbb6230794..c88f62d0ee 100644 --- 
a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go @@ -36,7 +36,7 @@ func NewCmd() *cobra.Command { var offset int syncSequencesCmd := &cobra.Command{ - Use: "sync-sequences cluster_name", + Use: "sync-sequences CLUSTER", Short: "synchronize the sequences from the source database", Args: plugin.RequiresArguments(1), ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { diff --git a/internal/cmd/plugin/logs/cluster.go b/internal/cmd/plugin/logs/cluster.go index 0249ab9357..3ba9e7d4b2 100644 --- a/internal/cmd/plugin/logs/cluster.go +++ b/internal/cmd/plugin/logs/cluster.go @@ -26,7 +26,7 @@ func clusterCmd() *cobra.Command { cl := clusterLogs{} cmd := &cobra.Command{ - Use: "cluster ", + Use: "cluster CLUSTER", Short: "Logs for cluster's pods", Long: "Collects the logs for all pods in a cluster into a single stream or outputFile", Args: plugin.RequiresArguments(1), diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go index 394b238546..11260d37d1 100644 --- a/internal/cmd/plugin/pgbench/cmd.go +++ b/internal/cmd/plugin/pgbench/cmd.go @@ -47,14 +47,14 @@ func NewCmd() *cobra.Command { &run.jobName, "job-name", "", - "Name of the job, defaulting to: -pgbench-xxxx", + "Name of the job, defaulting to: CLUSTER-pgbench-xxxx", ) pgBenchCmd.Flags().StringVar( &run.jobName, "pgbench-job-name", "", - "Name of the job, defaulting to: -pgbench-xxxx", + "Name of the job, defaulting to: CLUSTER-pgbench-xxxx", ) pgBenchCmd.Flags().StringVar( @@ -88,7 +88,7 @@ func validateCommandArgs(cmd *cobra.Command, args []string) error { } if cmd.ArgsLenAtDash() > 1 { - return fmt.Errorf("pgBenchCommands should be passed after the -- delimiter") + return fmt.Errorf("PGBENCH_COMMAND_ARGS should be passed after the -- delimiter") } return nil diff --git a/internal/cmd/plugin/pgbench/pgbench.go b/internal/cmd/plugin/pgbench/pgbench.go index a1b92eec4f..0887374219 100644 --- a/internal/cmd/plugin/pgbench/pgbench.go +++ b/internal/cmd/plugin/pgbench/pgbench.go @@ -47,17 +47,17 @@ const ( ) var jobExample = ` - # Dry-run command with default values and clusterName "cluster-example" + # Dry-run command with default values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example --dry-run - # Create a pgbench job with default values and clusterName "cluster-example" + # Create a pgbench job with default values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example - # Dry-run command with given values and clusterName "cluster-example" + # Dry-run command with given values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name --dry-run -- \ --time 30 --client 1 --jobs 1 - # Create a job with given values and clusterName "cluster-example" + # Create a job with given values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name -- \ --time 30 --client 1 --jobs 1` diff --git a/internal/cmd/plugin/promote/cmd.go b/internal/cmd/plugin/promote/cmd.go index 401e75d949..111c4291b0 100644 --- a/internal/cmd/plugin/promote/cmd.go +++ b/internal/cmd/plugin/promote/cmd.go @@ -30,7 +30,7 @@ import ( func NewCmd() *cobra.Command { promoteCmd := &cobra.Command{ Use: "promote CLUSTER INSTANCE", - Short: "Promote the pod named CLUSTER-INSTANCE to primary", + Short: "Promote the instance named CLUSTER-INSTANCE to primary", GroupID: 
plugin.GroupIDCluster, Args: plugin.RequiresArguments(2), RunE: func(_ *cobra.Command, args []string) error { diff --git a/internal/cmd/plugin/report/cluster.go b/internal/cmd/plugin/report/cluster.go index 5e5920db37..fa76af6461 100644 --- a/internal/cmd/plugin/report/cluster.go +++ b/internal/cmd/plugin/report/cluster.go @@ -32,7 +32,7 @@ func clusterCmd() *cobra.Command { const filePlaceholder = "report_cluster__.zip" cmd := &cobra.Command{ - Use: "cluster ", + Use: "cluster CLUSTER", Short: "Report cluster resources, pods, events, logs (opt-in)", Long: "Collects combined information on the cluster in a Zip file", Args: plugin.RequiresArguments(1), diff --git a/internal/cmd/plugin/restart/cmd.go b/internal/cmd/plugin/restart/cmd.go index c27b66a989..28ef5e31df 100644 --- a/internal/cmd/plugin/restart/cmd.go +++ b/internal/cmd/plugin/restart/cmd.go @@ -28,7 +28,7 @@ import ( // NewCmd creates the new "reset" command func NewCmd() *cobra.Command { restartCmd := &cobra.Command{ - Use: "restart clusterName [instance]", + Use: "restart CLUSTER [INSTANCE]", Short: `Restart a cluster or a single instance in a cluster`, Long: `If only the cluster name is specified, the whole cluster will be restarted, rolling out new configurations if present. diff --git a/internal/cmd/plugin/snapshot/cmd.go b/internal/cmd/plugin/snapshot/cmd.go index ed20dc669f..17039dcc7d 100644 --- a/internal/cmd/plugin/snapshot/cmd.go +++ b/internal/cmd/plugin/snapshot/cmd.go @@ -28,7 +28,7 @@ import ( // NewCmd implements the `snapshot` subcommand func NewCmd() *cobra.Command { cmd := &cobra.Command{ - Use: "snapshot ", + Use: "snapshot CLUSTER", Short: "DEPRECATED (use `backup -m volumeSnapshot` instead)", Long: "Replaced by `kubectl cnpg backup -m volumeSnapshot`", GroupID: plugin.GroupIDDatabase, From 67739cc70331a8572c9af8691ab285e86919c717 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 4 Dec 2024 14:33:35 +0100 Subject: [PATCH 193/836] feat(plugin): report CNPG-I plugins in `status` command (#6232) Closes #6230 Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- internal/cmd/plugin/status/status.go | 63 +++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 8b05a67d5c..c55035a8db 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -27,8 +27,9 @@ import ( "time" "github.com/cheynewallace/tabby" + "github.com/cloudnative-pg/cnpg-i/pkg/identity" "github.com/cloudnative-pg/machinery/pkg/stringset" - types "github.com/cloudnative-pg/machinery/pkg/types" + "github.com/cloudnative-pg/machinery/pkg/types" "github.com/logrusorgru/aurora/v4" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -143,6 +144,7 @@ func Status( status.printPodDisruptionBudgetStatus() } status.printInstancesStatus() + status.printPluginStatus(verbosity) if len(errs) > 0 { fmt.Println() @@ -751,6 +753,7 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { continue } status.Print() + fmt.Println() } func (fullStatus *PostgresqlStatus) printCertificatesStatus() { @@ -1147,6 +1150,64 @@ func (fullStatus *PostgresqlStatus) printTablespacesStatus() { fmt.Println() } +func (fullStatus *PostgresqlStatus) printPluginStatus(verbosity int) { + const header = "Plugins status" + + parseCapabilities := func(capabilities []string) string { + if len(capabilities) == 0 { + return "N/A" + } + + result := make([]string, 
len(capabilities)) + for idx, capability := range capabilities { + switch capability { + case identity.PluginCapability_Service_TYPE_BACKUP_SERVICE.String(): + result[idx] = "Backup Service" + case identity.PluginCapability_Service_TYPE_RESTORE_JOB.String(): + result[idx] = "Restore Job" + case identity.PluginCapability_Service_TYPE_RECONCILER_HOOKS.String(): + result[idx] = "Reconciler Hooks" + case identity.PluginCapability_Service_TYPE_WAL_SERVICE.String(): + result[idx] = "WAL Service" + case identity.PluginCapability_Service_TYPE_OPERATOR_SERVICE.String(): + result[idx] = "Operator Service" + case identity.PluginCapability_Service_TYPE_LIFECYCLE_SERVICE.String(): + result[idx] = "Lifecycle Service" + case identity.PluginCapability_Service_TYPE_UNSPECIFIED.String(): + continue + default: + result[idx] = capability + } + } + + return strings.Join(result, ", ") + } + + if len(fullStatus.Cluster.Status.PluginStatus) == 0 { + if verbosity > 0 { + fmt.Println(aurora.Green(header)) + fmt.Println("No plugins found") + } + return + } + + fmt.Println(aurora.Green(header)) + + status := tabby.New() + status.AddHeader("Name", "Version", "Status", "Reported Operator Capabilities") + + for _, plg := range fullStatus.Cluster.Status.PluginStatus { + plgStatus := "N/A" + if plg.Status != "" { + plgStatus = plg.Status + } + status.AddLine(plg.Name, plg.Version, plgStatus, parseCapabilities(plg.Capabilities)) + } + + status.Print() + fmt.Println() +} + func getPrimaryStartTime(cluster *apiv1.Cluster) string { return getPrimaryStartTimeIdempotent(cluster, time.Now()) } From fa365a1162d74a7b546c7710633e9701b0199a98 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 4 Dec 2024 15:42:14 +0100 Subject: [PATCH 194/836] chore(database): align field names with `initdb` section (#6245) Closes #6244 Signed-off-by: Marco Nenciarini Signed-off-by: Jaime Silvela Co-authored-by: Jaime Silvela --- .wordlist-en-custom.txt | 2 +- api/v1/database_types.go | 45 ++++++++---- .../bases/postgresql.cnpg.io_databases.yaml | 69 +++++++++++++------ docs/src/cloudnative-pg.v1.md | 36 ++++++---- docs/src/samples/database-example-icu.yaml | 6 +- ...e-with-delete-reclaim-policy.yaml.template | 4 +- .../database.yaml.template | 4 +- 7 files changed, 109 insertions(+), 57 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 8cf809cd19..cea36722e8 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1,4 +1,3 @@ - AES API's APIs @@ -645,6 +644,7 @@ cn cnp cnpg codeready +collationVersion columnValue commandError commandOutput diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 12786a38f6..5e6ecd834a 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -36,6 +36,9 @@ const ( ) // DatabaseSpec is the specification of a Postgresql Database +// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`" +// +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`" +// +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`" type DatabaseSpec struct { // The corresponding cluster ClusterRef corev1.LocalObjectReference `json:"cluster"` @@ -67,44 +70,56 @@ type DatabaseSpec struct { Encoding string 
`json:"encoding,omitempty"` // The locale (cannot be changed) + // Sets the default collation order and character classification in the new database. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale is immutable" // +optional Locale string `json:"locale,omitempty"` - // The locale provider (cannot be changed) - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale_provider is immutable" + // The LOCALE_PROVIDER (cannot be changed) + // This option sets the locale provider for databases created in the new cluster. + // Available from PostgreSQL 16. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeProvider is immutable" // +optional - LocaleProvider string `json:"locale_provider,omitempty"` + LocaleProvider string `json:"localeProvider,omitempty"` // The LC_COLLATE (cannot be changed) - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="lc_collate is immutable" + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCollate is immutable" // +optional - LcCollate string `json:"lc_collate,omitempty"` + LcCollate string `json:"localeCollate,omitempty"` // The LC_CTYPE (cannot be changed) - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="lc_ctype is immutable" + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCType is immutable" // +optional - LcCtype string `json:"lc_ctype,omitempty"` + LcCtype string `json:"localeCType,omitempty"` // The ICU_LOCALE (cannot be changed) - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icu_locale is immutable" + // Specifies the ICU locale when the ICU provider is used. + // This option requires `localeProvider` to be set to `icu`. + // Available from PostgreSQL 15. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuLocale is immutable" // +optional - IcuLocale string `json:"icu_locale,omitempty"` + IcuLocale string `json:"icuLocale,omitempty"` // The ICU_RULES (cannot be changed) - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icu_rules is immutable" + // Specifies additional collation rules to customize the behavior of the default collation. + // This option requires `localeProvider` to be set to `icu`. + // Available from PostgreSQL 16. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuRules is immutable" // +optional - IcuRules string `json:"icu_rules,omitempty"` + IcuRules string `json:"icuRules,omitempty"` // The BUILTIN_LOCALE (cannot be changed) - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtin_locale is immutable" + // Specifies the locale name when the builtin provider is used. + // This option requires `localeProvider` to be set to `builtin`. + // Available from PostgreSQL 17. 
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtinLocale is immutable" // +optional - BuiltinLocale string `json:"builtin_locale,omitempty"` + BuiltinLocale string `json:"builtinLocale,omitempty"` // The COLLATION_VERSION (cannot be changed) - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collation_version is immutable" + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collationVersion is immutable" // +optional - CollationVersion string `json:"collation_version,omitempty"` + CollationVersion string `json:"collationVersion,omitempty"` // True when the database is a template // +optional diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index d50fb58224..7a1d7c8066 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -61,11 +61,15 @@ spec: allowConnections: description: True when connections to this database are allowed type: boolean - builtin_locale: - description: The BUILTIN_LOCALE (cannot be changed) + builtinLocale: + description: |- + The BUILTIN_LOCALE (cannot be changed) + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. type: string x-kubernetes-validations: - - message: builtin_locale is immutable + - message: builtinLocale is immutable rule: self == oldSelf cluster: description: The corresponding cluster @@ -81,11 +85,11 @@ spec: type: string type: object x-kubernetes-map-type: atomic - collation_version: + collationVersion: description: The COLLATION_VERSION (cannot be changed) type: string x-kubernetes-validations: - - message: collation_version is immutable + - message: collationVersion is immutable rule: self == oldSelf connectionLimit: description: |- @@ -113,44 +117,57 @@ spec: - present - absent type: string - icu_locale: - description: The ICU_LOCALE (cannot be changed) + icuLocale: + description: |- + The ICU_LOCALE (cannot be changed) + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. type: string x-kubernetes-validations: - - message: icu_locale is immutable + - message: icuLocale is immutable rule: self == oldSelf - icu_rules: - description: The ICU_RULES (cannot be changed) + icuRules: + description: |- + The ICU_RULES (cannot be changed) + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. type: string x-kubernetes-validations: - - message: icu_rules is immutable + - message: icuRules is immutable rule: self == oldSelf isTemplate: description: True when the database is a template type: boolean - lc_collate: - description: The LC_COLLATE (cannot be changed) + locale: + description: |- + The locale (cannot be changed) + Sets the default collation order and character classification in the new database. 
type: string x-kubernetes-validations: - - message: lc_collate is immutable + - message: locale is immutable rule: self == oldSelf - lc_ctype: + localeCType: description: The LC_CTYPE (cannot be changed) type: string x-kubernetes-validations: - - message: lc_ctype is immutable + - message: localeCType is immutable rule: self == oldSelf - locale: - description: The locale (cannot be changed) + localeCollate: + description: The LC_COLLATE (cannot be changed) type: string x-kubernetes-validations: - - message: locale is immutable + - message: localeCollate is immutable rule: self == oldSelf - locale_provider: - description: The locale provider (cannot be changed) + localeProvider: + description: |- + The LOCALE_PROVIDER (cannot be changed) + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. type: string x-kubernetes-validations: - - message: locale_provider is immutable + - message: localeProvider is immutable rule: self == oldSelf name: description: The name inside PostgreSQL @@ -182,6 +199,14 @@ spec: - name - owner type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' status: description: |- Most recently observed status of the Database. This data may not be up to diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index ea07a0bed8..4854ff8faf 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2466,52 +2466,64 @@ PostgreSQL cluster from an existing storage

string -

The locale (cannot be changed)

+

The locale (cannot be changed) +Sets the default collation order and character classification in the new database.

-locale_provider
+localeProvider
string -

The locale provider (cannot be changed)

+

The LOCALE_PROVIDER (cannot be changed) +This option sets the locale provider for databases created in the new cluster. +Available from PostgreSQL 16.

-lc_collate
+localeCollate
string

The LC_COLLATE (cannot be changed)

-lc_ctype
+localeCType
string

The LC_CTYPE (cannot be changed)

-icu_locale
+icuLocale
string -

The ICU_LOCALE (cannot be changed)

+

The ICU_LOCALE (cannot be changed) +Specifies the ICU locale when the ICU provider is used. +This option requires localeProvider to be set to icu. +Available from PostgreSQL 15.

-icu_rules
+icuRules
string -

The ICU_RULES (cannot be changed)

+

The ICU_RULES (cannot be changed) +Specifies additional collation rules to customize the behavior of the default collation. +This option requires localeProvider to be set to icu. +Available from PostgreSQL 16.

-builtin_locale
+builtinLocale
string -

The BUILTIN_LOCALE (cannot be changed)

+

The BUILTIN_LOCALE (cannot be changed) +Specifies the locale name when the builtin provider is used. +This option requires localeProvider to be set to builtin. +Available from PostgreSQL 17.

-collation_version
+collationVersion
string diff --git a/docs/src/samples/database-example-icu.yaml b/docs/src/samples/database-example-icu.yaml index 7a6bba7e4d..fdfd367921 100644 --- a/docs/src/samples/database-example-icu.yaml +++ b/docs/src/samples/database-example-icu.yaml @@ -8,9 +8,9 @@ spec: name: declarative-icu owner: app encoding: UTF8 - locale_provider: icu - icu_locale: en - icu_rules: fr + localeProvider: icu + icuLocale: en + icuRules: fr template: template0 cluster: name: cluster-example diff --git a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template index 0ce2071609..be0f6c7e23 100644 --- a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template @@ -5,8 +5,8 @@ metadata: spec: name: declarative owner: app - lc_ctype: C - lc_collate: C + localeCType: C + localeCollate: C encoding: UTF8 databaseReclaimPolicy: delete cluster: diff --git a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template index 75f2107bcc..a3ae25d8b3 100644 --- a/tests/e2e/fixtures/declarative_databases/database.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template @@ -5,8 +5,8 @@ metadata: spec: name: declarative owner: app - lc_ctype: "en_US.utf8" - lc_collate: C + localeCType: "en_US.utf8" + localeCollate: C encoding: SQL_ASCII template: template0 cluster: From 45a147bafedf3baa61d059dcf9780c5eadc6e03c Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Wed, 4 Dec 2024 18:43:55 +0100 Subject: [PATCH 195/836] fix: correct path for partial wal archiver (#6255) Use a relative file name for the partial WAL file archived during a replica switchover. This fixes an issue with the backup plugin, which received the absolute file path prefixed by the $PGDATA value. The in-tree archiver uses only the base name of the file.
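A condensed sketch of the resulting invocation, assembled here for illustration from the change below (`walFile` is the WAL name taken from the request):

```go
pgData := os.Getenv("PGDATA")
walRelativePath := path.Join("pg_wal", walFile)
partialWalFileRelativePath := fmt.Sprintf("%s.partial", walRelativePath)

// Absolute paths are used only to create (and later remove) the
// hard link on disk.
walFileAbsolutePath := path.Join(pgData, walRelativePath)
partialWalFileAbsolutePath := path.Join(pgData, partialWalFileRelativePath)

// The archiver now receives the $PGDATA-relative name and runs from
// $PGDATA, so plugins see "pg_wal/<WAL>.partial" instead of a path
// prefixed by the $PGDATA value.
walArchiveCmd := exec.Command("/controller/manager",
	constants.WalArchiveCommand, partialWalFileRelativePath)
walArchiveCmd.Dir = pgData
```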
Closes #6256 Signed-off-by: Francesco Canovai --- pkg/management/postgres/webserver/remote.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index 7b9d75becd..95eab40892 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -386,24 +386,27 @@ func (ws *remoteWebserverEndpoints) pgArchivePartial(w http.ResponseWriter, req return } - pgWalDirectory := path.Join(os.Getenv("PGDATA"), "pg_wal") - walFilPath := path.Join(pgWalDirectory, walFile) - partialWalFilePath := fmt.Sprintf("%s.partial", walFilPath) + pgData := os.Getenv("PGDATA") + walRelativePath := path.Join("pg_wal", walFile) + partialWalFileRelativePath := fmt.Sprintf("%s.partial", walRelativePath) + walFileAbsolutePath := path.Join(pgData, walRelativePath) + partialWalFileAbsolutePath := path.Join(pgData, partialWalFileRelativePath) - if err := os.Link(walFilPath, partialWalFilePath); err != nil { + if err := os.Link(walFileAbsolutePath, partialWalFileAbsolutePath); err != nil { log.Error(err, "failed to get pg_controldata") sendBadRequestJSONResponse(w, "ERROR_WHILE_CREATING_SYMLINK", err.Error()) return } defer func() { - if err := fileutils.RemoveFile(partialWalFilePath); err != nil { + if err := fileutils.RemoveFile(partialWalFileAbsolutePath); err != nil { log.Error(err, "while deleting the partial wal file symlink") } }() - options := []string{constants.WalArchiveCommand, partialWalFilePath} + options := []string{constants.WalArchiveCommand, partialWalFileRelativePath} walArchiveCmd := exec.Command("/controller/manager", options...) // nolint: gosec + walArchiveCmd.Dir = pgData if err := execlog.RunBuffering(walArchiveCmd, "wal-archive-partial"); err != nil { sendBadRequestJSONResponse(w, "ERROR_WHILE_EXECUTING_WAL_ARCHIVE", err.Error()) return From f95015ab8ddd3d78c3bf5a08e164c499fbafc02d Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Wed, 4 Dec 2024 22:50:12 +0100 Subject: [PATCH 196/836] feat: clean up logging of database, publication, subscription controllers (#6268) This update enhances the logging mechanisms for the database, publication, and subscription controllers. It ensures more consistent and informative log messages, making debugging and monitoring easier. Closes #5524 Signed-off-by: Jaime Silvela Signed-off-by: wolfox Signed-off-by: Armando Ruocco Co-authored-by: wolfox Co-authored-by: Armando Ruocco --- .../management/controller/database_controller.go | 16 ++++++++++------ .../controller/publication_controller.go | 16 ++++++++++------ .../controller/subscription_controller.go | 16 ++++++++++------ 3 files changed, 30 insertions(+), 18 deletions(-) diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index a82f8ce5a1..f1283f2d9c 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -66,12 +66,9 @@ const databaseReconciliationInterval = 30 * time.Second // Reconcile is the database reconciliation loop func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - contextLogger := log.FromContext(ctx) - - contextLogger.Debug("Reconciliation loop start") - defer func() { - contextLogger.Debug("Reconciliation loop end") - }() + contextLogger := log.FromContext(ctx). + WithName("database_reconciler").
+ WithValues("databaseName", req.Name) // Get the database object var database apiv1.Database @@ -115,6 +112,11 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } + contextLogger.Info("Reconciling database") + defer func() { + contextLogger.Info("Reconciliation loop of database exited") + }() + // Still not for me, we're waiting for a switchover if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil @@ -165,6 +167,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c ctx, &database, ); err != nil { + contextLogger.Error(err, "while reconciling database") return r.failedReconciliation( ctx, &database, @@ -172,6 +175,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c ) } + contextLogger.Info("Reconciliation of database completed") return r.succeededReconciliation( ctx, &database, diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go index f9d1bc8bd9..9758ff690d 100644 --- a/internal/management/controller/publication_controller.go +++ b/internal/management/controller/publication_controller.go @@ -58,12 +58,9 @@ const publicationReconciliationInterval = 30 * time.Second // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - contextLogger := log.FromContext(ctx) - - contextLogger.Debug("Reconciliation loop start") - defer func() { - contextLogger.Debug("Reconciliation loop end") - }() + contextLogger := log.FromContext(ctx). + WithName("publication_reconciler"). 
+ WithValues("publicationName", req.Name) // Get the publication object var publication apiv1.Publication @@ -105,6 +102,11 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil } + contextLogger.Info("Reconciling publication") + defer func() { + contextLogger.Info("Reconciliation loop of publication exited") + }() + // Cannot do anything on a replica cluster if cluster.IsReplica() { if err := markAsUnknown(ctx, r.Client, &publication, errClusterIsReplica); err != nil { @@ -121,12 +123,14 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) } if err := r.alignPublication(ctx, &publication); err != nil { + contextLogger.Error(err, "while reconciling publication") if err := markAsFailed(ctx, r.Client, &publication, err); err != nil { return ctrl.Result{}, err } return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil } + contextLogger.Info("Reconciliation of publication completed") if err := markAsReady(ctx, r.Client, &publication); err != nil { return ctrl.Result{}, err } diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go index f1a3af65bf..8019c3dd2b 100644 --- a/internal/management/controller/subscription_controller.go +++ b/internal/management/controller/subscription_controller.go @@ -51,12 +51,9 @@ const subscriptionReconciliationInterval = 30 * time.Second // Reconcile is the subscription reconciliation loop func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - contextLogger := log.FromContext(ctx) - - contextLogger.Debug("Reconciliation loop start") - defer func() { - contextLogger.Debug("Reconciliation loop end") - }() + contextLogger := log.FromContext(ctx). + WithName("subscription_reconciler"). + WithValues("subscriptionName", req.Name) // Get the subscription object var subscription apiv1.Subscription @@ -98,6 +95,11 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil } + contextLogger.Info("Reconciling subscription") + defer func() { + contextLogger.Info("Reconciliation loop of subscription exited") + }() + // Cannot do anything on a replica cluster if cluster.IsReplica() { if err := markAsUnknown(ctx, r.Client, &subscription, errClusterIsReplica); err != nil { @@ -127,12 +129,14 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request } if err := r.alignSubscription(ctx, &subscription, connString); err != nil { + contextLogger.Error(err, "while reconciling subscription") if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil { return ctrl.Result{}, err } return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil } + contextLogger.Info("Reconciliation of subscription completed") if err := markAsReady(ctx, r.Client, &subscription); err != nil { return ctrl.Result{}, err } From 4ce42ef29d278e28efd391dd579cc240f4a5b185 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 4 Dec 2024 23:54:36 +0100 Subject: [PATCH 197/836] feat(probes): enable customization of startup, liveness, and readiness probes (#6266) This patch enables users to customize the default behavior of readiness, liveness, and startup probes implemented by CloudNativePG by introducing the `.spec.probes` section. 
Users are responsible for ensuring that any custom probe settings align with the operational requirements of the cluster to avoid unintended disruptions. Closes: #4852 Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Signed-off-by: Ben Healey Co-authored-by: Gabriele Bartolini Co-authored-by: Ben Healey --- .wordlist-en-custom.txt | 8 + api/v1/cluster_funcs.go | 15 ++ api/v1/cluster_funcs_test.go | 44 ++++ api/v1/cluster_types.go | 56 ++++++ api/v1/zz_generated.deepcopy.go | 55 +++++ .../bases/postgresql.cnpg.io_clusters.yaml | 153 ++++++++++++++ docs/src/cloudnative-pg.v1.md | 122 +++++++++++ docs/src/failure_modes.md | 30 ++- docs/src/instance_manager.md | 153 ++++++++++++-- docs/src/operator_capability_levels.md | 30 +-- pkg/specs/pods.go | 27 ++- pkg/specs/podspec_diff.go | 3 + pkg/specs/podspec_diff_test.go | 53 +++++ tests/e2e/probes_test.go | 190 ++++++++++++++++++ 14 files changed, 888 insertions(+), 51 deletions(-) create mode 100644 tests/e2e/probes_test.go diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index cea36722e8..f5fe08815f 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -342,6 +342,8 @@ PrimaryUpdateMethod PrimaryUpdateStrategy PriorityClass PriorityClassName +ProbeTerminationGracePeriod +ProbesConfiguration ProjectedVolumeSource PublicationReclaimPolicy PublicationSpec @@ -778,6 +780,7 @@ facto failover failoverDelay failovers +failureThreshold faq fastpath fb @@ -854,6 +857,7 @@ inheritedMetadata init initDB initdb +initialDelaySeconds initialise initializingPVC inplace @@ -1028,6 +1032,7 @@ passwordSecret passwordStatus pc pdf +periodSeconds persistentvolumeclaim persistentvolumeclaims pgAdmin @@ -1260,6 +1265,7 @@ subdirectory subresource subscriptionReclaimPolicy substatement +successThreshold successfullyExtracted sudo superuserSecret @@ -1302,11 +1308,13 @@ tbody tcp td temporaryData +terminationGracePeriodSeconds th thead timeLineID timeframes timelineID +timeoutSeconds tls tmp tmpfs diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 093cb41837..3ffba1a3da 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -1444,3 +1444,18 @@ func (target *RecoveryTarget) BuildPostgresOptions() string { return result } + +// ApplyInto applies the content of the probe configuration in a Kubernetes +// probe +func (p *Probe) ApplyInto(k8sProbe *corev1.Probe) { + if p == nil { + return + } + + k8sProbe.InitialDelaySeconds = p.InitialDelaySeconds + k8sProbe.TimeoutSeconds = p.TimeoutSeconds + k8sProbe.PeriodSeconds = p.PeriodSeconds + k8sProbe.SuccessThreshold = p.SuccessThreshold + k8sProbe.FailureThreshold = p.FailureThreshold + k8sProbe.TerminationGracePeriodSeconds = p.TerminationGracePeriodSeconds +} diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index c478c2b3ae..34c67f363f 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -26,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -1682,3 +1683,46 @@ var _ = Describe("UpdateBackupTimes", func() { To(Equal(now)) }) }) + +var _ = Describe("Probes configuration", func() { + originalProbe := corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt32(23), + }, + }, + + InitialDelaySeconds: 21, + PeriodSeconds: 11, + FailureThreshold: 
433, + TerminationGracePeriodSeconds: ptr.To[int64](23), + } + + It("Does not change any field if the configuration is nil", func() { + var nilProbe *Probe + configuredProbe := originalProbe.DeepCopy() + nilProbe.ApplyInto(configuredProbe) + Expect(originalProbe).To(BeEquivalentTo(*configuredProbe)) + }) + + It("Changes the corresponding fields", func() { + config := &Probe{ + InitialDelaySeconds: 1, + TimeoutSeconds: 2, + PeriodSeconds: 3, + SuccessThreshold: 4, + FailureThreshold: 5, + TerminationGracePeriodSeconds: nil, + } + + configuredProbe := originalProbe.DeepCopy() + config.ApplyInto(configuredProbe) + Expect(configuredProbe.InitialDelaySeconds).To(Equal(config.InitialDelaySeconds)) + Expect(configuredProbe.TimeoutSeconds).To(Equal(config.TimeoutSeconds)) + Expect(configuredProbe.PeriodSeconds).To(Equal(config.PeriodSeconds)) + Expect(configuredProbe.SuccessThreshold).To(Equal(config.SuccessThreshold)) + Expect(configuredProbe.FailureThreshold).To(Equal(config.FailureThreshold)) + Expect(configuredProbe.TerminationGracePeriodSeconds).To(BeNil()) + }) +}) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index affbfe0e60..2814f18cb6 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -476,6 +476,62 @@ type ClusterSpec struct { // any plugin to be loaded with the corresponding configuration // +optional Plugins PluginConfigurationList `json:"plugins,omitempty"` + + // The configuration of the probes to be injected + // in the PostgreSQL Pods. + // +optional + Probes *ProbesConfiguration `json:"probes,omitempty"` +} + +// ProbesConfiguration represent the configuration for the probes +// to be injected in the PostgreSQL Pods +type ProbesConfiguration struct { + // The startup probe configuration + Startup *Probe `json:"startup,omitempty"` + + // The liveness probe configuration + Liveness *Probe `json:"liveness,omitempty"` + + // The readiness probe configuration + Readiness *Probe `json:"readiness,omitempty"` +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // Number of seconds after the container has started before liveness probes are initiated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"` + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + PeriodSeconds int32 `json:"periodSeconds,omitempty"` + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + // +optional + SuccessThreshold int32 `json:"successThreshold,omitempty"` + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + FailureThreshold int32 `json:"failureThreshold,omitempty"` + // Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + // value overrides the value provided by the pod spec. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down). + // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } // PluginConfigurationList represent a set of plugin with their diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 014362a084..fbaec944e2 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -843,6 +843,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Probes != nil { + in, out := &in.Probes, &out.Probes + *out = new(ProbesConfiguration) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. @@ -2216,6 +2221,56 @@ func (in *PostgresConfiguration) DeepCopy() *PostgresConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbesConfiguration) DeepCopyInto(out *ProbesConfiguration) { + *out = *in + if in.Startup != nil { + in, out := &in.Startup, &out.Startup + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Liveness != nil { + in, out := &in.Liveness, &out.Liveness + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Readiness != nil { + in, out := &in.Readiness, &out.Readiness + *out = new(Probe) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbesConfiguration. +func (in *ProbesConfiguration) DeepCopy() *ProbesConfiguration { + if in == nil { + return nil + } + out := new(ProbesConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Publication) DeepCopyInto(out *Publication) { *out = *in diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 17242a5ec7..e185082fa7 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -4221,6 +4221,159 @@ spec: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass for more information type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object projectedVolumeTemplate: description: |- Template to be used to define projected volumes, projected volumes will be mounted diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 4854ff8faf..7a5b1c193f 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -1932,6 +1932,14 @@ development/staging purposes.

any plugin to be loaded with the corresponding configuration

+probes
+ProbesConfiguration + + +

The configuration of the probes to be injected +in the PostgreSQL Pods.

+ + @@ -4111,6 +4119,120 @@ the primary server of the cluster as part of rolling updates

+## Probe {#postgresql-cnpg-io-v1-Probe} + + +**Appears in:** + +- [ProbesConfiguration](#postgresql-cnpg-io-v1-ProbesConfiguration) + + +

Probe describes a health check to be performed against a container to determine whether it is +alive or ready to receive traffic.

+ + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
initialDelaySeconds
+int32 +
+

Number of seconds after the container has started before liveness probes are initiated. +More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

+
timeoutSeconds
+int32 +
+

Number of seconds after which the probe times out. +Defaults to 1 second. Minimum value is 1. +More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes

+
periodSeconds
+int32 +
+

How often (in seconds) to perform the probe. +Default to 10 seconds. Minimum value is 1.

+
successThreshold
+int32 +
+

Minimum consecutive successes for the probe to be considered successful after having failed. +Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.

+
failureThreshold
+int32 +
+

Minimum consecutive failures for the probe to be considered failed after having succeeded. +Defaults to 3. Minimum value is 1.

+
terminationGracePeriodSeconds
+int64 +
+

Optional duration in seconds the pod needs to terminate gracefully upon probe failure. +The grace period is the duration in seconds after the processes running in the pod are sent +a termination signal and the time when the processes are forcibly halted with a kill signal. +Set this value longer than the expected cleanup time for your process. +If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this +value overrides the value provided by the pod spec. +Value must be non-negative integer. The value zero indicates stop immediately via +the kill signal (no opportunity to shut down). +This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. +Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.

+
+ +## ProbesConfiguration {#postgresql-cnpg-io-v1-ProbesConfiguration} + + +**Appears in:** + +- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) + + +

ProbesConfiguration represents the configuration for the probes +to be injected in the PostgreSQL Pods

+ + + + + + + + + + + + + + + +
FieldDescription
startup [Required]
+Probe +
+

The startup probe configuration

+
liveness [Required]
+Probe +
+

The liveness probe configuration

+
readiness [Required]
+Probe +
+

The readiness probe configuration

+
+ ## PublicationReclaimPolicy {#postgresql-cnpg-io-v1-PublicationReclaimPolicy} (Alias of `string`) diff --git a/docs/src/failure_modes.md b/docs/src/failure_modes.md index 3f9b746f3c..4dd6df6c9b 100644 --- a/docs/src/failure_modes.md +++ b/docs/src/failure_modes.md @@ -99,26 +99,24 @@ kubectl delete pod [primary pod] --grace-period=1 triggers a failover promoting the most aligned standby, without the guarantee that the primary had been shut down. +### Liveness Probe Failure -### Readiness probe failure +By default, after three consecutive liveness probe failures, the `postgres` +container will be considered failed. The Pod will remain part of the `Cluster`, +but the *kubelet* will attempt to restart the failed container. If the issue +causing the failure persists and cannot be resolved, you can manually delete +the Pod. -After 3 failures, the pod will be considered *not ready*. The pod will still -be part of the `Cluster`, no new pod will be created. +In both cases, self-healing occurs automatically once the underlying issues are +resolved. -If the cause of the failure can't be fixed, it is possible to delete the pod -manually. Otherwise, the pod will resume the previous role when the failure -is solved. +### Readiness Probe Failure -Self-healing will happen after three failures of the probe. - -### Liveness probe failure - -After 3 failures, the `postgres` container will be considered failed. The -pod will still be part of the `Cluster`, and the *kubelet* will try to restart -the container. If the cause of the failure can't be fixed, it is possible -to delete the pod manually. - -Self-healing will happen after three failures of the probe. +By default, after three consecutive readiness probe failures, the Pod will be +marked as *not ready*. It will remain part of the `Cluster`, and no new Pod +will be created. If the issue causing the failure cannot be resolved, you can +manually delete the Pod. Once the failure is addressed, the Pod will +automatically regain its previous role. ### Worker node drained diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index c8c924f4fe..ce13adbab2 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -18,35 +18,144 @@ of the Pod, the instance manager acts as a backend to handle the ## Startup, liveness and readiness probes The startup and liveness probes rely on `pg_isready`, while the readiness -probe checks if the database is up and able to accept connections using the -superuser credentials. +probe checks if the database is up and able to accept connections. -The readiness probe is positive when the Pod is ready to accept traffic. -The liveness probe controls when to restart the container once -the startup probe interval has elapsed. +### Startup Probe -!!! Important - The liveness and readiness probes will report a failure if the probe command - fails three times with a 10-second interval between each check. +The `.spec.startDelay` parameter specifies the delay (in seconds) before the +liveness probe activates after a PostgreSQL Pod starts. By default, this is set +to `3600` seconds. You should adjust this value based on the time PostgreSQL +requires to fully initialize in your environment. + +!!! Warning + Setting `.spec.startDelay` too low can cause the liveness probe to activate + prematurely, potentially resulting in unnecessary Pod restarts if PostgreSQL + hasn’t fully initialized. 
+ +CloudNativePG configures the startup probe with the following default parameters: + +```yaml +failureThreshold: FAILURE_THRESHOLD +periodSeconds: 10 +successThreshold: 1 +timeoutSeconds: 5 +``` + +Here, `FAILURE_THRESHOLD` is calculated as `startDelay` divided by +`periodSeconds`. + +If the default behavior based on `startDelay` is not suitable for your use +case, you can take full control of the startup probe by specifying custom +parameters in the `.spec.probes.startup` stanza. Note that defining this stanza +will override the default behavior, including the use of `startDelay`. + +!!! Warning + Ensure that any custom probe settings are aligned with your cluster’s + operational requirements to prevent unintended disruptions. + +!!! Info + For detailed information about probe configuration, refer to the + [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). -The liveness probe detects if the PostgreSQL instance is in a -broken state and needs to be restarted. The value in `startDelay` is used -to delay the probe's execution, preventing an -instance with a long startup time from being restarted. +For example, the following configuration bypasses `startDelay` entirely: -The amount of time needed for a Pod to be classified as not alive is -configurable in the `.spec.livenessProbeTimeout` parameter, that -defaults to 30 seconds. +```yaml +# ... snip +spec: + probes: + startup: + periodSeconds: 3 + timeoutSeconds: 3 + failureThreshold: 10 +``` -The interval (in seconds) after the Pod has started before the liveness -probe starts working is expressed in the `.spec.startDelay` parameter, -which defaults to 3600 seconds. The correct value for your cluster is -related to the time needed by PostgreSQL to start. +### Liveness Probe + +The liveness probe begins after the startup probe succeeds and is responsible +for detecting if the PostgreSQL instance has entered a broken state that +requires a restart of the pod. + +The amount of time before a Pod is classified as not alive is configurable via +the `.spec.livenessProbeTimeout` parameter. + +CloudNativePG configures the liveness probe with the following default +parameters: + +```yaml +failureThreshold: FAILURE_THRESHOLD +periodSeconds: 10 +successThreshold: 1 +timeoutSeconds: 5 +``` + +Here, `FAILURE_THRESHOLD` is calculated as `livenessProbeTimeout` divided by +`periodSeconds`. + +By default, `.spec.livenessProbeTimeout` is set to `30` seconds. This means the +liveness probe will report a failure if it detects three consecutive probe +failures, with a 10-second interval between each check. + +If the default behavior using `livenessProbeTimeout` does not meet your needs, +you can fully customize the liveness probe by defining parameters in the +`.spec.probes.liveness` stanza. Keep in mind that specifying this stanza will +override the default behavior, including the use of `livenessProbeTimeout`. !!! Warning - If `.spec.startDelay` is too low, the liveness probe will start working - before the PostgreSQL startup is complete, and the Pod could be restarted - prematurely. + Ensure that any custom probe settings are aligned with your cluster’s + operational requirements to prevent unintended disruptions. + +!!! Info + For more details on probe configuration, refer to the + [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). + +For example, the following configuration overrides the default behavior and +bypasses `livenessProbeTimeout`: + +```yaml +# ... 
snip +spec: + probes: + liveness: + periodSeconds: 3 + timeoutSeconds: 3 + failureThreshold: 10 +``` + +### Readiness Probe + +The readiness probe determines when a pod running a PostgreSQL instance is +prepared to accept traffic and serve requests. + +CloudNativePG uses the following default configuration for the readiness probe: + +```yaml +failureThreshold: 3 +periodSeconds: 10 +successThreshold: 1 +timeoutSeconds: 5 +``` + +If the default settings do not suit your requirements, you can fully customize +the readiness probe by specifying parameters in the `.spec.probes.readiness` +stanza. For example: + +```yaml +# ... snip +spec: + probes: + readiness: + periodSeconds: 3 + timeoutSeconds: 3 + failureThreshold: 10 +``` + +!!! Warning + Ensure that any custom probe settings are aligned with your cluster’s + operational requirements to prevent unintended disruptions. + +!!! Info + For more information on configuring probes, see the + [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). ## Shutdown control diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index 5008d33b8c..c961a27154 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -494,18 +494,24 @@ scalability of PostgreSQL databases, ensuring a streamlined and optimized experience for managing large scale data storage in cloud-native environments. Support for temporary tablespaces is also included. -### Liveness and readiness probes - -The operator defines liveness and readiness probes for the Postgres -containers that are then invoked by the kubelet. They're mapped respectively -to the `/healthz` and `/readyz` endpoints of the web server managed -directly by the instance manager. - -The liveness probe is based on the `pg_isready` executable, and the pod is -considered healthy with exit codes 0 (server accepting connections normally) -and 1 (server is rejecting connections, for example, during startup). The -readiness probe issues a simple query (`;`) to verify that the server is -ready to accept connections. +### Startup, Liveness, and Readiness Probes + +CloudNativePG configures startup, liveness, and readiness probes for PostgreSQL +containers, which are managed by the Kubernetes kubelet. These probes interact +with the `/healthz` and `/readyz` endpoints exposed by the instance manager's +web server to monitor the Pod's health and readiness. + +The startup and liveness probes use the `pg_isready` utility. A Pod is +considered healthy if `pg_isready` returns an exit code of 0 (indicating the +server is accepting connections) or 1 (indicating the server is rejecting +connections, such as during startup). + +The readiness probe executes a simple SQL query (`;`) to verify that the +PostgreSQL server is ready to accept client connections. + +All probes are configured with default settings but can be fully customized to +meet specific needs, allowing for fine-tuning to align with your environment +and workloads. 
### Rolling deployments diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index 8e3e5ed44e..3fe970313b 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -198,6 +198,8 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable Env: envConfig.EnvVars, EnvFrom: envConfig.EnvFrom, VolumeMounts: createPostgresVolumeMounts(cluster), + // This is the default startup probe, and can be overridden + // the user configuration in cluster.spec.probes.startup StartupProbe: &corev1.Probe{ FailureThreshold: getStartupProbeFailureThreshold(cluster.GetMaxStartDelay()), PeriodSeconds: StartupProbePeriod, @@ -209,6 +211,8 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable }, }, }, + // This is the default readiness probe, and can be overridden + // by the user configuration in cluster.spec.probes.readiness ReadinessProbe: &corev1.Probe{ TimeoutSeconds: 5, PeriodSeconds: ReadinessProbePeriod, @@ -219,6 +223,8 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable }, }, }, + // This is the default liveness probe, and can be overridden + // by the user configuration in cluster.spec.probes.liveness LivenessProbe: &corev1.Probe{ PeriodSeconds: LivenessProbePeriod, TimeoutSeconds: 5, @@ -272,10 +278,14 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable // if user customizes the liveness probe timeout, we need to adjust the failure threshold addLivenessProbeFailureThreshold(cluster, &containers[0]) + // use the custom probe configuration if provided + ensureCustomProbesConfiguration(&cluster, &containers[0]) + return containers } -// adjust the liveness probe failure threshold based on the `spec.livenessProbeTimeout` value +// addLivenessProbeFailureThreshold adjusts the liveness probe failure threshold +// based on the `spec.livenessProbeTimeout` value func addLivenessProbeFailureThreshold(cluster apiv1.Cluster, container *corev1.Container) { if cluster.Spec.LivenessProbeTimeout != nil { timeout := *cluster.Spec.LivenessProbeTimeout @@ -283,6 +293,21 @@ func addLivenessProbeFailureThreshold(cluster apiv1.Cluster, container *corev1.C } } +// ensureCustomProbesConfiguration applies the custom probe configuration +// if specified inside the cluster specification +func ensureCustomProbesConfiguration(cluster *apiv1.Cluster, container *corev1.Container) { + // No probes configuration + if cluster.Spec.Probes == nil { + return + } + + // There's no need to check for nils here because a nil probe specification + // will result in no change in the Kubernetes probe. 
+ cluster.Spec.Probes.Liveness.ApplyInto(container.LivenessProbe) + cluster.Spec.Probes.Readiness.ApplyInto(container.ReadinessProbe) + cluster.Spec.Probes.Startup.ApplyInto(container.StartupProbe) +} + // getStartupProbeFailureThreshold get the startup probe failure threshold // FAILURE_THRESHOLD = ceil(startDelay / periodSeconds) and minimum value is 1 func getStartupProbeFailureThreshold(startupDelay int32) int32 { diff --git a/pkg/specs/podspec_diff.go b/pkg/specs/podspec_diff.go index 54c9328d8a..ecd02fbfe0 100644 --- a/pkg/specs/podspec_diff.go +++ b/pkg/specs/podspec_diff.go @@ -176,6 +176,9 @@ func doContainersMatch(currentContainer, targetContainer corev1.Container) (bool "liveness-probe": func() bool { return reflect.DeepEqual(currentContainer.LivenessProbe, targetContainer.LivenessProbe) }, + "startup-probe": func() bool { + return reflect.DeepEqual(currentContainer.StartupProbe, targetContainer.StartupProbe) + }, "command": func() bool { return reflect.DeepEqual(currentContainer.Command, targetContainer.Command) }, diff --git a/pkg/specs/podspec_diff_test.go b/pkg/specs/podspec_diff_test.go index 5869d68221..cfbd40e907 100644 --- a/pkg/specs/podspec_diff_test.go +++ b/pkg/specs/podspec_diff_test.go @@ -17,6 +17,8 @@ limitations under the License. package specs import ( + corev1 "k8s.io/api/core/v1" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -37,4 +39,55 @@ var _ = Describe("PodSpecDiff", func() { It("returns false for empty volume name", func() { Expect(shouldIgnoreCurrentVolume("")).To(BeFalse()) }) + + It("return false when the startup probe do not match and true otherwise", func() { + containerPre := corev1.Container{ + StartupProbe: &corev1.Probe{ + TimeoutSeconds: 23, + }, + } + containerPost := corev1.Container{ + StartupProbe: &corev1.Probe{ + TimeoutSeconds: 24, + }, + } + Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue()) + status, diff := doContainersMatch(containerPre, containerPost) + Expect(status).To(BeFalse()) + Expect(diff).To(Equal("startup-probe")) + }) + + It("return false when the liveness probe do not match and true otherwise", func() { + containerPre := corev1.Container{ + LivenessProbe: &corev1.Probe{ + InitialDelaySeconds: 23, + }, + } + containerPost := corev1.Container{ + LivenessProbe: &corev1.Probe{ + InitialDelaySeconds: 24, + }, + } + Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue()) + status, diff := doContainersMatch(containerPre, containerPost) + Expect(status).To(BeFalse()) + Expect(diff).To(Equal("liveness-probe")) + }) + + It("return false when the readiness probe do not match and true otherwise", func() { + containerPre := corev1.Container{ + ReadinessProbe: &corev1.Probe{ + SuccessThreshold: 23, + }, + } + containerPost := corev1.Container{ + ReadinessProbe: &corev1.Probe{ + SuccessThreshold: 24, + }, + } + Expect(doContainersMatch(containerPre, containerPre)).To(BeTrue()) + status, diff := doContainersMatch(containerPre, containerPost) + Expect(status).To(BeFalse()) + Expect(diff).To(Equal("readiness-probe")) + }) }) diff --git a/tests/e2e/probes_test.go b/tests/e2e/probes_test.go new file mode 100644 index 0000000000..9e7dae8567 --- /dev/null +++ b/tests/e2e/probes_test.go @@ -0,0 +1,190 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Set of tests in which we check that the configuration of the probes is applied +var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() { + const ( + level = tests.High + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + It("can change the probes configuration", func(ctx SpecContext) { + var namespace string + + const sampleFile = fixturesDir + "/base/cluster-storage-class.yaml.template" + const clusterName = "postgresql-storage-class" + + // IMPORTANT: for this E2e to work, these values need to be different + // than the default Kubernetes settings + probeConfiguration := apiv1.Probe{ + InitialDelaySeconds: 2, + PeriodSeconds: 4, + TimeoutSeconds: 8, + } + probesConfiguration := apiv1.ProbesConfiguration{ + Startup: probeConfiguration.DeepCopy(), + Liveness: probeConfiguration.DeepCopy(), + Readiness: probeConfiguration.DeepCopy(), + } + + assertProbeCoherentWithConfiguration := func(probe *corev1.Probe) { + Expect(probe.InitialDelaySeconds).To(BeEquivalentTo(probeConfiguration.InitialDelaySeconds)) + Expect(probe.PeriodSeconds).To(BeEquivalentTo(probeConfiguration.PeriodSeconds)) + Expect(probe.TimeoutSeconds).To(BeEquivalentTo(probeConfiguration.TimeoutSeconds)) + } + + assertProbesCoherentWithConfiguration := func(container *corev1.Container) { + assertProbeCoherentWithConfiguration(container.LivenessProbe) + assertProbeCoherentWithConfiguration(container.ReadinessProbe) + assertProbeCoherentWithConfiguration(container.StartupProbe) + } + + var defaultReadinessProbe *corev1.Probe + var defaultLivenessProbe *corev1.Probe + var defaultStartupProbe *corev1.Probe + + By("creating an empty cluster", func() { + // Create a cluster in a namespace we'll delete after the test + const namespacePrefix = "probes" + var err error + namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + AssertCreateCluster(namespace, clusterName, sampleFile, env) + }) + + By("getting the default probes configuration", func() { + var pod corev1.Pod + err := env.Client.Get(ctx, client.ObjectKey{ + Name: fmt.Sprintf("%s-1", clusterName), + Namespace: namespace, + }, &pod) + Expect(err).ToNot(HaveOccurred()) + + Expect(pod.Spec.Containers[0].Name).To(Equal("postgres")) + defaultReadinessProbe = pod.Spec.Containers[0].ReadinessProbe.DeepCopy() + defaultLivenessProbe = pod.Spec.Containers[0].LivenessProbe.DeepCopy() + defaultStartupProbe = pod.Spec.Containers[0].StartupProbe.DeepCopy() + }) + + By("applying a probe configuration", func() { + var cluster apiv1.Cluster + err := env.Client.Get(ctx, client.ObjectKey{ + Name: clusterName, + Namespace: namespace, + }, &cluster)
Expect(err).ToNot(HaveOccurred()) + + originalCluster := cluster.DeepCopy() + cluster.Spec.Probes = probesConfiguration.DeepCopy() + + err = env.Client.Patch(ctx, &cluster, client.MergeFrom(originalCluster)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("waiting for the cluster to restart", func() { + AssertClusterEventuallyReachesPhase(namespace, clusterName, + []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120) + AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + }) + + By("checking the applied settings", func() { + var cluster apiv1.Cluster + err := env.Client.Get(ctx, client.ObjectKey{ + Name: clusterName, + Namespace: namespace, + }, &cluster) + Expect(err).ToNot(HaveOccurred()) + + for _, instance := range cluster.Status.InstanceNames { + var pod corev1.Pod + err := env.Client.Get(ctx, client.ObjectKey{ + Name: instance, + Namespace: namespace, + }, &pod) + Expect(err).ToNot(HaveOccurred()) + + Expect(pod.Spec.Containers[0].Name).To(Equal("postgres")) + assertProbesCoherentWithConfiguration(&pod.Spec.Containers[0]) + } + }) + + By("reverting back the changes", func() { + var cluster apiv1.Cluster + err := env.Client.Get(ctx, client.ObjectKey{ + Name: clusterName, + Namespace: namespace, + }, &cluster) + Expect(err).ToNot(HaveOccurred()) + + originalCluster := cluster.DeepCopy() + cluster.Spec.Probes = &apiv1.ProbesConfiguration{} + + err = env.Client.Patch(ctx, &cluster, client.MergeFrom(originalCluster)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("waiting for the cluster to restart", func() { + AssertClusterEventuallyReachesPhase(namespace, clusterName, + []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120) + AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + }) + + By("checking the applied settings", func() { + var cluster apiv1.Cluster + err := env.Client.Get(ctx, client.ObjectKey{ + Name: clusterName, + Namespace: namespace, + }, &cluster) + Expect(err).ToNot(HaveOccurred()) + + for _, instance := range cluster.Status.InstanceNames { + var pod corev1.Pod + err = env.Client.Get(ctx, client.ObjectKey{ + Name: instance, + Namespace: namespace, + }, &pod) + Expect(err).ToNot(HaveOccurred()) + + Expect(pod.Spec.Containers[0].Name).To(Equal("postgres")) + Expect(pod.Spec.Containers[0].LivenessProbe).To(BeEquivalentTo(defaultLivenessProbe)) + Expect(pod.Spec.Containers[0].ReadinessProbe).To(BeEquivalentTo(defaultReadinessProbe)) + Expect(pod.Spec.Containers[0].StartupProbe).To(BeEquivalentTo(defaultStartupProbe)) + } + }) + }) +}) From e580344a5e5957e856ee3fb8d92af28b29518943 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Thu, 5 Dec 2024 10:03:25 +0100 Subject: [PATCH 198/836] refactor: use existing error handling functions in the `DatabaseReconciler` (#6212) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch updates the DatabaseReconciler code to match the structure of the PublicationReconciler and SubscriptionReconciler. It utilizes the common functions introduced in the Publications and Subscriptions implementation. Additionally, the actions within the DatabaseReconciler have been reorganized to align with the operational flow of the PublicationReconciler and SubscriptionReconciler. 
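
For reference, the shared helpers this refactor leans on follow the shape
sketched below. The sketch is inferred from how `markAsFailed` and the
`markableAsFailed` interface are used in the hunks that follow; the real
bodies live in internal/management/controller/common.go and may differ in
detail:

    package controller

    import (
        "context"

        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // markableAsFailed is the capability shared by Database, Publication
    // and Subscription objects (see common.go below).
    type markableAsFailed interface {
        client.Object
        SetAsFailed(err error)
    }

    // markAsFailed records a reconciliation error on the object status
    // with a single status patch, regardless of the concrete type.
    func markAsFailed(ctx context.Context, cli client.Client, resource markableAsFailed, err error) error {
        origResource := resource.DeepCopyObject().(markableAsFailed)
        resource.SetAsFailed(err)
        return cli.Status().Patch(ctx, resource, client.MergeFrom(origResource))
    }

This pattern is what lets the three reconcilers share one error path instead
of maintaining per-type `failedReconciliation` helpers.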
Closes #5927 Signed-off-by: Niccolò Fei Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Co-authored-by: Jaime Silvela Co-authored-by: Armando Ruocco --- api/v1/database_funcs.go | 24 +- internal/management/controller/common.go | 12 + ...tion_controller_test.go => common_test.go} | 0 .../controller/database_controller.go | 216 ++++++------------ .../controller/database_controller_test.go | 35 +-- .../controller/publication_controller.go | 11 +- .../controller/subscription_controller.go | 22 +- 7 files changed, 136 insertions(+), 184 deletions(-) rename internal/management/controller/{subscription_controller_test.go => common_test.go} (100%) diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go index 879d97490c..7679f3236d 100644 --- a/api/v1/database_funcs.go +++ b/api/v1/database_funcs.go @@ -16,7 +16,29 @@ limitations under the License. package v1 -import corev1 "k8s.io/api/core/v1" +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" +) + +// SetAsFailed sets the database as failed with the given error +func (db *Database) SetAsFailed(err error) { + db.Status.Applied = ptr.To(false) + db.Status.Message = err.Error() +} + +// SetAsUnknown sets the database as unknown with the given error +func (db *Database) SetAsUnknown(err error) { + db.Status.Applied = nil + db.Status.Message = err.Error() +} + +// SetAsReady sets the database as working correctly +func (db *Database) SetAsReady() { + db.Status.Applied = ptr.To(true) + db.Status.Message = "" + db.Status.ObservedGeneration = db.Generation +} // GetClusterRef returns the cluster reference of the database func (db *Database) GetClusterRef() corev1.LocalObjectReference { diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go index c0d87aeb97..b5013d6657 100644 --- a/internal/management/controller/common.go +++ b/internal/management/controller/common.go @@ -19,6 +19,7 @@ package controller import ( "bytes" "context" + "database/sql" "fmt" "maps" "slices" @@ -31,6 +32,17 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" ) +// errClusterIsReplica is raised when an object +// cannot be reconciled because it belongs to a replica cluster +var errClusterIsReplica = fmt.Errorf("waiting for the cluster to become primary") + +type instanceInterface interface { + GetSuperUserDB() (*sql.DB, error) + GetClusterName() string + GetPodName() string + GetNamespaceName() string +} + type markableAsFailed interface { client.Object SetAsFailed(err error) diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/common_test.go similarity index 100% rename from internal/management/controller/subscription_controller_test.go rename to internal/management/controller/common_test.go diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index f1283f2d9c..22a72861fd 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -18,23 +18,17 @@ package controller import ( "context" - "database/sql" - "errors" "fmt" "time" "github.com/cloudnative-pg/machinery/pkg/log" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" apiv1 
"github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -43,20 +37,10 @@ type DatabaseReconciler struct { client.Client Scheme *runtime.Scheme - instance instanceInterface + instance instanceInterface + finalizerReconciler *finalizerReconciler[*apiv1.Database] } -type instanceInterface interface { - GetSuperUserDB() (*sql.DB, error) - GetClusterName() string - GetPodName() string - GetNamespaceName() string -} - -// errClusterIsReplica is raised when the database object -// cannot be reconciled because it belongs to a replica cluster -var errClusterIsReplica = fmt.Errorf("waiting for the cluster to become primary") - // databaseReconciliationInterval is the time between the // database reconciliation loop failures const databaseReconciliationInterval = 30 * time.Second @@ -76,16 +60,16 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c Namespace: req.Namespace, Name: req.Name, }, &database); err != nil { - // This is a deleted object, there's nothing - // to do since we don't manage any finalizers. - if apierrors.IsNotFound(err) { - return ctrl.Result{}, nil - } - return ctrl.Result{}, err + contextLogger.Trace("Could not fetch Database", "error", err) + return ctrl.Result{}, client.IgnoreNotFound(err) } // This is not for me! if database.Spec.ClusterRef.Name != r.instance.GetClusterName() { + contextLogger.Trace("Database is not for this cluster", + "cluster", database.Spec.ClusterRef.Name, + "expected", r.instance.GetClusterName(), + ) return ctrl.Result{}, nil } @@ -97,19 +81,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c // Fetch the Cluster from the cache cluster, err := r.GetCluster(ctx) if err != nil { - if apierrors.IsNotFound(err) { - // The cluster has been deleted. 
- // We just need to wait for this instance manager to be terminated - contextLogger.Debug("Could not find Cluster") - return ctrl.Result{}, nil - } - - return ctrl.Result{}, fmt.Errorf("could not fetch Cluster: %w", err) - } - - // This is not for me, at least now - if cluster.Status.CurrentPrimary != r.instance.GetPodName() { - return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil + return ctrl.Result{}, markAsFailed(ctx, r.Client, &database, fmt.Errorf("while fetching the cluster: %w", err)) } contextLogger.Info("Reconciling database") @@ -122,64 +94,59 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } - // Cannot do anything on a replica cluster - if cluster.IsReplica() { - return r.replicaClusterReconciliation(ctx, &database) + // This is not for me, at least now + if cluster.Status.CurrentPrimary != r.instance.GetPodName() { + return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } - // Add the finalizer if we don't have it - // nolint:nestif - if database.DeletionTimestamp.IsZero() { - if controllerutil.AddFinalizer(&database, utils.DatabaseFinalizerName) { - if err := r.Update(ctx, &database); err != nil { - return ctrl.Result{}, err - } - } - } else { - // This database is being deleted - if controllerutil.ContainsFinalizer(&database, utils.DatabaseFinalizerName) { - if database.Spec.ReclaimPolicy == apiv1.DatabaseReclaimDelete { - if err := r.deleteDatabase(ctx, &database); err != nil { - return ctrl.Result{}, err - } - } - - // remove our finalizer from the list and update it. - controllerutil.RemoveFinalizer(&database, utils.DatabaseFinalizerName) - if err := r.Update(ctx, &database); err != nil { - return ctrl.Result{}, err - } + // Cannot do anything on a replica cluster + if cluster.IsReplica() { + if err := markAsUnknown(ctx, r.Client, &database, errClusterIsReplica); err != nil { + return ctrl.Result{}, err } + return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil + } + if err := r.finalizerReconciler.reconcile(ctx, &database); err != nil { + return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err) + } + if !database.GetDeletionTimestamp().IsZero() { return ctrl.Result{}, nil } // Make sure the target PG Database is not being managed by another Database Object if err := r.ensureOnlyOneManager(ctx, database); err != nil { - return r.failedReconciliation( - ctx, - &database, - err, - ) + if markErr := markAsFailed(ctx, r.Client, &database, err); markErr != nil { + contextLogger.Error(err, "while marking as failed the database resource", + "error", err, + "markError", markErr, + ) + return ctrl.Result{}, fmt.Errorf( + "encountered an error while marking as failed the database resource: %w, original error: %w", + markErr, + err) + } + return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } - if err := r.reconcileDatabase( - ctx, - &database, - ); err != nil { - contextLogger.Error(err, "while reconciling database") - return r.failedReconciliation( - ctx, - &database, - err, - ) + if err := r.reconcileDatabase(ctx, &database); err != nil { + if markErr := markAsFailed(ctx, r.Client, &database, err); markErr != nil { + contextLogger.Error(err, "while marking as failed the database resource", + "error", err, + "markError", markErr, + ) + return ctrl.Result{}, fmt.Errorf( + "encountered an error while marking as failed the database resource: %w, original error: %w", + markErr, + err) + } + 
return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } - contextLogger.Info("Reconciliation of database completed") - return r.succeededReconciliation( - ctx, - &database, - ) + if err := markAsReady(ctx, r.Client, &database); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } // ensureOnlyOneManager verifies that the target PostgreSQL Database specified by the given Database object @@ -226,68 +193,16 @@ func (r *DatabaseReconciler) ensureOnlyOneManager( return nil } -// failedReconciliation marks the reconciliation as failed and logs the corresponding error -func (r *DatabaseReconciler) failedReconciliation( - ctx context.Context, - database *apiv1.Database, - err error, -) (ctrl.Result, error) { - oldDatabase := database.DeepCopy() - database.Status.Message = fmt.Sprintf("reconciliation error: %s", err.Error()) - database.Status.Applied = ptr.To(false) - - var statusError *instance.StatusError - if errors.As(err, &statusError) { - // The body line of the instance manager contains the human - // readable error - database.Status.Message = statusError.Body - } - - if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil { - return ctrl.Result{}, err - } - - return ctrl.Result{ - RequeueAfter: databaseReconciliationInterval, - }, nil -} - -// succeededReconciliation marks the reconciliation as succeeded -func (r *DatabaseReconciler) succeededReconciliation( - ctx context.Context, - database *apiv1.Database, -) (ctrl.Result, error) { - oldDatabase := database.DeepCopy() - database.Status.Message = "" - database.Status.Applied = ptr.To(true) - database.Status.ObservedGeneration = database.Generation - - if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil { - return ctrl.Result{}, err +func (r *DatabaseReconciler) evaluateDropDatabase(ctx context.Context, db *apiv1.Database) error { + if db.Spec.ReclaimPolicy != apiv1.DatabaseReclaimDelete { + return nil } - - return ctrl.Result{ - RequeueAfter: databaseReconciliationInterval, - }, nil -} - -// replicaClusterReconciliation sets the status for a reconciliation that's -// executed in a replica Cluster -func (r *DatabaseReconciler) replicaClusterReconciliation( - ctx context.Context, - database *apiv1.Database, -) (ctrl.Result, error) { - oldDatabase := database.DeepCopy() - database.Status.Message = errClusterIsReplica.Error() - database.Status.Applied = nil - - if err := r.Client.Status().Patch(ctx, database, client.MergeFrom(oldDatabase)); err != nil { - return ctrl.Result{}, err + sqlDB, err := r.instance.GetSuperUserDB() + if err != nil { + return fmt.Errorf("while getting DB connection: %w", err) } - return ctrl.Result{ - RequeueAfter: databaseReconciliationInterval, - }, nil + return dropDatabase(ctx, sqlDB, db) } // NewDatabaseReconciler creates a new database reconciler @@ -295,10 +210,18 @@ func NewDatabaseReconciler( mgr manager.Manager, instance *postgres.Instance, ) *DatabaseReconciler { - return &DatabaseReconciler{ + dr := &DatabaseReconciler{ Client: mgr.GetClient(), instance: instance, } + + dr.finalizerReconciler = newFinalizerReconciler( + mgr.GetClient(), + utils.DatabaseFinalizerName, + dr.evaluateDropDatabase, + ) + + return dr } // SetupWithManager sets up the controller with the Manager. 
@@ -335,12 +258,3 @@ func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.D return createDatabase(ctx, db, obj) } - -func (r *DatabaseReconciler) deleteDatabase(ctx context.Context, obj *apiv1.Database) error { - db, err := r.instance.GetSuperUserDB() - if err != nil { - return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err) - } - - return dropDatabase(ctx, db, obj) -} diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index c9c86ac216..37712a2c21 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -34,6 +34,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -109,6 +110,11 @@ var _ = Describe("Managed Database status", func() { Scheme: schemeBuilder.BuildWithAllKnownScheme(), instance: &f, } + r.finalizerReconciler = newFinalizerReconciler( + fakeClient, + utils.DatabaseFinalizerName, + r.evaluateDropDatabase, + ) }) AfterEach(func() { @@ -263,7 +269,7 @@ var _ = Describe("Managed Database status", func() { Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) - It("skips reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) { + It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) { // since the fakeClient has the `cluster-example` cluster, let's reference // another cluster `cluster-other` that is not found by the fakeClient pgInstance := postgres.NewInstance(). 
@@ -301,8 +307,8 @@ var _ = Describe("Managed Database status", func() { }, &updatedDatabase) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Applied).Should(BeNil()) - Expect(updatedDatabase.Status.Message).Should(BeEmpty()) + Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeFalse())) + Expect(updatedDatabase.Status.Message).Should(ContainSubstring(`"cluster-other" not found`)) }) It("skips reconciliation if database object isn't found (deleted database)", func(ctx SpecContext) { @@ -333,29 +339,6 @@ var _ = Describe("Managed Database status", func() { Expect(result).Should(BeZero()) // nothing to do, since the DB is being deleted }) - It("properly marks the status on a succeeded reconciliation", func(ctx SpecContext) { - _, err := r.succeededReconciliation(ctx, database) - Expect(err).ToNot(HaveOccurred()) - Expect(database.Status.Applied).To(HaveValue(BeTrue())) - Expect(database.Status.Message).To(BeEmpty()) - }) - - It("properly marks the status on a failed reconciliation", func(ctx SpecContext) { - exampleError := fmt.Errorf("sample error for database %s", database.Spec.Name) - - _, err := r.failedReconciliation(ctx, database, exampleError) - Expect(err).ToNot(HaveOccurred()) - Expect(database.Status.Applied).To(HaveValue(BeFalse())) - Expect(database.Status.Message).To(ContainSubstring(exampleError.Error())) - }) - - It("properly marks the status on a replica Cluster reconciliation", func(ctx SpecContext) { - _, err := r.replicaClusterReconciliation(ctx, database) - Expect(err).ToNot(HaveOccurred()) - Expect(database.Status.Applied).To(BeNil()) - Expect(database.Status.Message).To(BeEquivalentTo(errClusterIsReplica.Error())) - }) - It("drops database with ensure absent option", func(ctx SpecContext) { // Mocking dropDatabase expectedValue := sqlmock.NewResult(0, 1) diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go index 9758ff690d..086b37e778 100644 --- a/internal/management/controller/publication_controller.go +++ b/internal/management/controller/publication_controller.go @@ -124,8 +124,15 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) if err := r.alignPublication(ctx, &publication); err != nil { contextLogger.Error(err, "while reconciling publication") - if err := markAsFailed(ctx, r.Client, &publication, err); err != nil { - return ctrl.Result{}, err + if markErr := markAsFailed(ctx, r.Client, &publication, err); markErr != nil { + contextLogger.Error(err, "while marking as failed the publication resource", + "error", err, + "markError", markErr, + ) + return ctrl.Result{}, fmt.Errorf( + "encountered an error while marking as failed the publication resource: %w, original error: %w", + markErr, + err) } return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil } diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go index 8019c3dd2b..16a1165b7f 100644 --- a/internal/management/controller/subscription_controller.go +++ b/internal/management/controller/subscription_controller.go @@ -122,16 +122,30 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request subscription.Spec.PublicationDBName, ) if err != nil { - if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil { - return ctrl.Result{}, err + if markErr := markAsFailed(ctx, r.Client, &subscription, err); markErr != nil { + contextLogger.Error(err, "while 
marking as failed the subscription resource", + "error", err, + "markError", markErr, + ) + return ctrl.Result{}, fmt.Errorf( + "encountered an error while marking as failed the subscription resource: %w, original error: %w", + markErr, + err) } return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil } if err := r.alignSubscription(ctx, &subscription, connString); err != nil { contextLogger.Error(err, "while reconciling subscription") - if err := markAsFailed(ctx, r.Client, &subscription, err); err != nil { - return ctrl.Result{}, err + if markErr := markAsFailed(ctx, r.Client, &subscription, err); markErr != nil { + contextLogger.Error(err, "while marking as failed the subscription resource", + "error", err, + "markError", markErr, + ) + return ctrl.Result{}, fmt.Errorf( + "encountered an error while marking as failed the subscription resource: %w, original error: %w", + markErr, + err) } return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil } From 7dcb199181b753db8badf7fd12a19219221376b1 Mon Sep 17 00:00:00 2001 From: Tao Li Date: Thu, 5 Dec 2024 20:19:22 +0800 Subject: [PATCH 199/836] fix: eliminate redundant Cluster status updates with image catalog (#6277) This patch resolves the issue of redundant Cluster status updates triggered when the image catalog is enabled. Closes: #6276 Signed-off-by: Tao Li --- internal/controller/cluster_image.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index 95ac9ad668..547b610cc9 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -113,7 +113,7 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C } // If the image is different, we set it into the cluster status - if cluster.Spec.ImageName != catalogImage { + if cluster.Status.Image != catalogImage { cluster.Status.Image = catalogImage patch := client.MergeFrom(oldCluster) if err := r.Status().Patch(ctx, cluster, patch); err != nil { From d8af714ebade46e4478957e429c4a07a473e8f55 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Thu, 5 Dec 2024 14:14:21 +0100 Subject: [PATCH 200/836] docs: cosmetic changes to `Database` spec (#6280) Closes #6278 Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Signed-off-by: Jaime Silvela Co-authored-by: Marco Nenciarini Co-authored-by: Jaime Silvela --- api/v1/database_types.go | 85 ++++++++++------- .../bases/postgresql.cnpg.io_databases.yaml | 92 ++++++++++++------- docs/src/cloudnative-pg.v1.md | 84 ++++++++++------- 3 files changed, 162 insertions(+), 99 deletions(-) diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 5e6ecd834a..3c759ab767 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -35,110 +35,129 @@ const ( DatabaseReclaimRetain DatabaseReclaimPolicy = "retain" ) -// DatabaseSpec is the specification of a Postgresql Database +// DatabaseSpec is the specification of a Postgresql Database, built around the +// `CREATE DATABASE`, `ALTER DATABASE`, and `DROP DATABASE` SQL commands of +// PostgreSQL. 
// +kubebuilder:validation:XValidation:rule="!has(self.builtinLocale) || self.localeProvider == 'builtin'",message="builtinLocale is only available when localeProvider is set to `builtin`" // +kubebuilder:validation:XValidation:rule="!has(self.icuLocale) || self.localeProvider == 'icu'",message="icuLocale is only available when localeProvider is set to `icu`" // +kubebuilder:validation:XValidation:rule="!has(self.icuRules) || self.localeProvider == 'icu'",message="icuRules is only available when localeProvider is set to `icu`" type DatabaseSpec struct { - // The corresponding cluster + // The name of the PostgreSQL cluster hosting the database. ClusterRef corev1.LocalObjectReference `json:"cluster"` - // Ensure the PostgreSQL database is `present` or `absent` - defaults to "present" + // Ensure the PostgreSQL database is `present` or `absent` - defaults to "present". // +kubebuilder:default:="present" // +kubebuilder:validation:Enum=present;absent // +optional Ensure EnsureOption `json:"ensure,omitempty"` - // The name inside PostgreSQL + // The name of the database to create inside PostgreSQL. This setting cannot be changed. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" // +kubebuilder:validation:XValidation:rule="self != 'postgres'",message="the name postgres is reserved" // +kubebuilder:validation:XValidation:rule="self != 'template0'",message="the name template0 is reserved" // +kubebuilder:validation:XValidation:rule="self != 'template1'",message="the name template1 is reserved" Name string `json:"name"` - // The owner + // Maps to the `OWNER` parameter of `CREATE DATABASE`. + // Maps to the `OWNER TO` command of `ALTER DATABASE`. + // The role name of the user who owns the database inside PostgreSQL. Owner string `json:"owner"` - // The name of the template from which to create the new database + // Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + // cannot be changed. The name of the template from which to create + // this database. // +optional // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="template is immutable" Template string `json:"template,omitempty"` - // The encoding (cannot be changed) + // Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + // cannot be changed. Character set encoding to use in the database. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="encoding is immutable" // +optional Encoding string `json:"encoding,omitempty"` - // The locale (cannot be changed) - // Sets the default collation order and character classification in the new database. + // Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + // cannot be changed. Sets the default collation order and character + // classification in the new database. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="locale is immutable" // +optional Locale string `json:"locale,omitempty"` - // The LOCALE_PROVIDER (cannot be changed) - // This option sets the locale provider for databases created in the new cluster. - // Available from PostgreSQL 16. + // Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + // setting cannot be changed. This option sets the locale provider for + // databases created in the new cluster. Available from PostgreSQL 16. 
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeProvider is immutable" // +optional LocaleProvider string `json:"localeProvider,omitempty"` - // The LC_COLLATE (cannot be changed) + // Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + // setting cannot be changed. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCollate is immutable" // +optional LcCollate string `json:"localeCollate,omitempty"` - // The LC_CTYPE (cannot be changed) + // Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + // cannot be changed. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="localeCType is immutable" // +optional LcCtype string `json:"localeCType,omitempty"` - // The ICU_LOCALE (cannot be changed) - // Specifies the ICU locale when the ICU provider is used. - // This option requires `localeProvider` to be set to `icu`. - // Available from PostgreSQL 15. + // Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + // setting cannot be changed. Specifies the ICU locale when the ICU + // provider is used. This option requires `localeProvider` to be set to + // `icu`. Available from PostgreSQL 15. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuLocale is immutable" // +optional IcuLocale string `json:"icuLocale,omitempty"` - // The ICU_RULES (cannot be changed) - // Specifies additional collation rules to customize the behavior of the default collation. - // This option requires `localeProvider` to be set to `icu`. - // Available from PostgreSQL 16. + // Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + // cannot be changed. Specifies additional collation rules to customize + // the behavior of the default collation. This option requires + // `localeProvider` to be set to `icu`. Available from PostgreSQL 16. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="icuRules is immutable" // +optional IcuRules string `json:"icuRules,omitempty"` - // The BUILTIN_LOCALE (cannot be changed) - // Specifies the locale name when the builtin provider is used. - // This option requires `localeProvider` to be set to `builtin`. - // Available from PostgreSQL 17. + // Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + // setting cannot be changed. Specifies the locale name when the + // builtin provider is used. This option requires `localeProvider` to + // be set to `builtin`. Available from PostgreSQL 17. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtinLocale is immutable" // +optional BuiltinLocale string `json:"builtinLocale,omitempty"` - // The COLLATION_VERSION (cannot be changed) + // Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + // setting cannot be changed. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collationVersion is immutable" // +optional CollationVersion string `json:"collationVersion,omitempty"` - // True when the database is a template + // Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + // DATABASE`. If true, this database is considered a template and can + // be cloned by any user with `CREATEDB` privileges. // +optional IsTemplate *bool `json:"isTemplate,omitempty"` - // True when connections to this database are allowed + // Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + // `ALTER DATABASE`. If false then no one can connect to this database. 
// +optional AllowConnections *bool `json:"allowConnections,omitempty"` - // Connection limit, -1 means no limit and -2 means the - // database is not valid + // Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + // `ALTER DATABASE`. How many concurrent connections can be made to + // this database. -1 (the default) means no limit. // +optional ConnectionLimit *int `json:"connectionLimit,omitempty"` - // The default tablespace of this database + // Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + // Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + // The name of the tablespace (in PostgreSQL) that will be associated + // with the new database. This tablespace will be the default + // tablespace used for objects created in this database. // +optional Tablespace string `json:"tablespace,omitempty"` - // The policy for end-of-life maintenance of this database + // The policy for end-of-life maintenance of this database. // +kubebuilder:validation:Enum=delete;retain // +kubebuilder:default:=retain // +optional diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index 7a1d7c8066..b9db5db349 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -59,20 +59,22 @@ spec: More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status properties: allowConnections: - description: True when connections to this database are allowed + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. type: boolean builtinLocale: description: |- - The BUILTIN_LOCALE (cannot be changed) - Specifies the locale name when the builtin provider is used. - This option requires `localeProvider` to be set to `builtin`. - Available from PostgreSQL 17. + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. type: string x-kubernetes-validations: - message: builtinLocale is immutable rule: self == oldSelf cluster: - description: The corresponding cluster + description: The name of the PostgreSQL cluster hosting the database. properties: name: default: "" @@ -86,25 +88,30 @@ spec: type: object x-kubernetes-map-type: atomic collationVersion: - description: The COLLATION_VERSION (cannot be changed) + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. type: string x-kubernetes-validations: - message: collationVersion is immutable rule: self == oldSelf connectionLimit: description: |- - Connection limit, -1 means no limit and -2 means the - database is not valid + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. type: integer databaseReclaimPolicy: default: retain - description: The policy for end-of-life maintenance of this database + description: The policy for end-of-life maintenance of this database. enum: - delete - retain type: string encoding: - description: The encoding (cannot be changed) + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
Character set encoding to use in the database. type: string x-kubernetes-validations: - message: encoding is immutable @@ -112,65 +119,74 @@ spec: ensure: default: present description: Ensure the PostgreSQL database is `present` or `absent` - - defaults to "present" + - defaults to "present". enum: - present - absent type: string icuLocale: description: |- - The ICU_LOCALE (cannot be changed) - Specifies the ICU locale when the ICU provider is used. - This option requires `localeProvider` to be set to `icu`. - Available from PostgreSQL 15. + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. type: string x-kubernetes-validations: - message: icuLocale is immutable rule: self == oldSelf icuRules: description: |- - The ICU_RULES (cannot be changed) - Specifies additional collation rules to customize the behavior of the default collation. - This option requires `localeProvider` to be set to `icu`. - Available from PostgreSQL 16. + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. type: string x-kubernetes-validations: - message: icuRules is immutable rule: self == oldSelf isTemplate: - description: True when the database is a template + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. type: boolean locale: description: |- - The locale (cannot be changed) - Sets the default collation order and character classification in the new database. + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. type: string x-kubernetes-validations: - message: locale is immutable rule: self == oldSelf localeCType: - description: The LC_CTYPE (cannot be changed) + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. type: string x-kubernetes-validations: - message: localeCType is immutable rule: self == oldSelf localeCollate: - description: The LC_COLLATE (cannot be changed) + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. type: string x-kubernetes-validations: - message: localeCollate is immutable rule: self == oldSelf localeProvider: description: |- - The LOCALE_PROVIDER (cannot be changed) - This option sets the locale provider for databases created in the new cluster. - Available from PostgreSQL 16. + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. type: string x-kubernetes-validations: - message: localeProvider is immutable rule: self == oldSelf name: - description: The name inside PostgreSQL + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. 
type: string x-kubernetes-validations: - message: name is immutable @@ -182,14 +198,24 @@ spec: - message: the name template1 is reserved rule: self != 'template1' owner: - description: The owner + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. type: string tablespace: - description: The default tablespace of this database + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. type: string template: - description: The name of the template from which to create the new - database + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. type: string x-kubernetes-validations: - message: template is immutable diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 7a5b1c193f..48b53866ef 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2422,7 +2422,9 @@ PostgreSQL cluster from an existing storage

 - [Database](#postgresql-cnpg-io-v1-Database)
 
-DatabaseSpec is the specification of a Postgresql Database
+DatabaseSpec is the specification of a Postgresql Database, built around the
+CREATE DATABASE, ALTER DATABASE, and DROP DATABASE SQL commands of
+PostgreSQL.
 
@@ -2432,146 +2434,162 @@ PostgreSQL cluster from an existing storage
[The HTML markup of this docs hunk was stripped in extraction; the body of the hunk, the regenerated per-field reference table for DatabaseSpec, is omitted.]
From ed2f89dc5d23f17a09da2fdf196fe31aafca5d52 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Niccol=C3=B2=20Fei?=
Date: Thu, 5 Dec 2024 14:27:18 +0100
Subject: [PATCH 201/836] chore: update object status to failed when referred
 cluster is deleted (#6279)

When a cluster referred to by an object is deleted, the object status
was previously left unchanged, which could lead to confusion. This
patch sets the status to `failed`, and the message to `cluster resource
has been deleted, skipping reconciliation`.

Closes #6211
Closes #6172

Signed-off-by: wolfox
Signed-off-by: Armando Ruocco
Signed-off-by: Jaime Silvela
Co-authored-by: wolfox
Co-authored-by: Armando Ruocco
Co-authored-by: Jaime Silvela
---
 api/v1/database_funcs.go                      |   5 +
 api/v1/publication_funcs.go                   |   5 +
 api/v1/subscription_funcs.go                  |   5 +
 internal/controller/cluster_controller.go     |   2 +-
 internal/controller/finalizers_delete.go      | 111 +++++++++++-------
 internal/controller/finalizers_delete_test.go |  54 +++++++--
 6 files changed, 128 insertions(+), 54 deletions(-)

diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go
index 7679f3236d..198e760ce5 100644
--- a/api/v1/database_funcs.go
+++ b/api/v1/database_funcs.go
@@ -40,6 +40,11 @@ func (db *Database) SetAsReady() {
 	db.Status.ObservedGeneration = db.Generation
 }
 
+// GetStatusMessage returns the status message of the database
+func (db *Database) GetStatusMessage() string {
+	return db.Status.Message
+}
+
 // GetClusterRef returns the cluster reference of the database
 func (db *Database) GetClusterRef() corev1.LocalObjectReference {
 	return db.Spec.ClusterRef
diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go
index e67255b68c..bfda3183a3 100644
--- a/api/v1/publication_funcs.go
+++ b/api/v1/publication_funcs.go
@@ -40,6 +40,11 @@ func (pub *Publication) SetAsReady() {
 	pub.Status.ObservedGeneration = pub.Generation
 }
 
+// GetStatusMessage returns the status message of the publication
+func (pub *Publication) GetStatusMessage() string {
+	return pub.Status.Message
+}
+
 // GetClusterRef returns the cluster reference of the publication
 func (pub *Publication) GetClusterRef() corev1.LocalObjectReference {
 	return pub.Spec.ClusterRef
diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go
index 49a418bdae..506bf05b81 100644
--- a/api/v1/subscription_funcs.go
+++ b/api/v1/subscription_funcs.go
@@ -40,6 +40,11 @@ func (sub *Subscription) SetAsReady() {
 	sub.Status.ObservedGeneration = sub.Generation
 }
 
+// GetStatusMessage returns the status message of the subscription
+func (sub *Subscription) GetStatusMessage() string {
+	return sub.Status.Message
+}
+
 // GetClusterRef returns the cluster reference of the subscription
 func (sub *Subscription) GetClusterRef() corev1.LocalObjectReference {
 	return sub.Spec.ClusterRef
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 2cdd503e82..606ecab88c 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -160,7 +160,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
 				"namespace", req.Namespace,
 			)
 		}
-		if err := r.deleteFinalizers(ctx, req.NamespacedName); err != nil {
+		if err := r.notifyDeletionToOwnedResources(ctx, req.NamespacedName); err != nil {
 			contextLogger.Error(
 				err,
 				"error while deleting finalizers of objects on the cluster",
diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go
index 0bee4928df..6af03883b1 
100644 --- a/internal/controller/finalizers_delete.go +++ b/internal/controller/finalizers_delete.go @@ -18,10 +18,10 @@ package controller import ( "context" + "errors" "github.com/cloudnative-pg/machinery/pkg/log" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -30,78 +30,109 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) -// ClusterReferrer is an object containing a cluster reference -type ClusterReferrer interface { - GetClusterRef() corev1.LocalObjectReference - client.Object -} +// notifyDeletionToOwnedResources notifies the cluster deletion to the managed owned resources +func (r *ClusterReconciler) notifyDeletionToOwnedResources( + ctx context.Context, + namespacedName types.NamespacedName, +) error { + var dbList apiv1.DatabaseList + if err := r.List(ctx, &dbList, client.InNamespace(namespacedName.Namespace)); err != nil { + return err + } -// deleteFinalizers deletes object finalizers when the cluster they were in has been deleted -func (r *ClusterReconciler) deleteFinalizers(ctx context.Context, namespacedName types.NamespacedName) error { - if err := r.deleteFinalizersForResource( + if err := notifyOwnedResourceDeletion( ctx, + r.Client, namespacedName, - &apiv1.DatabaseList{}, + toSliceWithPointers(dbList.Items), utils.DatabaseFinalizerName, ); err != nil { return err } - if err := r.deleteFinalizersForResource( + var pbList apiv1.PublicationList + if err := r.List(ctx, &pbList, client.InNamespace(namespacedName.Namespace)); err != nil { + return err + } + + if err := notifyOwnedResourceDeletion( ctx, + r.Client, namespacedName, - &apiv1.PublicationList{}, + toSliceWithPointers(pbList.Items), utils.PublicationFinalizerName, ); err != nil { return err } - return r.deleteFinalizersForResource( + var sbList apiv1.SubscriptionList + if err := r.List(ctx, &sbList, client.InNamespace(namespacedName.Namespace)); err != nil { + return err + } + + return notifyOwnedResourceDeletion( ctx, + r.Client, namespacedName, - &apiv1.SubscriptionList{}, + toSliceWithPointers(sbList.Items), utils.SubscriptionFinalizerName, ) } -// deleteFinalizersForResource deletes finalizers for a given resource type -func (r *ClusterReconciler) deleteFinalizersForResource( +// clusterOwnedResourceWithStatus is a kubernetes resource object owned by a cluster that has status +// capabilities +type clusterOwnedResourceWithStatus interface { + client.Object + GetClusterRef() corev1.LocalObjectReference + GetStatusMessage() string + SetAsFailed(err error) +} + +func toSliceWithPointers[T any](items []T) []*T { + result := make([]*T, len(items)) + for i, item := range items { + result[i] = &item + } + return result +} + +// notifyOwnedResourceDeletion deletes finalizers for a given resource type +func notifyOwnedResourceDeletion[T clusterOwnedResourceWithStatus]( ctx context.Context, + cli client.Client, namespacedName types.NamespacedName, - list client.ObjectList, + objects []T, finalizerName string, ) error { contextLogger := log.FromContext(ctx) - - if err := r.List(ctx, list, client.InNamespace(namespacedName.Namespace)); err != nil { - return err - } - - items, err := meta.ExtractList(list) - if err != nil { - return err - } - - for _, item := range items { - obj, ok := item.(ClusterReferrer) - if !ok { + for _, obj := range objects { + itemLogger := contextLogger.WithValues( + "resourceKind", 
obj.GetObjectKind().GroupVersionKind().Kind, + "resourceName", obj.GetName(), + "finalizerName", finalizerName, + ) + if obj.GetClusterRef().Name != namespacedName.Name { continue } - if obj.GetClusterRef().Name != namespacedName.Name { - continue + const statusMessage = "cluster resource has been deleted, skipping reconciliation" + + origObj := obj.DeepCopyObject().(T) + + if obj.GetStatusMessage() != statusMessage { + obj.SetAsFailed(errors.New(statusMessage)) + if err := cli.Status().Patch(ctx, obj, client.MergeFrom(origObj)); err != nil { + itemLogger.Error(err, "error while setting failed status for cluster deletion") + return err + } } - origObj := obj.DeepCopyObject().(ClusterReferrer) if controllerutil.RemoveFinalizer(obj, finalizerName) { - contextLogger.Debug("Removing finalizer from resource", - "finalizer", finalizerName, "resource", obj.GetName()) - if err := r.Patch(ctx, obj, client.MergeFrom(origObj)); err != nil { - contextLogger.Error( + itemLogger.Debug("Removing finalizer from resource") + if err := cli.Patch(ctx, obj, client.MergeFrom(origObj)); err != nil { + itemLogger.Error( err, - "error while removing finalizer from resource", - "resource", obj.GetName(), - "kind", obj.GetObjectKind().GroupVersionKind().Kind, + "while removing the finalizer", "oldFinalizerList", origObj.GetFinalizers(), "newFinalizerList", obj.GetFinalizers(), ) diff --git a/internal/controller/finalizers_delete_test.go b/internal/controller/finalizers_delete_test.go index cc6c0d5651..fcb7b40849 100644 --- a/internal/controller/finalizers_delete_test.go +++ b/internal/controller/finalizers_delete_test.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -33,7 +34,7 @@ import ( ) // nolint: dupl -var _ = Describe("CRD finalizers", func() { +var _ = Describe("Test cleanup of owned objects on cluster deletion", func() { var ( r ClusterReconciler scheme *runtime.Scheme @@ -51,7 +52,7 @@ var _ = Describe("CRD finalizers", func() { } }) - It("should delete database finalizers for databases on the cluster", func(ctx SpecContext) { + It("should set databases on the cluster as failed and delete their finalizers", func(ctx SpecContext) { databaseList := &apiv1.DatabaseList{ Items: []apiv1.Database{ { @@ -68,6 +69,10 @@ var _ = Describe("CRD finalizers", func() { Name: "cluster", }, }, + Status: apiv1.DatabaseStatus{ + Applied: ptr.To(true), + Message: "", + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -87,9 +92,10 @@ var _ = Describe("CRD finalizers", func() { }, } - cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build() + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList). 
+ WithStatusSubresource(&databaseList.Items[0], &databaseList.Items[1]).Build() r.Client = cli - err := r.deleteFinalizers(ctx, namespacedName) + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) Expect(err).ToNot(HaveOccurred()) for _, db := range databaseList.Items { @@ -97,6 +103,8 @@ var _ = Describe("CRD finalizers", func() { err = cli.Get(ctx, client.ObjectKeyFromObject(&db), database) Expect(err).ToNot(HaveOccurred()) Expect(database.Finalizers).To(BeZero()) + Expect(database.Status.Applied).To(HaveValue(BeFalse())) + Expect(database.Status.Message).To(ContainSubstring("cluster resource has been deleted")) } }) @@ -124,16 +132,18 @@ var _ = Describe("CRD finalizers", func() { cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(databaseList).Build() r.Client = cli - err := r.deleteFinalizers(ctx, namespacedName) + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) Expect(err).ToNot(HaveOccurred()) database := &apiv1.Database{} err = cli.Get(ctx, client.ObjectKeyFromObject(&databaseList.Items[0]), database) Expect(err).ToNot(HaveOccurred()) Expect(database.Finalizers).To(BeEquivalentTo([]string{utils.DatabaseFinalizerName})) + Expect(database.Status.Applied).To(BeNil()) + Expect(database.Status.Message).ToNot(ContainSubstring("not reconciled")) }) - It("should delete publication finalizers for publications on the cluster", func(ctx SpecContext) { + It("should set publications on the cluster as failed and delete their finalizers", func(ctx SpecContext) { publicationList := &apiv1.PublicationList{ Items: []apiv1.Publication{ { @@ -150,6 +160,10 @@ var _ = Describe("CRD finalizers", func() { Name: "cluster", }, }, + Status: apiv1.PublicationStatus{ + Applied: ptr.To(true), + Message: "", + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -169,9 +183,10 @@ var _ = Describe("CRD finalizers", func() { }, } - cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build() + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList). 
+ WithStatusSubresource(&publicationList.Items[0], &publicationList.Items[1]).Build() r.Client = cli - err := r.deleteFinalizers(ctx, namespacedName) + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) Expect(err).ToNot(HaveOccurred()) for _, pub := range publicationList.Items { @@ -179,6 +194,8 @@ var _ = Describe("CRD finalizers", func() { err = cli.Get(ctx, client.ObjectKeyFromObject(&pub), publication) Expect(err).ToNot(HaveOccurred()) Expect(publication.Finalizers).To(BeZero()) + Expect(publication.Status.Applied).To(HaveValue(BeFalse())) + Expect(publication.Status.Message).To(ContainSubstring("cluster resource has been deleted")) } }) @@ -205,16 +222,18 @@ var _ = Describe("CRD finalizers", func() { cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(publicationList).Build() r.Client = cli - err := r.deleteFinalizers(ctx, namespacedName) + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) Expect(err).ToNot(HaveOccurred()) publication := &apiv1.Publication{} err = cli.Get(ctx, client.ObjectKeyFromObject(&publicationList.Items[0]), publication) Expect(err).ToNot(HaveOccurred()) Expect(publication.Finalizers).To(BeEquivalentTo([]string{utils.PublicationFinalizerName})) + Expect(publication.Status.Applied).To(BeNil()) + Expect(publication.Status.Message).ToNot(ContainSubstring("not reconciled")) }) - It("should delete subscription finalizers for subscriptions on the cluster", func(ctx SpecContext) { + It("should set subscriptions on the cluster as failed and delete their finalizers ", func(ctx SpecContext) { subscriptionList := &apiv1.SubscriptionList{ Items: []apiv1.Subscription{ { @@ -231,6 +250,10 @@ var _ = Describe("CRD finalizers", func() { Name: "cluster", }, }, + Status: apiv1.SubscriptionStatus{ + Applied: ptr.To(true), + Message: "", + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -250,9 +273,10 @@ var _ = Describe("CRD finalizers", func() { }, } - cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build() + cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList). 
+ WithStatusSubresource(&subscriptionList.Items[0], &subscriptionList.Items[1]).Build() r.Client = cli - err := r.deleteFinalizers(ctx, namespacedName) + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) Expect(err).ToNot(HaveOccurred()) for _, sub := range subscriptionList.Items { @@ -260,6 +284,8 @@ var _ = Describe("CRD finalizers", func() { err = cli.Get(ctx, client.ObjectKeyFromObject(&sub), subscription) Expect(err).ToNot(HaveOccurred()) Expect(subscription.Finalizers).To(BeZero()) + Expect(subscription.Status.Applied).To(HaveValue(BeFalse())) + Expect(subscription.Status.Message).To(ContainSubstring("cluster resource has been deleted")) } }) @@ -286,12 +312,14 @@ var _ = Describe("CRD finalizers", func() { cli := fake.NewClientBuilder().WithScheme(scheme).WithLists(subscriptionList).Build() r.Client = cli - err := r.deleteFinalizers(ctx, namespacedName) + err := r.notifyDeletionToOwnedResources(ctx, namespacedName) Expect(err).ToNot(HaveOccurred()) subscription := &apiv1.Subscription{} err = cli.Get(ctx, client.ObjectKeyFromObject(&subscriptionList.Items[0]), subscription) Expect(err).ToNot(HaveOccurred()) Expect(subscription.Finalizers).To(BeEquivalentTo([]string{utils.SubscriptionFinalizerName})) + Expect(subscription.Status.Applied).To(BeNil()) + Expect(subscription.Status.Message).ToNot(ContainSubstring("not reconciled")) }) }) From e2967abe6d86ba11425b2ecc38336be339990ea8 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 5 Dec 2024 15:04:26 +0100 Subject: [PATCH 202/836] fix(plugin): pass context in `psql` command (#6257) We were not passing the context from the `kubectl cnpg` plugin call to the `kubectl` command executed when invoking `psql`. This patch resolves the issue by introducing a new parameter that captures the Kubernetes context used to call the plugin. Closes #6227 Closes #4332 Signed-off-by: Jonathan Gonzalez V. 
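
As an illustration (the `staging` context name below is hypothetical; any
kubeconfig context works), an invocation such as:

    kubectl cnpg psql --context staging cluster-example

now forwards the context to the generated command line, which starts with
something like `kubectl exec --context staging ...` rather than silently
targeting the current context.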
---
 internal/cmd/plugin/plugin.go    | 5 +++++
 internal/cmd/plugin/psql/cmd.go  | 1 +
 internal/cmd/plugin/psql/psql.go | 7 +++++++
 3 files changed, 13 insertions(+)

diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go
index b9af618b84..b22c1a9d6c 100644
--- a/internal/cmd/plugin/plugin.go
+++ b/internal/cmd/plugin/plugin.go
@@ -43,6 +43,9 @@ var (
 	// Namespace to operate in
 	Namespace string
 
+	// KubeContext to operate with
+	KubeContext string
+
 	// NamespaceExplicitlyPassed indicates if the namespace was passed manually
 	NamespaceExplicitlyPassed bool
 
@@ -96,6 +99,8 @@ func SetupKubernetesClient(configFlags *genericclioptions.ConfigFlags) error {
 		return err
 	}
 
+	KubeContext = *configFlags.Context
+
 	ClientInterface = kubernetes.NewForConfigOrDie(Config)
 
 	return utils.DetectSecurityContextConstraints(ClientInterface.Discovery())
diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go
index 6a2bfb6cf1..514c39a853 100644
--- a/internal/cmd/plugin/psql/cmd.go
+++ b/internal/cmd/plugin/psql/cmd.go
@@ -45,6 +45,7 @@ func NewCmd() *cobra.Command {
 			psqlOptions := CommandOptions{
 				Replica:     replica,
 				Namespace:   plugin.Namespace,
+				Context:     plugin.KubeContext,
 				AllocateTTY: allocateTTY,
 				PassStdin:   passStdin,
 				Args:        psqlArgs,
diff --git a/internal/cmd/plugin/psql/psql.go b/internal/cmd/plugin/psql/psql.go
index f20d5ce727..35d43c16ff 100644
--- a/internal/cmd/plugin/psql/psql.go
+++ b/internal/cmd/plugin/psql/psql.go
@@ -59,6 +59,9 @@ type CommandOptions struct {
 	// The Namespace where we're working in
 	Namespace string
 
+	// The Context to execute the command
+	Context string
+
 	// Whether we should allocate a TTY for psql
 	AllocateTTY bool
 
@@ -106,6 +109,10 @@ func (psql *Command) getKubectlInvocation() ([]string, error) {
 	result := make([]string, 0, 13+len(psql.Args))
 	result = append(result, "kubectl", "exec")
 
+	if psql.Context != "" {
+		result = append(result, "--context", psql.Context)
+	}
+
 	if psql.AllocateTTY {
 		result = append(result, "-t")
 	}
From ddb8b36ee33b180c3b67b9198161a1b804c1276f Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Thu, 5 Dec 2024 17:05:34 +0100
Subject: [PATCH 203/836] fix(bootstrap): clean up the WAL volume before
 initializing a cluster (#6265)

This patch ensures that the WAL volume is cleaned up during bootstrap,
just as the data directory volume is.
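
The per-directory decision the bootstrap now applies to both PGDATA and
pg_wal can be summarized with this standalone sketch (a simplification of
the real code in pkg/management/postgres/initdb.go below, which uses
fileutils helpers and a pg_controldata probe, for which `dirLooksValid` is
a stand-in):

    package bootstrap

    import (
        "fmt"
        "os"
        "time"
    )

    // ensureDirDoesNotExist moves a directory that holds a valid data
    // directory out of the way, and removes it when it is only the
    // leftover of a failed initialization attempt.
    func ensureDirDoesNotExist(path string, dirLooksValid bool) error {
        if _, err := os.Stat(path); err != nil {
            if os.IsNotExist(err) {
                return nil // nothing to clean up
            }
            return err
        }
        if dirLooksValid {
            // keep the contents: rename with a unique timestamp suffix
            newName := fmt.Sprintf("%s_%s", path, time.Now().Format("20060102150405"))
            return os.Rename(path, newName)
        }
        // garbage from a failed attempt: drop it
        return os.RemoveAll(path)
    }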
Closes #6264 Signed-off-by: Marco Nenciarini Signed-off-by: Leonardo Cecchi Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Co-authored-by: Leonardo Cecchi Co-authored-by: Jaime Silvela Co-authored-by: Armando Ruocco --- internal/cmd/manager/instance/initdb/cmd.go | 2 +- internal/cmd/manager/instance/join/cmd.go | 2 +- .../cmd/manager/instance/restore/restore.go | 2 +- pkg/management/postgres/initdb.go | 83 ++++++++---- pkg/management/postgres/initdb_test.go | 124 ++++++++++++++++++ 5 files changed, 187 insertions(+), 26 deletions(-) create mode 100644 pkg/management/postgres/initdb_test.go diff --git a/internal/cmd/manager/instance/initdb/cmd.go b/internal/cmd/manager/instance/initdb/cmd.go index 81c453544d..09928c9a90 100644 --- a/internal/cmd/manager/instance/initdb/cmd.go +++ b/internal/cmd/manager/instance/initdb/cmd.go @@ -150,7 +150,7 @@ func NewCmd() *cobra.Command { func initSubCommand(ctx context.Context, info postgres.InitInfo) error { contextLogger := log.FromContext(ctx) - err := info.CheckTargetDataDirectory(ctx) + err := info.EnsureTargetDirectoriesDoNotExist(ctx) if err != nil { return err } diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go index e708d79a2d..d29c59f09e 100644 --- a/internal/cmd/manager/instance/join/cmd.go +++ b/internal/cmd/manager/instance/join/cmd.go @@ -95,7 +95,7 @@ func NewCmd() *cobra.Command { func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postgres.InitInfo) error { contextLogger := log.FromContext(ctx) - if err := info.CheckTargetDataDirectory(ctx); err != nil { + if err := info.EnsureTargetDirectoriesDoNotExist(ctx); err != nil { return err } diff --git a/internal/cmd/manager/instance/restore/restore.go b/internal/cmd/manager/instance/restore/restore.go index 7c05ea097b..5f2b81974f 100644 --- a/internal/cmd/manager/instance/restore/restore.go +++ b/internal/cmd/manager/instance/restore/restore.go @@ -68,7 +68,7 @@ func (r *restoreRunnable) Start(ctx context.Context) error { func restoreSubCommand(ctx context.Context, info postgres.InitInfo, cli client.Client) error { contextLogger := log.FromContext(ctx) - if err := info.CheckTargetDataDirectory(ctx); err != nil { + if err := info.EnsureTargetDirectoriesDoNotExist(ctx); err != nil { return err } diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index e78d434dca..4f0b70d364 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -112,11 +112,11 @@ type InitInfo struct { TablespaceMapFile []byte } -// CheckTargetDataDirectory ensures that the target data directory does not exist. -// This is a safety check we do before initializing a new instance data directory. +// EnsureTargetDirectoriesDoNotExist ensures that the target data and WAL directories do not exist. +// This is a safety check we do before initializing a new instance. // // If the PGDATA directory already exists and contains a valid PostgreSQL control file, -// the function moves its contents to a uniquely named directory. +// the function moves the contents to uniquely named directories. // If no valid control file is found, the function assumes the directory is the result of // a failed initialization attempt and removes it. // @@ -132,47 +132,84 @@ type InitInfo struct { // important user data. 
This is particularly relevant when using static provisioning // of PersistentVolumeClaims (PVCs), as it prevents accidental overwriting of a valid // data directory that may exist in the PersistentVolumes (PVs). -func (info InitInfo) CheckTargetDataDirectory(ctx context.Context) error { +func (info InitInfo) EnsureTargetDirectoriesDoNotExist(ctx context.Context) error { contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData) pgDataExists, err := fileutils.FileExists(info.PgData) if err != nil { - contextLogger.Error(err, "Error while checking for an existing PGData") - return fmt.Errorf("while verifying is PGDATA exists: %w", err) + contextLogger.Error(err, "Error while checking for an existing data directory") + return fmt.Errorf("while verifying if the data directory exists: %w", err) } - if !pgDataExists { - // The PGDATA directory doesn't exist. We can definitely - // write to it + + pgWalExists := false + if info.PgWal != "" { + if pgWalExists, err = fileutils.FileExists(info.PgWal); err != nil { + contextLogger.Error(err, "Error while checking for an existing WAL directory") + return fmt.Errorf("while verifying if the WAL directory exists: %w", err) + } + } + + if !pgDataExists && !pgWalExists { return nil } - // We've an existing directory. Let's check if this is a real - // PGDATA directory or not. out, err := info.GetInstance().GetPgControldata() - if err != nil { - contextLogger.Info("pg_controldata check on existing directory failed, cleaning it up", - "out", out, "err", err) + if err == nil { + contextLogger.Info("pg_controldata check on existing directory succeeded, renaming the folders", "out", out) + return info.renameExistingTargetDataDirectories(ctx, pgWalExists) + } + + contextLogger.Info("pg_controldata check on existing directory failed, cleaning up folders", "err", err, "out", out) + return info.removeExistingTargetDataDirectories(ctx, pgDataExists, pgWalExists) +} +func (info InitInfo) removeExistingTargetDataDirectories(ctx context.Context, pgDataExists, pgWalExists bool) error { + contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData, "pgwal", info.PgWal) + + if pgDataExists { + contextLogger.Info("cleaning up existing data directory") if err := fileutils.RemoveDirectory(info.PgData); err != nil { contextLogger.Error(err, "error while cleaning up existing data directory") return err } + } - return nil + if pgWalExists { + contextLogger.Info("cleaning up existing WAL directory") + if err := fileutils.RemoveDirectory(info.PgWal); err != nil { + contextLogger.Error(err, "error while cleaning up existing WAL directory") + return err + } } - renamedDirectoryName := fmt.Sprintf("%s_%s", info.PgData, fileutils.FormatFriendlyTimestamp(time.Now())) - contextLogger = contextLogger.WithValues( - "out", out, - "newName", renamedDirectoryName, - ) + return nil +} + +func (info InitInfo) renameExistingTargetDataDirectories(ctx context.Context, pgWalExists bool) error { + contextLogger := log.FromContext(ctx).WithValues("pgdata", info.PgData, "pgwal", info.PgWal) - contextLogger.Info("pg_controldata check on existing directory succeeded, renaming the folder") - if err := os.Rename(info.PgData, renamedDirectoryName); err != nil { - contextLogger.Error(err, "error while renaming existing data directory") + suffixTimestamp := fileutils.FormatFriendlyTimestamp(time.Now()) + + pgdataNewName := fmt.Sprintf("%s_%s", info.PgData, suffixTimestamp) + contextLogger = contextLogger.WithValues() + + contextLogger.Info("renaming the data directory", 
"pgdataNewName", pgdataNewName) + if err := os.Rename(info.PgData, pgdataNewName); err != nil { + contextLogger.Error(err, "error while renaming existing data directory", + "pgdataNewName", pgdataNewName) return fmt.Errorf("while renaming existing data directory: %w", err) } + if pgWalExists { + pgwalNewName := fmt.Sprintf("%s_%s", info.PgWal, suffixTimestamp) + + contextLogger.Info("renaming the WAL directory", "pgwalNewName", pgwalNewName) + if err := os.Rename(info.PgWal, pgwalNewName); err != nil { + contextLogger.Error(err, "error while renaming existing WAL directory") + return fmt.Errorf("while renaming existing WAL directory: %w", err) + } + } + return nil } diff --git a/pkg/management/postgres/initdb_test.go b/pkg/management/postgres/initdb_test.go new file mode 100644 index 0000000000..985c42293b --- /dev/null +++ b/pkg/management/postgres/initdb_test.go @@ -0,0 +1,124 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package postgres + +import ( + "os" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("EnsureTargetDirectoriesDoNotExist", func() { + var initInfo InitInfo + + BeforeEach(func() { + initInfo = InitInfo{ + PgData: GinkgoT().TempDir(), + PgWal: GinkgoT().TempDir(), + } + Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().To(Succeed()) + Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed()) + }) + + It("should do nothing if both data and WAL directories do not exist", func(ctx SpecContext) { + Expect(os.RemoveAll(initInfo.PgData)).Should(Succeed()) + Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed()) + + err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + }) + + It("should remove existing directories if pg_controldata check fails", func(ctx SpecContext) { + err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + }) + + It("should remove data directory even if WAL directory is not present", func(ctx SpecContext) { + Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed()) + + err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + }) + + It("should remove WAL directory even if data directory is not present", func(ctx SpecContext) { + Expect(os.RemoveAll(initInfo.PgData)).Should(Succeed()) + + err := initInfo.EnsureTargetDirectoriesDoNotExist(ctx) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + 
Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + }) +}) + +var _ = Describe("renameExistingTargetDataDirectories", func() { + var initInfo InitInfo + + BeforeEach(func() { + initInfo = InitInfo{ + PgData: GinkgoT().TempDir(), + PgWal: GinkgoT().TempDir(), + } + Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().To(Succeed()) + Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed()) + }) + + It("should rename existing data and WAL directories", func(ctx SpecContext) { + err := initInfo.renameExistingTargetDataDirectories(ctx, true) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + + filelist, err := filepath.Glob(initInfo.PgData + "_*") + Expect(err).ToNot(HaveOccurred()) + Expect(filelist).To(HaveLen(1)) + + filelist, err = filepath.Glob(initInfo.PgWal + "_*") + Expect(err).ToNot(HaveOccurred()) + Expect(filelist).To(HaveLen(1)) + }) + + It("should rename existing data without WAL directories", func(ctx SpecContext) { + Expect(os.RemoveAll(initInfo.PgWal)).Should(Succeed()) + + err := initInfo.renameExistingTargetDataDirectories(ctx, false) + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Stat(initInfo.PgData)).Error().To(MatchError(os.ErrNotExist)) + Expect(os.Stat(initInfo.PgWal)).Error().To(MatchError(os.ErrNotExist)) + + filelist, err := filepath.Glob(initInfo.PgData + "_*") + Expect(err).ToNot(HaveOccurred()) + Expect(filelist).To(HaveLen(1)) + + filelist, err = filepath.Glob(initInfo.PgWal + "_*") + Expect(err).ToNot(HaveOccurred()) + Expect(filelist).To(BeEmpty()) + }) +}) From e2f640fe83b56a6f65b26a1f4c7b22358000c296 Mon Sep 17 00:00:00 2001 From: "David E. Wheeler" <46604+theory@users.noreply.github.com> Date: Thu, 5 Dec 2024 13:30:49 -0800 Subject: [PATCH 204/836] docs: define RPO and RTO (#6239) Signed-off-by: David E. Wheeler --- .wordlist-en-custom.txt | 2 ++ docs/src/architecture.md | 11 ++++++----- docs/src/backup.md | 11 ++++++----- docs/src/before_you_start.md | 7 +++++++ docs/src/failover.md | 3 ++- docs/src/faq.md | 4 ++-- docs/src/instance_manager.md | 18 ++++++++++-------- docs/src/operator_capability_levels.md | 20 +++++++++++--------- docs/src/replication.md | 14 ++++++++------ docs/src/rolling_update.md | 5 +++-- docs/src/wal_archiving.md | 2 +- 11 files changed, 58 insertions(+), 39 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index f5fe08815f..9216cd19d7 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -361,7 +361,9 @@ README RHSA RLS RPO +rpo RTO +rto RUNTIME ReadWriteOnce RedHat diff --git a/docs/src/architecture.md b/docs/src/architecture.md index 326f50fdd1..509461e26d 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -354,11 +354,12 @@ only write inside a single Kubernetes cluster, at any time. 
However, for business continuity objectives it is fundamental to: -- reduce global **recovery point objectives** (RPO) by storing PostgreSQL backup data - in multiple locations, regions and possibly using different providers - (Disaster Recovery) -- reduce global **recovery time objectives** (RTO) by taking advantage of PostgreSQL - replication beyond the primary Kubernetes cluster (High Availability) +- reduce global **recovery point objectives** ([RPO](before_you_start.md#rpo)) + by storing PostgreSQL backup data in multiple locations, regions and possibly + using different providers (Disaster Recovery) +- reduce global **recovery time objectives** ([RTO](before_you_start.md#rto)) + by taking advantage of PostgreSQL replication beyond the primary Kubernetes + cluster (High Availability) In order to address the above concerns, CloudNativePG introduces the concept of a PostgreSQL Topology that is distributed across different Kubernetes clusters diff --git a/docs/src/backup.md b/docs/src/backup.md index fac42c56f9..4c3f8cb172 100644 --- a/docs/src/backup.md +++ b/docs/src/backup.md @@ -66,7 +66,8 @@ as they can simply rely on the WAL archive to synchronize across long distances, extending disaster recovery goals across different regions. When you [configure a WAL archive](wal_archiving.md), CloudNativePG provides -out-of-the-box an RPO <= 5 minutes for disaster recovery, even across regions. +out-of-the-box an [RPO](before_you_start.md#rpo) <= 5 minutes for disaster +recovery, even across regions. !!! Important Our recommendation is to always setup the WAL archive in production. @@ -118,9 +119,9 @@ including: - availability of a trusted storage class that supports volume snapshots - size of the database: with object stores, the larger your database, the longer backup and, most importantly, recovery procedures take (the latter - impacts RTO); in presence of Very Large Databases (VLDB), the general - advice is to rely on Volume Snapshots as, thanks to copy-on-write, they - provide faster recovery + impacts [RTO](before_you_start.md#rto)); in presence of Very Large Databases + (VLDB), the general advice is to rely on Volume Snapshots as, thanks to + copy-on-write, they provide faster recovery - data mobility and possibility to store or relay backup files on a secondary location in a different region, or any subsequent one - other factors, mostly based on the confidence and familiarity with the @@ -188,7 +189,7 @@ In Kubernetes CronJobs, the equivalent expression is `0 0 * * *` because seconds are not included. !!! Hint - Backup frequency might impact your recovery time object (RTO) after a + Backup frequency might impact your recovery time objective ([RTO](before_you_start.md#rto)) after a disaster which requires a full or Point-In-Time recovery operation. Our advice is that you regularly test your backups by recovering them, and then measuring the time it takes to recover from scratch so that you can refine diff --git a/docs/src/before_you_start.md b/docs/src/before_you_start.md index 2d6c0377fe..7ebc61d732 100644 --- a/docs/src/before_you_start.md +++ b/docs/src/before_you_start.md @@ -131,6 +131,13 @@ PVC group belonging to the same PostgreSQL instance, namely the main volume containing the PGDATA (`storage`) and the volume for WALs (`walStorage`). +RTO +: Acronym for "recovery time objective", the amount of time a system can be + unavailable without adversely impacting the application. 
+
+RPO
+: Acronym for "recovery point objective", a calculation of the level of
+  acceptable data loss following a disaster recovery scenario.
## Cloud terminology
diff --git a/docs/src/failover.md b/docs/src/failover.md index 8469747104..89ecac7d3f 100644 --- a/docs/src/failover.md +++ b/docs/src/failover.md
@@ -46,7 +46,8 @@ During the time the failing primary is being shut down: ## RTO and RPO impact
-Failover may result in the service being impacted and/or data being lost:
+Failover may result in the service being impacted ([RTO](before_you_start.md#rto))
+and/or data being lost ([RPO](before_you_start.md#rpo)):
1. During the time when the primary has started to fail, and before the controller starts failover procedures, queries in transit, WAL writes, checkpoints and
diff --git a/docs/src/faq.md b/docs/src/faq.md index 19137d47c6..f8f92e2253 100644 --- a/docs/src/faq.md +++ b/docs/src/faq.md
@@ -451,8 +451,8 @@ single cluster, namely: - storage: use dedicated storage for each worker node running Postgres
Use at least one standby, preferably at least two, so that you can configure
-synchronous replication in the cluster, introducing RPO=0 for high
-availability.
+synchronous replication in the cluster, introducing [RPO](before_you_start.md#rpo)=0
+for high availability.
If you do not have availability zones - normally the case of on-premise installations - separate on worker nodes and storage.
diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index ce13adbab2..53d13c4e4d 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md
@@ -5,7 +5,7 @@ It simply relies on the Kubernetes API server and a native key component called: the **Postgres instance manager**.
The instance manager takes care of the entire lifecycle of the PostgreSQL
-leading process (also known as `postmaster`).
+server process (also known as `postmaster`).
When you create a new cluster, the operator makes a Pod per instance. The field `.spec.instances` specifies how many instances to create.
@@ -182,8 +182,9 @@ seconds. !!! Important
    In order to avoid any data loss in the Postgres cluster, which impacts
-    the database RPO, don't delete the Pod where the primary instance is running.
-    In this case, perform a switchover to another instance first.
+    the database [RPO](before_you_start.md#rpo), don't delete the Pod where
+    the primary instance is running. In this case, perform a switchover to
+    another instance first.
### Shutdown of the primary during a switchover
@@ -197,11 +198,12 @@ the time given to the former primary to shut down gracefully and archive all the WAL files. By default it is set to `3600` (1 hour).
!!! Warning
-    The `.spec.switchoverDelay` option affects the RPO and RTO of your
-    PostgreSQL database. Setting it to a low value, might favor RTO over RPO
-    but lead to data loss at cluster level and/or backup level. On the contrary,
-    setting it to a high value, might remove the risk of data loss while leaving
-    the cluster without an active primary for a longer time during the switchover.
+    The `.spec.switchoverDelay` option affects the [RPO](before_you_start.md#rpo)
+    and [RTO](before_you_start.md#rto) of your PostgreSQL database. Setting it to
+    a low value might favor RTO over RPO but lead to data loss at cluster level
+    and/or backup level. On the contrary, setting it to a high value might remove
+    the risk of data loss while leaving
+    the cluster without an active primary for a
+    longer time during the switchover.
## Failover diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index c961a27154..3975036803 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -336,11 +336,12 @@ continuity and scalability. *Disaster recovery* is a business continuity component that requires that both backup and recovery of a database work correctly. While as a -starting point, the goal is to achieve RPO < 5 minutes, the long-term goal is -to implement RPO=0 backup solutions. *High availability* is the other -important component of business continuity. Through PostgreSQL native -physical replication and hot standby replicas, it allows the operator to perform -failover and switchover operations. This area includes enhancements in: +starting point, the goal is to achieve [RPO](before_you_start.md#rpo) < 5 +minutes, the long-term goal is to implement RPO=0 backup solutions. *High +availability* is the other important component of business continuity. Through +PostgreSQL native physical replication and hot standby replicas, it allows the +operator to perform failover and switchover operations. This area includes +enhancements in: - Control of PostgreSQL physical replication, such as synchronous replication, (cascading) replication clusters, and so on @@ -404,8 +405,9 @@ database snapshots with cold backups. ### Backups from a standby The operator supports offloading base backups onto a standby without impacting -the RPO of the database. This allows resources to be preserved on the primary, in -particular I/O, for standard database operations. +the [RPO](before_you_start.md#rpo) of the database. This allows resources to +be preserved on the primary, in particular I/O, for standard database +operations. ### Full restore from a backup @@ -460,8 +462,8 @@ switchover across data centers remains necessary.) Additionally, the flexibility extends to creating delayed replica clusters intentionally lagging behind the primary cluster. This intentional lag aims to -minimize the Recovery Time Objective (RTO) in the event of unintended errors, -such as incorrect `DELETE` or `UPDATE` SQL operations. +minimize the Recovery Time Objective ([RTO](before_you_start.md#rto)) in the +event of unintended errors, such as incorrect `DELETE` or `UPDATE` SQL operations. ### Distributed Database Topologies diff --git a/docs/src/replication.md b/docs/src/replication.md index ff9f0f1f41..fbc37595bb 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -36,11 +36,12 @@ recovery. PostgreSQL 9.0 (2010) introduced WAL streaming and read-only replicas through *hot standby*. In 2011, PostgreSQL 9.1 brought synchronous replication at the -transaction level, supporting RPO=0 clusters. Cascading replication was added in -PostgreSQL 9.2 (2012). The foundations for [logical replication](logical_replication.md) -were established in PostgreSQL 9.4 (2014), and version 10 (2017) introduced -native support for the publisher/subscriber pattern to replicate data from an -origin to a destination. The table below summarizes these milestones. +transaction level, supporting [RPO](before_you_start.md#rpo)=0 clusters. Cascading +replication was added in PostgreSQL 9.2 (2012). The foundations for +[logical replication](logical_replication.md) were established in PostgreSQL +9.4 (2014), and version 10 (2017) introduced native support for the +publisher/subscriber pattern to replicate data from an origin to a destination. The +table below summarizes these milestones. 
| Version | Year | Feature | |:-------:|:----:|-----------------------------------------------------------------------| @@ -528,7 +529,8 @@ availability zone from the primary instance, usually identified by the `topology.kubernetes.io/zone` [label on a node](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone). This would increase the robustness of the cluster in case of an outage in a -single availability zone, especially in terms of recovery point objective (RPO). +single availability zone, especially in terms of recovery point objective +([RPO](before_you_start.md#rpo)). The idea of anti-affinity is to ensure that sync replicas that participate in the quorum are chosen from pods running on nodes that have different values for diff --git a/docs/src/rolling_update.md b/docs/src/rolling_update.md index 01907ab416..023cc78c11 100644 --- a/docs/src/rolling_update.md +++ b/docs/src/rolling_update.md @@ -57,8 +57,9 @@ The `primaryUpdateMethod` option accepts one of the following values: There's no one-size-fits-all configuration for the update method, as that depends on several factors like the actual workload of your database, the -requirements in terms of RPO and RTO, whether your PostgreSQL architecture is -shared or shared nothing, and so on. +requirements in terms of [RPO](before_you_start.md#rpo) and +[RTO](before_you_start.md#rto), whether your PostgreSQL architecture is shared +or shared nothing, and so on. Indeed, being PostgreSQL a primary/standby architecture database management system, the update process inevitably generates a downtime for your diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md index 1f7b60e0c7..5216f96a53 100644 --- a/docs/src/wal_archiving.md +++ b/docs/src/wal_archiving.md @@ -43,7 +43,7 @@ segment to be archived. By default, CloudNativePG sets `archive_timeout` to `5min`, ensuring that WAL files, even in case of low workloads, are closed and archived at least every 5 minutes, providing a deterministic time-based value for - your Recovery Point Objective (RPO). Even though you change the value + your Recovery Point Objective ([RPO](before_you_start.md#rpo)). Even though you change the value of the [`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT), our experience suggests that the default value set by the operator is suitable for most use cases. From d3f2b60afb0b68490ea98437d38ad063125d00af Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Thu, 5 Dec 2024 22:32:12 +0100 Subject: [PATCH 205/836] docs: explain logical replication and database examples (#6282) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #6281 Signed-off-by: Jaime Silvela Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- docs/src/samples.md | 67 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/docs/src/samples.md b/docs/src/samples.md index 38a93256cc..a6ef595d7c 100644 --- a/docs/src/samples.md +++ b/docs/src/samples.md @@ -135,3 +135,70 @@ For a list of available options, see [API reference](cloudnative-pg.v1.md). 
**Pooler with custom service config** : [`pooler-external.yaml`](samples/pooler-external.yaml)
+
+## Logical replication via declarative Publication and Subscription objects
+
+Two test manifests contain everything needed to set up logical replication:
+
+**Source cluster with a publication**
+: [`cluster-example-logical-source.yaml`](samples/cluster-example-logical-source.yaml)
+
+Sets up a cluster `cluster-example` with some tables created in the `app`
+database, and, importantly, *adds replication to the app user*.
+A publication is created for the cluster on the `app` database: note that the
+publication will be reconciled only after the cluster's primary is up and
+running.
+
+**Destination cluster with a subscription**
+: *Prerequisites*: The source cluster with publication, defined as above.
+: [`cluster-example-logical-destination.yaml`](samples/cluster-example-logical-destination.yaml)
+
+Sets up a cluster `cluster-example-dest` with:
+
+- the source cluster defined in the `externalClusters` stanza. Note that it uses
+  the `app` role to connect, which assumes the source cluster grants it
+  `replication` privilege.
+- a bootstrap import of microservice type, with `schemaOnly` enabled
+
+A subscription is created on the destination cluster: note that the subscription
+will be reconciled only after the destination cluster's primary is up and
+running.
+
+After both clusters have been reconciled, together with the publication and
+subscription objects, you can verify that the tables in the source cluster,
+and the data in them, have been replicated to the destination cluster.
+
+In addition, there are some standalone example manifests:
+
+**A plain Publication targeting All Tables**
+: *Prerequisites*: an existing cluster `cluster-example`.
+: [`publication-example.yaml`](samples/publication-example.yaml)
+
+**A Publication with a constrained publication target**
+: *Prerequisites*: an existing cluster `cluster-example`.
+: [`publication-example-objects.yaml`](samples/publication-example-objects.yaml)
+
+**A plain Subscription**
+: Prerequisites: an existing cluster `cluster-example` set up as source, with
+  a publication `pub-all`. A cluster `cluster-example-dest` set up as a
+  destination cluster, including the `externalClusters` stanza with
+  connection parameters to the source cluster, including a role with
+  replication privilege.
+: [`subscription-example.yaml`](samples/subscription-example.yaml)
+
+All the above manifests create publications or subscriptions on the `app`
+database. The Database CRD offers a convenient way to create databases
+declaratively. With it, logical replication can be set up for arbitrary
+databases, which brings us to the next section.
+
+## Declarative management of Postgres databases
+
+**A plain Database**
+: *Prerequisites*: an existing cluster `cluster-example`.
+: [`database-example.yaml`](samples/database-example.yaml)
+
+**A Database with ICU locale specifications**
+: *Prerequisites*: an existing cluster `cluster-example` running Postgres 16
+  or later.
+: [`database-example-icu.yaml`](samples/database-example-icu.yaml)
From 186f28b19fdeffeb20a4e428eb5ca683b200a7e1 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 5 Dec 2024 22:35:50 +0100 Subject: [PATCH 206/836] feat: add support for `maxConcurrentReconciles` (#5678) Support the `maxConcurrentReconciles` parameter for improved concurrency management, except for backup operations.
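As a hedged configuration sketch (the flag name and argument layout come from this
patch, while the value `20` is just an illustrative override of the default), the
setting is exposed as a controller argument in the operator Deployment:

    args:
      - controller
      - --leader-elect
      - --max-concurrent-reconciles=20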
By default, the value is set to 10, enabling the operator to handle larger deployments efficiently out of the box. This enhancement provides greater flexibility for tuning reconciliation behaviour to suit diverse workloads and deployment sizes. Closes #5687 Signed-off-by: Armando Ruocco --- config/manager/manager.yaml | 1 + internal/cmd/manager/controller/cmd.go | 8 ++++++++ internal/cmd/manager/controller/controller.go | 13 ++++++------- internal/controller/cluster_controller.go | 6 +++++- internal/controller/plugin_controller.go | 8 +++++++- internal/controller/pooler_controller.go | 4 +++- internal/controller/scheduledbackup_controller.go | 8 +++++++- 7 files changed, 37 insertions(+), 11 deletions(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f0164e0937..312cdc57cd 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -38,6 +38,7 @@ spec: args: - controller - --leader-elect + - --max-concurrent-reconciles=10 - --config-map-name=$(OPERATOR_DEPLOYMENT_NAME)-config - --secret-name=$(OPERATOR_DEPLOYMENT_NAME)-config - --webhook-port=9443 diff --git a/internal/cmd/manager/controller/cmd.go b/internal/cmd/manager/controller/cmd.go index 9eb5629868..3a94375a80 100644 --- a/internal/cmd/manager/controller/cmd.go +++ b/internal/cmd/manager/controller/cmd.go @@ -34,6 +34,7 @@ func NewCmd() *cobra.Command { var pprofHTTPServer bool var leaderLeaseDuration int var leaderRenewDeadline int + var maxConcurrentReconciles int cmd := cobra.Command{ Use: "controller [flags]", @@ -50,6 +51,7 @@ func NewCmd() *cobra.Command { }, pprofHTTPServer, port, + maxConcurrentReconciles, configuration.Current, ) }, @@ -77,6 +79,12 @@ func NewCmd() *cobra.Command { false, "If true it will start a pprof debug http server on localhost:6060. Defaults to false.", ) + cmd.Flags().IntVar( + &maxConcurrentReconciles, + "max-concurrent-reconciles", + 10, + "The maximum number of concurrent reconciles. Defaults to 10.", + ) return &cmd } diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index fe8028545d..84b452c908 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ -95,6 +95,7 @@ func RunController( leaderConfig leaderElectionConfiguration, pprofDebug bool, port int, + maxConcurrentReconciles int, conf *configuration.Data, ) error { ctx := context.Background() @@ -222,7 +223,7 @@ func RunController( mgr, discoveryClient, pluginRepository, - ).SetupWithManager(ctx, mgr); err != nil { + ).SetupWithManager(ctx, mgr, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Cluster") return err } @@ -236,10 +237,8 @@ func RunController( return err } - if err = controller.NewPluginReconciler( - mgr, - pluginRepository, - ).SetupWithManager(mgr, configuration.Current.OperatorNamespace); err != nil { + if err = controller.NewPluginReconciler(mgr, pluginRepository). 
+ SetupWithManager(mgr, configuration.Current.OperatorNamespace, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Plugin") return err } @@ -248,7 +247,7 @@ func RunController( Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("cloudnative-pg-scheduledbackup"), - }).SetupWithManager(ctx, mgr); err != nil { + }).SetupWithManager(ctx, mgr, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ScheduledBackup") return err } @@ -258,7 +257,7 @@ func RunController( DiscoveryClient: discoveryClient, Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("cloudnative-pg-pooler"), - }).SetupWithManager(mgr); err != nil { + }).SetupWithManager(mgr, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Pooler") return err } diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 606ecab88c..2c7dc542af 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -37,6 +37,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -1021,13 +1022,16 @@ func (r *ClusterReconciler) handleRollingUpdate( } // SetupWithManager creates a ClusterReconciler -func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { +func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, maxConcurrentReconciles int) error { err := r.createFieldIndexes(ctx, mgr) if err != nil { return err } return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{ + MaxConcurrentReconciles: maxConcurrentReconciles, + }). For(&apiv1.Cluster{}). Named("cluster"). Owns(&corev1.Pod{}). diff --git a/internal/controller/plugin_controller.go b/internal/controller/plugin_controller.go index 7d4e606e2e..0e393f1525 100644 --- a/internal/controller/plugin_controller.go +++ b/internal/controller/plugin_controller.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -207,7 +208,11 @@ func (r *PluginReconciler) getSecret( } // SetupWithManager adds this PluginReconciler to the passed controller manager -func (r *PluginReconciler) SetupWithManager(mgr ctrl.Manager, operatorNamespace string) error { +func (r *PluginReconciler) SetupWithManager( + mgr ctrl.Manager, + operatorNamespace string, + maxConcurrentReconciles int, +) error { pluginServicesPredicate := predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { return isPluginService(e.Object, operatorNamespace) @@ -224,6 +229,7 @@ func (r *PluginReconciler) SetupWithManager(mgr ctrl.Manager, operatorNamespace } return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). For(&corev1.Service{}). Named("plugin"). WithEventFilter(pluginServicesPredicate). 
diff --git a/internal/controller/pooler_controller.go b/internal/controller/pooler_controller.go index af54dbb344..e1d343f35b 100644 --- a/internal/controller/pooler_controller.go +++ b/internal/controller/pooler_controller.go @@ -35,6 +35,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -126,8 +127,9 @@ func (r *PoolerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } // SetupWithManager setup this controller inside the controller manager -func (r *PoolerReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *PoolerReconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles int) error { return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). For(&apiv1.Pooler{}). Named("pooler"). Owns(&v1.Deployment{}). diff --git a/internal/controller/scheduledbackup_controller.go b/internal/controller/scheduledbackup_controller.go index d3a96d1f55..8d4d3fa248 100644 --- a/internal/controller/scheduledbackup_controller.go +++ b/internal/controller/scheduledbackup_controller.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -326,7 +327,11 @@ func (r *ScheduledBackupReconciler) GetChildBackups( } // SetupWithManager install this controller in the controller manager -func (r *ScheduledBackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { +func (r *ScheduledBackupReconciler) SetupWithManager( + ctx context.Context, + mgr ctrl.Manager, + maxConcurrentReconciles int, +) error { // Create a new indexed field on backups. This field will be used to easily // find all the backups created by this controller if err := mgr.GetFieldIndexer().IndexField( @@ -353,6 +358,7 @@ func (r *ScheduledBackupReconciler) SetupWithManager(ctx context.Context, mgr ct } return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). For(&apiv1.ScheduledBackup{}). Named("scheduled-backup"). Complete(r) From 9c98fbc2c732f3b83ae1940fa6205e8f3fa3d95f Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Fri, 6 Dec 2024 07:55:40 +0100 Subject: [PATCH 207/836] docs: add a sample file for Scaleway object storage (#6143) Signed-off-by: Pierrick --- docs/src/samples.md | 8 ++++++- .../cluster-exemple-with-backup-scaleway.yaml | 23 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 docs/src/samples/cluster-exemple-with-backup-scaleway.yaml diff --git a/docs/src/samples.md b/docs/src/samples.md index a6ef595d7c..823b4a0f6f 100644 --- a/docs/src/samples.md +++ b/docs/src/samples.md @@ -62,12 +62,18 @@ your PostgreSQL cluster. : [`backup-example.yaml`](samples/backup-example.yaml): An example of a backup that runs against the previous sample. -**Simple cluster with backup configured** +**Simple cluster with backup configured for minio** : *Prerequisites*: The configuration assumes minio is running and working. Update `backup.barmanObjectStore` with your minio parameters or your cloud solution. 
: [`cluster-example-with-backup.yaml`](samples/cluster-example-with-backup.yaml) A basic cluster with backups configured.
+**Simple cluster with backup configured for Scaleway Object Storage**
+: *Prerequisites*: The configuration assumes a Scaleway Object Storage bucket exists.
+  Update `backup.barmanObjectStore` with your Scaleway parameters.
+: [`cluster-example-with-backup-scaleway.yaml`](samples/cluster-example-with-backup-scaleway.yaml)
+  A basic cluster with backups configured to work with Scaleway Object Storage.
+
## Replica clusters **Replica cluster by way of backup from an object store**
diff --git a/docs/src/samples/cluster-exemple-with-backup-scaleway.yaml b/docs/src/samples/cluster-exemple-with-backup-scaleway.yaml new file mode 100644 index 0000000000..b9f7905edb --- /dev/null +++ b/docs/src/samples/cluster-exemple-with-backup-scaleway.yaml
@@ -0,0 +1,23 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: pg-backup-scaleway
+spec:
+  instances: 3
+  storage:
+    storageClass: standard
+    size: 1Gi
+  backup:
+    barmanObjectStore:
+      destinationPath: "s3:///backups/" # replace with your bucket's name.
+      endpointURL: "https://s3..scw.cloud" # replace with your bucket's location/region.
+      s3Credentials:
+        accessKeyId:
+          name: scaleway
+          key: ACCESS_KEY_ID
+        secretAccessKey:
+          name: scaleway
+          key: ACCESS_SECRET_KEY
+        region:
+          name: scaleway
+          key: ACCESS_REGION
From 5ea937b7c4f39fafd634e4698abc666c4b367c21 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 6 Dec 2024 07:59:05 +0100 Subject: [PATCH 208/836] fix(plugin): avoid displaying physical backups block when empty (#5998) Signed-off-by: Gabriele Bartolini
--- internal/cmd/plugin/status/status.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index c55035a8db..c27018f386 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go
@@ -998,10 +998,12 @@ func (fullStatus *PostgresqlStatus) printBasebackupStatus(verbosity int) { return }
- if verbosity > 0 && len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 {
- fmt.Println(aurora.Green(header))
- fmt.Println(aurora.Yellow("No running physical backups found").String())
- fmt.Println()
+ if len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 {
+ if verbosity > 0 {
+ fmt.Println(aurora.Green(header))
+ fmt.Println(aurora.Yellow("No running physical backups found").String())
+ fmt.Println()
+ }
return }
From c23abee6cbe00088a59125031917dfd1730f6315 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 6 Dec 2024 10:17:28 +0100 Subject: [PATCH 209/836] chore: drop support for `pg_rewind` in PostgreSQL 12 (#6157) Starting with PostgreSQL 13, `pg_rewind` automatically performs crash recovery before starting. This eliminates the need to manually restart the postmaster after a failed invocation of `pg_rewind`. This patch removes the code that manually handled the crash recovery process, simplifying the implementation by leveraging the improved functionality in PostgreSQL 13 and later versions.
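For context, a hedged illustration of the behavior this change relies on (the data
directory and connection string below are placeholders, not values from this patch):
on PostgreSQL 13 and later, an invocation such as

    pg_rewind --target-pgdata=$PGDATA --source-server='host=primary-host user=postgres'

first starts a target that was not shut down cleanly in single-user mode to complete
crash recovery (unless `--no-ensure-shutdown` is passed), while on PostgreSQL 12 that
restart had to be performed by the caller, in this case the operator.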
Closes: #6156 Signed-off-by: Leonardo Cecchi
--- .../management/controller/instance_startup.go | 18 +----------------- pkg/management/postgres/instance.go | 16 ---------------- 2 files changed, 1 insertion(+), 33 deletions(-)
diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index ee81ed483c..c5c8ffcf12 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go
@@ -270,23 +270,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context // retrying after having started up the instance. err = r.instance.Rewind(ctx, pgVersion) if err != nil {
- contextLogger.Info(
- "pg_rewind failed, starting the server to complete the crash recovery",
- "err", err)
-
- // pg_rewind requires a clean shutdown of the old primary to work.
- // The only way to do that is to start the server again
- // and wait for it to be available again.
- err = r.instance.CompleteCrashRecovery(ctx)
- if err != nil {
- return err
- }
-
- // Then let's go back to the point of the new primary
- err = r.instance.Rewind(ctx, pgVersion)
- if err != nil {
- return err
- }
+ return fmt.Errorf("while executing pg_rewind: %w", err)
} // Now I can demote myself
diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 42e538983d..2a207a63e4 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go
@@ -829,22 +829,6 @@ func (instance *Instance) WaitForPrimaryAvailable(ctx context.Context) error { return waitForConnectionAvailable(ctx, db) }
-// CompleteCrashRecovery temporary starts up the server and wait for it
-// to be fully available for queries. This will ensure that the crash recovery
-// is fully done.
-// Important: this function must be called only when the instance isn't started -func (instance *Instance) CompleteCrashRecovery(ctx context.Context) error { - log.Info("Waiting for server to complete crash recovery") - - defer func() { - instance.ShutdownConnections() - }() - - return instance.WithActiveInstance(func() error { - return instance.WaitForSuperuserConnectionAvailable(ctx) - }) -} - // WaitForSuperuserConnectionAvailable waits until we can connect to this // instance using the superuser account func (instance *Instance) WaitForSuperuserConnectionAvailable(ctx context.Context) error { From c61ec6f5e5badc67bb5d025a6be4d08a1c855d94 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 6 Dec 2024 10:53:56 +0100 Subject: [PATCH 210/836] chore(lint): spellcheck word list (#6290) Signed-off-by: Leonardo Cecchi --- .wordlist-en-custom.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 9216cd19d7..42d7a6fa5b 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -361,9 +361,7 @@ README RHSA RLS RPO -rpo RTO -rto RUNTIME ReadWriteOnce RedHat @@ -404,6 +402,7 @@ SQLRefs SSL SSZ STORAGEACCOUNTNAME +Scaleway ScheduledBackup ScheduledBackupList ScheduledBackupSpec @@ -1166,6 +1165,8 @@ robfig roleRef rollingupdatestatus rollout +rpo +rto runonserver runtime rw @@ -1174,6 +1175,7 @@ sa sas scalability scalable +scaleway sccs scheduledbackup scheduledbackuplist From 767b53f14bb7bc76776dd13d685da57a58fc1a73 Mon Sep 17 00:00:00 2001 From: Ari Becker Date: Fri, 6 Dec 2024 12:29:50 +0200 Subject: [PATCH 211/836] chore(plugin): improve getting instance status from pod error message (#6092) Closes #6057 Signed-off-by: Ari Becker --- internal/plugin/resources/instance.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/plugin/resources/instance.go b/internal/plugin/resources/instance.go index 1ba6a58d47..03d604e67e 100644 --- a/internal/plugin/resources/instance.go +++ b/internal/plugin/resources/instance.go @@ -112,7 +112,9 @@ func getInstanceStatusFromPod( DoRaw(ctx) if err != nil { result.AddPod(pod) - result.Error = err + result.Error = fmt.Errorf( + "failed to get status by proxying to the pod, you might lack permissions to get pods/proxy: %w", + err) return result } From a0448387e4e4aec7a5019b4dc9d15538cef4468d Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 6 Dec 2024 11:44:28 +0100 Subject: [PATCH 212/836] chore(e2e): Separate forward connection from psql connection (#5898) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactors the code to decouple the forward connection from the psql connection. Prepares the forward connection for reuse in other tests, such as MinIO. Closes #5880 Signed-off-by: Jonathan Gonzalez V. 
Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Niccolò Fei Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini Co-authored-by: Niccolò Fei --- tests/e2e/asserts_test.go | 229 ++++++------- tests/e2e/connection_test.go | 16 +- tests/e2e/managed_roles_test.go | 305 ++++++------------ tests/e2e/pg_basebackup_test.go | 8 +- tests/e2e/update_user_test.go | 15 +- tests/utils/forwardconnection/doc.go | 20 ++ .../forwardconnection/forwardconnection.go | 218 +++++++++++++ tests/utils/psql_connection.go | 195 ++++++----- tests/utils/service.go | 14 + 9 files changed, 572 insertions(+), 448 deletions(-) create mode 100644 tests/utils/forwardconnection/doc.go create mode 100644 tests/utils/forwardconnection/forwardconnection.go diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 5bdf763c9a..a7a5eb21ee 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -349,6 +349,8 @@ func AssertUpdateSecret( env *testsUtils.TestingEnvironment, ) { var secret corev1.Secret + + // Gather the secret Eventually(func(g Gomega) { err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: secretName}, @@ -356,13 +358,14 @@ func AssertUpdateSecret( g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) + // Change the given field to the new value provided secret.Data[field] = []byte(value) err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { return env.Client.Update(env.Ctx, &secret) }) Expect(err).ToNot(HaveOccurred()) - // Wait for the cluster pickup the updated secrets version first + // Wait for the cluster to pick up the updated secrets version first Eventually(func() string { cluster, err := env.GetCluster(namespace, clusterName) if err != nil { @@ -371,13 +374,23 @@ func AssertUpdateSecret( } switch { case strings.HasSuffix(secretName, apiv1.ApplicationUserSecretSuffix): - GinkgoWriter.Printf("Resource version of Application secret referenced in the cluster is %v\n", + GinkgoWriter.Printf("Resource version of %s secret referenced in the cluster is %v\n", + secretName, cluster.Status.SecretsResourceVersion.ApplicationSecretVersion) return cluster.Status.SecretsResourceVersion.ApplicationSecretVersion + case strings.HasSuffix(secretName, apiv1.SuperUserSecretSuffix): - GinkgoWriter.Printf("Resource version of Superuser secret referenced in the cluster is %v\n", + GinkgoWriter.Printf("Resource version of %s secret referenced in the cluster is %v\n", + secretName, cluster.Status.SecretsResourceVersion.SuperuserSecretVersion) return cluster.Status.SecretsResourceVersion.SuperuserSecretVersion + + case cluster.UsesSecretInManagedRoles(secretName): + GinkgoWriter.Printf("Resource version of %s ManagedRole secret referenced in the cluster is %v\n", + secretName, + cluster.Status.SecretsResourceVersion.ManagedRoleSecretVersions[secretName]) + return cluster.Status.SecretsResourceVersion.ManagedRoleSecretVersions[secretName] + default: GinkgoWriter.Printf("Unsupported secrets name found %v\n", secretName) return "" @@ -385,28 +398,36 @@ func AssertUpdateSecret( }, timeout).Should(BeEquivalentTo(secret.ResourceVersion)) } -// AssertConnection is used if a connection from a pod to a postgresql -// database works +// AssertConnection is used if a connection from a pod to a postgresql database works func AssertConnection( - host string, - user string, + namespace string, + service string, dbname string, + user string, password string, - queryingPod *corev1.Pod, - timeout int, env *testsUtils.TestingEnvironment, 
) { - By(fmt.Sprintf("connecting to the %v service as %v", host, user), func() { - Eventually(func() string { - dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require", host, user, dbname, password) - commandTimeout := time.Second * 10 - stdout, _, err := env.ExecCommand(env.Ctx, *queryingPod, specs.PostgresContainerName, &commandTimeout, - "psql", dsn, "-tAc", "SELECT 1") - if err != nil { - return "" - } - return stdout - }, timeout).Should(Equal("1\n")) + By(fmt.Sprintf("checking that %v service exists", service), func() { + Eventually(func(g Gomega) { + _, err := testsUtils.GetService(namespace, service, env) + g.Expect(err).ToNot(HaveOccurred()) + }, RetryTimeout).Should(Succeed()) + }) + + By(fmt.Sprintf("connecting to the %v service as %v", service, user), func() { + forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service, + dbname, user, password) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + var rawValue string + row := conn.QueryRow("SELECT 1") + err = row.Scan(&rawValue) + Expect(err).ToNot(HaveOccurred()) + Expect(strings.TrimSpace(rawValue)).To(BeEquivalentTo("1")) }) } @@ -508,24 +529,26 @@ func AssertDatabaseExists(pod *corev1.Pod, databaseName string, expectedValue bo // AssertUserExists assert if user exists func AssertUserExists(pod *corev1.Pod, userName string, expectedValue bool) { By(fmt.Sprintf("verifying if user %v exists", userName), func() { - query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_user WHERE lower(usename) = lower('%v'));", userName) - stdout, stderr, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ - Namespace: pod.Namespace, - PodName: pod.Name, - }, - testsUtils.PostgresDBName, - query) - if err != nil { - GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr) - } - Expect(err).ToNot(HaveOccurred()) + query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_roles WHERE lower(rolname) = lower('%v'));", userName) + Eventually(func(g Gomega) { + stdout, stderr, err := env.ExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + testsUtils.PostgresDBName, + query) + if err != nil { + GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr) + } + g.Expect(err).ToNot(HaveOccurred()) - if expectedValue { - Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t")) - } else { - Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f")) - } + if expectedValue { + g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t")) + } else { + g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f")) + } + }, 60).Should(Succeed()) }) } @@ -1079,57 +1102,53 @@ func AssertDetachReplicaModeCluster( }) } -func AssertWritesToReplicaFails( - connectingPod *corev1.Pod, - service string, - appDBName string, - appDBUser string, - appDBPass string, -) { - By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), - func() { - timeout := time.Second * 10 - dsn := testsUtils.CreateDSN(service, appDBUser, appDBName, appDBPass, testsUtils.Require, 5432) - - // Expect to be connected to a replica - stdout, _, err := env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "select pg_is_in_recovery()") - value := strings.Trim(stdout, "\n") - Expect(value, err).To(Equal("t")) - - // Expect to be in a read-only transaction - _, _, err = utils.ExecCommand(env.Ctx, env.Interface, env.RestClientConfig, *connectingPod, - 
specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "CREATE TABLE IF NOT EXISTS table1(var1 text);") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).Should( - ContainSubstring("cannot execute CREATE TABLE in a read-only transaction")) - }) +func AssertWritesToReplicaFails(namespace, service, appDBName, appDBUser, appDBPass string) { + By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), func() { + forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service, + appDBName, appDBUser, appDBPass) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + var rawValue string + // Expect to be connected to a replica + row := conn.QueryRow("SELECT pg_is_in_recovery()") + err = row.Scan(&rawValue) + Expect(err).ToNot(HaveOccurred()) + isReplica := strings.TrimSpace(rawValue) + Expect(isReplica).To(BeEquivalentTo("true")) + + // Expect to be in a read-only transaction + _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("cannot execute CREATE TABLE in a read-only transaction")) + }) } -func AssertWritesToPrimarySucceeds( - connectingPod *corev1.Pod, - service string, - appDBName string, - appDBUser string, - appDBPass string, -) { - By(fmt.Sprintf("Verifying %v service correctly manages writes", service), - func() { - timeout := time.Second * 10 - dsn := testsUtils.CreateDSN(service, appDBUser, appDBName, appDBPass, testsUtils.Require, 5432) +func AssertWritesToPrimarySucceeds(namespace, service, appDBName, appDBUser, appDBPass string) { + By(fmt.Sprintf("Verifying %v service correctly manages writes", service), func() { + forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service, + appDBName, appDBUser, appDBPass) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + Expect(err).ToNot(HaveOccurred()) - // Expect to be connected to a primary - stdout, _, err := env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "select pg_is_in_recovery()") - value := strings.Trim(stdout, "\n") - Expect(value, err).To(Equal("f")) + var rawValue string + // Expect to be connected to a primary + row := conn.QueryRow("SELECT pg_is_in_recovery()") + err = row.Scan(&rawValue) + Expect(err).ToNot(HaveOccurred()) + isReplica := strings.TrimSpace(rawValue) + Expect(isReplica).To(BeEquivalentTo("false")) - // Expect to be able to write - _, _, err = env.EventuallyExecCommand(env.Ctx, *connectingPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "CREATE TABLE IF NOT EXISTS table1(var1 text);") - Expect(err).ToNot(HaveOccurred()) - }) + // Expect to be able to write + _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)") + Expect(err).ToNot(HaveOccurred()) + }) } func AssertFastFailOver( @@ -1346,7 +1365,6 @@ func AssertApplicationDatabaseConnection( appDB, appPassword, appSecretName string, - pod *corev1.Pod, ) { By("checking cluster can connect with application database user and password", func() { // Get the app user password from the auto generated -app secret if appPassword is not provided @@ -1363,10 +1381,9 @@ func AssertApplicationDatabaseConnection( Expect(err).ToNot(HaveOccurred()) appPassword = string(appSecret.Data["password"]) } - // rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) - rwService := testsUtils.CreateServiceFQDN(namespace, 
testsUtils.GetReadWriteServiceName(clusterName)) + rwService := testsUtils.GetReadWriteServiceName(clusterName) - AssertConnection(rwService, appUser, appDB, appPassword, pod, 60, env) + AssertConnection(namespace, rwService, appDB, appUser, appPassword, env) }) } @@ -1578,33 +1595,26 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN secretName := restoredClusterName + apiv1.ApplicationUserSecretSuffix By("checking the restored cluster with pre-defined app password connectable", func() { - primaryPod, err := env.GetClusterPrimary(namespace, restoredClusterName) - Expect(err).ToNot(HaveOccurred()) AssertApplicationDatabaseConnection( namespace, restoredClusterName, appUser, testsUtils.AppDBName, appUserPass, - secretName, - primaryPod) + secretName) }) By("update user application password for restored cluster and verify connectivity", func() { const newPassword = "eeh2Zahohx" //nolint:gosec AssertUpdateSecret("password", newPassword, secretName, namespace, restoredClusterName, 30, env) - primaryPod, err := env.GetClusterPrimary(namespace, restoredClusterName) - Expect(err).ToNot(HaveOccurred()) - AssertApplicationDatabaseConnection( namespace, restoredClusterName, appUser, testsUtils.AppDBName, newPassword, - secretName, - primaryPod) + secretName) }) } @@ -1820,9 +1830,6 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta env) Expect(err).ToNot(HaveOccurred()) - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - By("checking the restored cluster with auto generated app password connectable", func() { AssertApplicationDatabaseConnection( namespace, @@ -1830,8 +1837,7 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta appUser, testsUtils.AppDBName, appUserPass, - secretName, - primaryPod) + secretName) }) By("update user application password for restored cluster and verify connectivity", func() { @@ -1843,8 +1849,7 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta appUser, testsUtils.AppDBName, newPassword, - secretName, - primaryPod) + secretName) }) } @@ -2001,23 +2006,20 @@ func assertReadWriteConnectionUsingPgBouncerService( poolerYamlFilePath string, isPoolerRW bool, ) { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerService, err := env.GetResourceNameFromYAML(poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) - poolerService := testsUtils.CreateServiceFQDN(namespace, poolerName) appUser, generatedAppUserPassword, err := testsUtils.GetCredentials( clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) Expect(err).ToNot(HaveOccurred()) - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, primaryPod, 180, env) + AssertConnection(namespace, poolerService, testsUtils.AppDBName, appUser, generatedAppUserPassword, env) // verify that, if pooler type setup read write then it will allow both read and // write operations or if pooler type setup read only then it will allow only read operations if isPoolerRW { - AssertWritesToPrimarySucceeds(primaryPod, poolerService, "app", appUser, + AssertWritesToPrimarySucceeds(namespace, poolerService, "app", appUser, generatedAppUserPassword) } else { - AssertWritesToReplicaFails(primaryPod, poolerService, "app", appUser, + AssertWritesToReplicaFails(namespace, poolerService, "app", appUser, 
generatedAppUserPassword) } } @@ -2344,13 +2346,12 @@ func DeleteTableUsingPgBouncerService( env *testsUtils.TestingEnvironment, pod *corev1.Pod, ) { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerService, err := env.GetResourceNameFromYAML(poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) - poolerService := testsUtils.CreateServiceFQDN(namespace, poolerName) appUser, generatedAppUserPassword, err := testsUtils.GetCredentials( clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) Expect(err).ToNot(HaveOccurred()) - AssertConnection(poolerService, appUser, "app", generatedAppUserPassword, pod, 180, env) + AssertConnection(namespace, poolerService, testsUtils.AppDBName, appUser, generatedAppUserPassword, env) connectionTimeout := time.Second * 10 dsn := testsUtils.CreateDSN(poolerService, appUser, testsUtils.AppDBName, generatedAppUserPassword, diff --git a/tests/e2e/connection_test.go b/tests/e2e/connection_test.go index 67227ca118..fd962159f0 100644 --- a/tests/e2e/connection_test.go +++ b/tests/e2e/connection_test.go @@ -53,20 +53,18 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity superuserPassword string, env *utils.TestingEnvironment, ) { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) // We test -rw, -ro and -r services with the app user and the superuser - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) - rService := fmt.Sprintf("%v-r.%v.svc", clusterName, namespace) - roService := fmt.Sprintf("%v-ro.%v.svc", clusterName, namespace) + rwService := fmt.Sprintf("%v-rw", clusterName) + rService := fmt.Sprintf("%v-r", clusterName) + roService := fmt.Sprintf("%v-ro", clusterName) services := []string{rwService, roService, rService} for _, service := range services { - AssertConnection(service, "postgres", appDBName, superuserPassword, primaryPod, 10, env) - AssertConnection(service, appDBUser, appDBName, appPassword, primaryPod, 10, env) + AssertConnection(namespace, service, appDBName, utils.PostgresDBName, superuserPassword, env) + AssertConnection(namespace, service, appDBName, appDBUser, appPassword, env) } - AssertWritesToReplicaFails(primaryPod, roService, appDBName, appDBUser, appPassword) - AssertWritesToPrimarySucceeds(primaryPod, rwService, appDBName, appDBUser, appPassword) + AssertWritesToReplicaFails(namespace, roService, appDBName, appDBUser, appPassword) + AssertWritesToPrimarySucceeds(namespace, rwService, appDBName, appDBUser, appPassword) } Context("Auto-generated passwords", func() { diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index 534867203d..fc5dd5f314 100644 --- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -29,7 +29,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" @@ -56,6 +55,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Context("plain vanilla cluster", Ordered, func() { const ( namespacePrefix = "managed-roles" + secretName = "cluster-example-dante" username = "dante" appUsername = "app" password = "dante" @@ -64,8 +64,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic userWithPerpetualPass = "boccaccio" userWithHashedPassword = "cavalcanti" ) - var 
clusterName, secretName, namespace string - var secretNameSpacedName *types.NamespacedName + var clusterName, namespace string BeforeAll(func() { var err error @@ -76,35 +75,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic clusterName, err = env.GetResourceNameFromYAML(clusterManifest) Expect(err).ToNot(HaveOccurred()) - secretName = "cluster-example-dante" - secretNameSpacedName = &types.NamespacedName{ - Namespace: namespace, - Name: secretName, - } - By("setting up cluster with managed roles", func() { AssertCreateCluster(namespace, clusterName, clusterManifest, env) }) }) - assertUserExists := func(namespace, primaryPod, username string, shouldExists bool) { - Eventually(func(g Gomega) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPod, - }, - utils.PostgresDBName, - "\\du") - g.Expect(err).ToNot(HaveOccurred()) - if shouldExists { - g.Expect(stdout).To(ContainSubstring(username)) - } else { - g.Expect(stdout).NotTo(ContainSubstring(username)) - } - }, 60).Should(Succeed()) - } - assertInRoles := func(namespace, primaryPod, roleName string, expectedRoles []string) { slices.Sort(expectedRoles) Eventually(func() []string { @@ -132,6 +107,24 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }, 30).Should(BeEquivalentTo(expectedRoles)) } + assertRoleStatus := func(namespace, clusterName, query, expectedResult string) { + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() string { + stdout, _, err := env.ExecQueryInInstancePod( + utils.PodLocator{ + Namespace: namespace, + PodName: primaryPod.Name, + }, + utils.PostgresDBName, + query) + if err != nil { + return "" + } + return strings.TrimSpace(stdout) + }, 30).Should(Equal(expectedResult)) + } + It("can create roles specified in the managed roles stanza", func() { rolCanLoginInSpec := true rolSuperInSpec := false @@ -143,13 +136,13 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic rolConnLimitInSpec := 4 By("ensuring the roles created in the managed stanza are in the database with correct attributes", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - assertUserExists(namespace, primaryPodInfo.Name, username, true) - assertUserExists(namespace, primaryPodInfo.Name, userWithPerpetualPass, true) - assertUserExists(namespace, primaryPodInfo.Name, userWithHashedPassword, true) - assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, false) + AssertUserExists(primaryPod, username, true) + AssertUserExists(primaryPod, userWithPerpetualPass, true) + AssertUserExists(primaryPod, userWithHashedPassword, true) + AssertUserExists(primaryPod, unrealizableUser, false) query := fmt.Sprintf("SELECT true FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ @@ -160,8 +153,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic for _, q := range []string{query, query2} { stdout, _, err := env.ExecQueryInInstancePod( utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, + Namespace: primaryPod.Namespace, + PodName: primaryPod.Name, }, utils.PostgresDBName, q) @@ -171,34 +164,21 @@ var _ = Describe("Managed roles tests", 
Label(tests.LabelSmoke, tests.LabelBasic }) By("Verifying connectivity of new managed role", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := utils.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(rwService, username, "postgres", password, primaryPod, 30, env) - - AssertConnection(rwService, userWithHashedPassword, "postgres", userWithHashedPassword, primaryPod, 30, env) + AssertConnection(namespace, rwService, utils.PostgresDBName, username, password, env) + AssertConnection(namespace, rwService, utils.PostgresDBName, userWithHashedPassword, userWithHashedPassword, env) }) By("ensuring the app role has been granted createdb in the managed stanza", func() { primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - assertUserExists(namespace, primaryPodInfo.Name, appUsername, true) + AssertUserExists(primaryPodInfo, appUsername, true) query := fmt.Sprintf("SELECT rolcreatedb and rolvaliduntil='infinity' "+ "FROM pg_roles WHERE rolname='%s'", appUsername) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.PostgresDBName, - query) - Expect(err).ToNot(HaveOccurred()) - Expect(stdout).To(Equal("t\n")) + assertRoleStatus(namespace, clusterName, query, "t") }) By("verifying connectivity of app user", func() { @@ -213,13 +193,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic ) Expect(err).NotTo(HaveOccurred()) - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - pass := string(appUserSecret.Data["password"]) - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := utils.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(rwService, appUsername, "postgres", pass, primaryPod, 30, env) + AssertConnection(namespace, rwService, utils.PostgresDBName, appUsername, pass, env) }) By("Verify show unrealizable role configurations in the status", func() { @@ -239,13 +216,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) It("can update role attributes in the spec and they are applied in the database", func() { - var primaryPod *corev1.Pod - var err error expectedLogin := false expectedCreateDB := false expectedCreateRole := true expectedConnLmt := int64(10) - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := utils.GetReadWriteServiceName(clusterName) By("updating role attribute in spec", func() { cluster, err := env.GetCluster(namespace, clusterName) @@ -261,35 +236,24 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify the role has been updated in the database", func() { - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - Eventually(func() string { - query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ - "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v", - username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPod.Name, - }, - 
utils.PostgresDBName, - query) - if err != nil { - return "" - } - return stdout - }, 30).Should(Equal("1\n")) + query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ + "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v", + username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt) + assertRoleStatus(namespace, clusterName, query, "1") }) By("the connection should fail since we disabled the login", func() { - dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require", - rwService, username, "postgres", password) - timeout := time.Second * 10 - _, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, &timeout, - "psql", dsn, "-tAc", "SELECT 1") + forwardConn, conn, err := utils.ForwardPSQLServiceConnection(env, namespace, rwService, + utils.PostgresDBName, username, password) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + Expect(err).ToNot(HaveOccurred()) + + _, err = conn.Exec("SELECT 1") Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not permitted to log in")) }) By("enable Login again", func() { @@ -301,16 +265,22 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) }) + By("verifying Login is now enabled", func() { + expectedLogin = true + query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ + "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v", + username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt) + assertRoleStatus(namespace, clusterName, query, "1") + }) + By("the connectivity should be success again", func() { - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := utils.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(rwService, username, "postgres", password, primaryPod, 30, env) + AssertConnection(namespace, rwService, utils.PostgresDBName, username, password, env) }) }) It("Can add role with all attribute omitted and verify it is default", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) const ( defaultRolCanLogin = false defaultRolSuper = false @@ -335,26 +305,14 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify new_role exists with all attribute default", func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ - "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ - "and rolbypassrls=%v and rolconnlimit=%v", newUserName, defaultRolCanLogin, - defaultRolSuper, defaultRolCreateDB, - defaultRolCreateRole, defaultRolInherit, defaultRolReplication, - defaultRolByPassRLS, defaultRolConnLimit) + query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ + "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ + "and rolbypassrls=%v and rolconnlimit=%v", newUserName, defaultRolCanLogin, + defaultRolSuper, defaultRolCreateDB, + defaultRolCreateRole, defaultRolInherit, defaultRolReplication, + defaultRolByPassRLS, defaultRolConnLimit) - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.PostgresDBName, - query) - if err != nil { 
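
Every in-line Eventually block this patch deletes, including the one in this hunk, boils down to the shape that assertRoleStatus now centralizes: run a single-value catalog query, treat any error as a reason to poll again, and compare the trimmed output. A minimal sketch of that shape, assuming a Gomega test context; db (a *sql.DB) and the role values are illustrative placeholders:

	query := fmt.Sprintf(
		"SELECT 1 FROM pg_roles WHERE rolname='%s' AND rolcanlogin=%v", "dante", false)
	Eventually(func() string {
		var got string
		if err := db.QueryRow(query).Scan(&got); err != nil {
			return "" // a transient error simply triggers another poll
		}
		return strings.TrimSpace(got)
	}, 30).Should(Equal("1"))
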
- return "" - } - return stdout - }, 30).Should(Equal("1\n")) + assertRoleStatus(namespace, clusterName, query, "1") }) }) @@ -376,52 +334,23 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) }) - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - By(fmt.Sprintf("Verify comments update in db for %s", newUserName), func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+ - " FROM pg_catalog.pg_authid WHERE rolname='%s'", - newUserName) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.PostgresDBName, - query) - if err != nil { - return ERROR - } - return stdout - }, 30).Should(Equal(fmt.Sprintf("This is user %s\n", newUserName))) + query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+ + " FROM pg_catalog.pg_authid WHERE rolname='%s'", + newUserName) + assertRoleStatus(namespace, clusterName, query, fmt.Sprintf("This is user %s", newUserName)) }) By(fmt.Sprintf("Verify comments update in db for %s", username), func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+ - " FROM pg_catalog.pg_authid WHERE rolname='%s'", - username) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.PostgresDBName, - query) - if err != nil { - return ERROR - } - return stdout - }, 30).Should(Equal("\n")) + query := fmt.Sprintf("SELECT pg_catalog.shobj_description(oid, 'pg_authid') as comment"+ + " FROM pg_catalog.pg_authid WHERE rolname='%s'", + username) + assertRoleStatus(namespace, clusterName, query, "") }) }) It("Can update role membership and verify changes in db ", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("Remove invalid parent role from unrealizableUser and verify user in database", func() { @@ -441,7 +370,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(0)) - assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, true) + AssertUserExists(primaryPod, unrealizableUser, true) }) By("Add role in InRole for role new_role and verify in database", func() { @@ -464,7 +393,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(0)) - assertInRoles(namespace, primaryPodInfo.Name, newUserName, []string{"postgres", username}) + assertInRoles(namespace, primaryPod.Name, newUserName, []string{"postgres", username}) }) By("Remove parent role from InRole for role new_role and verify in database", func() { @@ -486,7 +415,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(0)) - assertInRoles(namespace, primaryPodInfo.Name, newUserName, []string{username}) + assertInRoles(namespace, primaryPod.Name, newUserName, []string{username}) }) By("Mock the error for 
unrealizable User and verify user in database", func() { @@ -502,7 +431,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) // user not changed - assertUserExists(namespace, primaryPodInfo.Name, unrealizableUser, true) + AssertUserExists(primaryPod, unrealizableUser, true) Eventually(func() int { cluster, err := env.GetCluster(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) @@ -523,28 +452,21 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) It("Can update role password in secrets and db and verify the connectivity", func() { - var primaryPod *corev1.Pod var err error newPassword := "ThisIsNew" - By("update password from secrets", func() { - var secret corev1.Secret - err := env.Client.Get(env.Ctx, *secretNameSpacedName, &secret) - Expect(err).ToNot(HaveOccurred()) + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) - updated := secret.DeepCopy() - updated.Data["password"] = []byte(newPassword) - err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(&secret)) - Expect(err).ToNot(HaveOccurred()) + By("update password from secrets", func() { + AssertUpdateSecret("password", newPassword, secretName, + namespace, clusterName, 30, env) }) By("Verify connectivity using changed password in secret", func() { - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) + rwService := utils.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(rwService, username, "postgres", newPassword, primaryPod, 30, env) + AssertConnection(namespace, rwService, utils.PostgresDBName, username, newPassword, env) }) By("Update password in database", func() { @@ -561,9 +483,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) }) - By("Verify password in secrets could still valid", func() { - rwService := fmt.Sprintf("%v-rw.%v.svc", clusterName, namespace) - AssertConnection(rwService, username, "postgres", newPassword, primaryPod, 60, env) + By("Verify password in secrets is still valid", func() { + rwService := utils.GetReadWriteServiceName(clusterName) + AssertConnection(namespace, rwService, utils.PostgresDBName, username, newPassword, env) }) }) @@ -589,47 +511,18 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) }) - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - By(fmt.Sprintf("Verify valid until is removed in db for %s", newUserName), func() { - Eventually(func() string { - query := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_catalog.pg_authid"+ - " WHERE rolname='%s'", - newUserName) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.PostgresDBName, - query) - if err != nil { - return ERROR - } - return stdout - }).Should(Equal("t\n")) + query := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_catalog.pg_authid"+ + " WHERE rolname='%s'", + newUserName) + assertRoleStatus(namespace, clusterName, query, "t") }) By(fmt.Sprintf("Verify valid until update in db for %s", username), func() { - Eventually(func() string { 
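
As background for these valid-until checks: rolvaliduntil distinguishes three states, and the tests probe each of them. A short summary with one illustrative query (the role name is a placeholder):

	// NULL        -> VALID UNTIL was never set; the password does not expire.
	// 'infinity'  -> an explicitly never-expiring password (what the earlier
	//                app-user check expects via rolvaliduntil='infinity').
	// a timestamp -> the password expires at that instant.
	query := "SELECT rolvaliduntil IS NULL FROM pg_catalog.pg_authid WHERE rolname='somerole'"
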
- query := fmt.Sprintf("SELECT rolvaliduntil='%s' FROM pg_catalog.pg_authid "+ - " WHERE rolname='%s'", - newValidUntilString, username) - - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ - Namespace: namespace, - PodName: primaryPodInfo.Name, - }, - utils.PostgresDBName, - query) - if err != nil { - return ERROR - } - return stdout - }, 30).Should(Equal("t\n")) + query := fmt.Sprintf("SELECT rolvaliduntil='%s' FROM pg_catalog.pg_authid "+ + " WHERE rolname='%s'", + newValidUntilString, username) + assertRoleStatus(namespace, clusterName, query, "t") }) }) @@ -649,9 +542,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify new_role not existed in db", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - assertUserExists(namespace, primaryPodInfo.Name, newUserName, false) + AssertUserExists(primaryPod, newUserName, false) }) }) }) diff --git a/tests/e2e/pg_basebackup_test.go b/tests/e2e/pg_basebackup_test.go index 6ec697fd4a..de4fdcd812 100644 --- a/tests/e2e/pg_basebackup_test.go +++ b/tests/e2e/pg_basebackup_test.go @@ -71,12 +71,9 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun secretName := dstClusterName + apiv1.ApplicationUserSecretSuffix - primaryPod, err := env.GetClusterPrimary(namespace, dstClusterName) - Expect(err).ToNot(HaveOccurred()) - By("checking the dst cluster with auto generated app password connectable", func() { AssertApplicationDatabaseConnection(namespace, dstClusterName, - appUser, utils.AppDBName, "", secretName, primaryPod) + appUser, utils.AppDBName, "", secretName) }) By("update user application password for dst cluster and verify connectivity", func() { @@ -88,8 +85,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun appUser, utils.AppDBName, newPassword, - secretName, - primaryPod) + secretName) }) By("checking data have been copied correctly", func() { diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go index d614e8ad75..b3824000c2 100644 --- a/tests/e2e/update_user_test.go +++ b/tests/e2e/update_user_test.go @@ -56,8 +56,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) + rwService := testsUtils.GetReadWriteServiceName(clusterName) appSecretName := clusterName + apiv1.ApplicationUserSecretSuffix superUserSecretName := clusterName + apiv1.SuperUserSecretSuffix @@ -69,7 +68,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC const newPassword = "eeh2Zahohx" //nolint:gosec AssertUpdateSecret("password", newPassword, appSecretName, namespace, clusterName, 30, env) - AssertConnection(host, testsUtils.AppUser, testsUtils.AppDBName, newPassword, primaryPod, 60, env) + AssertConnection(namespace, rwService, testsUtils.AppDBName, testsUtils.AppUser, newPassword, env) }) By("fail updating user application password with wrong user in secret", func() { @@ -80,7 +79,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC AssertUpdateSecret("username", newUser, appSecretName, namespace, clusterName, 30, env) timeout := time.Second * 10 - dsn := testsUtils.CreateDSN(host, newUser, 
testsUtils.AppDBName, newPassword, testsUtils.Require, 5432) + dsn := testsUtils.CreateDSN(rwService, newUser, testsUtils.AppDBName, newPassword, testsUtils.Require, 5432) _, _, err := env.ExecCommand(env.Ctx, *primaryPod, specs.PostgresContainerName, &timeout, @@ -113,7 +112,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC const newPassword = "fi6uCae7" //nolint:gosec AssertUpdateSecret("password", newPassword, superUserSecretName, namespace, clusterName, 30, env) - AssertConnection(host, testsUtils.PostgresUser, testsUtils.PostgresDBName, newPassword, primaryPod, 60, env) + AssertConnection(namespace, rwService, testsUtils.PostgresDBName, testsUtils.PostgresUser, newPassword, env) }) }) }) @@ -141,6 +140,8 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) + rwService := testsUtils.GetReadWriteServiceName(clusterName) + secretName := clusterName + apiv1.SuperUserSecretSuffix var secret corev1.Secret namespacedName := types.NamespacedName{ @@ -190,12 +191,10 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi g.Expect(err).ToNot(HaveOccurred()) }, 90).WithPolling(time.Second).Should(Succeed()) - host, err := testsUtils.GetHostName(namespace, clusterName, env) - Expect(err).ToNot(HaveOccurred()) superUser, superUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.SuperUserSecretSuffix, env) Expect(err).ToNot(HaveOccurred()) - AssertConnection(host, superUser, testsUtils.PostgresDBName, superUserPass, primaryPod, 60, env) + AssertConnection(namespace, rwService, testsUtils.PostgresDBName, superUser, superUserPass, env) }) By("disable superuser access", func() { diff --git a/tests/utils/forwardconnection/doc.go b/tests/utils/forwardconnection/doc.go new file mode 100644 index 0000000000..0e2f7af2bf --- /dev/null +++ b/tests/utils/forwardconnection/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package forwardconnection provides an easy interface to create +// a port forward from the local test to a service or pod +// inside the testing k8s cluster +package forwardconnection diff --git a/tests/utils/forwardconnection/forwardconnection.go b/tests/utils/forwardconnection/forwardconnection.go new file mode 100644 index 0000000000..77b774ffc1 --- /dev/null +++ b/tests/utils/forwardconnection/forwardconnection.go @@ -0,0 +1,218 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package forwardconnection
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+
+	"github.com/onsi/ginkgo/v2"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/portforward"
+	"k8s.io/client-go/transport/spdy"
+)
+
+// PostgresPortMap is the default port map for the PostgreSQL Pod
+const PostgresPortMap = "0:5432"
+
+// ForwardConnection holds the necessary information to manage a port-forward
+// against a service or pod inside Kubernetes
+type ForwardConnection struct {
+	Forwarder    *portforward.PortForwarder
+	stopChannel  chan struct{}
+	readyChannel chan struct{}
+}
+
+// NewDialerFromService returns a Dialer against the service specified
+func NewDialerFromService(
+	ctx context.Context,
+	kubeInterface kubernetes.Interface,
+	config *rest.Config,
+	namespace,
+	service string,
+) (dialer httpstream.Dialer, portMaps []string, err error) {
+	pod, portMap, err := getPodAndPortsFromService(ctx, kubeInterface, namespace, service)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	dial, err := NewDialer(kubeInterface, config, namespace, pod)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return dial, portMap, nil
+}
+
+// NewForwardConnection returns a ForwardConnection wrapping a PortForwarder for the given dialer
+func NewForwardConnection(
+	dialer httpstream.Dialer,
+	portMaps []string,
+	outWriter,
+	errWriter io.Writer,
+) (*ForwardConnection, error) {
+	fc := &ForwardConnection{
+		stopChannel:  make(chan struct{}),
+		readyChannel: make(chan struct{}, 1),
+	}
+
+	var err error
+	fc.Forwarder, err = portforward.New(
+		dialer,
+		portMaps,
+		fc.stopChannel,
+		fc.readyChannel,
+		outWriter,
+		errWriter,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return fc, nil
+}
+
+// NewDialer returns a Dialer to be used with a PortForwarder
+func NewDialer(
+	kubeInterface kubernetes.Interface,
+	config *rest.Config,
+	namespace string,
+	pod string,
+) (httpstream.Dialer, error) {
+	req := kubeInterface.CoreV1().
+		RESTClient().
+		Post().
+		Resource("pods").
+		Namespace(namespace).
+		Name(pod).
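	// The "portforward" subresource below is the same endpoint kubectl
	// port-forward talks to (POST /api/v1/namespaces/<ns>/pods/<pod>/portforward);
	// the SPDY round tripper then upgrades the request into a multiplexed stream.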
+ SubResource("portforward") + + transport, upgrader, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) + return dialer, nil +} + +// StartAndWait begins the port-forwarding and waits until it's ready +func (fc *ForwardConnection) StartAndWait() error { + var err error + go func() { + ginkgo.GinkgoWriter.Println("Starting port-forward") + err = fc.Forwarder.ForwardPorts() + if err != nil { + ginkgo.GinkgoWriter.Printf("port-forward failed with error %s\n", err.Error()) + return + } + }() + select { + case <-fc.readyChannel: + ginkgo.GinkgoWriter.Println("port-forward ready") + return nil + case <-fc.stopChannel: + ginkgo.GinkgoWriter.Println("port-forward closed") + return err + } +} + +// GetLocalPort will return the local port where the forward has started +func (fc *ForwardConnection) GetLocalPort() (string, error) { + ports, err := fc.Forwarder.GetPorts() + if err != nil { + return "", err + } + return strconv.Itoa(int(ports[0].Local)), nil +} + +// getPortMap takes the first port between the list of ports exposed by the given service, and +// returns a map with 0 as the local port for auto-assignment +func getPortMap(serviceObj *corev1.Service) ([]string, error) { + if len(serviceObj.Spec.Ports) == 0 { + return []string{}, fmt.Errorf("service %s has no ports", serviceObj.Name) + } + port := serviceObj.Spec.Ports[0].Port + return []string{fmt.Sprintf("0:%d", port)}, nil +} + +func getPodAndPortsFromService( + ctx context.Context, + kubeInterface kubernetes.Interface, + namespace, + service string, +) (string, []string, error) { + serviceObj, err := getServiceObject(ctx, kubeInterface, namespace, service) + if err != nil { + return "", nil, err + } + + podObj, err := getPodFromService(ctx, kubeInterface, serviceObj) + if err != nil { + return "", nil, err + } + + portMaps, err := getPortMap(serviceObj) + if err != nil { + return "", nil, err + } + + return podObj.Name, portMaps, nil +} + +func getServiceObject( + ctx context.Context, + kubeInterface kubernetes.Interface, + namespace, + service string, +) (*corev1.Service, error) { + return kubeInterface.CoreV1().Services(namespace).Get(ctx, service, metav1.GetOptions{}) +} + +func getPodFromService( + ctx context.Context, + kubeInterface kubernetes.Interface, + serviceObj *corev1.Service, +) (*corev1.Pod, error) { + namespace := serviceObj.Namespace + + labelSelector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: serviceObj.Spec.Selector, + }) + if err != nil { + return nil, err + } + + podList, err := kubeInterface.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labelSelector.String(), + }) + if err != nil { + return nil, err + } + + if len(podList.Items) == 0 { + return nil, fmt.Errorf("no pods found for service %s", serviceObj.Name) + } + + return &podList.Items[0], nil +} diff --git a/tests/utils/psql_connection.go b/tests/utils/psql_connection.go index a0d8a7a1fb..d3a24cc40a 100644 --- a/tests/utils/psql_connection.go +++ b/tests/utils/psql_connection.go @@ -18,128 +18,87 @@ package utils import ( "database/sql" - "fmt" - "net/http" - "os" - "strconv" + "io" "time" - "github.com/onsi/ginkgo/v2" - "k8s.io/client-go/rest" + "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/client-go/tools/portforward" - "k8s.io/client-go/transport/spdy" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool" 
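
Taken together, the package above is driven in three steps: build a dialer, start the forwarder, read back the auto-assigned local port. A minimal usage sketch, where kubeClient, restConfig and the cluster-example-rw service stand in for the test environment's client-go plumbing, a lib/pq-style driver is assumed registered, and sslmode depends on what the target cluster accepts:

	dialer, portMaps, err := forwardconnection.NewDialerFromService(
		ctx, kubeClient, restConfig, "default", "cluster-example-rw")
	if err != nil {
		return err
	}
	fc, err := forwardconnection.NewForwardConnection(dialer, portMaps, io.Discard, io.Discard)
	if err != nil {
		return err
	}
	if err := fc.StartAndWait(); err != nil {
		return err
	}
	defer fc.Forwarder.Close()

	localPort, err := fc.GetLocalPort()
	if err != nil {
		return err
	}
	// Any libpq-compatible client can now reach the service through localhost.
	db, err := sql.Open("postgres",
		fmt.Sprintf("host=localhost port=%s user=app dbname=app sslmode=disable", localPort))
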
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/forwardconnection" ) -// PSQLForwardConnection manage the creation of a port forward to connect by psql client locally +// PSQLForwardConnection manages the creation of a port-forwarding to open a new database connection type PSQLForwardConnection struct { - namespace string - pod string - stopChan chan struct{} - readyChan chan struct{} - pooler *pool.ConnectionPool + pooler pool.Pooler portForward *portforward.PortForwarder - err error } -// psqlForwardConnectionNew initialize and create the proper forward configuration -func psqlForwardConnectionNew(env *TestingEnvironment, namespace, pod string) (*PSQLForwardConnection, error) { - psqlc := &PSQLForwardConnection{} - if pod == "" { - return nil, fmt.Errorf("pod not provided") - } - psqlc.namespace = namespace - psqlc.pod = pod +// Close will stop the port-forwarding and exit +func (psqlc *PSQLForwardConnection) Close() { + psqlc.portForward.Close() +} - req := psqlc.createRequest(env) +// GetPooler returns the connection Pooler +func (psqlc *PSQLForwardConnection) GetPooler() pool.Pooler { + return psqlc.pooler +} - transport, upgrader, err := spdy.RoundTripperFor(env.RestClientConfig) - if err != nil { - return nil, err +// createConnectionParameters returns a map of parameters required to perform a connection +func createConnectionParameters(user, password, localPort string) map[string]string { + return map[string]string{ + "host": "localhost", + "port": localPort, + "user": user, + "password": password, } - dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) - - psqlc.readyChan = make(chan struct{}, 1) - psqlc.stopChan = make(chan struct{}) +} - psqlc.portForward, err = portforward.New( +func startForwardConnection( + dialer httpstream.Dialer, + portMap []string, + dbname, + userApp, + passApp string, +) (*PSQLForwardConnection, *sql.DB, error) { + forwarder, err := forwardconnection.NewForwardConnection( dialer, - []string{"0:5432"}, - psqlc.stopChan, - psqlc.readyChan, - os.Stdout, - os.Stderr, + portMap, + io.Discard, + io.Discard, ) - - return psqlc, err -} - -func (psqlc *PSQLForwardConnection) createRequest(env *TestingEnvironment) *rest.Request { - return env.Interface.CoreV1(). - RESTClient(). - Post(). - Resource("pods"). - Namespace(psqlc.namespace). - Name(psqlc.pod). 
- SubResource("portforward") -} - -// startAndWait will begin the forward and wait to be ready -func (psqlc *PSQLForwardConnection) startAndWait() error { - go func() { - ginkgo.GinkgoWriter.Printf("Starting port-forward\n") - psqlc.err = psqlc.portForward.ForwardPorts() - if psqlc.err != nil { - ginkgo.GinkgoWriter.Printf("port-forward failed with error %s\n", psqlc.err.Error()) - return - } - }() - select { - case <-psqlc.readyChan: - ginkgo.GinkgoWriter.Printf("port-forward ready\n") - return nil - case <-psqlc.stopChan: - ginkgo.GinkgoWriter.Printf("port-forward closed\n") - return psqlc.err + if err != nil { + return nil, nil, err } -} -// GetPooler returns the connection Pooler -func (psqlc *PSQLForwardConnection) GetPooler() *pool.ConnectionPool { - return psqlc.pooler -} + if err = forwarder.StartAndWait(); err != nil { + return nil, nil, err + } -// getLocalPort gets the local port needed to connect to Postgres -func (psqlc *PSQLForwardConnection) getLocalPort() (string, error) { - forwardedPorts, err := psqlc.portForward.GetPorts() + localPort, err := forwarder.GetLocalPort() if err != nil { - return "", err + return nil, nil, err } - return strconv.Itoa(int(forwardedPorts[0].Local)), nil -} + connParameters := createConnectionParameters(userApp, passApp, localPort) -// Close will stop the forward and exit -func (psqlc *PSQLForwardConnection) Close() { - psqlc.portForward.Close() -} + pooler := pool.NewPgbouncerConnectionPool(configfile.CreateConnectionString(connParameters)) -// createConnectionParameters return the parameters require to create a connection -// to the current forwarded port -func (psqlc *PSQLForwardConnection) createConnectionParameters(user, password string) (map[string]string, error) { - port, err := psqlc.getLocalPort() + conn, err := pooler.Connection(dbname) if err != nil { - return nil, err + return nil, nil, err } - return map[string]string{ - "host": "localhost", - "port": port, - "user": user, - "password": password, - }, nil + conn.SetMaxOpenConns(10) + conn.SetMaxIdleConns(10) + conn.SetConnMaxLifetime(time.Hour) + conn.SetConnMaxIdleTime(time.Hour) + + return &PSQLForwardConnection{ + portForward: forwarder.Forwarder, + pooler: pooler, + }, conn, err } // ForwardPSQLConnection simplifies the creation of forwarded connection to PostgreSQL cluster @@ -158,8 +117,8 @@ func ForwardPSQLConnection( return ForwardPSQLConnectionWithCreds(env, namespace, clusterName, dbname, user, pass) } -// ForwardPSQLConnectionWithCreds does the same as ForwardPSQLConnection but without trying to -// get the credentials using the cluster +// ForwardPSQLConnectionWithCreds creates a forwarded connection to a PostgreSQL cluster +// using the given credentials func ForwardPSQLConnectionWithCreds( env *TestingEnvironment, namespace, @@ -173,31 +132,57 @@ func ForwardPSQLConnectionWithCreds( return nil, nil, err } - forward, err := psqlForwardConnectionNew(env, namespace, cluster.Status.CurrentPrimary) + dialer, err := forwardconnection.NewDialer( + env.Interface, + env.RestClientConfig, + namespace, + cluster.Status.CurrentPrimary, + ) if err != nil { return nil, nil, err } - if err = forward.startAndWait(); err != nil { + psqlForwardConn, conn, err := startForwardConnection( + dialer, + []string{forwardconnection.PostgresPortMap}, + dbname, + userApp, + passApp, + ) + if err != nil { return nil, nil, err } - connParameters, err := forward.createConnectionParameters(userApp, passApp) + return psqlForwardConn, conn, err +} + +// ForwardPSQLServiceConnection creates a forwarded 
connection to a PostgreSQL service +// using the given credentials +func ForwardPSQLServiceConnection( + env *TestingEnvironment, + namespace, + serviceName, + dbname, + userApp, + passApp string, +) (*PSQLForwardConnection, *sql.DB, error) { + dialer, portMap, err := forwardconnection.NewDialerFromService( + env.Ctx, + env.Interface, + env.RestClientConfig, + namespace, + serviceName, + ) if err != nil { return nil, nil, err } - forward.pooler = pool.NewPostgresqlConnectionPool(configfile.CreateConnectionString(connParameters)) - conn, err := forward.pooler.Connection(dbname) + psqlForwardConn, conn, err := startForwardConnection(dialer, portMap, dbname, userApp, passApp) if err != nil { return nil, nil, err } - conn.SetMaxOpenConns(10) - conn.SetMaxIdleConns(10) - conn.SetConnMaxLifetime(time.Hour) - conn.SetConnMaxIdleTime(time.Hour) - return forward, conn, err + return psqlForwardConn, conn, err } // RunQueryRowOverForward runs QueryRow with a given query, returning the Row of the SQL command diff --git a/tests/utils/service.go b/tests/utils/service.go index e569011b22..cce93ca126 100644 --- a/tests/utils/service.go +++ b/tests/utils/service.go @@ -45,6 +45,20 @@ func GetReadWriteServiceName(clusterName string) string { return fmt.Sprintf("%v%v", clusterName, apiv1.ServiceReadWriteSuffix) } +// GetService gets a service given name and namespace +func GetService(namespace, name string, env *TestingEnvironment) (*corev1.Service, error) { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + service := &corev1.Service{} + err := GetObject(env, namespacedName, service) + if err != nil { + return nil, err + } + return service, nil +} + // GetRwServiceObject return read write service object func GetRwServiceObject(namespace, clusterName string, env *TestingEnvironment) (*corev1.Service, error) { svcName := GetReadWriteServiceName(clusterName) From 741388dfaa3ca6eff66738ee257c7a2283d84369 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 6 Dec 2024 12:19:22 +0100 Subject: [PATCH 213/836] fix(deps): update github.com/cloudnative-pg/barman-cloud digest to 711113b (main) (#6285) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index eaa13af67f..980261fede 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/avast/retry-go/v4 v4.6.0 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 + github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258 github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc diff --git a/go.sum b/go.sum index cf4ed929c2..2140149042 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14 h1:HX5pXyzVAqfjcDgCa1l8b4sumf7XYnGqiP+6XMgbB2E= -github.com/cloudnative-pg/barman-cloud v0.0.0-20241105055149-ae6c2408bd14/go.mod 
h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= +github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258 h1:B/Wncxl/OXrXJUHHtBCyxE//6FdIxznERfzPMsNHWfw= +github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA= github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs= From d5cc9bb03afa2025190e2f2a92ce707c479481b5 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Fri, 6 Dec 2024 13:43:49 +0100 Subject: [PATCH 214/836] feat: ensure unique manager for declarative PostgreSQL resources (#6258) This patch extends the existing logic that ensures only one manager exists for a Postgres resource in the Database support code to also cover Publications and Subscriptions. Close #5922 Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- api/v1/database_funcs.go | 21 ++++++ api/v1/generic_funcs.go | 64 +++++++++++++++++++ api/v1/publication_funcs.go | 21 ++++++ api/v1/subscription_funcs.go | 21 ++++++ internal/management/controller/common.go | 53 +++++++++++++++ .../controller/database_controller.go | 60 +---------------- .../controller/database_controller_test.go | 2 +- .../controller/publication_controller.go | 5 ++ .../controller/subscription_controller.go | 5 ++ 9 files changed, 194 insertions(+), 58 deletions(-) create mode 100644 api/v1/generic_funcs.go diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go index 198e760ce5..2e87eba148 100644 --- a/api/v1/database_funcs.go +++ b/api/v1/database_funcs.go @@ -49,3 +49,24 @@ func (db *Database) GetStatusMessage() string { func (db *Database) GetClusterRef() corev1.LocalObjectReference { return db.Spec.ClusterRef } + +// GetManagedObjectName returns the name of the managed database object +func (db *Database) GetManagedObjectName() string { + return db.Spec.Name +} + +// GetName returns the database object name +func (db *Database) GetName() string { + return db.Name +} + +// HasReconciliations returns true if the database object has been reconciled at least once +func (db *Database) HasReconciliations() bool { + return db.Status.ObservedGeneration > 0 +} + +// MustHaveManagedResourceExclusivity detects conflicting databases +func (dbList *DatabaseList) MustHaveManagedResourceExclusivity(reference *Database) error { + pointers := toSliceWithPointers(dbList.Items) + return ensureManagedResourceExclusivity(reference, pointers) +} diff --git a/api/v1/generic_funcs.go b/api/v1/generic_funcs.go new file mode 100644 index 0000000000..3fc7e756f8 --- /dev/null +++ b/api/v1/generic_funcs.go @@ -0,0 +1,64 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +type managedResourceComparer interface { + GetName() string + GetManagedObjectName() string + GetClusterRef() corev1.LocalObjectReference + HasReconciliations() bool +} + +func ensureManagedResourceExclusivity[T managedResourceComparer](t1 T, list []T) error { + for _, t2 := range list { + if t1.GetName() == t2.GetName() { + continue + } + + if t1.GetClusterRef().Name != t2.GetClusterRef().Name { + continue + } + + if !t2.HasReconciliations() { + continue + } + + if t1.GetManagedObjectName() == t2.GetManagedObjectName() { + return fmt.Errorf( + "%q is already managed by object %q", + t1.GetManagedObjectName(), t2.GetName(), + ) + } + } + + return nil +} + +// toSliceWithPointers converts a slice of items to a slice of pointers to the items +func toSliceWithPointers[T any](items []T) []*T { + result := make([]*T, len(items)) + for i, item := range items { + result[i] = &item + } + return result +} diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go index bfda3183a3..c32cc0c0cb 100644 --- a/api/v1/publication_funcs.go +++ b/api/v1/publication_funcs.go @@ -49,3 +49,24 @@ func (pub *Publication) GetStatusMessage() string { func (pub *Publication) GetClusterRef() corev1.LocalObjectReference { return pub.Spec.ClusterRef } + +// GetManagedObjectName returns the name of the managed publication object +func (pub *Publication) GetManagedObjectName() string { + return pub.Spec.Name +} + +// HasReconciliations returns true if the publication has been reconciled at least once +func (pub *Publication) HasReconciliations() bool { + return pub.Status.ObservedGeneration > 0 +} + +// GetName returns the publication name +func (pub *Publication) GetName() string { + return pub.Name +} + +// MustHaveManagedResourceExclusivity detects conflicting publications +func (pub *PublicationList) MustHaveManagedResourceExclusivity(reference *Publication) error { + pointers := toSliceWithPointers(pub.Items) + return ensureManagedResourceExclusivity(reference, pointers) +} diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go index 506bf05b81..a337bb04a3 100644 --- a/api/v1/subscription_funcs.go +++ b/api/v1/subscription_funcs.go @@ -49,3 +49,24 @@ func (sub *Subscription) GetStatusMessage() string { func (sub *Subscription) GetClusterRef() corev1.LocalObjectReference { return sub.Spec.ClusterRef } + +// GetName returns the subscription object name +func (sub *Subscription) GetName() string { + return sub.Name +} + +// GetManagedObjectName returns the name of the managed subscription object +func (sub *Subscription) GetManagedObjectName() string { + return sub.Spec.Name +} + +// HasReconciliations returns true if the subscription has been reconciled at least once +func (sub *Subscription) HasReconciliations() bool { + return sub.Status.ObservedGeneration > 0 +} + +// MustHaveManagedResourceExclusivity detects conflicting subscriptions +func (pub *SubscriptionList) MustHaveManagedResourceExclusivity(reference *Subscription) error { + pointers := toSliceWithPointers(pub.Items) + return ensureManagedResourceExclusivity(reference, pointers) +} diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go index b5013d6657..d0fe51dc68 100644 --- a/internal/management/controller/common.go +++ b/internal/management/controller/common.go @@ -23,10 +23,13 @@ import ( "fmt" "maps" "slices" + "time" + "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/pgx/v5" 
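
A note on toSliceWithPointers above: the comparer methods are declared on pointer receivers, so a []Database can never be passed to ensureManagedResourceExclusivity directly; it has to become a []*Database first. A standalone sketch of the same generic conversion (pure stdlib, illustrative names):

	func toPointers[T any](items []T) []*T {
		out := make([]*T, len(items))
		for i := range items {
			out[i] = &items[i] // address the slice element, not the loop variable
		}
		return out
	}

Indexing into the slice sidesteps the pre-Go-1.22 pitfall where &item aliases a single loop variable; from Go 1.22 on, loop variables are per-iteration, so the &item form used in generic_funcs.go is equivalent.
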
"github.com/lib/pq" "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -121,3 +124,53 @@ func toPostgresParameters(parameters map[string]string) string { // pruning last 2 chars `, ` return b.String()[:len(b.String())-2] } + +type postgresResourceManager interface { + client.Object + HasReconciliations() bool + markableAsFailed +} + +type managedResourceExclusivityEnsurer[T postgresResourceManager] interface { + MustHaveManagedResourceExclusivity(newManager T) error + client.ObjectList +} + +func detectConflictingManagers[T postgresResourceManager, TL managedResourceExclusivityEnsurer[T]]( + ctx context.Context, + cli client.Client, + resource T, + list TL, +) (ctrl.Result, error) { + if resource.HasReconciliations() { + return ctrl.Result{}, nil + } + contextLogger := log.FromContext(ctx) + + if err := cli.List(ctx, list, + client.InNamespace(resource.GetNamespace()), + ); err != nil { + kind := list.GetObjectKind().GroupVersionKind().Kind + + contextLogger.Error(err, "while getting list", + "kind", kind, + "namespace", resource.GetNamespace(), + ) + return ctrl.Result{}, fmt.Errorf("impossible to list %s objects in namespace %s: %w", + kind, resource.GetNamespace(), err) + } + + // Make sure the target PG element is not being managed by another kubernetes resource + if conflictErr := list.MustHaveManagedResourceExclusivity(resource); conflictErr != nil { + if markErr := markAsFailed(ctx, cli, resource, conflictErr); markErr != nil { + return ctrl.Result{}, + fmt.Errorf("encountered an error while marking as failed the resource: %w, original error: %w", + markErr, + conflictErr, + ) + } + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + return ctrl.Result{}, nil +} diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index 22a72861fd..7fdbf5ba22 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -114,19 +114,9 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{}, nil } - // Make sure the target PG Database is not being managed by another Database Object - if err := r.ensureOnlyOneManager(ctx, database); err != nil { - if markErr := markAsFailed(ctx, r.Client, &database, err); markErr != nil { - contextLogger.Error(err, "while marking as failed the database resource", - "error", err, - "markError", markErr, - ) - return ctrl.Result{}, fmt.Errorf( - "encountered an error while marking as failed the database resource: %w, original error: %w", - markErr, - err) - } - return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil + if res, err := detectConflictingManagers(ctx, r.Client, &database, &apiv1.DatabaseList{}); err != nil || + !res.IsZero() { + return res, err } if err := r.reconcileDatabase(ctx, &database); err != nil { @@ -149,50 +139,6 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return ctrl.Result{RequeueAfter: databaseReconciliationInterval}, nil } -// ensureOnlyOneManager verifies that the target PostgreSQL Database specified by the given Database object -// is not already managed by another Database object within the same namespace and cluster. -// If another Database object is found to be managing the same PostgreSQL database, this method returns an error. 
-func (r *DatabaseReconciler) ensureOnlyOneManager( - ctx context.Context, - database apiv1.Database, -) error { - contextLogger := log.FromContext(ctx) - - if database.Status.ObservedGeneration > 0 { - return nil - } - - var databaseList apiv1.DatabaseList - if err := r.Client.List(ctx, &databaseList, - client.InNamespace(r.instance.GetNamespaceName()), - ); err != nil { - contextLogger.Error(err, "while getting database list", "namespace", r.instance.GetNamespaceName()) - return fmt.Errorf("impossible to list database objects in namespace %s: %w", - r.instance.GetNamespaceName(), err) - } - - for _, item := range databaseList.Items { - if item.Name == database.Name { - continue - } - - if item.Spec.ClusterRef.Name != r.instance.GetClusterName() { - continue - } - - if item.Status.ObservedGeneration == 0 { - continue - } - - if item.Spec.Name == database.Spec.Name { - return fmt.Errorf("database %q is already managed by Database object %q", - database.Spec.Name, item.Name) - } - } - - return nil -} - func (r *DatabaseReconciler) evaluateDropDatabase(ctx context.Context, db *apiv1.Database) error { if db.Spec.ReclaimPolicy != apiv1.DatabaseReclaimDelete { return nil diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index 37712a2c21..d41d7eab57 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -422,7 +422,7 @@ var _ = Describe("Managed Database status", func() { }, dbDuplicate) Expect(err).ToNot(HaveOccurred()) - expectedError := fmt.Sprintf("database %q is already managed by Database object %q", + expectedError := fmt.Sprintf("%q is already managed by object %q", dbDuplicate.Spec.Name, currentManager.Name) Expect(dbDuplicate.Status.Applied).To(HaveValue(BeFalse())) Expect(dbDuplicate.Status.Message).To(ContainSubstring(expectedError)) diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go index 086b37e778..d268367f1e 100644 --- a/internal/management/controller/publication_controller.go +++ b/internal/management/controller/publication_controller.go @@ -115,6 +115,11 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{RequeueAfter: publicationReconciliationInterval}, nil } + if res, err := detectConflictingManagers(ctx, r.Client, &publication, &apiv1.PublicationList{}); err != nil || + !res.IsZero() { + return res, err + } + if err := r.finalizerReconciler.reconcile(ctx, &publication); err != nil { return ctrl.Result{}, fmt.Errorf("while reconciling the finalizer: %w", err) } diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go index 16a1165b7f..5fae540722 100644 --- a/internal/management/controller/subscription_controller.go +++ b/internal/management/controller/subscription_controller.go @@ -135,6 +135,11 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{RequeueAfter: subscriptionReconciliationInterval}, nil } + if res, err := detectConflictingManagers(ctx, r.Client, &subscription, &apiv1.SubscriptionList{}); err != nil || + !res.IsZero() { + return res, err + } + if err := r.alignSubscription(ctx, &subscription, connString); err != nil { contextLogger.Error(err, "while reconciling subscription") if markErr := markAsFailed(ctx, r.Client, &subscription, err); markErr != 
nil { From e17819dcc0605c68dd2b6d73b7c120e872c17504 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Fri, 6 Dec 2024 14:25:51 +0100 Subject: [PATCH 215/836] test: express the initdb unit tests coherently (#6291) Signed-off-by: Jaime Silvela --- pkg/management/postgres/initdb_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/management/postgres/initdb_test.go b/pkg/management/postgres/initdb_test.go index 985c42293b..0df37770c9 100644 --- a/pkg/management/postgres/initdb_test.go +++ b/pkg/management/postgres/initdb_test.go @@ -32,7 +32,7 @@ var _ = Describe("EnsureTargetDirectoriesDoNotExist", func() { PgData: GinkgoT().TempDir(), PgWal: GinkgoT().TempDir(), } - Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().To(Succeed()) + Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().NotTo(HaveOccurred()) Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed()) }) @@ -84,7 +84,7 @@ var _ = Describe("renameExistingTargetDataDirectories", func() { PgData: GinkgoT().TempDir(), PgWal: GinkgoT().TempDir(), } - Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().To(Succeed()) + Expect(os.Create(filepath.Join(initInfo.PgData, "PG_VERSION"))).Error().NotTo(HaveOccurred()) Expect(os.Mkdir(filepath.Join(initInfo.PgWal, "archive_status"), 0o700)).To(Succeed()) }) From a4cf356805f29f26e2aed4d0443bc5fa2cc554ed Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Fri, 6 Dec 2024 15:16:16 +0100 Subject: [PATCH 216/836] feat: add `cnpg.io/userType` label to generated secrets (#4392) This patch adds a `cnpg.io/userType` label to the secrets containing users' credentials; its value tells whether the user is a superuser or an application role. Support for this label is limited to users created by the operator. Closes #2631 Signed-off-by: Pierrick Signed-off-by: Gabriele Bartolini Signed-off-by: Leonardo Cecchi Signed-off-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini Co-authored-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini --- docs/src/labels_annotations.md | 6 ++++++ internal/controller/cluster_create.go | 6 ++++-- pkg/specs/secrets.go | 4 +++- pkg/specs/secrets_test.go | 6 +++++- pkg/utils/labels_annotations.go | 19 ++++++++++++++++++- 5 files changed, 36 insertions(+), 5 deletions(-) diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 8da4514584..299758c434 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -77,6 +77,12 @@ These predefined labels are managed by CloudNativePG. : Available on `ConfigMap` and `Secret` resources. When set to `true`, a change in the resource is automatically reloaded by the operator. +`cnpg.io/userType` +: Specifies the type of PostgreSQL user associated with the + `Secret`, either `superuser` (Postgres superuser access) or `app` + (application-level user in CloudNativePG terminology), and is limited to the + default users created by CloudNativePG (typically `postgres` and `app`). + `role` - **deprecated** : Whether the instance running in a pod is a `primary` or a `replica`. This label is deprecated; you should use `cnpg.io/instanceRole` instead.
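As a quick illustration of consuming the new label (a hypothetical consumer-side helper, not code from this patch), the generated credential Secrets can be selected by label with controller-runtime:

package snippets

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listSecretsByUserType lists the credential Secrets in a namespace that
// carry the cnpg.io/userType label described above, filtering on its value
// ("superuser" or "app"). Illustrative helper only.
func listSecretsByUserType(
	ctx context.Context,
	c client.Client,
	namespace, userType string,
) ([]corev1.Secret, error) {
	var secrets corev1.SecretList
	if err := c.List(ctx, &secrets,
		client.InNamespace(namespace),
		client.MatchingLabels{"cnpg.io/userType": userType},
	); err != nil {
		return nil, err
	}
	return secrets.Items, nil
}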
diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 2aa3a51d47..280fe3361d 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -176,7 +176,8 @@ func (r *ClusterReconciler) reconcileSuperuserSecret(ctx context.Context, cluste cluster.GetServiceReadWriteName(), "*", "postgres", - postgresPassword) + postgresPassword, + utils.UserTypeSuperuser) cluster.SetInheritedDataAndOwnership(&postgresSecret.ObjectMeta) return createOrPatchClusterCredentialSecret(ctx, r.Client, postgresSecret) @@ -216,7 +217,8 @@ func (r *ClusterReconciler) reconcileAppUserSecret(ctx context.Context, cluster cluster.GetServiceReadWriteName(), cluster.GetApplicationDatabaseName(), cluster.GetApplicationDatabaseOwner(), - appPassword) + appPassword, + utils.UserTypeApp) cluster.SetInheritedDataAndOwnership(&appSecret.ObjectMeta) return createOrPatchClusterCredentialSecret(ctx, r.Client, appSecret) diff --git a/pkg/specs/secrets.go b/pkg/specs/secrets.go index 2e66a497e7..d7503dd026 100644 --- a/pkg/specs/secrets.go +++ b/pkg/specs/secrets.go @@ -35,6 +35,7 @@ func CreateSecret( dbname string, username string, password string, + usertype utils.UserType, ) *corev1.Secret { uriBuilder := newConnectionStringBuilder(hostname, dbname, username, password, namespace) @@ -43,7 +44,8 @@ func CreateSecret( Name: name, Namespace: namespace, Labels: map[string]string{ - utils.WatchedLabelName: "true", + utils.UserTypeLabelName: string(usertype), + utils.WatchedLabelName: "true", }, }, Type: corev1.SecretTypeBasicAuth, diff --git a/pkg/specs/secrets_test.go b/pkg/specs/secrets_test.go index 0807c90817..6648b76294 100644 --- a/pkg/specs/secrets_test.go +++ b/pkg/specs/secrets_test.go @@ -17,6 +17,8 @@ limitations under the License. package specs import ( + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -24,7 +26,7 @@ import ( var _ = Describe("Secret creation", func() { It("create a secret with the right user and password", func() { secret := CreateSecret("name", "namespace", - "thishost", "thisdb", "thisuser", "thispassword") + "thishost", "thisdb", "thisuser", "thispassword", utils.UserTypeApp) Expect(secret.Name).To(Equal("name")) Expect(secret.Namespace).To(Equal("namespace")) Expect(secret.StringData["username"]).To(Equal("thisuser")) @@ -39,5 +41,7 @@ var _ = Describe("Secret creation", func() { Expect(secret.StringData["jdbc-uri"]).To( Equal("jdbc:postgresql://thishost.namespace:5432/thisdb?password=thispassword&user=thisuser"), ) + Expect(secret.Labels).To( + HaveKeyWithValue(utils.UserTypeLabelName, string(utils.UserTypeApp))) }) }) diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index cc6a9e19ba..187325013f 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -71,10 +71,14 @@ const ( // scheduled backup if a backup is created by a scheduled backup ParentScheduledBackupLabelName = MetadataNamespace + "/scheduled-backup" - // WatchedLabelName the name of the label which tell if a resource change will be automatically reloaded by instance + // WatchedLabelName the name of the label which tells if a resource change will be automatically reloaded by instance // or not, use for Secrets or ConfigMaps WatchedLabelName = MetadataNamespace + "/reload" + // UserTypeLabelName the name of the label which tells if a Secret refers + // to a superuser database role or an application one + UserTypeLabelName = MetadataNamespace + "/userType" + // BackupTimelineLabelName is the name or the label where the timeline of a backup is kept BackupTimelineLabelName = MetadataNamespace + "/backupTimeline" @@ -273,6 +277,19 @@ const ( HibernationAnnotationValueOn HibernationAnnotationValue = "on" ) +// UserType tells if a secret refers to a superuser database role +// or an application one +type UserType string + +const ( + // UserTypeSuperuser is the type of a superuser database + // role + UserTypeSuperuser UserType = "superuser" + + // UserTypeApp is the type of an application role + UserTypeApp UserType = "app" +) + // LabelClusterName labels the object with the cluster name func LabelClusterName(object *metav1.ObjectMeta, name string) { if object.Labels == nil { From d699efa07575ca6068800137488b1f29ab1d4e0b Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 6 Dec 2024 15:49:07 +0100 Subject: [PATCH 217/836] test(e2e): AssertConnection must retry before failing (#6293) Fix a regression in the E2E testing suite introduced in #5898 Signed-off-by: Marco Nenciarini --- tests/e2e/asserts_test.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index a7a5eb21ee..8c6d459fec 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -415,19 +415,21 @@ func AssertConnection( }) By(fmt.Sprintf("connecting to the %v service as %v", service, user), func() { - forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service, - dbname, user, password) - defer func() { - _ = conn.Close() - forwardConn.Close() - }() - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) { + forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service, + dbname, user, password) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + 
g.Expect(err).ToNot(HaveOccurred()) - var rawValue string - row := conn.QueryRow("SELECT 1") - err = row.Scan(&rawValue) - Expect(err).ToNot(HaveOccurred()) - Expect(strings.TrimSpace(rawValue)).To(BeEquivalentTo("1")) + var rawValue string + row := conn.QueryRow("SELECT 1") + err = row.Scan(&rawValue) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.TrimSpace(rawValue)).To(BeEquivalentTo("1")) + }, RetryTimeout).Should(Succeed()) }) } From 1839f1b732ce3333f07bfe9f74f2c47dbf66360c Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Fri, 6 Dec 2024 16:48:58 +0100 Subject: [PATCH 218/836] feat(OLM): enhance UI descriptions for Database, Publication, and Subscription CRDs (#6249) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch improves the OLM user interface by adding detailed descriptions for the Database, Publication, and Subscription custom resources. Closes #6248 Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Bartolini Signed-off-by: Niccolò Fei Co-authored-by: Gabriele Bartolini Co-authored-by: Niccolò Fei --- .../cloudnative-pg.clusterserviceversion.yaml | 139 +++++++++++++++--- 1 file changed, 119 insertions(+), 20 deletions(-) diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index a8d0c40f05..fc286e39e7 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -784,30 +784,120 @@ spec: - 'urn:alm:descriptor:com.tectonic.ui:text' - kind: Database name: databases.postgresql.cnpg.io - displayName: Database management - description: Declarative database management + displayName: Postgres Database + description: Declarative creation and management of a database on a Cluster version: v1 resources: - kind: Cluster name: '' version: v1 specDescriptors: - - path: databaseReclaimPolicy - displayName: Database reclaim policy - description: Database reclaim policy - path: cluster displayName: Cluster requested to create the database - description: Cluster requested to create the database + description: Cluster in which to create the database - path: name displayName: Database name description: Database name - path: owner displayName: Database Owner - description: Database Owner + description: Owner of the database that will be created in Postgres + - path: ensure + displayName: Ensure + description: Ensure the PostgreSQL database is `present` or `absent` + - path: databaseReclaimPolicy + displayName: Database reclaim policy + description: Specifies the action to take for the database inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the database or retain it for future management. + # Configuration section + - path: template + displayName: Template + description: The name of the template from which to create this database. + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: tablespace + displayName: Tablespace + description: The name of the tablespace that will be associated with the database. 
+ x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: isTemplate + displayName: Database is a template + description: If true, this database is considered a template + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: allowConnections + displayName: Allow Connections + description: If false, then no one can connect to this database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: connectionLimit + displayName: Connection Limit + description: How many concurrent connections can be made to this database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:number' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Encoding and Locale + - path: encoding + displayName: Encoding + description: Character set encoding to use in the database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: locale + displayName: Locale + description: Sets the default collation order and character classification for the database. + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: localeCollate + displayName: LC collate + description: The collation to use for the database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: localeCType + displayName: LC ctype + description: The ctype to use for the database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: localeProvider + displayName: Locale Provider + description: Specifies the provider to use for the default collation in this database (Available from PostgreSQL 16). 
+ x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: collationVersion + displayName: Collation version + description: The version identifier of the collation + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: builtinLocale + displayName: Builtin locale + description: The choice of which builtin locale to use + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # ICU section + - path: icuLocale + displayName: ICU locale + description: ICU locale to use for the database + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: icuRules + displayName: ICU rules + description: Additional customization of ICU locale + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:text' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' - kind: Publication name: publications.postgresql.cnpg.io - displayName: Publication - description: Declarative publication + displayName: Postgres Publication + description: Declarative creation and management of a Logical Replication Publication in a PostgreSQL Cluster version: v1 resources: - kind: Cluster @@ -816,20 +906,23 @@ spec: specDescriptors: - path: name displayName: Publication name - description: Publication name + description: Name of the publication for PostgreSQL logical replication - path: dbname displayName: Database name - description: Database name + description: Database on which the publication will be created - path: cluster displayName: Cluster requested to create the publication - description: Cluster requested to create the publication + description: Cluster on which the publication will be created - path: target displayName: Publication target - description: Publication target + description: Specifies which tables/schemas in the database should be published + - path: publicationReclaimPolicy + displayName: Publication reclaim policy + description: Specifies the action to take for the publication inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the publication or retain it for future management.
- kind: Subscription name: subscriptions.postgresql.cnpg.io - displayName: Subscription - description: Declarative subscription + displayName: Postgres Subscription + description: Declarative creation and management of a Logical Replication Subscription in a PostgreSQL Cluster to a previously defined Publication version: v1 resources: - kind: Cluster @@ -838,16 +931,22 @@ spec: specDescriptors: - path: name displayName: Subscription name - description: Subscription name + description: Name of the subscription for PostgreSQL logical replication - path: dbname displayName: Database name - description: Database name + description: Database on which the Subscription will be created - path: publicationName displayName: Publication name - description: Publication name + description: Name of the Publication to subscribe to - path: cluster displayName: Cluster requested to create the subscription - description: Cluster requested to create the subscription + description: Cluster on which the subscription will be created (subscriber) - path: externalClusterName displayName: Name of the external cluster with publication - description: Name of the external cluster with publication + description: Name of the cluster where the Publication is defined (publisher) + - path: publicationDBName + displayName: Name of the database containing the publication on the external cluster + description: The name of the database containing the publication on the external cluster. Defaults to the one in the external cluster definition. + - path: subscriptionReclaimPolicy + displayName: Subscription reclaim policy + description: Specifies the action to take for the subscription inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the subscription or retain it for future management. From 9ca9edcd1a7f2927590cdfbdedbf2a94800ed75b Mon Sep 17 00:00:00 2001 From: Jeff Mealo Date: Fri, 6 Dec 2024 11:07:21 -0500 Subject: [PATCH 219/836] feat(plugin): set `User-Agent` in HTTP requests (#6153) Properly set the `User-Agent` header in HTTP requests to the Kubernetes API server.
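For illustration only (a sketch, not code from this patch; the version and commit arguments below are placeholders rather than the real values read from the versions package), the header is composed like this:

package snippets

import (
	"fmt"

	"k8s.io/client-go/rest"
)

// withPluginUserAgent stamps a rest.Config with a User-Agent of the form
// "kubectl-cnpg/v1.25.0 (abc1234)"; version and commit are placeholders.
func withPluginUserAgent(cfg *rest.Config, version, commit string) *rest.Config {
	cfg.UserAgent = fmt.Sprintf("kubectl-cnpg/v%s (%s)", version, commit)
	return cfg
}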
Closes #6038 Signed-off-by: Jeff Mealo Signed-off-by: Marco Nenciarini Signed-off-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini Co-authored-by: Leonardo Cecchi --- internal/cmd/plugin/plugin.go | 4 ++++ internal/cmd/plugin/plugin_test.go | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go index b22c1a9d6c..432e347283 100644 --- a/internal/cmd/plugin/plugin.go +++ b/internal/cmd/plugin/plugin.go @@ -37,6 +37,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) var ( @@ -108,11 +109,14 @@ func SetupKubernetesClient(configFlags *genericclioptions.ConfigFlags) error { func createClient(cfg *rest.Config) error { var err error + scheme := runtime.NewScheme() _ = clientgoscheme.AddToScheme(scheme) _ = apiv1.AddToScheme(scheme) _ = storagesnapshotv1.AddToScheme(scheme) + cfg.UserAgent = fmt.Sprintf("kubectl-cnpg/v%s (%s)", versions.Version, versions.Info.Commit) + Client, err = client.New(cfg, client.Options{Scheme: scheme}) if err != nil { return err diff --git a/internal/cmd/plugin/plugin_test.go b/internal/cmd/plugin/plugin_test.go index b6cfebe70f..8d0ed55683 100644 --- a/internal/cmd/plugin/plugin_test.go +++ b/internal/cmd/plugin/plugin_test.go @@ -23,6 +23,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -30,8 +31,14 @@ import ( var _ = Describe("create client", func() { It("with given configuration", func() { + // createClient is not a pure function and as a side effect + // it will: + // - set the Client global variable + // - set the UserAgent field inside cfg err := createClient(cfg) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg.UserAgent).To(Equal("kubectl-cnpg/v" + versions.Version + " (" + versions.Info.Commit + ")")) Expect(Client).NotTo(BeNil()) }) }) From fab33aeddddb5a0832732ea3453fa31c12138ea3 Mon Sep 17 00:00:00 2001 From: Peggie Date: Fri, 6 Dec 2024 17:13:42 +0100 Subject: [PATCH 220/836] feat: Public Cloud K8S versions update (#6263) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 4 ++-- .github/openshift_versions.json | 1 + Makefile | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index 3ffbbe129d..b5a3eed520 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,6 +1,6 @@ [ - "1.31.1", - "1.30.5", + "1.31.2", + "1.30.6", "1.29.9", "1.28.9" ] diff --git a/.github/openshift_versions.json b/.github/openshift_versions.json index 08587af16a..49cf2ac65d 100644 --- a/.github/openshift_versions.json +++ b/.github/openshift_versions.json @@ -1,4 +1,5 @@ [ + "4.18", "4.17", "4.16", "4.15", diff --git a/Makefile b/Makefile index 2b93fba0ea..6a80924a5a 100644 --- a/Makefile +++ b/Makefile @@ -49,7 +49,7 @@ WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.38.0 OPM_VERSION ?= v1.48.0 PREFLIGHT_VERSION ?= 1.10.2 -OPENSHIFT_VERSIONS ?= v4.12-v4.17 +OPENSHIFT_VERSIONS ?= v4.12-v4.18 ARCH ?= amd64 export CONTROLLER_IMG From 
fd62a1c01e7eb1a9effeb6a0773f36f305b10fa1 Mon Sep 17 00:00:00 2001 From: Abhishek Chanda Date: Fri, 6 Dec 2024 11:13:47 -0600 Subject: [PATCH 221/836] test: make sure we test port correctness for services (#4934) Signed-off-by: Abhishek Chanda Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Co-authored-by: Marco Nenciarini Co-authored-by: Armando Ruocco --- pkg/specs/services_test.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pkg/specs/services_test.go b/pkg/specs/services_test.go index a7c0922b98..dd146fde25 100644 --- a/pkg/specs/services_test.go +++ b/pkg/specs/services_test.go @@ -19,8 +19,10 @@ package specs import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" @@ -33,6 +35,12 @@ var _ = Describe("Services specification", func() { Name: "clustername", }, } + expectedPort := corev1.ServicePort{ + Name: PostgresContainerName, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt32(postgres.ServerPort), + Port: postgres.ServerPort, + } It("create a configured -any service", func() { service := CreateClusterAnyService(postgresql) @@ -40,6 +48,8 @@ var _ = Describe("Services specification", func() { Expect(service.Spec.PublishNotReadyAddresses).To(BeTrue()) Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername")) Expect(service.Spec.Selector[utils.PodRoleLabelName]).To(Equal(string(utils.PodRoleInstance))) + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports).To(ContainElement(expectedPort)) }) It("create a configured -r service", func() { @@ -48,6 +58,8 @@ var _ = Describe("Services specification", func() { Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse()) Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername")) Expect(service.Spec.Selector[utils.PodRoleLabelName]).To(Equal(string(utils.PodRoleInstance))) + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports).To(ContainElement(expectedPort)) }) It("create a configured -ro service", func() { @@ -56,6 +68,8 @@ var _ = Describe("Services specification", func() { Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse()) Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername")) Expect(service.Spec.Selector[utils.ClusterInstanceRoleLabelName]).To(Equal(ClusterRoleLabelReplica)) + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports).To(ContainElement(expectedPort)) }) It("create a configured -rw service", func() { @@ -64,6 +78,8 @@ var _ = Describe("Services specification", func() { Expect(service.Spec.PublishNotReadyAddresses).To(BeFalse()) Expect(service.Spec.Selector[utils.ClusterLabelName]).To(Equal("clustername")) Expect(service.Spec.Selector[utils.ClusterInstanceRoleLabelName]).To(Equal(ClusterRoleLabelPrimary)) + Expect(service.Spec.Ports).To(HaveLen(1)) + Expect(service.Spec.Ports).To(ContainElement(expectedPort)) }) }) From 893c61a6fe978832fa27dc177e62d6146d5064c1 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Fri, 6 Dec 2024 18:20:48 +0100 Subject: [PATCH 222/836] docs: clarify behavior of primaryUpdateStrategy for single-instance clusters (#5001) Clarified the behavior of primaryUpdateStrategy when applied to a cluster consisting of a single instance. 
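For reference, a minimal sketch of the situation being documented, assuming the apiv1 Cluster types used throughout this series (the constant and field names are taken from api/v1 and should be treated as assumptions here): a single-instance Cluster on the default unsupervised strategy, whose only instance is restarted in place during an operator upgrade.

package snippets

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// singleInstanceCluster builds a one-instance Cluster; since `supervised`
// is not supported with a single instance, an operator upgrade restarts
// the only instance in place and applications must reconnect.
func singleInstanceCluster() *apiv1.Cluster {
	return &apiv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster-example"},
		Spec: apiv1.ClusterSpec{
			Instances:             1,
			PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategyUnsupervised,
		},
	}
}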
Signed-off-by: Pierrick --- docs/src/installation_upgrade.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index ec9b2019a6..c948a61148 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -170,7 +170,10 @@ promote the new primary instance using the `cnpg` plugin for `kubectl`. !!! Important In case `primaryUpdateStrategy` is set to the default value of `unsupervised`, an upgrade of the operator will trigger a switchover on your PostgreSQL cluster, - causing a (normally negligible) downtime. + causing a (normally negligible) downtime. If your PostgreSQL Cluster has only one + instance, the instance will be automatically restarted, as the `supervised` value is + not supported for `primaryUpdateStrategy`. In either case, your applications will + have to reconnect to PostgreSQL. The default rolling update behavior can be replaced with in-place updates of the instance manager. This approach does not require a restart of the From 1b873886aa68c2295186d28beb483e9e1fab3efd Mon Sep 17 00:00:00 2001 From: Josh Heyer Date: Fri, 6 Dec 2024 09:44:22 -0800 Subject: [PATCH 223/836] docs: heading level in (#3806) Signed-off-by: Josh Heyer --- docs/src/cluster_conf.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/cluster_conf.md b/docs/src/cluster_conf.md index 43ed25b3fd..3634b11e59 100644 --- a/docs/src/cluster_conf.md +++ b/docs/src/cluster_conf.md @@ -47,7 +47,7 @@ CloudNativePG relies on [ephemeral volumes](https://kubernetes.io/docs/concepts/ for part of the internal activities. Ephemeral volumes exist for the sole duration of a pod's life, without persisting across pod restarts. -# Volume Claim Template for Temporary Storage +### Volume Claim Template for Temporary Storage The operator uses by default an `emptyDir` volume, which can be customized by using the `.spec.ephemeralVolumesSizeLimit` field. This can be overridden by specifying a volume claim template in the `.spec.ephemeralVolumeSource` field.
From d5ad53e003b97d4d236e3f180aae76bea8030ff8 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Sat, 7 Dec 2024 10:31:25 +0100 Subject: [PATCH 224/836] refactor: centralize webserver client logic (#6163) This patch centralizes all the logic regarding the webserver clients inside one package instead of having it scattered around the codebase Signed-off-by: Armando Ruocco --- internal/cmd/manager/instance/status/cmd.go | 8 +- internal/cmd/manager/walarchive/cmd.go | 10 +- internal/cmd/manager/walrestore/cmd.go | 3 +- internal/controller/backup_controller.go | 6 +- internal/controller/cluster_controller.go | 6 +- internal/controller/cluster_upgrade.go | 4 +- internal/management/cache/cache.go | 11 -- internal/management/cache/doc.go | 19 +++ internal/management/cache/keys.go | 26 ++++ internal/plugin/resources/instance.go | 4 +- pkg/management/postgres/archiver/archiver.go | 4 +- .../postgres/webserver/backup_client.go | 146 ------------------ .../webserver/client/common}/client.go | 2 +- .../postgres/webserver/client/common}/doc.go | 4 +- .../postgres/webserver/client/local/backup.go | 98 ++++++++++++ .../postgres/webserver/client/local/cache.go | 30 ++-- .../webserver/client/local/cluster.go | 71 +++++++++ .../postgres/webserver/client/local/doc.go | 18 +++ .../postgres/webserver/client/local/local.go | 62 ++++++++ .../webserver/client/local/request.go | 69 +++++++++ .../postgres/webserver/client/remote/doc.go | 18 +++ .../webserver/client/remote/instance.go} | 30 ++-- .../webserver/client/remote/remote.go | 37 +++++ .../postgres/webserver/local_client.go | 63 -------- .../webserver/metricserver/pg_collector.go | 4 +- .../postgres/webserver/metricserver/wal.go | 4 +- .../backup/volumesnapshot/online.go | 5 +- .../backup/volumesnapshot/reconciler.go | 6 +- .../replicaclusterswitch/reconciler.go | 6 +- .../replicaclusterswitch/shutdown_wal.go | 4 +- 30 files changed, 493 insertions(+), 285 deletions(-) create mode 100644 internal/management/cache/doc.go create mode 100644 internal/management/cache/keys.go delete mode 100644 pkg/management/postgres/webserver/backup_client.go rename pkg/{resources => management/postgres/webserver/client/common}/client.go (98%) rename pkg/{resources/instance => management/postgres/webserver/client/common}/doc.go (84%) create mode 100644 pkg/management/postgres/webserver/client/local/backup.go rename internal/management/cache/client/client.go => pkg/management/postgres/webserver/client/local/cache.go (75%) create mode 100644 pkg/management/postgres/webserver/client/local/cluster.go create mode 100644 pkg/management/postgres/webserver/client/local/doc.go create mode 100644 pkg/management/postgres/webserver/client/local/local.go create mode 100644 pkg/management/postgres/webserver/client/local/request.go create mode 100644 pkg/management/postgres/webserver/client/remote/doc.go rename pkg/{resources/instance/client.go => management/postgres/webserver/client/remote/instance.go} (91%) create mode 100644 pkg/management/postgres/webserver/client/remote/remote.go delete mode 100644 pkg/management/postgres/webserver/local_client.go diff --git a/internal/cmd/manager/instance/status/cmd.go b/internal/cmd/manager/instance/status/cmd.go index d1361d71f1..31c8ed506d 100644 --- a/internal/cmd/manager/instance/status/cmd.go +++ b/internal/cmd/manager/instance/status/cmd.go @@ -29,11 +29,11 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" "github.com/spf13/cobra" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" 
"github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" ) // NewCmd create the "instance status" subcommand @@ -56,7 +56,7 @@ func statusSubCommand(ctx context.Context) error { return err } - cluster, err := cacheClient.GetCluster() + cluster, err := local.NewClient().Cache().GetCluster() if err != nil { contextLogger.Error(err, "while loading the cluster from cache") return err @@ -131,6 +131,6 @@ func executeRequest(ctx context.Context, scheme string) (*http.Response, error) contextLogger.Error(err, "Error while building the request") return nil, err } - httpClient := resources.NewHTTPClient(connectionTimeout, requestTimeout) + httpClient := common.NewHTTPClient(connectionTimeout, requestTimeout) return httpClient.Do(req) // nolint:gosec } diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index 2442f32715..665c656c1f 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -25,9 +25,8 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" "github.com/spf13/cobra" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" ) // errSwitchoverInProgress is raised when there is a switchover in progress @@ -54,7 +53,8 @@ func NewCmd() *cobra.Command { return err } - cluster, errCluster := cacheClient.GetCluster() + localClient := local.NewClient() + cluster, errCluster := localClient.Cache().GetCluster() if errCluster != nil { return fmt.Errorf("failed to get cluster: %w", errCluster) } @@ -66,13 +66,13 @@ func NewCmd() *cobra.Command { } else { contextLog.Error(err, logErrorMessage) } - if reqErr := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, err.Error()); reqErr != nil { + if reqErr := localClient.Cluster().SetWALArchiveStatusCondition(ctx, err.Error()); reqErr != nil { contextLog.Error(reqErr, "while invoking the set wal archive condition endpoint") } return err } - if err := webserver.NewLocalClient().SetWALArchiveStatusCondition(ctx, ""); err != nil { + if err := localClient.Cluster().SetWALArchiveStatusCondition(ctx, ""); err != nil { contextLog.Error(err, "while invoking the set wal archive condition endpoint") } return nil diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index db88cb2725..4accc2a023 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -37,7 +37,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) @@ -113,6 +113,7 @@ func run(ctx context.Context, 
pgData string, podName string, args []string) erro var cluster *apiv1.Cluster var err error + cacheClient := local.NewClient().Cache() cluster, err = cacheClient.GetCluster() if err != nil { return fmt.Errorf("failed to get cluster: %w", err) diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 424ee3966c..4506c08737 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -47,9 +47,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/backup/volumesnapshot" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -71,7 +71,7 @@ type BackupReconciler struct { Recorder record.EventRecorder Plugins repository.Interface - instanceStatusClient instance.Client + instanceStatusClient remote.InstanceClient } // NewBackupReconciler properly initializes the BackupReconciler @@ -85,7 +85,7 @@ func NewBackupReconciler( DiscoveryClient: discoveryClient, Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("cloudnative-pg-backup"), - instanceStatusClient: instance.NewStatusClient(), + instanceStatusClient: remote.NewClient().Instance(), Plugins: plugins, } } diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 2c7dc542af..fb2705f093 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -50,12 +50,12 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" rolloutManager "github.com/cloudnative-pg/cloudnative-pg/internal/controller/rollout" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" instanceReconciler "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -83,7 +83,7 @@ type ClusterReconciler struct { DiscoveryClient discovery.DiscoveryInterface Scheme *runtime.Scheme Recorder record.EventRecorder - InstanceClient instance.Client + InstanceClient remote.InstanceClient Plugins repository.Interface rolloutManager *rolloutManager.Manager @@ -96,7 +96,7 @@ func NewClusterReconciler( plugins repository.Interface, ) *ClusterReconciler { return &ClusterReconciler{ - InstanceClient: instance.NewStatusClient(), + InstanceClient: remote.NewClient().Instance(), DiscoveryClient: discoveryClient, Client: operatorclient.NewExtendedClient(mgr.GetClient()), Scheme: mgr.GetScheme(), diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go index cd3e6ac43a..b29dee1929 100644 --- 
a/internal/controller/cluster_upgrade.go +++ b/internal/controller/cluster_upgrade.go @@ -30,9 +30,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -622,7 +622,7 @@ func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, e } envConfig := specs.CreatePodEnvConfig(*cluster, pod.Name) gracePeriod := int64(cluster.GetMaxStopDelay()) - tlsEnabled := instance.GetStatusSchemeFromPod(pod).IsHTTPS() + tlsEnabled := remote.GetStatusSchemeFromPod(pod).IsHTTPS() targetPodSpec := specs.CreateClusterPodSpec(pod.Name, *cluster, envConfig, gracePeriod, tlsEnabled) // the bootstrap init-container could change image after an operator upgrade. diff --git a/internal/management/cache/cache.go b/internal/management/cache/cache.go index ad1b559e8c..4bdc0519f4 100644 --- a/internal/management/cache/cache.go +++ b/internal/management/cache/cache.go @@ -14,23 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package cache contains the constants and functions for reading/writing to the process local cache -// some specific supported objects package cache import ( "sync" ) -const ( - // ClusterKey is the key to be used to access the cached cluster - ClusterKey = "cluster" - // WALArchiveKey is the key to be used to access the cached envs for wal-archive - WALArchiveKey = "wal-archive" - // WALRestoreKey is the key to be used to access the cached envs for wal-restore - WALRestoreKey = "wal-restore" -) - var cache sync.Map // Store write an object into the local cache diff --git a/internal/management/cache/doc.go b/internal/management/cache/doc.go new file mode 100644 index 0000000000..41acc1351a --- /dev/null +++ b/internal/management/cache/doc.go @@ -0,0 +1,19 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache contains the constants and functions for reading/writing to the process local cache +// some specific supported objects +package cache diff --git a/internal/management/cache/keys.go b/internal/management/cache/keys.go new file mode 100644 index 0000000000..2792f882c3 --- /dev/null +++ b/internal/management/cache/keys.go @@ -0,0 +1,26 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +const ( + // ClusterKey is the key to be used to access the cached cluster + ClusterKey = "cluster" + // WALArchiveKey is the key to be used to access the cached envs for wal-archive + WALArchiveKey = "wal-archive" + // WALRestoreKey is the key to be used to access the cached envs for wal-restore + WALRestoreKey = "wal-restore" +) diff --git a/internal/plugin/resources/instance.go b/internal/plugin/resources/instance.go index 03d604e67e..612fb37034 100644 --- a/internal/plugin/resources/instance.go +++ b/internal/plugin/resources/instance.go @@ -33,9 +33,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -103,7 +103,7 @@ func getInstanceStatusFromPod( CoreV1(). Pods(pod.Namespace). ProxyGet( - instance.GetStatusSchemeFromPod(&pod).ToString(), + remote.GetStatusSchemeFromPod(&pod).ToString(), pod.Name, strconv.Itoa(int(url.StatusPort)), url.PathPgStatus, diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go index 251ca52f5a..ccf24efd36 100644 --- a/pkg/management/postgres/archiver/archiver.go +++ b/pkg/management/postgres/archiver/archiver.go @@ -36,8 +36,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -165,7 +165,7 @@ func internalRun( } // Get environment from cache - env, err := cacheClient.GetEnv(cache.WALArchiveKey) + env, err := local.NewClient().Cache().GetEnv(cache.WALArchiveKey) if err != nil { return fmt.Errorf("failed to get envs: %w", err) } diff --git a/pkg/management/postgres/webserver/backup_client.go b/pkg/management/postgres/webserver/backup_client.go deleted file mode 100644 index 549415d82c..0000000000 --- a/pkg/management/postgres/webserver/backup_client.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webserver - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - "github.com/cloudnative-pg/machinery/pkg/log" - corev1 "k8s.io/api/core/v1" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" -) - -// backupClient a client to interact with the instance backup endpoints -type backupClient struct { - cli *http.Client -} - -// BackupClient is a struct capable of interacting with the instance backup endpoints -type BackupClient interface { - StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*Response[BackupResultData], error) - Start(ctx context.Context, pod *corev1.Pod, sbq StartBackupRequest) error - Stop(ctx context.Context, pod *corev1.Pod, sbq StopBackupRequest) error -} - -// NewBackupClient creates a client capable of interacting with the instance backup endpoints -func NewBackupClient() BackupClient { - const connectionTimeout = 2 * time.Second - const requestTimeout = 30 * time.Second - - return &backupClient{cli: resources.NewHTTPClient(connectionTimeout, requestTimeout)} -} - -// StatusWithErrors retrieves the current status of the backup. -// Returns the response body in case there is an error in the request -func (c *backupClient) StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*Response[BackupResultData], error) { - scheme := instance.GetStatusSchemeFromPod(pod) - httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) - req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil) - if err != nil { - return nil, err - } - - return executeRequestWithError[BackupResultData](ctx, c.cli, req, true) -} - -// Start runs the pg_start_backup -func (c *backupClient) Start(ctx context.Context, pod *corev1.Pod, sbq StartBackupRequest) error { - scheme := instance.GetStatusSchemeFromPod(pod) - httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) - - // Marshalling the payload to JSON - jsonBody, err := json.Marshal(sbq) - if err != nil { - return fmt.Errorf("failed to marshal start payload: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, "POST", httpURL, bytes.NewReader(jsonBody)) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - - _, err = executeRequestWithError[struct{}](ctx, c.cli, req, false) - return err -} - -// Stop runs the command pg_stop_backup -func (c *backupClient) Stop(ctx context.Context, pod *corev1.Pod, sbq StopBackupRequest) error { - scheme := instance.GetStatusSchemeFromPod(pod) - httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) - // Marshalling the payload to JSON - jsonBody, err := json.Marshal(sbq) - if err != nil { - return fmt.Errorf("failed to marshal stop payload: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, "PUT", httpURL, bytes.NewReader(jsonBody)) - if err != nil { - return err - } - _, err = executeRequestWithError[BackupResultData](ctx, c.cli, req, false) - 
return err -} - -func executeRequestWithError[T any]( - ctx context.Context, - cli *http.Client, - req *http.Request, - ignoreBodyErrors bool, -) (*Response[T], error) { - contextLogger := log.FromContext(ctx) - - resp, err := cli.Do(req) - if err != nil { - return nil, fmt.Errorf("while executing http request: %w", err) - } - - defer func() { - if err := resp.Body.Close(); err != nil { - contextLogger.Error(err, "while closing response body") - } - }() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("while reading the response body: %w", err) - } - - if resp.StatusCode == http.StatusInternalServerError { - return nil, fmt.Errorf("encountered an internal server error status code 500 with body: %s", string(body)) - } - - var result Response[T] - if err := json.Unmarshal(body, &result); err != nil { - return nil, fmt.Errorf("while unmarshalling the body, body: %s err: %w", string(body), err) - } - if result.Error != nil && !ignoreBodyErrors { - return nil, fmt.Errorf("body contained an error code: %s and message: %s", - result.Error.Code, result.Error.Message) - } - - return &result, nil -} diff --git a/pkg/resources/client.go b/pkg/management/postgres/webserver/client/common/client.go similarity index 98% rename from pkg/resources/client.go rename to pkg/management/postgres/webserver/client/common/client.go index f66cb51446..06bee98158 100644 --- a/pkg/resources/client.go +++ b/pkg/management/postgres/webserver/client/common/client.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package resources +package common import ( "context" diff --git a/pkg/resources/instance/doc.go b/pkg/management/postgres/webserver/client/common/doc.go similarity index 84% rename from pkg/resources/instance/doc.go rename to pkg/management/postgres/webserver/client/common/doc.go index 975dc071f9..35dc461c8e 100644 --- a/pkg/resources/instance/doc.go +++ b/pkg/management/postgres/webserver/client/common/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package instance contains the client capable of querying the HTTP instances endpoints -package instance +// Package common provides common utilities for the webserver client. +package common diff --git a/pkg/management/postgres/webserver/client/local/backup.go b/pkg/management/postgres/webserver/client/local/backup.go new file mode 100644 index 0000000000..8a7d4eb57f --- /dev/null +++ b/pkg/management/postgres/webserver/client/local/backup.go @@ -0,0 +1,98 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package local + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + + corev1 "k8s.io/api/core/v1" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" +) + +// BackupClient is the interface to interact with the backup endpoints +type BackupClient interface { + StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*webserver.Response[webserver.BackupResultData], error) + Start(ctx context.Context, pod *corev1.Pod, sbq webserver.StartBackupRequest) error + Stop(ctx context.Context, pod *corev1.Pod, sbq webserver.StopBackupRequest) error +} + +// backupClientImpl a client to interact with the instance backup endpoints +type backupClientImpl struct { + cli *http.Client +} + +// StatusWithErrors retrieves the current status of the backup. +// Returns the response body in case there is an error in the request +func (c *backupClientImpl) StatusWithErrors( + ctx context.Context, + pod *corev1.Pod, +) (*webserver.Response[webserver.BackupResultData], error) { + scheme := remote.GetStatusSchemeFromPod(pod) + httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) + req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil) + if err != nil { + return nil, err + } + + return executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, true) +} + +// Start runs the pg_start_backup +func (c *backupClientImpl) Start(ctx context.Context, pod *corev1.Pod, sbq webserver.StartBackupRequest) error { + scheme := remote.GetStatusSchemeFromPod(pod) + httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) + + // Marshalling the payload to JSON + jsonBody, err := json.Marshal(sbq) + if err != nil { + return fmt.Errorf("failed to marshal start payload: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", httpURL, bytes.NewReader(jsonBody)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + _, err = executeRequestWithError[struct{}](ctx, c.cli, req, false) + return err +} + +// Stop runs the command pg_stop_backup +func (c *backupClientImpl) Stop(ctx context.Context, pod *corev1.Pod, sbq webserver.StopBackupRequest) error { + scheme := remote.GetStatusSchemeFromPod(pod) + httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) + // Marshalling the payload to JSON + jsonBody, err := json.Marshal(sbq) + if err != nil { + return fmt.Errorf("failed to marshal stop payload: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "PUT", httpURL, bytes.NewReader(jsonBody)) + if err != nil { + return err + } + _, err = executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, false) + return err +} diff --git a/internal/management/cache/client/client.go b/pkg/management/postgres/webserver/client/local/cache.go similarity index 75% rename from internal/management/cache/client/client.go rename to pkg/management/postgres/webserver/client/local/cache.go index 4c3486c579..a950018cbc 100644 --- a/internal/management/cache/client/client.go +++ b/pkg/management/postgres/webserver/client/local/cache.go @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package client contains the constants and functions for reading supported objects from cache -// or building them in case of cache miss. -package client +package local import ( "encoding/json" @@ -32,9 +30,19 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" ) +// CacheClient is the interface to interact with the cache endpoints +type CacheClient interface { + GetCluster() (*apiv1.Cluster, error) + GetEnv(key string) ([]string, error) +} + +type cacheClientImpl struct { + cli *http.Client +} + // GetCluster gets the required cluster from cache -func GetCluster() (*apiv1.Cluster, error) { - bytes, err := httpCacheGet(cache.ClusterKey) +func (c *cacheClientImpl) GetCluster() (*apiv1.Cluster, error) { + bytes, err := c.httpCacheGet(cache.ClusterKey) if err != nil { return nil, err } @@ -49,8 +57,8 @@ func GetCluster() (*apiv1.Cluster, error) { } // GetEnv gets the environment variables from cache -func GetEnv(key string) ([]string, error) { - bytes, err := httpCacheGet(key) +func (c *cacheClientImpl) GetEnv(key string) ([]string, error) { + bytes, err := c.httpCacheGet(key) if err != nil { return nil, err } @@ -66,11 +74,11 @@ func GetEnv(key string) ([]string, error) { // httpCacheGet retrieves an object from the cache. // In case of failures it retries for a while before giving up -func httpCacheGet(urlPath string) ([]byte, error) { +func (c *cacheClientImpl) httpCacheGet(urlPath string) ([]byte, error) { var bytes []byte err := retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, func() error { var err error - bytes, err = get(urlPath) + bytes, err = c.get(urlPath) return err }) if err != nil { @@ -80,8 +88,8 @@ func httpCacheGet(urlPath string) ([]byte, error) { return bytes, nil } -func get(urlPath string) ([]byte, error) { - resp, err := http.Get(url.Local(url.PathCache+urlPath, url.LocalPort)) +func (c *cacheClientImpl) get(urlPath string) ([]byte, error) { + resp, err := c.cli.Get(url.Local(url.PathCache+urlPath, url.LocalPort)) if err != nil { return nil, err } diff --git a/pkg/management/postgres/webserver/client/local/cluster.go b/pkg/management/postgres/webserver/client/local/cluster.go new file mode 100644 index 0000000000..d1229d4f55 --- /dev/null +++ b/pkg/management/postgres/webserver/client/local/cluster.go @@ -0,0 +1,71 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package local + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + + "github.com/cloudnative-pg/machinery/pkg/log" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" +) + +// ClusterClient is the interface to interact with the uncategorized endpoints +type ClusterClient interface { + // SetWALArchiveStatusCondition sets the wal-archive status condition. + // An empty errMessage means that the archive process was successful. + // Returns any error encountered during the request. 
+	SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error
+}
+
+// clusterClientImpl is a client to interact with the uncategorized endpoints
+type clusterClientImpl struct {
+	cli *http.Client
+}
+
+func (c *clusterClientImpl) SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error {
+	contextLogger := log.FromContext(ctx).WithValues("endpoint", url.PathWALArchiveStatusCondition)
+
+	asr := webserver.ArchiveStatusRequest{
+		Error: errMessage,
+	}
+
+	encoded, err := json.Marshal(&asr)
+	if err != nil {
+		return err
+	}
+
+	resp, err := http.Post(
+		url.Local(url.PathWALArchiveStatusCondition, url.LocalPort),
+		"application/json",
+		bytes.NewBuffer(encoded),
+	)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if errClose := resp.Body.Close(); errClose != nil {
+			contextLogger.Error(errClose, "while closing response body")
+		}
+	}()
+
+	return nil
+}
diff --git a/pkg/management/postgres/webserver/client/local/doc.go b/pkg/management/postgres/webserver/client/local/doc.go
new file mode 100644
index 0000000000..1fdc0bca97
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package local provides a client to interact with the local webserver endpoints.
+package local
diff --git a/pkg/management/postgres/webserver/client/local/local.go b/pkg/management/postgres/webserver/client/local/local.go
new file mode 100644
index 0000000000..5b91d3e41a
--- /dev/null
+++ b/pkg/management/postgres/webserver/client/local/local.go
@@ -0,0 +1,62 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package local + +import ( + "time" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common" +) + +// Client is an entity capable of interacting with the local webserver endpoints +type Client interface { + Backup() BackupClient + Cache() CacheClient + Cluster() ClusterClient +} + +type localClient struct { + backup BackupClient + cache CacheClient + cluster ClusterClient +} + +// NewClient returns a new instance of Client +func NewClient() Client { + const connectionTimeout = 2 * time.Second + const requestTimeout = 30 * time.Second + + standardClient := common.NewHTTPClient(connectionTimeout, requestTimeout) + + return &localClient{ + backup: &backupClientImpl{cli: standardClient}, + cache: &cacheClientImpl{cli: standardClient}, + cluster: &clusterClientImpl{cli: standardClient}, + } +} + +func (c *localClient) Backup() BackupClient { + return c.backup +} + +func (c *localClient) Cache() CacheClient { + return c.cache +} + +func (c *localClient) Cluster() ClusterClient { + return c.cluster +} diff --git a/pkg/management/postgres/webserver/client/local/request.go b/pkg/management/postgres/webserver/client/local/request.go new file mode 100644 index 0000000000..efc3a2c7c5 --- /dev/null +++ b/pkg/management/postgres/webserver/client/local/request.go @@ -0,0 +1,69 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
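With the aggregation above in place, callers obtain all three sub-clients from a single entry point. A minimal usage sketch, assuming the code runs inside the instance pod where the local webserver listens (the function `reportArchiveSuccess` is hypothetical; the client methods are the ones defined in this patch):

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local"
)

// reportArchiveSuccess marks the wal-archive condition as successful
// (an empty error message) and reads the Cluster back from the local cache.
func reportArchiveSuccess(ctx context.Context) error {
	cli := local.NewClient()

	if err := cli.Cluster().SetWALArchiveStatusCondition(ctx, ""); err != nil {
		return fmt.Errorf("while setting the wal-archive status condition: %w", err)
	}

	cluster, err := cli.Cache().GetCluster()
	if err != nil {
		return fmt.Errorf("while reading the cluster from the cache: %w", err)
	}

	fmt.Printf("wal-archive condition set for cluster %q\n", cluster.Name)
	return nil
}
```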
+*/ + +package local + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/cloudnative-pg/machinery/pkg/log" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" +) + +func executeRequestWithError[T any]( + ctx context.Context, + cli *http.Client, + req *http.Request, + ignoreBodyErrors bool, +) (*webserver.Response[T], error) { + contextLogger := log.FromContext(ctx) + + resp, err := cli.Do(req) + if err != nil { + return nil, fmt.Errorf("while executing http request: %w", err) + } + + defer func() { + if err := resp.Body.Close(); err != nil { + contextLogger.Error(err, "while closing response body") + } + }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("while reading the response body: %w", err) + } + + if resp.StatusCode == http.StatusInternalServerError { + return nil, fmt.Errorf("encountered an internal server error status code 500 with body: %s", string(body)) + } + + var result webserver.Response[T] + if err := json.Unmarshal(body, &result); err != nil { + return nil, fmt.Errorf("while unmarshalling the body, body: %s err: %w", string(body), err) + } + if result.Error != nil && !ignoreBodyErrors { + return nil, fmt.Errorf("body contained an error code: %s and message: %s", + result.Error.Code, result.Error.Message) + } + + return &result, nil +} diff --git a/pkg/management/postgres/webserver/client/remote/doc.go b/pkg/management/postgres/webserver/client/remote/doc.go new file mode 100644 index 0000000000..f5a83f43e9 --- /dev/null +++ b/pkg/management/postgres/webserver/client/remote/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package remote contains the client capable of querying the webserver remote endpoint. +package remote diff --git a/pkg/resources/instance/client.go b/pkg/management/postgres/webserver/client/remote/instance.go similarity index 91% rename from pkg/resources/instance/client.go rename to pkg/management/postgres/webserver/client/remote/instance.go index 1c80752e69..b83111f850 100644 --- a/pkg/resources/instance/client.go +++ b/pkg/management/postgres/webserver/client/remote/instance.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
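A note on the `ignoreBodyErrors` flag of the helper above: with `false`, an error reported in the response body fails the call outright; with `true`, the decoded response is returned so the caller can inspect both the payload and the error, which is what `StatusWithErrors` relies on. A package-local sketch of a tolerant caller, assuming the backup status endpoint shown earlier (the function name and the fixed `https` scheme are illustrative):

```go
package local

import (
	"context"
	"net/http"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/url"
)

// fetchBackupStatus tolerates application-level errors in the body
// (ignoreBodyErrors=true), leaving Response.Error handling to the caller.
func fetchBackupStatus(
	ctx context.Context,
	cli *http.Client,
	podIP string,
) (*webserver.Response[webserver.BackupResultData], error) {
	httpURL := url.Build("https", podIP, url.PathPgModeBackup, url.StatusPort)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, httpURL, nil)
	if err != nil {
		return nil, err
	}

	return executeRequestWithError[webserver.BackupResultData](ctx, cli, req, true)
}
```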
*/ -package instance +package remote import ( "context" @@ -34,9 +34,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -55,8 +55,8 @@ var requestRetry = wait.Backoff{ Jitter: 0.1, } -// Client a http client capable of querying the instance HTTP endpoints -type Client interface { +// InstanceClient a http client capable of querying the instance HTTP endpoints +type InstanceClient interface { // GetStatusFromInstances gets the replication status from the PostgreSQL instances, // the returned list is sorted in order to have the primary as the first element // and the other instances in their election order @@ -83,7 +83,7 @@ type Client interface { ArchivePartialWAL(context.Context, *corev1.Pod) (string, error) } -type statusClient struct { +type instanceClientImpl struct { *http.Client } @@ -97,18 +97,18 @@ func (i StatusError) Error() string { return fmt.Sprintf("error status code: %v, body: %v", i.StatusCode, i.Body) } -// NewStatusClient returns a client capable of querying the instance HTTP endpoints -func NewStatusClient() Client { +// newInstanceClient returns a client capable of querying the instance HTTP endpoints +func newInstanceClient() InstanceClient { const connectionTimeout = 2 * time.Second const requestTimeout = 10 * time.Second - return &statusClient{Client: resources.NewHTTPClient(connectionTimeout, requestTimeout)} + return &instanceClientImpl{Client: common.NewHTTPClient(connectionTimeout, requestTimeout)} } // extractInstancesStatus extracts the status of the underlying PostgreSQL instance from // the requested Pod, via the instance manager. 
In case of failure, errors are passed // in the result list -func (r statusClient) extractInstancesStatus( +func (r instanceClientImpl) extractInstancesStatus( ctx context.Context, activePods []corev1.Pod, ) postgres.PostgresqlStatusList { @@ -123,7 +123,7 @@ func (r statusClient) extractInstancesStatus( // getReplicaStatusFromPodViaHTTP retrieves the status of PostgreSQL pod via HTTP, retrying // the request if some communication error is encountered -func (r *statusClient) getReplicaStatusFromPodViaHTTP( +func (r *instanceClientImpl) getReplicaStatusFromPodViaHTTP( ctx context.Context, pod corev1.Pod, ) (result postgres.PostgresqlStatus) { @@ -161,7 +161,7 @@ func (r *statusClient) getReplicaStatusFromPodViaHTTP( return result } -func (r *statusClient) GetStatusFromInstances( +func (r *instanceClientImpl) GetStatusFromInstances( ctx context.Context, pods corev1.PodList, ) postgres.PostgresqlStatusList { @@ -184,7 +184,7 @@ func (r *statusClient) GetStatusFromInstances( return status } -func (r *statusClient) GetPgControlDataFromInstance( +func (r *instanceClientImpl) GetPgControlDataFromInstance( ctx context.Context, pod *corev1.Pod, ) (string, error) { @@ -231,7 +231,7 @@ func (r *statusClient) GetPgControlDataFromInstance( } // UpgradeInstanceManager upgrades the instance manager to the passed availableArchitecture -func (r *statusClient) UpgradeInstanceManager( +func (r *instanceClientImpl) UpgradeInstanceManager( ctx context.Context, pod *corev1.Pod, availableArchitecture *utils.AvailableArchitecture, @@ -293,7 +293,7 @@ func isEOF(err error) bool { } // rawInstanceStatusRequest retrieves the status of PostgreSQL pods via an HTTP request with GET method. -func (r *statusClient) rawInstanceStatusRequest( +func (r *instanceClientImpl) rawInstanceStatusRequest( ctx context.Context, pod corev1.Pod, ) (result postgres.PostgresqlStatus) { @@ -376,7 +376,7 @@ func GetStatusSchemeFromPod(pod *corev1.Pod) HTTPScheme { return schemeHTTP } -func (r *statusClient) ArchivePartialWAL(ctx context.Context, pod *corev1.Pod) (string, error) { +func (r *instanceClientImpl) ArchivePartialWAL(ctx context.Context, pod *corev1.Pod) (string, error) { contextLogger := log.FromContext(ctx) statusURL := url.Build( diff --git a/pkg/management/postgres/webserver/client/remote/remote.go b/pkg/management/postgres/webserver/client/remote/remote.go new file mode 100644 index 0000000000..2b6a375e0e --- /dev/null +++ b/pkg/management/postgres/webserver/client/remote/remote.go @@ -0,0 +1,37 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
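The `remote` package, completed just below, mirrors the same facade pattern for cross-pod calls; reconcilers now receive a `remote.InstanceClient` instead of building one themselves. A hypothetical helper sketching the intended call path (only `remote.NewClient().Instance()` and `GetStatusFromInstances` come from this patch):

```go
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
)

// listInstanceStatuses returns the replication status list through the
// remote facade; the primary is sorted first, per the interface contract.
func listInstanceStatuses(ctx context.Context, pods corev1.PodList) postgres.PostgresqlStatusList {
	instanceClient := remote.NewClient().Instance()

	return instanceClient.GetStatusFromInstances(ctx, pods)
}
```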
+*/ + +package remote + +// Client is the interface to interact with the remote webserver +type Client interface { + Instance() InstanceClient +} + +type remoteClientImpl struct { + instance InstanceClient +} + +func (r *remoteClientImpl) Instance() InstanceClient { + return r.instance +} + +// NewClient creates a new remote client +func NewClient() Client { + return &remoteClientImpl{ + instance: newInstanceClient(), + } +} diff --git a/pkg/management/postgres/webserver/local_client.go b/pkg/management/postgres/webserver/local_client.go deleted file mode 100644 index 4f2ec068f5..0000000000 --- a/pkg/management/postgres/webserver/local_client.go +++ /dev/null @@ -1,63 +0,0 @@ -package webserver - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "time" - - "github.com/cloudnative-pg/machinery/pkg/log" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" -) - -// LocalClient is an entity capable of interacting with the local webserver endpoints -type LocalClient interface { - // SetWALArchiveStatusCondition sets the wal-archive status condition. - // An empty errMessage means that the archive process was successful. - // Returns any error encountered during the request. - SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error -} - -type localClient struct { - cli *http.Client -} - -// NewLocalClient returns a new instance of LocalClient -func NewLocalClient() LocalClient { - const connectionTimeout = 2 * time.Second - const requestTimeout = 30 * time.Second - - return &localClient{cli: resources.NewHTTPClient(connectionTimeout, requestTimeout)} -} - -func (c *localClient) SetWALArchiveStatusCondition(ctx context.Context, errMessage string) error { - contextLogger := log.FromContext(ctx).WithValues("endpoint", url.PathWALArchiveStatusCondition) - - asr := ArchiveStatusRequest{ - Error: errMessage, - } - - encoded, err := json.Marshal(&asr) - if err != nil { - return err - } - - resp, err := http.Post( - url.Local(url.PathWALArchiveStatusCondition, url.LocalPort), - "application/json", - bytes.NewBuffer(encoded), - ) - if err != nil { - return err - } - defer func() { - if errClose := resp.Body.Close(); errClose != nil { - contextLogger.Error(err, "while closing response body") - } - }() - - return nil -} diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index 64f469f2bb..1ae00b82d1 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -29,9 +29,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" m "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/metrics" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" postgresconf "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) @@ -92,7 +92,7 @@ func NewExporter(instance *postgres.Instance) *Exporter { return &Exporter{ instance: instance, Metrics: newMetrics(), - getCluster: cacheClient.GetCluster, + getCluster: local.NewClient().Cache().GetCluster, } } diff --git 
a/pkg/management/postgres/webserver/metricserver/wal.go b/pkg/management/postgres/webserver/metricserver/wal.go index 50280a21cc..47a502a6d8 100644 --- a/pkg/management/postgres/webserver/metricserver/wal.go +++ b/pkg/management/postgres/webserver/metricserver/wal.go @@ -24,8 +24,8 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" - cacheClient "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -220,7 +220,7 @@ func collectPGWalSettings(exporter *Exporter, db *sql.DB) error { } func getWalVolumeSize() float64 { - cluster, err := cacheClient.GetCluster() + cluster, err := local.NewClient().Cache().GetCluster() if err != nil || !cluster.ShouldCreateWalArchiveVolume() { return 0 } diff --git a/pkg/reconciler/backup/volumesnapshot/online.go b/pkg/reconciler/backup/volumesnapshot/online.go index e1d74097a1..62de9f4bb3 100644 --- a/pkg/reconciler/backup/volumesnapshot/online.go +++ b/pkg/reconciler/backup/volumesnapshot/online.go @@ -26,14 +26,15 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" ) type onlineExecutor struct { - backupClient webserver.BackupClient + backupClient local.BackupClient } func newOnlineExecutor() *onlineExecutor { - return &onlineExecutor{backupClient: webserver.NewBackupClient()} + return &onlineExecutor{backupClient: local.NewClient().Backup()} } func (o *onlineExecutor) finalize( diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler.go b/pkg/reconciler/backup/volumesnapshot/reconciler.go index 4d2d633ac4..730fa8f241 100644 --- a/pkg/reconciler/backup/volumesnapshot/reconciler.go +++ b/pkg/reconciler/backup/volumesnapshot/reconciler.go @@ -35,8 +35,8 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -44,7 +44,7 @@ import ( type Reconciler struct { cli client.Client recorder record.EventRecorder - instanceStatusClient instance.Client + instanceStatusClient remote.InstanceClient } // ExecutorBuilder is a struct capable of creating a Reconciler @@ -61,7 +61,7 @@ func NewReconcilerBuilder( executor: Reconciler{ cli: cli, recorder: recorder, - instanceStatusClient: instance.NewStatusClient(), + instanceStatusClient: remote.NewClient().Instance(), }, } } diff --git a/pkg/reconciler/replicaclusterswitch/reconciler.go b/pkg/reconciler/replicaclusterswitch/reconciler.go index afb00515fb..15342e9adc 100644 --- a/pkg/reconciler/replicaclusterswitch/reconciler.go +++ b/pkg/reconciler/replicaclusterswitch/reconciler.go @@ -29,8 +29,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - 
"github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -39,7 +39,7 @@ func Reconcile( ctx context.Context, cli client.Client, cluster *apiv1.Cluster, - instanceClient instance.Client, + instanceClient remote.InstanceClient, instances postgres.PostgresqlStatusList, ) (*ctrl.Result, error) { if !cluster.IsReplica() { @@ -150,7 +150,7 @@ func reconcileDemotionToken( ctx context.Context, cli client.Client, cluster *apiv1.Cluster, - instanceClient instance.Client, + instanceClient remote.InstanceClient, instances postgres.PostgresqlStatusList, ) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx).WithName("replica_cluster") diff --git a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go index 0af100575c..9b7413bf30 100644 --- a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go +++ b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go @@ -23,8 +23,8 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/instance" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -38,7 +38,7 @@ var errPostgresNotShutDown = fmt.Errorf("expected postmaster to be shut down") func generateDemotionToken( ctx context.Context, cluster *apiv1.Cluster, - instanceClient instance.Client, + instanceClient remote.InstanceClient, instancesStatus postgres.PostgresqlStatusList, ) (string, error) { contextLogger := log.FromContext(ctx).WithName("shutdown_checkpoint") From caacdbab0f75530599f3bc1efd6fcf8aa597cfa0 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Sat, 7 Dec 2024 10:36:23 +0100 Subject: [PATCH 225/836] docs: release notes for 1.25.0-rc1 (#6275) Closes #6228 Signed-off-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini --- docs/src/preview_version.md | 2 - docs/src/release_notes.md | 1 + docs/src/release_notes/v1.25.md | 70 +++++++++++++++++++++++++++++++++ docs/src/supported_releases.md | 12 +++--- 4 files changed, 77 insertions(+), 8 deletions(-) create mode 100644 docs/src/release_notes/v1.25.md diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index 1331deff22..8f354f67ae 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -35,12 +35,10 @@ are not backwards compatible and could be removed entirely. There are currently no preview versions available. - diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md index f10bd1cd6d..fe2a723507 100644 --- a/docs/src/release_notes.md +++ b/docs/src/release_notes.md @@ -2,6 +2,7 @@ History of user-visible changes for CloudNativePG, classified for each minor release. +- [CloudNativePG 1.25 - Release Candidate](release_notes/v1.25.md) - [CloudNativePG 1.24](release_notes/v1.24.md) - [CloudNativePG 1.23](release_notes/v1.23.md) diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md new file mode 100644 index 0000000000..4996532171 --- /dev/null +++ b/docs/src/release_notes/v1.25.md @@ -0,0 +1,70 @@ +# Release notes for CloudNativePG 1.25 + +History of user-visible changes in the 1.25 minor release of CloudNativePG. + +For a complete list of changes, please refer to the +[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.25) +on the release branch in GitHub. 
+ +## Version 1.25.0-rc1 + +**Release Date:** December 9, 2024 + +### Features + +- **Declarative Database Management**: Introduce the `Database` Custom Resource + Definition (CRD), enabling users to create and manage PostgreSQL databases + declaratively within a cluster. (#5325) + +- **Logical Replication Management**: Add `Publication` and `Subscription` CRDs + for declarative management of PostgreSQL logical replication. These simplify + replication setup and facilitate online migrations to CloudNativePG. (#5329) + +### Enhancements + +- Add the `dataDurability` option to the `.spec.postgresql.synchronous` stanza, + allowing users to choose between `required` (default) or `preferred` + durability in synchronous replication. (#5878) +- Enable customization of startup, liveness, and readiness probes through the + `.spec.probes` stanza. (#6266) +- Support additional `pg_dump` and `pg_restore` options to enhance database + import flexibility. (#6214) +- Add support for `maxConcurrentReconciles` in the CloudNativePG controller and + set the default to 10, improving the operator's ability to efficiently manage + larger deployments out of the box. (#5678) +- Add the `cnpg.io/userType` label to secrets generated for predefined users, + specifically `superuser` and `app`. (#4392) +- `cnpg` plugin: + - Enhance the `backup` command to support plugins. (#6045) + - Honor the `User-Agent` header in HTTP requests with the API server. (#6153) + +### Bug Fixes + +- Ensure the former primary flushes its WAL file queue to the archive before + re-synchronizing as a replica, reducing recovery times and enhancing data + consistency during failovers. (#6141) +- Clean the WAL volume along with the `PGDATA` volume during bootstrap. (#6265) +- Update the operator to set the cluster phase to `Unrecoverable` when + all previously generated `PersistentVolumeClaims` are missing. (#6170) +- Fix the parsing of the `synchronous_standby_names` GUC when + `.spec.postgresql.synchronous.method` is set to `first`. (#5955) +- Correct role changes to apply at the transaction level instead of the + database context. (#6064) +- Remove the `primary_slot_name` definition from the `override.conf` file on + the primary to ensure it is always empty. (#6219) +- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods + to enable seamless access to the `pgbouncer` virtual database using `psql` + from within the container. (#6247) +- Remove unnecessary updates to the Cluster status when verifying changes in + the image catalog. (#6277) +- `cnpg` plugin: + - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257) + - Avoid displaying physical backups block when empty with `status` command. (#5998) + +### Supported Versions + +- **Kubernetes**: 1.31, 1.30, and 1.29 +- **PostgreSQL**: 17, 16, 15, 14, and 13 + - Default image: PostgreSQL 17.2 + - Officially dropped support for PostgreSQL 12 + - PostgreSQL 13 support ends on November 12, 2025 diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 9717ee5cc0..afdffb9ebd 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -80,11 +80,11 @@ Git tags for versions are prefixed with `v`. 
## Support status of CloudNativePG releases -| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | -|-----------------|----------------------|---------------------|---------------------|-------------------------------|---------------------------|-----------------------------| -| 1.25.x | Yes | December XX, 2024 | ~ February, 2025 | 1.29, 1.30, 1.31, 1.32 (??) | 1.27, 1.28 | 13 - 17 | -| 1.24.x | Yes | August 22, 2024 | February XX, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 | -| main | No, development only | | | | | 13 - 17 | +| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | +|-----------------|----------------------|----------------|---------------------|-------------------------------|---------------------------|-----------------------------| +| 1.25.x | No (RC) | Dec XX, 2024 | ~ May/Jun, 2025 | 1.29, 1.30, 1.31, 1.32 (!) | 1.27, 1.28 | 13 - 17 | +| 1.24.x | Yes | Aug 22, 2024 | Feb XX, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 | +| main | No, development only | | | | | 13 - 17 | @@ -121,7 +121,7 @@ version of PostgreSQL, we might not be able to help you. | Version | Release date | End of life | |-----------------|-----------------------|---------------------------| -| 1.25.0 | Nov/Dec, 2024 | May/Jun, 2025 | +| 1.25.0 | Dec, 2024 | May/Jun, 2025 | | 1.26.0 | Mar, 2025 | Aug/Sep, 2025 | | 1.27.0 | Jun, 2025 | Dec, 2025 | From 480f80593e922d01be27be25e71db8992c57bc9a Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Sat, 7 Dec 2024 13:49:33 +0100 Subject: [PATCH 226/836] docs(security): mention new CRDs (#6296) Mention `Database`, `Publication`, and `Subscription` CRDs in the security page. Closes #6241 Signed-off-by: Gabriele Bartolini --- docs/src/security.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/security.md b/docs/src/security.md index 6eab222826..ec14f35d46 100644 --- a/docs/src/security.md +++ b/docs/src/security.md @@ -113,8 +113,8 @@ more about these roles, you can use the `kubectl describe clusterrole` or The above permissions are exclusively reserved for the operator's service account to interact with the Kubernetes API server. They are not directly accessible by the users of the operator that interact only with `Cluster`, - `Pooler`, `Backup`, `ScheduledBackup`, `ImageCatalog` and - `ClusterImageCatalog` resources. + `Pooler`, `Backup`, `ScheduledBackup`, `Database`, `Publication`, + `Subscription`, `ImageCatalog` and `ClusterImageCatalog` resources. 
Below we provide some examples and, most importantly, the reasons why CloudNativePG requires full or partial management of standard Kubernetes From 83222ae319d4a4e3e79568831a0358d0f2e4696a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 11:08:23 +0100 Subject: [PATCH 227/836] Version tag to 1.25.0-rc1 (#6299) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- docs/src/installation_upgrade.md | 6 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.25.0-rc1.yaml | 17645 +++++++++++++++++++++++++++++ 4 files changed, 17666 insertions(+), 21 deletions(-) create mode 100644 releases/cnpg-1.25.0-rc1.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index c948a61148..182ae94d44 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -7,12 +7,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.25.0-rc1.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.25.0-rc1.yaml ``` You can verify that with: @@ -72,7 +72,7 @@ specific minor release, you can just run: ```sh curl -sSfL \ - https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.24/manifests/operator-manifest.yaml | \ + https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.25/manifests/operator-manifest.yaml | \ kubectl apply --server-side -f - ``` diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 2b430dd9e0..45801e223c 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -30,11 +30,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.24.1 release of the plugin, for an Intel based +For example, let's install the 1.25.0-rc1 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0-rc1/kubectl-cnpg_1.25.0-rc1_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -45,17 +45,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.24.1) ... -Setting up cnpg (1.24.1) ... +Unpacking cnpg (1.25.0-rc1) ... +Setting up cnpg (1.25.0-rc1) ... ``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.24.1 release for an +As in the example for `.rpm` packages, let's install the 1.25.0-rc1 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. 
```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/kubectl-cnpg_1.24.1_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0-rc1/kubectl-cnpg_1.25.0-rc1_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -69,7 +69,7 @@ Dependencies resolved. Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.24.1-1 @commandline 20 M + cnpg x86_64 1.25.0-rc1-1 @commandline 20 M Transaction Summary ==================================================================================================== @@ -277,9 +277,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.24.1 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.25.0-rc1 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -333,9 +333,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.24.1 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.1 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.25.0-rc1 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -558,12 +558,12 @@ Archive: report_operator_.zip ```output ====== Begin of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0-rc1","build":{"Version":"1.25.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.24.1","build":{"Version":"1.24.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0-rc1","build":{"Version":"1.25.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 6189bdad1f..91f8dcc30e 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -20,13 +20,13 @@ package versions const ( // Version is the version of the operator - Version = "1.24.1" + Version = "1.25.0-rc1" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.2" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.24.1" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1" ) // BuildInfo is a struct containing all the info about the build @@ -36,7 +36,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.24.1" + buildVersion = "1.25.0-rc1" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.25.0-rc1.yaml b/releases/cnpg-1.25.0-rc1.yaml new file mode 100644 index 0000000000..d69fecaf08 --- /dev/null +++ b/releases/cnpg-1.25.0-rc1.yaml @@ -0,0 +1,17645 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: Backup is the Schema for the backups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of 
this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. 
+ Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is tho role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+                        items:
+                          description: |-
+                            Defines a set of pods (namely those matching the labelSelector
+                            relative to the given namespace(s)) that this pod should be
+                            co-located (affinity) or not co-located (anti-affinity) with,
+                            where co-located is defined as running on a node whose value of
+                            the label with key <topologyKey> matches that of any node on which
+                            a pod of the set of pods is running
+                          properties:
+                            labelSelector:
+                              description: |-
+                                A label query over a set of resources, in this case pods.
+                                If it's null, this PodAffinityTerm matches with no Pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: |-
+                                      A label selector requirement is a selector that contains values, a key, and an operator that
+                                      relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          operator represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          values is an array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. This array is replaced during a strategic
+                                          merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: |-
+                                    matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                    operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                  type: object
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            matchLabelKeys:
+                              description: |-
+                                MatchLabelKeys is a set of pod label keys to select which pods will
+                                be taken into consideration. The keys are used to lookup values from the
+                                incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                to select the group of existing pods which pods will be taken into consideration
+                                for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                pod labels will be ignored. The default value is empty.
+                                The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            mismatchLabelKeys:
+                              description: |-
+                                MismatchLabelKeys is a set of pod label keys to select which pods will
+                                be taken into consideration. The keys are used to lookup values from the
+                                incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                to select the group of existing pods which pods will be taken into consideration
+                                for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                pod labels will be ignored. The default value is empty.
+                                The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+                                Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+                                This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            namespaceSelector:
+                              description: |-
+                                A label query over the set of namespaces that the term applies to.
+                                The term is applied to the union of the namespaces selected by this field
+                                and the ones listed in the namespaces field.
+                                null selector and null or empty namespaces list means "this pod's namespace".
+                                An empty selector ({}) matches all namespaces.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: |-
+                                      A label selector requirement is a selector that contains values, a key, and an operator that
+                                      relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          operator represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          values is an array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. This array is replaced during a strategic
+                                          merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: |-
+                                    matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                    operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                  type: object
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            namespaces:
+                              description: |-
+                                namespaces specifies a static list of namespace names that the term applies to.
+                                The term is applied to the union of the namespaces listed in this field
+                                and the ones selected by namespaceSelector.
+                                null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            topologyKey:
+                              description: |-
+                                This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+                                the labelSelector in the specified namespaces, where co-located is defined as running on a node
+                                whose value of the label with key topologyKey matches that of any node on which any of the
+                                selected pods is running.
+                                Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                        x-kubernetes-list-type: atomic
+                    type: object
+                  additionalPodAntiAffinity:
+                    description: |-
+                      AdditionalPodAntiAffinity allows specifying pod anti-affinity terms to be added to the ones generated
+                      by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false.
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: |-
+                          The scheduler will prefer to schedule pods to nodes that satisfy
+                          the anti-affinity expressions specified by this field, but it may choose
+                          a node that violates one or more of the expressions. The node that is
+                          most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+                          system may or may not try to eventually evict the pod from its node.
+                          When there are multiple elements, the lists of nodes corresponding to each
+                          podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                        items:
+                          description: |-
+                            Defines a set of pods (namely those matching the labelSelector
+                            relative to the given namespace(s)) that this pod should be
+                            co-located (affinity) or not co-located (anti-affinity) with,
+                            where co-located is defined as running on a node whose value of
+                            the label with key <topologyKey> matches that of any node on which
+                            a pod of the set of pods is running
+                          properties:
+                            labelSelector:
+                              description: |-
+                                A label query over a set of resources, in this case pods.
+                                If it's null, this PodAffinityTerm matches with no Pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: |-
+                                      A label selector requirement is a selector that contains values, a key, and an operator that
+                                      relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          operator represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          values is an array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. This array is replaced during a strategic
+                                          merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: |-
+                                    matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                    operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                  type: object
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            matchLabelKeys:
+                              description: |-
+                                MatchLabelKeys is a set of pod label keys to select which pods will
+                                be taken into consideration. The keys are used to lookup values from the
+                                incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                to select the group of existing pods which pods will be taken into consideration
+                                for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                pod labels will be ignored. The default value is empty.
+                                The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            mismatchLabelKeys:
+                              description: |-
+                                MismatchLabelKeys is a set of pod label keys to select which pods will
+                                be taken into consideration. The keys are used to lookup values from the
+                                incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                to select the group of existing pods which pods will be taken into consideration
+                                for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+                      More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: |-
+                          The scheduler will prefer to schedule pods to nodes that satisfy
+                          the affinity expressions specified by this field, but it may choose
+                          a node that violates one or more of the expressions. The node that is
+                          most preferred is the one with the greatest sum of weights, i.e.
+                          for each node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling affinity expressions, etc.),
+                          compute a sum by iterating through the elements of this field and adding
+                          "weight" to the sum if the node matches the corresponding matchExpressions; the
+                          node(s) with the highest sum are the most preferred.
+                        items:
+                          description: |-
+                            An empty preferred scheduling term matches all objects with implicit weight 0
+                            (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                          properties:
+                            preference:
+                              description: A node selector term, associated with the
+                                corresponding weight.
+                              properties:
+                                matchExpressions:
+                                  description: A list of node selector requirements
+                                    by node's labels.
+                                  items:
+                                    description: |-
+                                      A node selector requirement is a selector that contains values, a key, and an operator
+                                      that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          An array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. If the operator is Gt or Lt, the values
+                                          array must have a single element, which will be interpreted as an integer.
+                                          This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                matchFields:
+                                  description: A list of node selector requirements
+                                    by node's fields.
+                                  items:
+                                    description: |-
+                                      A node selector requirement is a selector that contains values, a key, and an operator
+                                      that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          An array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. If the operator is Gt or Lt, the values
+                                          array must have a single element, which will be interpreted as an integer.
+                                          This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            weight:
+                              description: Weight associated with matching the corresponding
+                                nodeSelectorTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - preference
+                          - weight
+                          type: object
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: |-
+                          If the affinity requirements specified by this field are not met at
+                          scheduling time, the pod will not be scheduled onto the node.
+                          If the affinity requirements specified by this field cease to be met
+                          at some point during pod execution (e.g. due to an update), the system
+                          may or may not try to eventually evict the pod from its node.
+                        properties:
+                          nodeSelectorTerms:
+                            description: Required. A list of node selector terms.
+                              The terms are ORed.
+                            items:
+                              description: |-
+                                A null or empty node selector term matches no objects. The requirements of
+                                them are ANDed.
+                                The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                              properties:
+                                matchExpressions:
+                                  description: A list of node selector requirements
+                                    by node's labels.
+                                  items:
+                                    description: |-
+                                      A node selector requirement is a selector that contains values, a key, and an operator
+                                      that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          An array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. If the operator is Gt or Lt, the values
+                                          array must have a single element, which will be interpreted as an integer.
+                                          This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                matchFields:
+                                  description: A list of node selector requirements
+                                    by node's fields.
+                                  items:
+                                    description: |-
+                                      A node selector requirement is a selector that contains values, a key, and an operator
+                                      that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: |-
+                                          Represents a key's relationship to a set of values.
+                                          Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                        type: string
+                                      values:
+                                        description: |-
+                                          An array of string values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                          the values array must be empty. If the operator is Gt or Lt, the values
+                                          array must have a single element, which will be interpreted as an integer.
+                                          This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            type: array
+                            x-kubernetes-list-type: atomic
+                        required:
+                        - nodeSelectorTerms
+                        type: object
+                        x-kubernetes-map-type: atomic
+                    type: object
+                  nodeSelector:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      NodeSelector is a map of key-value pairs used to define the nodes on which
+                      the pods can run.
+                      More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                    type: object
+                  podAntiAffinityType:
+                    description: |-
+                      PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+                      considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+                      "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+                      added if all the existing nodes don't match the required pod anti-affinity rule.
+                      More info:
+                      https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+                    type: string
+                  tolerations:
+                    description: |-
+                      Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+                      on tainted nodes.
+                      More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+                    items:
+                      description: |-
+                        The pod this Toleration is attached to tolerates any taint that matches
+                        the triple <key,value,effect> using the matching operator <operator>.
+                      properties:
+                        effect:
+                          description: |-
+                            Effect indicates the taint effect to match. Empty means match all taint effects.
+                            When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                          type: string
+                        key:
+                          description: |-
+                            Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                            If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                          type: string
+                        operator:
+                          description: |-
+                            Operator represents a key's relationship to the value.
+                            Valid operators are Exists and Equal. Defaults to Equal.
+                            Exists is equivalent to wildcard for value, so that a pod can
+                            tolerate all taints of a particular category.
+                          type: string
+                        tolerationSeconds:
+                          description: |-
+                            TolerationSeconds represents the period of time the toleration (which must be
+                            of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                            it is not set, which means tolerate the taint forever (do not evict). Zero and
+                            negative values will be treated as 0 (evict immediately) by the system.
+                          format: int64
+                          type: integer
+                        value:
+                          description: |-
+                            Value is the taint value the toleration matches to.
+                            If the operator is Exists, the value should be empty, otherwise just a regular string.
+                          type: string
+                      type: object
+                    type: array
+                  topologyKey:
+                    description: |-
+                      TopologyKey to use for anti-affinity configuration. See k8s documentation
+                      for more info on that
+                    type: string
+                type: object
+              backup:
+                description: The configuration to be used for backups
+                properties:
+                  barmanObjectStore:
+                    description: The configuration for the barman-cloud tool suite
+                    properties:
+                      azureCredentials:
+                        description: The credentials to use to upload data to Azure
+                          Blob Storage
+                        properties:
+                          connectionString:
+                            description: The connection string to be used
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          inheritFromAzureAD:
+                            description: Use the Azure AD based authentication without
+                              explicitly providing the keys.
+                            type: boolean
+                          storageAccount:
+                            description: The storage account where to upload data
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
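+                                # Illustrative sketch only, for the `affinity` stanza
+                                # closed earlier above: a three-instance cluster spread
+                                # across nodes. The values are assumptions, not defaults.
+                                #
+                                #   spec:
+                                #     instances: 3
+                                #     affinity:
+                                #       enablePodAntiAffinity: true
+                                #       podAntiAffinityType: required
+                                #       topologyKey: kubernetes.io/hostname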
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          storageKey:
+                            description: |-
+                              The storage account key to be used in conjunction
+                              with the storage account name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          storageSasToken:
+                            description: |-
+                              A shared-access-signature to be used in conjunction with
+                              the storage account name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                        type: object
+                      data:
+                        description: |-
+                          The configuration to be used to backup the data files.
+                          When not defined, base backup files will be stored uncompressed and may
+                          be unencrypted in the object store, according to the bucket default
+                          policy.
+                        properties:
+                          additionalCommandArgs:
+                            description: |-
+                              AdditionalCommandArgs represents additional arguments that can be appended
+                              to the 'barman-cloud-backup' command-line invocation. These arguments
+                              provide flexibility to customize the backup process further according to
+                              specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+                              behavior during execution.
+                            items:
+                              type: string
+                            type: array
+                          compression:
+                            description: |-
+                              Compress a backup file (a tar file per tablespace) while streaming it
+                              to the object store. Available options are empty string (no
+                              compression, default), `gzip`, `bzip2` or `snappy`.
+                            enum:
+                            - gzip
+                            - bzip2
+                            - snappy
+                            type: string
+                          encryption:
+                            description: |-
+                              Whether to force the encryption of files (if the bucket is
+                              not already configured for that).
+                              Allowed options are empty string (use the bucket policy, default),
+                              `AES256` and `aws:kms`
+                            enum:
+                            - AES256
+                            - aws:kms
+                            type: string
+                          immediateCheckpoint:
+                            description: |-
+                              Control whether the I/O workload for the backup initial checkpoint will
+                              be limited, according to the `checkpoint_completion_target` setting on
+                              the PostgreSQL server. If set to true, an immediate checkpoint will be
+                              used, meaning PostgreSQL will complete the checkpoint as soon as
+                              possible. `false` by default.
+                            type: boolean
+                          jobs:
+                            description: |-
+                              The number of parallel jobs to be used to upload the backup, defaults
+                              to 2
+                            format: int32
+                            minimum: 1
+                            type: integer
+                        type: object
+                      destinationPath:
+                        description: |-
+                          The path where to store the backup (i.e. s3://bucket/path/to/folder)
+                          this path, with different destination folders, will be used for WALs
+                          and for data
+                        minLength: 1
+                        type: string
+                      endpointCA:
+                        description: |-
+                          EndpointCA store the CA bundle of the barman endpoint.
+                          Useful when using self-signed certificates to avoid
+                          errors with certificate issuer and barman-cloud-wal-archive
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
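+                            # Illustrative sketch only, for the `data` options
+                            # documented above (compression, encryption, parallel
+                            # jobs); the values chosen here are assumptions.
+                            #
+                            #   data:
+                            #     compression: gzip
+                            #     encryption: AES256
+                            #     jobs: 2
+                            #     immediateCheckpoint: true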
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                      endpointURL:
+                        description: |-
+                          Endpoint to be used to upload data to the cloud,
+                          overriding the automatic endpoint discovery
+                        type: string
+                      googleCredentials:
+                        description: The credentials to use to upload data to Google
+                          Cloud Storage
+                        properties:
+                          applicationCredentials:
+                            description: The secret containing the Google Cloud Storage
+                              JSON file with the credentials
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          gkeEnvironment:
+                            description: |-
+                              If set to true, will presume that it's running inside a GKE environment,
+                              defaulting to false.
+                            type: boolean
+                        type: object
+                      historyTags:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          HistoryTags is a list of key value pairs that will be passed to the
+                          Barman --history-tags option.
+                        type: object
+                      s3Credentials:
+                        description: The credentials to use to upload data to S3
+                        properties:
+                          accessKeyId:
+                            description: The reference to the access key id
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          inheritFromIAMRole:
+                            description: Use the role based authentication without
+                              explicitly providing the keys.
+                            type: boolean
+                          region:
+                            description: The reference to the secret containing the
+                              region name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          secretAccessKey:
+                            description: The reference to the secret access key
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          sessionToken:
+                            description: The reference to the session key
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                        type: object
+                      serverName:
+                        description: |-
+                          The server name on S3, the cluster name is used if this
+                          parameter is omitted
+                        type: string
+                      tags:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Tags is a list of key value pairs that will be passed to the
+                          Barman --tags option.
+                        type: object
+                      wal:
+                        description: |-
+                          The configuration for the backup of the WAL stream.
+                          When not defined, WAL files will be stored uncompressed and may be
+                          unencrypted in the object store, according to the bucket default policy.
+                        properties:
+                          archiveAdditionalCommandArgs:
+                            description: |-
+                              Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+                              command-line invocation. These arguments provide flexibility to customize
+                              the WAL archive process further, according to specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+                              behavior during execution.
+                            items:
+                              type: string
+                            type: array
+                          compression:
+                            description: |-
+                              Compress a WAL file before sending it to the object store. Available
+                              options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+                            enum:
+                            - gzip
+                            - bzip2
+                            - snappy
+                            type: string
+                          encryption:
+                            description: |-
+                              Whether to force the encryption of files (if the bucket is
+                              not already configured for that).
+                              Allowed options are empty string (use the bucket policy, default),
+                              `AES256` and `aws:kms`
+                            enum:
+                            - AES256
+                            - aws:kms
+                            type: string
+                          maxParallel:
+                            description: |-
+                              Number of WAL files to be either archived in parallel (when the
+                              PostgreSQL instance is archiving to a backup object store) or
+                              restored in parallel (when a PostgreSQL standby is fetching WAL
+                              files from a recovery object store). If not specified, WAL files
+                              will be processed one at a time. It accepts a positive integer as a
+                              value - with 1 being the minimum accepted value.
+                            minimum: 1
+                            type: integer
+                          restoreAdditionalCommandArgs:
+                            description: |-
+                              Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                              command-line invocation. These arguments provide flexibility to customize
+                              the WAL restore process further, according to specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                              behavior during execution.
+                            items:
+                              type: string
+                            type: array
+                        type: object
+                    required:
+                    - destinationPath
+                    type: object
+                  retentionPolicy:
+                    description: |-
+                      RetentionPolicy is the retention policy to be used for backups
+                      and WALs (i.e. '60d'). The retention policy is expressed in the form
+                      of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+                      days, weeks, months.
+                      It's currently only applicable when using the BarmanObjectStore method.
+                    pattern: ^[1-9][0-9]*[dwm]$
+                    type: string
+                  target:
+                    default: prefer-standby
+                    description: |-
+                      The policy to decide which instance should perform backups. Available
+                      options are empty string, which will default to `prefer-standby` policy,
+                      `primary` to have backups run always on primary instances, `prefer-standby`
+                      to have backups run preferably on the most updated standby, if available.
+                    enum:
+                    - primary
+                    - prefer-standby
+                    type: string
+                  volumeSnapshot:
+                    description: VolumeSnapshot provides the configuration for the
+                      execution of volume snapshot backups.
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: Annotations are key-value pairs that will be added
+                          to .metadata.annotations snapshot resources.
+                        type: object
+                      className:
+                        description: |-
+                          ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+                          It is the default class for the other types if no specific class is present
+                        type: string
+                      labels:
+                        additionalProperties:
+                          type: string
+                        description: Labels are key-value pairs that will be added
+                          to .metadata.labels snapshot resources.
+                        type: object
+                      online:
+                        default: true
+                        description: |-
+                          Whether the default type of backup with volume snapshots is
+                          online/hot (`true`, default) or offline/cold (`false`)
+                        type: boolean
+                      onlineConfiguration:
+                        default:
+                          immediateCheckpoint: false
+                          waitForArchive: true
+                        description: Configuration parameters to control the online/hot
+                          backup with volume snapshots
+                        properties:
+                          immediateCheckpoint:
+                            description: |-
+                              Control whether the I/O workload for the backup initial checkpoint will
+                              be limited, according to the `checkpoint_completion_target` setting on
+                              the PostgreSQL server. If set to true, an immediate checkpoint will be
+                              used, meaning PostgreSQL will complete the checkpoint as soon as
+                              possible. `false` by default.
+                            type: boolean
+                          waitForArchive:
+                            default: true
+                            description: |-
+                              If false, the function will return immediately after the backup is completed,
+                              without waiting for WAL to be archived.
+                              This behavior is only useful with backup software that independently monitors WAL archiving.
+                              Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+                              By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+                              enabled.
+                              On a standby, this means that it will wait only when archive_mode = always.
+                              If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+                              an immediate segment switch.
+                            type: boolean
+                        type: object
+                      snapshotOwnerReference:
+                        default: none
+                        description: SnapshotOwnerReference indicates the type of
+                          owner reference the snapshot should have
+                        enum:
+                        - none
+                        - cluster
+                        - backup
+                        type: string
+                      tablespaceClassName:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+                          Defaults to the PGDATA Snapshot Class, if set
+                        type: object
+                      walClassName:
+                        description: WalClassName specifies the Snapshot Class to
+                          be used for the PG_WAL PersistentVolumeClaim.
+                        type: string
+                    type: object
+                type: object
+              bootstrap:
+                description: Instructions to bootstrap this cluster
+                properties:
+                  initdb:
+                    description: Bootstrap the cluster via initdb
+                    properties:
+                      builtinLocale:
+                        description: |-
+                          Specifies the locale name when the builtin provider is used.
+                          This option requires `localeProvider` to be set to `builtin`.
+                          Available from PostgreSQL 17.
+                        type: string
+                      dataChecksums:
+                        description: |-
+                          Whether the `-k` option should be passed to initdb,
+                          enabling checksums on data pages (default: `false`)
+                        type: boolean
+                      database:
+                        description: 'Name of the database used by the application.
+                          Default: `app`.'
+                        type: string
+                      encoding:
+                        description: The value to be passed as option `--encoding`
+                          for initdb (default:`UTF8`)
+                        type: string
+                      icuLocale:
+                        description: |-
+                          Specifies the ICU locale when the ICU provider is used.
+                          This option requires `localeProvider` to be set to `icu`.
+                          Available from PostgreSQL 15.
+                        type: string
+                      icuRules:
+                        description: |-
+                          Specifies additional collation rules to customize the behavior of the default collation.
+                          This option requires `localeProvider` to be set to `icu`.
+                          Available from PostgreSQL 16.
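+                        # Illustrative sketch only, tying together the `backup`
+                        # stanza closed above; the bucket, secret and snapshot
+                        # class names are assumptions.
+                        #
+                        #   backup:
+                        #     retentionPolicy: 30d
+                        #     target: prefer-standby
+                        #     barmanObjectStore:
+                        #       destinationPath: s3://my-backups/pg
+                        #       s3Credentials:
+                        #         accessKeyId:
+                        #           name: aws-creds
+                        #           key: ACCESS_KEY_ID
+                        #         secretAccessKey:
+                        #           name: aws-creds
+                        #           key: SECRET_ACCESS_KEY
+                        #       wal:
+                        #         compression: gzip
+                        #         maxParallel: 4
+                        #     volumeSnapshot:
+                        #       className: csi-snapclass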
+                        type: string
+                      import:
+                        description: |-
+                          Bootstraps the new cluster by importing data from an existing PostgreSQL
+                          instance using logical backup (`pg_dump` and `pg_restore`)
+                        properties:
+                          databases:
+                            description: The databases to import
+                            items:
+                              type: string
+                            type: array
+                          pgDumpExtraOptions:
+                            description: |-
+                              List of custom options to pass to the `pg_dump` command. IMPORTANT:
+                              Use these options with caution and at your own risk, as the operator
+                              does not validate their content. Be aware that certain options may
+                              conflict with the operator's intended functionality or design.
+                            items:
+                              type: string
+                            type: array
+                          pgRestoreExtraOptions:
+                            description: |-
+                              List of custom options to pass to the `pg_restore` command. IMPORTANT:
+                              Use these options with caution and at your own risk, as the operator
+                              does not validate their content. Be aware that certain options may
+                              conflict with the operator's intended functionality or design.
+                            items:
+                              type: string
+                            type: array
+                          postImportApplicationSQL:
+                            description: |-
+                              List of SQL queries to be executed as a superuser in the application
+                              database right after it is imported - to be used with extreme care
+                              (by default empty). Only available in microservice type.
+                            items:
+                              type: string
+                            type: array
+                          roles:
+                            description: The roles to import
+                            items:
+                              type: string
+                            type: array
+                          schemaOnly:
+                            description: |-
+                              When set to true, only the `pre-data` and `post-data` sections of
+                              `pg_restore` are invoked, avoiding data import. Default: `false`.
+                            type: boolean
+                          source:
+                            description: The source of the import
+                            properties:
+                              externalCluster:
+                                description: The name of the externalCluster used
+                                  for import
+                                type: string
+                            required:
+                            - externalCluster
+                            type: object
+                          type:
+                            description: The import type. Can be `microservice` or
+                              `monolith`.
+                            enum:
+                            - microservice
+                            - monolith
+                            type: string
+                        required:
+                        - databases
+                        - source
+                        - type
+                        type: object
+                      locale:
+                        description: Sets the default collation order and character
+                          classification in the new database.
+                        type: string
+                      localeCType:
+                        description: The value to be passed as option `--lc-ctype`
+                          for initdb (default:`C`)
+                        type: string
+                      localeCollate:
+                        description: The value to be passed as option `--lc-collate`
+                          for initdb (default:`C`)
+                        type: string
+                      localeProvider:
+                        description: |-
+                          This option sets the locale provider for databases created in the new cluster.
+                          Available from PostgreSQL 16.
+                        type: string
+                      options:
+                        description: |-
+                          The list of options that must be passed to initdb when creating the cluster.
+                          Deprecated: This could lead to inconsistent configurations,
+                          please use the explicitly provided parameters instead.
+                          If defined, explicit values will be ignored.
+                        items:
+                          type: string
+                        type: array
+                      owner:
+                        description: |-
+                          Name of the owner of the database in the instance to be used
+                          by applications. Defaults to the value of the `database` key.
+                        type: string
+                      postInitApplicationSQL:
+                        description: |-
+                          List of SQL queries to be executed as a superuser in the application
+                          database right after the cluster has been created - to be used with extreme care
+                          (by default empty)
+                        items:
+                          type: string
+                        type: array
+                      postInitApplicationSQLRefs:
+                        description: |-
+                          List of references to ConfigMaps or Secrets containing SQL files
+                          to be executed as a superuser in the application database right after
+                          the cluster has been created. The references are processed in a specific order:
+                          first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
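+                                # Illustrative sketch only, for the `initdb` bootstrap
+                                # and the post-init SQL references documented above;
+                                # the secret name and key are assumptions.
+                                #
+                                #   bootstrap:
+                                #     initdb:
+                                #       database: app
+                                #       owner: app
+                                #       dataChecksums: true
+                                #       postInitApplicationSQLRefs:
+                                #         secretRefs:
+                                #         - name: app-seed
+                                #           key: seed.sql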
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+                      - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided,
+                      this can be omitted.
+                    type: string
+                  replicationTLSSecret:
+                    description: |-
+                      The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+                      the `streaming_replica` user.
+                      If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+                      created using the provided CA.
+                    type: string
+                  serverAltDNSNames:
+                    description: The list of the server alternative DNS names to be
+                      added to the generated server TLS certificates, when required.
+                    items:
+                      type: string
+                    type: array
+                  serverCASecret:
+                    description: |-
+                      The secret containing the Server CA certificate. If not defined, a new secret will be created
+                      with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+                      - `ca.key`: key used to generate Server SSL certs; if ServerTLSSecret is provided,
+                      this can be omitted.
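+          # Illustrative sketch (editor's note): hypothetical user-provided
+          # CA wiring for the `certificates` section above. Secret names are
+          # placeholders; each CA secret carries `ca.crt` (and `ca.key` when
+          # the operator must generate the leaf certificates itself).
+          #
+          #   spec:
+          #     certificates:
+          #       clientCASecret: my-client-ca        # hypothetical
+          #       replicationTLSSecret: my-repl-tls   # kubernetes.io/tls Secret
+          #       serverCASecret: my-server-ca        # hypothetical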
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
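+          # Illustrative sketch (editor's note): the `env` / `envFrom` fields
+          # above follow the standard Kubernetes EnvVar/EnvFromSource schema.
+          # A hypothetical use, with invented resource names:
+          #
+          #   spec:
+          #     enableSuperuserAccess: true
+          #     env:
+          #       - name: TZ
+          #         value: Europe/Rome
+          #       - name: HTTP_PROXY
+          #         valueFrom:
+          #           configMapKeyRef:
+          #             name: proxy-config   # hypothetical ConfigMap
+          #             key: httpProxy
+          #     envFrom:
+          #       - secretRef:
+          #           name: extra-env        # hypothetical Secret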
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
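+          # Illustrative sketch (editor's note): a hypothetical
+          # `ephemeralVolumeSource` built from the volumeClaimTemplate schema
+          # above (note that `spec` is required). The storage class name is a
+          # placeholder.
+          #
+          #   spec:
+          #     ephemeralVolumeSource:
+          #       volumeClaimTemplate:
+          #         spec:
+          #           accessModes: ["ReadWriteOnce"]
+          #           storageClassName: fast-local   # hypothetical
+          #           resources:
+          #             requests:
+          #               storage: 1Gi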
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
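+          # Illustrative sketch (editor's note): capping the ephemeral volumes
+          # described above; both fields accept Kubernetes quantities.
+          #
+          #   spec:
+          #     ephemeralVolumesSizeLimit:
+          #       shm: 256Mi
+          #       temporaryData: 2Gi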
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
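+          # Illustrative sketch (editor's note): a hypothetical
+          # `externalClusters` entry tying together the barmanObjectStore
+          # fields above. Bucket, Secret, and server names are placeholders.
+          #
+          #   spec:
+          #     externalClusters:
+          #       - name: origin
+          #         barmanObjectStore:
+          #           destinationPath: s3://my-bucket/backups   # hypothetical
+          #           serverName: origin-cluster
+          #           s3Credentials:
+          #             accessKeyId:
+          #               name: aws-creds      # hypothetical Secret
+          #               key: ACCESS_KEY_ID
+          #             secretAccessKey:
+          #               name: aws-creds
+          #               key: SECRET_ACCESS_KEY
+          #           wal:
+          #             compression: gzip
+          #             maxParallel: 4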
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + x-kubernetes-validations: + - message: Major is immutable + rule: self == oldSelf + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
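+          # Illustrative sketch (editor's note): the two ways of selecting the
+          # PostgreSQL image described above, presented here as alternatives.
+          # Catalog name and image tag are placeholders.
+          #
+          #   spec:
+          #     imageCatalogRef:
+          #       apiGroup: postgresql.cnpg.io   # enforced by the validation rule above
+          #       kind: ImageCatalog             # or ClusterImageCatalog
+          #       name: postgresql-catalog       # hypothetical
+          #       major: 16                      # immutable once set
+          #
+          #   # ...or, pinning an image directly:
+          #   spec:
+          #     imageName: ghcr.io/cloudnative-pg/postgresql:16.4   # hypothetical tag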
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
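+          # Illustrative sketch (editor's note): one hypothetical declarative
+          # role using the RoleConfiguration fields in this section (the
+          # password reference is described just below). `pg_read_all_data`
+          # assumes PostgreSQL 14 or later.
+          #
+          #   spec:
+          #     managed:
+          #       roles:
+          #         - name: app_reader             # hypothetical role
+          #           ensure: present
+          #           comment: read-only application role
+          #           login: true
+          #           inherit: true
+          #           connectionLimit: 10
+          #           inRoles:
+          #             - pg_read_all_data
+          #           passwordSecret:
+          #             name: app-reader-password  # hypothetical Secret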
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+                                        This field will be wiped when updating a Service to type ExternalName.
+
+                                        This field may hold a maximum of two entries (dual-stack families, in
+                                        either order). These families must correspond to the values of the
+                                        clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                        governed by the ipFamilyPolicy field.
+                                      items:
+                                        description: |-
+                                          IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                          to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    ipFamilyPolicy:
+                                      description: |-
+                                        IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                        this Service. If there is no value provided, then this field will be set
+                                        to SingleStack. Services can be "SingleStack" (a single IP family),
+                                        "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                        a single IP family on single-stack clusters), or "RequireDualStack"
+                                        (two IP families on dual-stack configured clusters, otherwise fail). The
+                                        ipFamilies and clusterIPs fields depend on the value of this field. This
+                                        field will be wiped when updating a service to type ExternalName.
+                                      type: string
+                                    loadBalancerClass:
+                                      description: |-
+                                        loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                        If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                        e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                        This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                        balancer implementation is used, today this is typically done through the cloud provider integration,
+                                        but should apply for any default implementation. If set, it is assumed that a load balancer
+                                        implementation is watching for Services with a matching class. Any default load balancer
+                                        implementation (e.g. cloud providers) should ignore Services that set this field.
+                                        This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                        Once set, it cannot be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                      type: string
+                                    loadBalancerIP:
+                                      description: |-
+                                        Only applies to Service Type: LoadBalancer.
+                                        This feature depends on whether the underlying cloud-provider supports specifying
+                                        the loadBalancerIP when a load balancer is created.
+                                        This field will be ignored if the cloud-provider does not support the feature.
+                                        Deprecated: This field was under-specified and its meaning varies across implementations.
+                                        Using it is non-portable and it may not support dual-stack.
+                                        Users are encouraged to use implementation-specific annotations when available.
+                                      type: string
+                                    loadBalancerSourceRanges:
+                                      description: |-
+                                        If specified and supported by the platform, traffic through the cloud-provider
+                                        load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                                        cloud-provider does not support the feature.
+                                        More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    ports:
+                                      description: |-
+                                        The list of ports that are exposed by this service.
+                                        More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                      items:
+                                        description: ServicePort contains information
+                                          on service's port.
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
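The serviceTemplate schema above mirrors the core Kubernetes ServiceSpec. As a usage sketch only (the service name cluster-example-rw-lb and the LoadBalancer type are illustrative assumptions, not defaults), an additional managed service might be declared in a Cluster manifest like this:

    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: cluster-example
    spec:
      instances: 3
      managed:
        services:
          additional:
            - selectorType: rw
              serviceTemplate:
                metadata:
                  name: cluster-example-rw-lb
                spec:
                  type: LoadBalancer

The operator derives the pod selector from selectorType, so the template only needs the fields that differ from the generated defaults.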
+                                        More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                                      type: string
+                                  type: object
+                              type: object
+                            updateStrategy:
+                              default: patch
+                              description: UpdateStrategy describes how the service
+                                differences should be reconciled
+                              enum:
+                              - patch
+                              - replace
+                              type: string
+                          required:
+                          - selectorType
+                          - serviceTemplate
+                          type: object
+                        type: array
+                      disabledDefaultServices:
+                        description: |-
+                          DisabledDefaultServices is a list of service types that are disabled by default.
+                          Valid values are "r" and "ro", representing read and read-only services.
+                        items:
+                          description: |-
+                            ServiceSelectorType describes a valid value for generating the service selectors.
+                            It indicates which type of service the selector applies to, such as read-write, read, or read-only
+                          enum:
+                          - rw
+                          - r
+                          - ro
+                          type: string
+                        type: array
+                    type: object
+                type: object
+              maxSyncReplicas:
+                default: 0
+                description: |-
+                  The target value for the synchronous replication quorum, which can be
+                  decreased if the number of ready standbys is lower than this.
+                  Undefined or 0 disables synchronous replication.
+                minimum: 0
+                type: integer
+              minSyncReplicas:
+                default: 0
+                description: |-
+                  Minimum number of instances required in synchronous replication with the
+                  primary. Undefined or 0 allows writes to complete when no standby is
+                  available.
+                minimum: 0
+                type: integer
+              monitoring:
+                description: The configuration of the monitoring infrastructure of
+                  this cluster
+                properties:
+                  customQueriesConfigMap:
+                    description: The list of config maps containing the custom queries
+                    items:
+                      description: |-
+                        ConfigMapKeySelector contains enough information to let you locate
+                        the key of a ConfigMap
+                      properties:
+                        key:
+                          description: The key to select
+                          type: string
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                      - key
+                      - name
+                      type: object
+                    type: array
+                  customQueriesSecret:
+                    description: The list of secrets containing the custom queries
+                    items:
+                      description: |-
+                        SecretKeySelector contains enough information to let you locate
+                        the key of a Secret
+                      properties:
+                        key:
+                          description: The key to select
+                          type: string
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                      - key
+                      - name
+                      type: object
+                    type: array
+                  disableDefaultQueries:
+                    default: false
+                    description: |-
+                      Whether the default queries should be injected.
+                      Set it to `true` if you don't want to inject default queries into the cluster.
+                      Default: false.
+                    type: boolean
+                  enablePodMonitor:
+                    default: false
+                    description: Enable or disable the `PodMonitor`
+                    type: boolean
+                  podMonitorMetricRelabelings:
+                    description: The list of metric relabelings for the `PodMonitor`.
+                      Applied to samples before ingestion.
+                    items:
+                      description: |-
+                        RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+                        scraped samples and remote write samples.
+
+                        More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+                      properties:
+                        action:
+                          default: replace
+                          description: |-
+                            Action to perform based on the regex matching.
+
+                            `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+                            `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
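A minimal monitoring stanza using the fields documented in this block might look as follows (the ConfigMap name example-monitoring and the key custom-queries are assumptions for the sketch, not defaults):

    spec:
      monitoring:
        enablePodMonitor: true
        customQueriesConfigMap:
          - name: example-monitoring
            key: custom-queries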
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                type: string
+                              optional:
+                                description: Specify whether the Secret or its key
+                                  must be defined
+                                type: boolean
+                            required:
+                            - key
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          searchAttribute:
+                            description: Attribute to match against the username
+                            type: string
+                          searchFilter:
+                            description: Search filter to use when doing the search+bind
+                              authentication
+                            type: string
+                        type: object
+                      port:
+                        description: LDAP server port
+                        type: integer
+                      scheme:
+                        description: LDAP scheme to be used, possible options are
+                          `ldap` and `ldaps`
+                        enum:
+                        - ldap
+                        - ldaps
+                        type: string
+                      server:
+                        description: LDAP hostname or IP address
+                        type: string
+                      tls:
+                        description: Set to 'true' to enable LDAP over TLS. 'false'
+                          is default
+                        type: boolean
+                    type: object
+                  parameters:
+                    additionalProperties:
+                      type: string
+                    description: PostgreSQL configuration options (postgresql.conf)
+                    type: object
+                  pg_hba:
+                    description: |-
+                      PostgreSQL Host Based Authentication rules (lines to be appended
+                      to the pg_hba.conf file)
+                    items:
+                      type: string
+                    type: array
+                  pg_ident:
+                    description: |-
+                      PostgreSQL User Name Maps rules (lines to be appended
+                      to the pg_ident.conf file)
+                    items:
+                      type: string
+                    type: array
+                  promotionTimeout:
+                    description: |-
+                      Specifies the maximum number of seconds to wait when promoting an instance to primary.
+                      Default value is 40000000, greater than one year in seconds,
+                      big enough to simulate an infinite timeout
+                    format: int32
+                    type: integer
+                  shared_preload_libraries:
+                    description: List of shared preload libraries to add to the default
+                      ones
+                    items:
+                      type: string
+                    type: array
+                  syncReplicaElectionConstraint:
+                    description: |-
+                      Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+                      set up.
+                    properties:
+                      enabled:
+                        description: This flag enables the constraints for sync replicas
+                        type: boolean
+                      nodeLabelsAntiAffinity:
+                        description: A list of node label values to extract and compare
+                          to evaluate if the pods reside in the same topology or not
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - enabled
+                    type: object
+                  synchronous:
+                    description: Configuration of the PostgreSQL synchronous replication
+                      feature
+                    properties:
+                      dataDurability:
+                        default: required
+                        description: |-
+                          If set to "required", data durability is strictly enforced. Write operations
+                          with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+                          block if there are insufficient healthy replicas, ensuring data persistence.
+                          If set to "preferred", data durability is maintained when healthy replicas
+                          are available, but the required number of instances will adjust dynamically
+                          if replicas become unavailable. This setting relaxes strict durability enforcement
+                          to allow for operational continuity. This setting is only applicable if both
+                          `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+                        enum:
+                        - required
+                        - preferred
+                        type: string
+                      maxStandbyNamesFromCluster:
+                        description: |-
+                          Specifies the maximum number of local cluster pods that can be
+                          automatically included in the `synchronous_standby_names` option in
+                          PostgreSQL.
+                        type: integer
+                      method:
+                        description: |-
+                          Method to select synchronous replication standbys from the listed
+                          servers, accepting 'any' (quorum-based synchronous replication) or
+                          'first' (priority-based synchronous replication) as values.
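For instance, quorum-based synchronous replication with a single synchronous standby could be requested with a stanza like the following sketch (number: 1 is an illustrative choice, not a default):

    spec:
      postgresql:
        synchronous:
          method: any
          number: 1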
+ enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
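As a sketch of how these probe knobs surface on the Cluster resource (the values are illustrative assumptions, not defaults), a tuned readiness probe might read:

    spec:
      probes:
        readiness:
          failureThreshold: 10
          periodSeconds: 10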
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. 
If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will
+                              start trying to rotate the token if the token is older than 80 percent of
+                              its time to live or if the token is older than 24 hours. Defaults to 1 hour
+                              and must be at least 10 minutes.
+                            format: int64
+                            type: integer
+                          path:
+                            description: |-
+                              path is the path relative to the mount point of the file to project the
+                              token into.
+                            type: string
+                        required:
+                        - path
+                        type: object
+                    type: object
+                  type: array
+                  x-kubernetes-list-type: atomic
+                type: object
+              replica:
+                description: Replica cluster configuration
+                properties:
+                  enabled:
+                    description: |-
+                      If replica mode is enabled, this cluster will be a replica of an
+                      existing cluster. A replica cluster can be created from a recovery
+                      object store or via streaming through pg_basebackup.
+                      Refer to the Replica clusters page of the documentation for more information.
+                    type: boolean
+                  minApplyDelay:
+                    description: |-
+                      When replica mode is enabled, this parameter allows you to replay
+                      transactions only when the system time is at least the configured
+                      time past the commit time. This provides an opportunity to correct
+                      data loss errors. Note that when this parameter is set, a promotion
+                      token cannot be used.
+                    type: string
+                  primary:
+                    description: |-
+                      Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+                      topology specified in externalClusters
+                    type: string
+                  promotionToken:
+                    description: |-
+                      A demotion token generated by an external cluster used to
+                      check if the promotion requirements are met.
+                    type: string
+                  self:
+                    description: |-
+                      Self defines the name of this cluster. It is used to determine if this is a primary
+                      or a replica cluster, comparing it with `primary`
+                    type: string
+                  source:
+                    description: The name of the external cluster which is the replication
+                      origin
+                    minLength: 1
+                    type: string
+                required:
+                - source
+                type: object
+              replicationSlots:
+                default:
+                  highAvailability:
+                    enabled: true
+                description: Replication slots management configuration
+                properties:
+                  highAvailability:
+                    default:
+                      enabled: true
+                    description: Replication slots for high availability configuration
+                    properties:
+                      enabled:
+                        default: true
+                        description: |-
+                          If enabled (default), the operator will automatically manage replication slots
+                          on the primary instance and use them in streaming replication
+                          connections with all the standby instances that are part of the HA
+                          cluster. If disabled, the operator will not take advantage
+                          of replication slots in streaming connections with the replicas.
+                          This feature also controls replication slots in replica clusters,
+                          from the designated primary to its cascading replicas.
+                        type: boolean
+                      slotPrefix:
+                        default: _cnpg_
+                        description: |-
+                          Prefix for replication slots managed by the operator for HA.
+                          It may only contain lower case letters, numbers, and the underscore character.
+                          This can only be set at creation time. By default set to `_cnpg_`.
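Spelled out with its defaults, the replication slots stanza described here is equivalent to this sketch (updateInterval is documented just below):

    spec:
      replicationSlots:
        highAvailability:
          enabled: true
          slotPrefix: _cnpg_
        updateInterval: 30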
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
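Restated as a manifest fragment, the startup and shutdown timing fields above default to the equivalent of this sketch:

    spec:
      startDelay: 3600
      stopDelay: 1800
      smartShutdownTimeout: 180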
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
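+                      # Illustrative sketch (not part of the generated schema): the
+                      # worked zone examples above translate into a stanza like the
+                      # following in a Cluster spec; the cluster name and label value
+                      # are hypothetical, while `cnpg.io/cluster` is the label the
+                      # operator sets on instance pods.
+                      #
+                      #   spec:
+                      #     topologySpreadConstraints:
+                      #     - maxSkew: 1
+                      #       topologyKey: topology.kubernetes.io/zone
+                      #       whenUnsatisfiable: DoNotSchedule
+                      #       labelSelector:
+                      #         matchLabels:
+                      #           cnpg.io/cluster: cluster-example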
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + azurePVCUpdateEnabled: + description: AzurePVCUpdateEnabled shows if the PVC online upgrade + is enabled for this cluster + type: boolean + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. 
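+                # Illustrative note (not part of the generated schema): with the
+                # defaults, the CA and TLS material typically lands in secrets named
+                # `<cluster>-ca`, `<cluster>-server` and `<cluster>-replication`, and
+                # the expiry dates reported here can be read back with, e.g.:
+                #   kubectl get cluster cluster-example \
+                #     -o jsonpath='{.status.certificates.expirations}'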
+ properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
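+                    # A minimal sketch of providing this secret by hand, assuming
+                    # pre-generated ca.crt/ca.key files and a hypothetical secret name:
+                    #   kubectl create secret generic my-client-ca \
+                    #     --from-file=ca.crt=ca.crt --from-file=ca.key=ca.key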
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
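+                    # Illustrative client-side usage (the service name is hypothetical):
+                    # once ca.crt is extracted from this secret, libpq clients can
+                    # verify the server certificate with something like:
+                    #   psql "host=cluster-example-rw sslmode=verify-ca sslrootcert=ca.crt"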
+                    type: string
+                  serverTLSSecret:
+                    description: |-
+                      The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+                      `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+                      If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+                      created using the provided CA.
+                    type: string
+                type: object
+              cloudNativePGCommitHash:
+                description: The commit hash of the operator build that is currently
+                  running
+                type: string
+              cloudNativePGOperatorHash:
+                description: The hash of the operator binary
+                type: string
+              conditions:
+                description: Conditions for cluster object
+                items:
+                  description: Condition contains details for one aspect of the current
+                    state of this API Resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This may be an empty string.
+                      maxLength: 32768
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        observedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      minimum: 0
+                      type: integer
+                    reason:
+                      description: |-
+                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                        Producers of specific condition types may define expected values and meanings for this field,
+                        and whether the values are considered a guaranteed API.
+                        The value should be a CamelCase string.
+                        This field may not be empty.
+                      maxLength: 1024
+                      minLength: 1
+                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      enum:
+                      - "True"
+                      - "False"
+                      - Unknown
+                      type: string
+                    type:
+                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                      maxLength: 316
+                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - message
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+              configMapResourceVersion:
+                description: |-
+                  The list of resource versions of the configmaps,
+                  managed by the operator. Every change here is done in the
+                  interest of the instance manager, which will refresh the
+                  configmap data
+                properties:
+                  metrics:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
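+                # Illustrative note (not part of the generated schema): comparing this
+                # counter with `readyInstances` is a quick way to spot instances
+                # whose pods are not ready, e.g.:
+                #   kubectl get cluster cluster-example \
+                #     -o jsonpath='{.status.instances} {.status.readyInstances}'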
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler 
+                      items:
+                        type: string
+                      type: array
+                    restoreJobHookCapabilities:
+                      description: |-
+                        RestoreJobHookCapabilities are the list of capabilities of the
+                        plugin regarding the RestoreJobHook management
+                      items:
+                        type: string
+                      type: array
+                    status:
+                      description: Status contains the status reported by the plugin
+                        through the SetStatusInCluster interface
+                      type: string
+                    version:
+                      description: |-
+                        Version is the version of the plugin loaded by the
+                        latest reconciliation loop
+                      type: string
+                    walCapabilities:
+                      description: |-
+                        WALCapabilities are the list of capabilities of the
+                        plugin regarding the WAL management
+                      items:
+                        type: string
+                      type: array
+                  required:
+                  - name
+                  - version
+                  type: object
+                type: array
+              poolerIntegrations:
+                description: The integration needed by poolers referencing the cluster
+                properties:
+                  pgBouncerIntegration:
+                    description: PgBouncerIntegrationStatus encapsulates the needed
+                      integration for the pgbouncer poolers referencing the cluster
+                    properties:
+                      secrets:
+                        items:
+                          type: string
+                        type: array
+                    type: object
+                type: object
+              pvcCount:
+                description: How many PVCs have been created by this cluster
+                format: int32
+                type: integer
+              readService:
+                description: Current list of read pods
+                type: string
+              readyInstances:
+                description: The total number of ready instances in the cluster. It
+                  is equal to the number of ready instance pods.
+                type: integer
+              resizingPVC:
+                description: List of all the PVCs that have ResizingPVC condition.
+                items:
+                  type: string
+                type: array
+              secretsResourceVersion:
+                description: |-
+                  The list of resource versions of the secrets
+                  managed by the operator. Every change here is done in the
+                  interest of the instance manager, which will refresh the
+                  secret data
+                properties:
+                  applicationSecretVersion:
+                    description: The resource version of the "app" user secret
+                    type: string
+                  barmanEndpointCA:
+                    description: The resource version of the Barman Endpoint CA if
+                      provided
+                    type: string
+                  caSecretVersion:
+                    description: Unused. Retained for compatibility with old versions.
+                    type: string
+                  clientCaSecretVersion:
+                    description: The resource version of the PostgreSQL client-side
+                      CA secret version
+                    type: string
+                  externalClusterSecretVersion:
+                    additionalProperties:
+                      type: string
+                    description: The resource versions of the external cluster secrets
+                    type: object
+                  managedRoleSecretVersion:
+                    additionalProperties:
+                      type: string
+                    description: The resource versions of the managed roles secrets
+                    type: object
+                  metrics:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      A map with the versions of all the secrets used to pass metrics.
+                      Map keys are the secret names, map values are the versions
+                    type: object
+                  replicationSecretVersion:
+                    description: The resource version of the "streaming_replica" user
+                      secret
+                    type: string
+                  serverCaSecretVersion:
+                    description: The resource version of the PostgreSQL server-side
+                      CA secret version
+                    type: string
+                  serverSecretVersion:
+                    description: The resource version of the PostgreSQL server-side
+                      secret version
+                    type: string
+                  superuserSecretVersion:
+                    description: The resource version of the "postgres" user secret
+                    type: string
+                type: object
+              switchReplicaClusterStatus:
+                description: SwitchReplicaClusterStatus is the status of the switch
+                  to replica cluster
+                properties:
+                  inProgress:
+                    description: InProgress indicates if there is an ongoing procedure
+                      of switching a cluster to a replica cluster.
+                    type: boolean
+                type: object
+              tablespacesStatus:
+                description: TablespacesStatus reports the state of the declarative
+                  tablespaces in the cluster
+                items:
+                  description: TablespaceState represents the state of a tablespace
+                    in a cluster
+                  properties:
+                    error:
+                      description: Error is the reconciliation error, if any
+                      type: string
+                    name:
+                      description: Name is the name of the tablespace
+                      type: string
+                    owner:
+                      description: Owner is the PostgreSQL user owning the tablespace
+                      type: string
+                    state:
+                      description: State is the latest reconciliation state
+                      type: string
+                  required:
+                  - name
+                  - state
+                  type: object
+                type: array
+              targetPrimary:
+                description: |-
+                  Target primary instance, this is different from the previous one
+                  during a switchover or a failover
+                type: string
+              targetPrimaryTimestamp:
+                description: The timestamp when the last request for a new primary
+                  has occurred
+                type: string
+              timelineID:
+                description: The timeline of the Postgres cluster
+                type: integer
+              topology:
+                description: Instances topology.
+                properties:
+                  instances:
+                    additionalProperties:
+                      additionalProperties:
+                        type: string
+                      description: PodTopologyLabels represent the topology of a Pod.
+                        map[labelName]labelValue
+                      type: object
+                    description: Instances contains the pod topology of the instances
+                    type: object
+                  nodesUsed:
+                    description: |-
+                      NodesUsed represents the count of distinct nodes accommodating the instances.
+                      A value of '1' suggests that all instances are hosted on a single node,
+                      implying the absence of High Availability (HA). Ideally, this value should
+                      be the same as the number of instances in the Postgres HA cluster, implying
+                      shared nothing architecture on the compute side.
+                    format: int32
+                    type: integer
+                  successfullyExtracted:
+                    description: |-
+                      SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+                      in synchronous replica election in case of failures
+                    type: boolean
+                type: object
+              unusablePVC:
+                description: List of all the PVCs that are unusable because another
+                  PVC is missing
+                items:
+                  type: string
+                type: array
+              writeService:
+                description: Current write pod
+                type: string
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      scale:
+        specReplicasPath: .spec.instances
+        statusReplicasPath: .status.instances
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.16.5
+  name: databases.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: Database
+    listKind: DatabaseList
+    plural: databases
+    singular: database
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    - jsonPath: .spec.cluster.name
+      name: Cluster
+      type: string
+    - jsonPath: .spec.name
+      name: PG Name
+      type: string
+    - jsonPath: .status.applied
+      name: Applied
+      type: boolean
+    - description: Latest reconciliation message
+      jsonPath: .status.message
+      name: Message
+      type: string
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: Database is the Schema for the databases API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. 
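+                # Illustrative sketch (not part of the generated schema): a Database
+                # selecting the ICU locale provider; the resource and locale names
+                # are hypothetical, and the validation rules at the end of this spec
+                # only admit the icu* fields when localeProvider is `icu`.
+                #
+                #   apiVersion: postgresql.cnpg.io/v1
+                #   kind: Database
+                #   metadata:
+                #     name: app-db
+                #   spec:
+                #     cluster:
+                #       name: cluster-example
+                #     name: app
+                #     owner: app
+                #     localeProvider: icu
+                #     icuLocale: en-US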
+ type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
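+ # --- illustrative example (not part of the generated schema) ---------------
+ # A sketch of an ImageCatalog with unique major versions, as required by the
+ # validation rule below; the image tags are hypothetical placeholders.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql
+ #   spec:
+ #     images:
+ #       - major: 16
+ #         image: ghcr.io/cloudnative-pg/postgresql:16.4
+ #       - major: 17
+ #         image: ghcr.io/cloudnative-pg/postgresql:17.0
+ # ----------------------------------------------------------------------------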
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
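+ # --- illustrative example (not part of the generated schema) ---------------
+ # A sketch of the monitoring stanza in a Pooler spec, showing one metric
+ # relabeling that drops a hypothetical metric by name.
+ #
+ #   monitoring:
+ #     enablePodMonitor: true
+ #     podMonitorMetricRelabelings:
+ #       - action: drop
+ #         sourceLabels: [__name__]
+ #         regex: pgbouncer_hypothetical_metric.*
+ # ----------------------------------------------------------------------------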
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
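+ # --- illustrative example (not part of the generated schema) ---------------
+ # A sketch of the pgbouncer section combining the fields above; the parameter
+ # values are hypothetical, and option names are passed through to PgBouncer
+ # as-is.
+ #
+ #   pgbouncer:
+ #     poolMode: transaction
+ #     paused: false
+ #     parameters:
+ #       max_client_conn: "1000"
+ #       default_pool_size: "10"
+ # ----------------------------------------------------------------------------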
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local.
If a value is specified, is
+ in-range, and is not in use, it will be used. If not specified, a value
+ will be automatically allocated. External systems (e.g. load-balancers)
+ can use this port to determine if a given node holds endpoints for this
+ service or not. If this field is specified when creating a Service
+ which does not need it, creation will fail. This field will be wiped
+ when updating a Service to no longer need it (e.g. changing type).
+ This field cannot be updated once set.
+ format: int32
+ type: integer
+ internalTrafficPolicy:
+ description: |-
+ InternalTrafficPolicy describes how nodes distribute service traffic they
+ receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ only want to talk to endpoints of the service on the same node as the pod,
+ dropping the traffic if there are no local endpoints. The default value,
+ "Cluster", uses the standard behavior of routing to all endpoints evenly
+ (possibly modified by topology and other features).
+ type: string
+ ipFamilies:
+ description: |-
+ IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ service. This field is usually assigned automatically based on cluster
+ configuration and the ipFamilyPolicy field. If this field is specified
+ manually, and the requested family is available in the cluster
+ and ipFamilyPolicy allows it, it will be used; otherwise creation of
+ the service will fail. This field is conditionally mutable: it allows
+ for adding or removing a secondary IP family, but it does not allow
+ changing the primary IP family of the Service. Valid values are "IPv4"
+ and "IPv6". This field only applies to Services of types ClusterIP,
+ NodePort, and LoadBalancer, and does apply to "headless" services.
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used; today this is typically done through the cloud provider integration,
+ but should apply for any default implementation.
If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it cannot be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is an alpha field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
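+ # --- illustrative example (not part of the generated schema) ---------------
+ # A sketch of a single weighted term as it would appear under
+ # preferredDuringSchedulingIgnoredDuringExecution in the Pooler pod template;
+ # the label key/value pair is a hypothetical placeholder.
+ #
+ #   - weight: 100
+ #     podAffinityTerm:
+ #       topologyKey: kubernetes.io/hostname
+ #       labelSelector:
+ #         matchLabels:
+ #           app: hypothetical-app
+ # ----------------------------------------------------------------------------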
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
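+ # --- illustrative note (not part of the generated schema) -------------------
+ # matchLabelKeys/mismatchLabelKeys take label *keys* only; the values are read
+ # from the incoming pod's labels. A common sketch is keying on
+ # pod-template-hash so the (anti) affinity only considers pods from the same
+ # ReplicaSet revision:
+ #
+ #   matchLabelKeys:
+ #     - pod-template-hash
+ # ----------------------------------------------------------------------------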
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
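+ # --- illustrative example (not part of the generated schema) ---------------
+ # A sketch of one required pod-affinity term: schedule only onto nodes in the
+ # same zone as pods carrying a hypothetical label.
+ #
+ #   - topologyKey: topology.kubernetes.io/zone
+ #     labelSelector:
+ #       matchLabels:
+ #         app: hypothetical-app
+ # ----------------------------------------------------------------------------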
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". 
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. 
Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. 
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. 
+ + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. 
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
+ If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + of that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. 
+ items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. 
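+ # Illustrative sketch (assumptions, not taken from this patch): a postStart
+ # hook using the exec handler introduced above. Container management blocks
+ # until the command exits.
+ #   lifecycle:
+ #     postStart:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "echo started > /tmp/.container-started"]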
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. 
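+ # Illustrative sketch (assumptions): a preStop hook that sleeps briefly so a
+ # load balancer can drain connections before SIGTERM is delivered; as noted
+ # above, the termination grace-period countdown keeps running while it executes.
+ #   lifecycle:
+ #     preStop:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "sleep 5"]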
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
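+ # Illustrative sketch (assumptions): a liveness probe using the exec handler
+ # described above. pg_isready fits this project's PostgreSQL context but is
+ # not configured anywhere in this patch.
+ #   livenessProbe:
+ #     exec:
+ #       command: ["pg_isready", "-U", "postgres"]
+ #     periodSeconds: 10
+ #     failureThreshold: 3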
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
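+ # Illustrative sketch (assumptions): a readiness probe using the httpGet
+ # handler; the path and port are placeholders.
+ #   readinessProbe:
+ #     httpGet:
+ #       path: /readyz
+ #       port: 8000
+ #     initialDelaySeconds: 5
+ #     periodSeconds: 10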
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. 
+ Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
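+ # Illustrative sketch (assumptions): a restrictive container securityContext
+ # exercising the fields above, along the lines of a common hardening baseline.
+ #   securityContext:
+ #     allowPrivilegeEscalation: false
+ #     readOnlyRootFilesystem: true
+ #     runAsNonRoot: true
+ #     capabilities:
+ #       drop: ["ALL"]
+ #     seccompProfile:
+ #       type: RuntimeDefault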
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
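+ # Illustrative sketch (assumptions): a gRPC startup probe as described above;
+ # the port is a placeholder, and omitting service falls back to the server's
+ # default health-check behavior.
+ #   startupProbe:
+ #     grpc:
+ #       port: 9090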
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
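+ # The timing fields above compose: a startup probe tolerates roughly
+ # failureThreshold * periodSeconds of initialization time. An illustrative
+ # budget of about 300s, with assumed path and port:
+ #   startupProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8000
+ #     periodSeconds: 10
+ #     failureThreshold: 30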
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. 
+ + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. 
+ If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. 
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. 
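+ # Illustrative sketch (assumptions): a pod-level securityContext combining the
+ # fsGroup and runAs* fields above; the UID/GID values are placeholders.
+ #   securityContext:
+ #     runAsNonRoot: true
+ #     runAsUser: 26
+ #     fsGroup: 26
+ #     fsGroupChangePolicy: OnRootMismatch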
+ format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. 
+ Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. 
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ domains. Scheduler will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how to spread
+ matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
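+ # Hedged example, not part of the generated schema: a labelSelector whose
+ # matchExpressions are ANDed together; the keys and values are invented.
+ #   labelSelector:
+ #     matchExpressions:
+ #       - key: app
+ #         operator: In
+ #         values: ["web"]
+ #       - key: tier
+ #         operator: Exists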
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. 
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
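+ # Illustrative only, with made-up identifiers: an azureDisk entry under
+ # .volumes, mounted read-only with host caching enabled.
+ #   - name: data
+ #     azureDisk:
+ #       diskName: example-disk
+ #       diskURI: https://example.blob.core.windows.net/vhds/example-disk.vhd
+ #       cachingMode: ReadOnly
+ #       readOnly: true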
+ type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. 
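+ # Sketch with a hypothetical driver name and attribute: an inline CSI
+ # volume; the supported volumeAttributes depend entirely on the driver.
+ #   - name: inline-vol
+ #     csi:
+ #       driver: csi.example.com
+ #       fsType: ext4
+ #       readOnly: false
+ #       volumeAttributes:
+ #         size: 1Gi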
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of
+ the pod: only annotations, labels, name,
+ namespace and uid are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the ''..'' path.
+ Must be utf-8 encoded. The first item of
+ the relative path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
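+ # Illustrative downwardAPI volume (paths and container name are invented):
+ # projects the pod labels and a container's cpu limit into files.
+ #   - name: podinfo
+ #     downwardAPI:
+ #       items:
+ #         - path: labels
+ #           fieldRef:
+ #             fieldPath: metadata.labels
+ #         - path: cpu_limit
+ #           resourceFieldRef:
+ #             containerName: app
+ #             resource: limits.cpu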
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+ + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. 
+ There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
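+ # A minimal sketch (the storage class name is an assumption): a generic
+ # ephemeral volume provisioned through the PVC template described above.
+ #   - name: scratch
+ #     ephemeral:
+ #       volumeClaimTemplate:
+ #         spec:
+ #           accessModes: ["ReadWriteOnce"]
+ #           storageClassName: standard
+ #           resources:
+ #             requests:
+ #               storage: 1Gi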
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
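+ # Illustrative Fibre Channel volume (the WWN is a placeholder): either
+ # targetWWNs plus lun or wwids identifies the device, never both.
+ #   - name: fc-vol
+ #     fc:
+ #       targetWWNs: ["50060e801049cfd1"]
+ #       lun: 0
+ #       fsType: ext4
+ #       readOnly: true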
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
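+ # Hypothetical iSCSI volume (portal and IQN are invented): targetPortal,
+ # iqn and lun are the three required fields.
+ #   - name: iscsi-vol
+ #     iscsi:
+ #       targetPortal: 10.0.2.15:3260
+ #       iqn: iqn.2001-04.com.example:storage.kube.sys1.xyz
+ #       lun: 0
+ #       fsType: ext4
+ #       readOnly: true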
+ type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
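+ # Sketch with placeholder server, path, and claim name: an NFS volume next
+ # to a reference to a pre-existing PersistentVolumeClaim.
+ #   - name: shared
+ #     nfs:
+ #       server: nfs.example.com
+ #       path: /exports/data
+ #       readOnly: true
+ #   - name: data
+ #     persistentVolumeClaim:
+ #       claimName: existing-claim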
+ type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
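+ # Illustrative projected volume (names are invented): two sources merged
+ # into one directory; the clusterTrustBundle source is alpha-gated.
+ #   - name: trust
+ #     projected:
+ #       sources:
+ #         - configMap:
+ #             name: app-config
+ #         - clusterTrustBundle:
+ #             signerName: example.com/signer
+ #             path: trust.pem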
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
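+ # Editor's note: a minimal sketch of a Publication manifest that satisfies the
+ # schema above; cluster, database, schema, and table names are hypothetical.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: Publication
+ # metadata:
+ #   name: pub-sample
+ # spec:
+ #   cluster:
+ #     name: cluster-example
+ #   dbname: app
+ #   name: pub_app
+ #   target:
+ #     objects:
+ #     - table:
+ #         schema: public
+ #         name: orders
+ #     - tablesInSchema: analytics
+ #
+ # Per the CEL validations below, `allTables` and `objects` are mutually
+ # exclusive, as are `table` and `tablesInSchema` within a single object.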
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
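+ # Editor's note: a minimal sketch of a ScheduledBackup using this field; all
+ # names are illustrative. The schedule uses the six-field cron format (with a
+ # leading seconds field) documented under `schedule` further down.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: ScheduledBackup
+ # metadata:
+ #   name: backup-nightly
+ # spec:
+ #   cluster:
+ #     name: cluster-example
+ #   schedule: "0 0 2 * * *"  # every day at 02:00:00
+ #   backupOwnerReference: self
+ #   method: barmanObjectStore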
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to back up
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: If the first backup has to be immediately started after
+ creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: If this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: Information about the last time that a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: Next time we will run a backup
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.16.5
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
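+ # Editor's note: for reference, a minimal sketch of a Subscription manifest as
+ # defined by the CRD earlier in this file; all names are hypothetical, and
+ # `externalClusterName` is expected to match an entry in the subscriber
+ # cluster's `externalClusters` list.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: Subscription
+ # metadata:
+ #   name: sub-sample
+ # spec:
+ #   cluster:
+ #     name: cluster-dest
+ #   dbname: app
+ #   name: sub_app
+ #   externalClusterName: cluster-source
+ #   publicationName: pub_app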
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: 
ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: 
/validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From 50a4e496a528d635ece11aa41476cc1b6843e338 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 9 Dec 2024 14:12:25 +0100 Subject: [PATCH 228/836] chore: update the release script to better handle RC releases (#6301) Signed-off-by: Marco Nenciarini --- hack/release.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hack/release.sh b/hack/release.sh index 0c0fc0596b..e82aeabc73 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -96,17 +96,19 @@ KUSTOMIZE="${REPO_ROOT}/bin/kustomize" mkdir -p releases/ release_manifest="releases/cnpg-${release_version}.yaml" +# shellcheck disable=SC2001 +release_branch="release-$(sed -e 's/^\([0-9]\+\.[0-9]\+\)\..*$/\1/' <<< "$release_version" )" # Perform automated substitutions of the version string in the source code sed -i -e "/Version *= *.*/Is/\".*\"/\"${release_version}\"/" \ -e "/DefaultOperatorImageName *= *.*/Is/\"\(.*\):.*\"/\"\1:${release_version}\"/" \ pkg/versions/versions.go -sed -i -e "s@release-[0-9.]*/releases/cnpg-[0-9.]*.yaml@${branch}/releases/cnpg-${release_version}.yaml@g" \ - -e "s@artifacts/release-[0-9.]*/@artifacts/${branch}/@g" \ +sed -i -e "s@\(release-[0-9.]\+\|main\)/releases/cnpg-[0-9.]\+\(-rc.*\)\?.yaml@${branch}/releases/cnpg-${release_version}.yaml@g" \ + -e "s@artifacts/release-[0-9.]*/@artifacts/${release_branch}/@g" \ docs/src/installation_upgrade.md -sed -i -e "s@1\.[0-9]\+\.[0-9]\+@${release_version}@g" docs/src/kubectl-plugin.md +sed -i -e "s@1\.[0-9]\+\.[0-9]\+\(-[a-z0-9]\+\)\?@${release_version}@g" docs/src/kubectl-plugin.md CONFIG_TMP_DIR=$(mktemp -d) cp -r config/* "${CONFIG_TMP_DIR}" @@ -123,6 +125,7 @@ git checkout -b "release/v${release_version}" git add \ pkg/versions/versions.go \ docs/src/installation_upgrade.md \ + docs/src/kubectl-plugin.md \ "${release_manifest}" git commit -sm "Version tag to ${release_version}" git push origin -u "release/v${release_version}" From ec06335f708c7365ebe1275b80609321a670bb24 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 9 Dec 2024 15:21:06 +0100 Subject: [PATCH 229/836] fix: panic recovering from an external server with no backup configuration (#6300) The instance manager panicked when recovering from an external server with no backup configuration. This patch fixes that and prevents such a configuration from being applied using the validation webhook. 
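
For illustration, here is a minimal, hypothetical manifest of the kind
that previously made the instance manager panic and is now rejected by
the webhook: the external cluster referenced by
`bootstrap.recovery.source` defines neither a `barmanObjectStore` nor a
`plugin` section, so there is nothing to recover the data directory
from (all resource names below are placeholders):

    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: cluster-restore
    spec:
      instances: 1
      storage:
        size: 1Gi
      bootstrap:
        recovery:
          source: origin
      externalClusters:
        - name: origin
          # neither barmanObjectStore nor plugin is set:
          # this recovery source is now rejected
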
Closes: #6295 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- api/v1/cluster_webhook.go | 16 ++++++++- api/v1/cluster_webhook_test.go | 55 ++++++++++++++++++++++++++---- pkg/management/postgres/restore.go | 5 +++ 3 files changed, 69 insertions(+), 7 deletions(-) diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go index 0d6da62c95..eec7a01f68 100644 --- a/api/v1/cluster_webhook.go +++ b/api/v1/cluster_webhook.go @@ -866,7 +866,9 @@ func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList { return result } - _, found := r.ExternalCluster(r.Spec.Bootstrap.Recovery.Source) + externalCluster, found := r.ExternalCluster(r.Spec.Bootstrap.Recovery.Source) + + // Ensure the existence of the external cluster if !found { result = append( result, @@ -876,6 +878,18 @@ func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList { fmt.Sprintf("External cluster %v not found", r.Spec.Bootstrap.Recovery.Source))) } + // Ensure the external cluster definition has enough information + // to be used to recover a data directory + if externalCluster.BarmanObjectStore == nil && externalCluster.PluginConfiguration == nil { + result = append( + result, + field.Invalid( + field.NewPath("spec", "bootstrap", "recovery", "source"), + r.Spec.Bootstrap.Recovery.Source, + fmt.Sprintf("External cluster %v cannot be used for recovery: "+ + "both Barman and CNPG-i plugin configurations are missing", r.Spec.Bootstrap.Recovery.Source))) + } + return result } diff --git a/api/v1/cluster_webhook_test.go b/api/v1/cluster_webhook_test.go index cdbf5585c2..bd08a26511 100644 --- a/api/v1/cluster_webhook_test.go +++ b/api/v1/cluster_webhook_test.go @@ -23,6 +23,7 @@ import ( "strings" "time" + "github.com/cloudnative-pg/barman-cloud/pkg/api" "github.com/cloudnative-pg/machinery/pkg/image/reference" pgversion "github.com/cloudnative-pg/machinery/pkg/postgres/version" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" @@ -2328,7 +2329,49 @@ var _ = Describe("bootstrap recovery validation", func() { Expect(result).To(BeEmpty()) }) - It("does not complain when bootstrap recovery source matches one of the names of external clusters", func() { + Context("does not complain when bootstrap recovery source matches one of the names of external clusters", func() { + When("using a barman object store configuration", func() { + recoveryCluster := &Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + Recovery: &BootstrapRecovery{ + Source: "test", + }, + }, + ExternalClusters: []ExternalCluster{ + { + Name: "test", + BarmanObjectStore: &api.BarmanObjectStoreConfiguration{}, + }, + }, + }, + } + errorsList := recoveryCluster.validateBootstrapRecoverySource() + Expect(errorsList).To(BeEmpty()) + }) + + When("using a plugin configuration", func() { + recoveryCluster := &Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + Recovery: &BootstrapRecovery{ + Source: "test", + }, + }, + ExternalClusters: []ExternalCluster{ + { + Name: "test", + PluginConfiguration: &PluginConfiguration{}, + }, + }, + }, + } + errorsList := recoveryCluster.validateBootstrapRecoverySource() + Expect(errorsList).To(BeEmpty()) + }) + }) + + It("complains when bootstrap recovery source does not match one of the names of external clusters", func() { recoveryCluster := &Cluster{ Spec: ClusterSpec{ Bootstrap: &BootstrapConfiguration{ @@ -2338,16 +2381,16 @@ var _ = Describe("bootstrap recovery validation", func() { }, 
ExternalClusters: []ExternalCluster{
 					{
-						Name: "test",
+						Name: "another-test",
 					},
 				},
 			},
 		}
 		errorsList := recoveryCluster.validateBootstrapRecoverySource()
-		Expect(errorsList).To(BeEmpty())
+		Expect(errorsList).ToNot(BeEmpty())
 	})
 
-	It("complains when bootstrap recovery source does not match one of the names of external clusters", func() {
+	It("complains when bootstrap recovery source has no BarmanObjectStore or plugin configuration", func() {
 		recoveryCluster := &Cluster{
 			Spec: ClusterSpec{
 				Bootstrap: &BootstrapConfiguration{
 					Recovery: &BootstrapRecovery{
 						Source: "test",
 					},
 				},
 				ExternalClusters: []ExternalCluster{
 					{
-						Name: "another-test",
+						Name: "test",
 					},
 				},
 			},
 		}
 		errorsList := recoveryCluster.validateBootstrapRecoverySource()
-		Expect(errorsList).ToNot(BeEmpty())
+		Expect(errorsList).To(HaveLen(1))
 	})
 })
 
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index 2cdad8c8ea..979ed57f53 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -511,6 +511,11 @@ func (info InitInfo) loadBackupObjectFromExternalCluster(
 	if !found {
 		return nil, nil, fmt.Errorf("missing external cluster: %v", sourceName)
 	}
+
+	if server.BarmanObjectStore == nil {
+		return nil, nil, fmt.Errorf("missing barman object store configuration for source: %v", sourceName)
+	}
+
 	serverName := server.GetServerName()
 
 	env, err := barmanCredentials.EnvSetRestoreCloudCredentials(

From bd6e545aa5c7833154de259c142ccc9c0203e6c9 Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Wed, 11 Dec 2024 14:01:43 +0100
Subject: [PATCH 230/836] fix: deadlock when a plugin is used multiple times
 (#6309)

Now the operator requests one connection per used plugin, even if the
same plugin has been requested multiple times.

Fixes a deadlock arising when the same plugin is used multiple times.
The operator was acquiring multiple connections to the plugin, and
could get stuck without releasing the ones it had already taken in case
of high concurrency.

Closes: #6310

---------

Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
 internal/cnpi/plugin/client/client.go         | 11 ++++++++++-
 internal/cnpi/plugin/repository/connection.go | 19 +++++++++++++++++--
 internal/cnpi/plugin/repository/setup.go      | 10 ++++++++++
 internal/controller/cluster_controller.go     |  6 +++++-
 4 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/internal/cnpi/plugin/client/client.go b/internal/cnpi/plugin/client/client.go
index 9d62c1a7a7..e13515668c 100644
--- a/internal/cnpi/plugin/client/client.go
+++ b/internal/cnpi/plugin/client/client.go
@@ -20,6 +20,7 @@ import (
 	"context"
 
 	"github.com/cloudnative-pg/machinery/pkg/log"
+	"github.com/cloudnative-pg/machinery/pkg/stringset"
 
 	"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection"
 	"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository"
@@ -83,7 +84,15 @@ func WithPlugins(ctx context.Context, repository repository.Interface, names ...
 	result := &data{
 		repository: repository,
 	}
-	if err := result.load(ctx, names...); err != nil {
+
+	// The following ensures that each plugin is loaded just one
+	// time, even when the same plugin has been requested multiple
+	// times.
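+	//
+	// For example, a hypothetical call with names ["plugin-a",
+	// "plugin-a", "plugin-b"] collapses to ["plugin-a", "plugin-b"],
+	// so at most one pooled connection per plugin is acquired below.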
+	loadingPlugins := stringset.From(names)
+	uniqueSortedPluginName := loadingPlugins.ToSortedList()
+
+	if err := result.load(ctx, uniqueSortedPluginName...); err != nil {
+		result.Close(ctx)
 		return nil, err
 	}
 
diff --git a/internal/cnpi/plugin/repository/connection.go b/internal/cnpi/plugin/repository/connection.go
index 3a563ac6a5..fb8e478c70 100644
--- a/internal/cnpi/plugin/repository/connection.go
+++ b/internal/cnpi/plugin/repository/connection.go
@@ -26,6 +26,10 @@ import (
 	"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection"
 )
 
+// maxConnectionAttempts is the maximum number of connection attempts to a
+// plugin. maxConnectionAttempts should be greater than or equal to maxPoolSize
+const maxConnectionAttempts = 5
+
 type releasingConnection struct {
 	connection.Interface
 	closer func() error
@@ -51,7 +55,7 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter
 	var resource *puddle.Resource[connection.Interface]
 	var err error
 
-	for i := 0; i < maxPoolSize; i++ {
+	for i := 0; i < maxConnectionAttempts; i++ {
 		contextLogger.Trace("try getting connection")
 		resource, err = pool.Acquire(ctx)
 		if err != nil {
@@ -60,7 +64,10 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter
 
 		err = resource.Value().Ping(ctx)
 		if err != nil {
-			contextLogger.Debug("Detected plugin connection error, closing the connection and trying again")
+			contextLogger.Info(
+				"Detected stale/broken plugin connection, closing and trying again",
+				"pluginName", name,
+				"err", err)
 			resource.Destroy()
 		} else {
 			break
@@ -71,9 +78,17 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter
 		return nil, fmt.Errorf("while getting plugin connection: %w", err)
 	}
 
+	contextLogger.Trace(
+		"Acquired logical plugin connection",
+		"name", name,
+	)
 	return &releasingConnection{
 		Interface: resource.Value(),
 		closer: func() error {
+			contextLogger.Trace(
+				"Released logical plugin connection",
+				"name", name,
+			)
 			// When the client has done its job with a plugin connection, it
 			// will be returned to the pool
 			resource.Release()
diff --git a/internal/cnpi/plugin/repository/setup.go b/internal/cnpi/plugin/repository/setup.go
index e43b5f1091..ca06824075 100644
--- a/internal/cnpi/plugin/repository/setup.go
+++ b/internal/cnpi/plugin/repository/setup.go
@@ -60,6 +60,8 @@ type data struct {
 	pluginConnectionPool map[string]*puddle.Pool[connection.Interface]
 }
 
+// maxPoolSize is the maximum number of connections in a plugin's connection
+// pool
 const maxPoolSize = 5
 
 func (r *data) setPluginProtocol(name string, protocol connection.Protocol) error {
@@ -92,6 +94,8 @@ func (r *data) setPluginProtocol(name string, protocol connection.Protocol) erro
 			WithValues("pluginName", name)
 		ctx = log.IntoContext(ctx, constructorLogger)
 
+		constructorLogger.Trace("Acquired physical plugin connection")
+
 		if handler, err = protocol.Dial(ctx); err != nil {
 			constructorLogger.Error(err, "Got error while connecting to plugin")
 			return nil, err
@@ -101,6 +105,12 @@ func (r *data) setPluginProtocol(name string, protocol connection.Protocol) erro
 	}
 
 	destructor := func(res connection.Interface) {
+		logger := log.
+			FromContext(context.Background()).
+			WithName("setPluginProtocol").
+			WithValues("pluginName", name)
+		logger.Trace("Released physical plugin connection")
+
 		err := res.Close()
 		if err != nil {
 			destructorLogger := log.FromContext(context.Background()).
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index fb2705f093..6194505985 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -177,7 +177,11 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct // Load the plugins required to bootstrap and reconcile this cluster enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames() enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...) - pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, enabledPluginNames...) + + pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second) + defer cancelPluginLoading() + + pluginClient, err := cnpgiClient.WithPlugins(pluginLoadingContext, r.Plugins, enabledPluginNames...) if err != nil { var errUnknownPlugin *repository.ErrUnknownPlugin if errors.As(err, &errUnknownPlugin) { From da2d0341a4a47a4e7e40950acde3ff501943b79d Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 11 Dec 2024 14:32:37 +0100 Subject: [PATCH 231/836] chore(refactor): isolate plugin loading function (#6312) Signed-off-by: Leonardo Cecchi --- internal/cnpi/plugin/client/client.go | 26 +++++++++---------- internal/cnpi/plugin/repository/connection.go | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/internal/cnpi/plugin/client/client.go b/internal/cnpi/plugin/client/client.go index e13515668c..100a0e027b 100644 --- a/internal/cnpi/plugin/client/client.go +++ b/internal/cnpi/plugin/client/client.go @@ -43,18 +43,6 @@ func (data *data) getPlugin(pluginName string) (connection.Interface, error) { return nil, ErrPluginNotLoaded } -func (data *data) load(ctx context.Context, names ...string) error { - for _, name := range names { - pluginData, err := data.repository.GetConnection(ctx, name) - if err != nil { - return err - } - - data.plugins = append(data.plugins, pluginData) - } - return nil -} - func (data *data) MetadataList() []connection.Metadata { result := make([]connection.Metadata, len(data.plugins)) for i := range data.plugins { @@ -85,13 +73,25 @@ func WithPlugins(ctx context.Context, repository repository.Interface, names ... repository: repository, } + load := func(names ...string) error { + for _, name := range names { + pluginData, err := result.repository.GetConnection(ctx, name) + if err != nil { + return err + } + + result.plugins = append(result.plugins, pluginData) + } + return nil + } + // The following ensures that each plugin is loaded just one // time, even when the same plugin has been requested multiple // times. 
loadingPlugins := stringset.From(names) uniqueSortedPluginName := loadingPlugins.ToSortedList() - if err := result.load(ctx, uniqueSortedPluginName...); err != nil { + if err := load(uniqueSortedPluginName...); err != nil { result.Close(ctx) return nil, err } diff --git a/internal/cnpi/plugin/repository/connection.go b/internal/cnpi/plugin/repository/connection.go index fb8e478c70..a71586f530 100644 --- a/internal/cnpi/plugin/repository/connection.go +++ b/internal/cnpi/plugin/repository/connection.go @@ -64,7 +64,7 @@ func (r *data) GetConnection(ctx context.Context, name string) (connection.Inter err = resource.Value().Ping(ctx) if err != nil { - contextLogger.Info( + contextLogger.Warning( "Detected stale/broken plugin connection, closing and trying again", "pluginName", name, "err", err) From e6cdceaeff438bf1892d6a504d268526bb0befda Mon Sep 17 00:00:00 2001 From: Jonathan Battiato Date: Thu, 12 Dec 2024 10:41:44 +0100 Subject: [PATCH 232/836] chore(olm): improve scorecard test implementing suggestions (#6106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds the missing status field in the `olm-samples` as reported in the "Suggestions" section from olm-scorecard tests. Closes #5710 Signed-off-by: Jonathan Battiato Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Niccolò Fei Co-authored-by: Jonathan Gonzalez V. Co-authored-by: Niccolò Fei --- .../cloudnative-pg.clusterserviceversion.yaml | 21 +++++++++++++++++++ config/olm-samples/postgresql_v1_backup.yaml | 2 ++ config/olm-samples/postgresql_v1_cluster.yaml | 2 ++ .../postgresql_v1_clusterimagecatalog.yaml | 1 + .../olm-samples/postgresql_v1_database.yaml | 2 ++ .../postgresql_v1_imagecatalog.yaml | 1 + config/olm-samples/postgresql_v1_pooler.yaml | 2 ++ .../postgresql_v1_publication.yaml | 2 ++ .../postgresql_v1_scheduledbackup.yaml | 2 ++ .../postgresql_v1_subscription.yaml | 2 ++ 10 files changed, 37 insertions(+) diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index fc286e39e7..b78e7a927d 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -894,6 +894,13 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:text' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + statusDescriptors: + - path: applied + displayName: Applied + description: Applied is true if the database was reconciled correctly + - path: message + displayName: Message + description: Message is the reconciliation output message - kind: Publication name: publications.postgresql.cnpg.io displayName: Postgres Publication @@ -919,6 +926,13 @@ spec: - path: publicationReclaimPolicy displayName: Publication reclaim policy description: Specifies the action to take for the publication inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the database or retain it for future management. 
+ statusDescriptors: + - path: applied + displayName: Applied + description: Applied is true if the publication was reconciled correctly + - path: message + displayName: Message + description: Message is the reconciliation output message - kind: Subscription name: subscriptions.postgresql.cnpg.io displayName: Postgres Subscription @@ -950,3 +964,10 @@ spec: - path: subscriptionReclaimPolicy displayName: Subscription reclaim policy description: Specifies the action to take for the subscription inside PostgreSQL when the associated object in Kubernetes is deleted. Options are to either delete the database or retain it for future management. + statusDescriptors: + - path: applied + displayName: Applied + description: Applied is true if the subscription was reconciled correctly + - path: message + displayName: Message + description: Message is the reconciliation output message diff --git a/config/olm-samples/postgresql_v1_backup.yaml b/config/olm-samples/postgresql_v1_backup.yaml index 330ede8589..40147cec70 100644 --- a/config/olm-samples/postgresql_v1_backup.yaml +++ b/config/olm-samples/postgresql_v1_backup.yaml @@ -5,3 +5,5 @@ metadata: spec: cluster: name: cluster-sample +status: + serverName: diff --git a/config/olm-samples/postgresql_v1_cluster.yaml b/config/olm-samples/postgresql_v1_cluster.yaml index 0a8204977b..40f324f07b 100644 --- a/config/olm-samples/postgresql_v1_cluster.yaml +++ b/config/olm-samples/postgresql_v1_cluster.yaml @@ -19,3 +19,5 @@ spec: walStorage: size: 1Gi logLevel: info +status: + instances: 3 diff --git a/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml b/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml index 20e725876d..3ad7041ea2 100644 --- a/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml +++ b/config/olm-samples/postgresql_v1_clusterimagecatalog.yaml @@ -1,3 +1,4 @@ +apiVersion: postgresql.cnpg.io/v1 kind: ClusterImageCatalog metadata: name: postgresql diff --git a/config/olm-samples/postgresql_v1_database.yaml b/config/olm-samples/postgresql_v1_database.yaml index b4d3d56b4d..748cb1ee7a 100644 --- a/config/olm-samples/postgresql_v1_database.yaml +++ b/config/olm-samples/postgresql_v1_database.yaml @@ -7,3 +7,5 @@ spec: owner: app cluster: name: cluster-sample +status: + applied: false diff --git a/config/olm-samples/postgresql_v1_imagecatalog.yaml b/config/olm-samples/postgresql_v1_imagecatalog.yaml index faf6d60a42..f141f90691 100644 --- a/config/olm-samples/postgresql_v1_imagecatalog.yaml +++ b/config/olm-samples/postgresql_v1_imagecatalog.yaml @@ -1,3 +1,4 @@ +apiVersion: postgresql.cnpg.io/v1 kind: ImageCatalog metadata: name: postgresql diff --git a/config/olm-samples/postgresql_v1_pooler.yaml b/config/olm-samples/postgresql_v1_pooler.yaml index 0400ed54c2..1ba730bb17 100644 --- a/config/olm-samples/postgresql_v1_pooler.yaml +++ b/config/olm-samples/postgresql_v1_pooler.yaml @@ -9,3 +9,5 @@ spec: type: rw pgbouncer: poolMode: session +status: + instances: 1 diff --git a/config/olm-samples/postgresql_v1_publication.yaml b/config/olm-samples/postgresql_v1_publication.yaml index 598c02a2bb..89a54cbac6 100644 --- a/config/olm-samples/postgresql_v1_publication.yaml +++ b/config/olm-samples/postgresql_v1_publication.yaml @@ -9,3 +9,5 @@ spec: name: cluster-sample target: allTables: true +status: + applied: false diff --git a/config/olm-samples/postgresql_v1_scheduledbackup.yaml b/config/olm-samples/postgresql_v1_scheduledbackup.yaml index bd2350fddc..6e61b15d9f 100644 --- a/config/olm-samples/postgresql_v1_scheduledbackup.yaml +++ 
b/config/olm-samples/postgresql_v1_scheduledbackup.yaml @@ -6,3 +6,5 @@ spec: schedule: "0 0 0 * * *" cluster: name: cluster-sample +status: + lastCheckTime: diff --git a/config/olm-samples/postgresql_v1_subscription.yaml b/config/olm-samples/postgresql_v1_subscription.yaml index ecc016619b..6047977c3e 100644 --- a/config/olm-samples/postgresql_v1_subscription.yaml +++ b/config/olm-samples/postgresql_v1_subscription.yaml @@ -9,3 +9,5 @@ spec: cluster: name: cluster-sample-dest externalClusterName: cluster-sample +status: + applied: false From 8494165d8bd5ffce0e4d8cd4d4343b90d59621a2 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Thu, 12 Dec 2024 15:04:57 +0100 Subject: [PATCH 233/836] tests: add unit tests to publication and subscription controllers (#6284) Close #6267 Signed-off-by: Jaime Silvela Signed-off-by: wolfox Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: wolfox Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- .../controller/database_controller.go | 9 +- .../controller/database_controller_test.go | 373 +++++++--------- .../controller/publication_controller.go | 7 +- .../controller/publication_controller_sql.go | 2 +- .../controller/publication_controller_test.go | 368 ++++++++++++++++ .../controller/subscription_controller.go | 12 +- .../controller/subscription_controller_sql.go | 2 +- .../subscription_controller_test.go | 397 ++++++++++++++++++ 8 files changed, 943 insertions(+), 227 deletions(-) create mode 100644 internal/management/controller/publication_controller_test.go create mode 100644 internal/management/controller/subscription_controller_test.go diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index 7fdbf5ba22..ad9ed1b14e 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -18,6 +18,7 @@ package controller import ( "context" + "database/sql" "fmt" "time" @@ -39,6 +40,7 @@ type DatabaseReconciler struct { instance instanceInterface finalizerReconciler *finalizerReconciler[*apiv1.Database] + getSuperUserDB func() (*sql.DB, error) } // databaseReconciliationInterval is the time between the @@ -143,7 +145,7 @@ func (r *DatabaseReconciler) evaluateDropDatabase(ctx context.Context, db *apiv1 if db.Spec.ReclaimPolicy != apiv1.DatabaseReclaimDelete { return nil } - sqlDB, err := r.instance.GetSuperUserDB() + sqlDB, err := r.getSuperUserDB() if err != nil { return fmt.Errorf("while getting DB connection: %w", err) } @@ -159,6 +161,9 @@ func NewDatabaseReconciler( dr := &DatabaseReconciler{ Client: mgr.GetClient(), instance: instance, + getSuperUserDB: func() (*sql.DB, error) { + return instance.GetSuperUserDB() + }, } dr.finalizerReconciler = newFinalizerReconciler( @@ -184,7 +189,7 @@ func (r *DatabaseReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, er } func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.Database) error { - db, err := r.instance.GetSuperUserDB() + db, err := r.getSuperUserDB() if err != nil { return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err) } diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index d41d7eab57..be9c8487cf 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package controller import ( + "context" "database/sql" "fmt" @@ -40,14 +41,9 @@ import ( . "github.com/onsi/gomega" ) -type fakeInstanceData struct { - *postgres.Instance - db *sql.DB -} - -func (f *fakeInstanceData) GetSuperUserDB() (*sql.DB, error) { - return f.db, nil -} +const databaseDetectionQuery = `SELECT count(*) + FROM pg_database + WHERE datname = $1` var _ = Describe("Managed Database status", func() { var ( @@ -95,11 +91,6 @@ var _ = Describe("Managed Database status", func() { WithPodName("cluster-example-1"). WithClusterName("cluster-example") - f := fakeInstanceData{ - Instance: pgInstance, - db: db, - } - fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). WithObjects(cluster, database). WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Database{}). @@ -108,7 +99,10 @@ var _ = Describe("Managed Database status", func() { r = &DatabaseReconciler{ Client: fakeClient, Scheme: schemeBuilder.BuildWithAllKnownScheme(), - instance: &f, + instance: pgInstance, + getSuperUserDB: func() (*sql.DB, error) { + return db, nil + }, } r.finalizerReconciler = newFinalizerReconciler( fakeClient, @@ -122,193 +116,160 @@ var _ = Describe("Managed Database status", func() { }) It("adds finalizer and sets status ready on success", func(ctx SpecContext) { - Expect(database.Finalizers).To(BeEmpty()) - - // Mocking DetectDB expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") - dbMock.ExpectQuery(`SELECT count(*) - FROM pg_database - WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) + dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name). + WillReturnRows(expectedValue) - // Mocking CreateDB expectedCreate := sqlmock.NewResult(0, 1) expectedQuery := fmt.Sprintf( "CREATE DATABASE %s OWNER %s", - pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), + pgx.Identifier{database.Spec.Name}.Sanitize(), + pgx.Identifier{database.Spec.Owner}.Sanitize(), ) dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) - // Reconcile and get the updated object - _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ - Namespace: database.Namespace, - Name: database.Name, - }}) - Expect(err).ToNot(HaveOccurred()) - - var updatedDatabase apiv1.Database - err = fakeClient.Get(ctx, client.ObjectKey{ - Namespace: database.Namespace, - Name: database.Name, - }, &updatedDatabase) + err := reconcileDatabase(ctx, fakeClient, r, database) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeTrue())) - Expect(updatedDatabase.Status.Message).Should(BeEmpty()) - Expect(updatedDatabase.Finalizers).NotTo(BeEmpty()) + Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + Expect(database.GetStatusMessage()).Should(BeEmpty()) + Expect(database.GetFinalizers()).NotTo(BeEmpty()) }) It("database object inherits error after patching", func(ctx SpecContext) { - // Mocking DetectDB + expectedError := fmt.Errorf("no permission") expectedValue := sqlmock.NewRows([]string{""}).AddRow("1") - dbMock.ExpectQuery(`SELECT count(*) - FROM pg_database - WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) + dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name). 
+ WillReturnRows(expectedValue) - // Mocking Alter Database - expectedError := fmt.Errorf("no permission") expectedQuery := fmt.Sprintf("ALTER DATABASE %s OWNER TO %s", pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), ) dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError) - // Reconcile and get the updated object - _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ - Namespace: database.Namespace, - Name: database.Name, - }}) - Expect(err).ToNot(HaveOccurred()) - - var updatedDatabase apiv1.Database - err = fakeClient.Get(ctx, client.ObjectKey{ - Namespace: database.Namespace, - Name: database.Name, - }, &updatedDatabase) + err := reconcileDatabase(ctx, fakeClient, r, database) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeFalse())) - Expect(updatedDatabase.Status.Message).Should(ContainSubstring(expectedError.Error())) + Expect(database.Status.Applied).Should(HaveValue(BeFalse())) + Expect(database.GetStatusMessage()).Should(ContainSubstring(expectedError.Error())) }) - It("on deletion it removes finalizers and drops DB", func(ctx SpecContext) { - Expect(database.Finalizers).To(BeEmpty()) - - // Mocking DetectDB - expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") - dbMock.ExpectQuery(`SELECT count(*) - FROM pg_database - WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) - - // Mocking CreateDB - expectedCreate := sqlmock.NewResult(0, 1) - expectedQuery := fmt.Sprintf( - "CREATE DATABASE %s OWNER %s", - pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), - ) - dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) - - // Reconcile and get the updated object - _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ - Namespace: database.Namespace, - Name: database.Name, - }}) - Expect(err).ToNot(HaveOccurred()) - - var updatedDatabase apiv1.Database - err = fakeClient.Get(ctx, client.ObjectKey{ - Namespace: database.Namespace, - Name: database.Name, - }, &updatedDatabase) - Expect(err).ToNot(HaveOccurred()) - - Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeTrue())) - Expect(updatedDatabase.Status.Message).Should(BeEmpty()) - Expect(updatedDatabase.Finalizers).NotTo(BeEmpty()) - - // the next 3 lines are a hacky bit to make sure the next reconciler - // call doesn't skip on account of Generation == ObservedGeneration. 
- // See fake.Client known issues with `Generation` - // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder - currentDatabase := updatedDatabase.DeepCopy() - updatedDatabase.Status.ObservedGeneration = 2 - Expect(fakeClient.Status().Patch(ctx, &updatedDatabase, client.MergeFrom(currentDatabase))).To(Succeed()) - - // We now look at the behavior when we delete the Database object - Expect(fakeClient.Delete(ctx, database)).To(Succeed()) - - // the Database object is Deleted, but its finalizer prevents removal from - // the API - var fadingDatabase apiv1.Database - err = fakeClient.Get(ctx, client.ObjectKey{ - Namespace: database.Namespace, - Name: database.Name, - }, &fadingDatabase) - Expect(err).ToNot(HaveOccurred()) - Expect(fadingDatabase.DeletionTimestamp).NotTo(BeZero()) - Expect(fadingDatabase.Finalizers).NotTo(BeEmpty()) - - // Mocking Drop Database - expectedDrop := fmt.Sprintf("DROP DATABASE IF EXISTS %s", - pgx.Identifier{database.Spec.Name}.Sanitize(), - ) - dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1)) - - // Reconcile and get the updated object - _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ - Namespace: database.Namespace, - Name: database.Name, - }}) - Expect(err).ToNot(HaveOccurred()) + When("reclaim policy is delete", func() { + It("on deletion it removes finalizers and drops DB", func(ctx SpecContext) { + // Mocking DetectDB + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name). + WillReturnRows(expectedValue) + + // Mocking CreateDB + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + pgx.Identifier{database.Spec.Owner}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + // Mocking Drop Database + expectedDrop := fmt.Sprintf("DROP DATABASE IF EXISTS %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1)) + + err := reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(database.GetFinalizers()).NotTo(BeEmpty()) + Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + Expect(database.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. 
+ // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + database.SetGeneration(database.GetGeneration() + 1) + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + + // We now look at the behavior when we delete the Database object + Expect(fakeClient.Delete(ctx, database)).To(Succeed()) + + err = reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) - var finalDatabase apiv1.Database - err = fakeClient.Get(ctx, client.ObjectKey{ - Namespace: database.Namespace, - Name: database.Name, - }, &finalDatabase) - Expect(err).To(HaveOccurred()) - Expect(apierrors.IsNotFound(err)).To(BeTrue()) + When("reclaim policy is retain", func() { + It("on deletion it removes finalizers and does NOT drop the DB", func(ctx SpecContext) { + database.Spec.ReclaimPolicy = apiv1.DatabaseReclaimRetain + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + + // Mocking DetectDB + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(databaseDetectionQuery).WithArgs(database.Spec.Name). + WillReturnRows(expectedValue) + + // Mocking CreateDB + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE DATABASE %s OWNER %s", + pgx.Identifier{database.Spec.Name}.Sanitize(), + pgx.Identifier{database.Spec.Owner}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + err := reconcileDatabase(ctx, fakeClient, r, database) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(database.GetFinalizers()).NotTo(BeEmpty()) + Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + Expect(database.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. + // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + database.SetGeneration(database.GetGeneration() + 1) + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + + // We now look at the behavior when we delete the Database object + Expect(fakeClient.Delete(ctx, database)).To(Succeed()) + + err = reconcileDatabase(ctx, fakeClient, r, database) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) }) It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) { - // since the fakeClient has the `cluster-example` cluster, let's reference + // Since the fakeClient has the `cluster-example` cluster, let's reference // another cluster `cluster-other` that is not found by the fakeClient pgInstance := postgres.NewInstance(). WithNamespace("default"). WithPodName("cluster-other-1"). 
WithClusterName("cluster-other") - f := fakeInstanceData{ - Instance: pgInstance, - db: db, - } - r = &DatabaseReconciler{ Client: fakeClient, Scheme: schemeBuilder.BuildWithAllKnownScheme(), - instance: &f, + instance: pgInstance, + getSuperUserDB: func() (*sql.DB, error) { + return db, nil + }, } - // patching the Database object to reference the newly created Cluster - originalDatabase := database.DeepCopy() + // Updating the Database object to reference the newly created Cluster database.Spec.ClusterRef.Name = "cluster-other" - Expect(fakeClient.Patch(ctx, database, client.MergeFrom(originalDatabase))).To(Succeed()) - - // Reconcile and get the updated object - _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ - Namespace: database.Namespace, - Name: database.Name, - }}) - Expect(err).ToNot(HaveOccurred()) + Expect(fakeClient.Update(ctx, database)).To(Succeed()) - var updatedDatabase apiv1.Database - err = fakeClient.Get(ctx, client.ObjectKey{ - Namespace: database.Namespace, - Name: database.Name, - }, &updatedDatabase) + err := reconcileDatabase(ctx, fakeClient, r, database) Expect(err).ToNot(HaveOccurred()) - Expect(updatedDatabase.Status.Applied).Should(HaveValue(BeFalse())) - Expect(updatedDatabase.Status.Message).Should(ContainSubstring(`"cluster-other" not found`)) + Expect(database.Status.Applied).Should(HaveValue(BeFalse())) + Expect(database.Status.Message).Should(ContainSubstring( + fmt.Sprintf("%q not found", database.Spec.ClusterRef.Name))) }) It("skips reconciliation if database object isn't found (deleted database)", func(ctx SpecContext) { @@ -334,13 +295,16 @@ var _ = Describe("Managed Database status", func() { Name: otherDatabase.Name, }}) - // Expect the reconciler to exit silently since the object doesn't exist + // Expect the reconciler to exit silently, since the object doesn't exist Expect(err).ToNot(HaveOccurred()) Expect(result).Should(BeZero()) // nothing to do, since the DB is being deleted }) It("drops database with ensure absent option", func(ctx SpecContext) { - // Mocking dropDatabase + // Update the obj to set EnsureAbsent + database.Spec.Ensure = apiv1.EnsureAbsent + Expect(fakeClient.Update(ctx, database)).To(Succeed()) + expectedValue := sqlmock.NewResult(0, 1) expectedQuery := fmt.Sprintf( "DROP DATABASE IF EXISTS %s", @@ -348,21 +312,7 @@ var _ = Describe("Managed Database status", func() { ) dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) - // Update the obj to set EnsureAbsent - database.Spec.Ensure = apiv1.EnsureAbsent - Expect(fakeClient.Update(ctx, database)).To(Succeed()) - - // Reconcile and get the updated object - _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ - Namespace: database.Namespace, - Name: database.Name, - }}) - Expect(err).ToNot(HaveOccurred()) - - err = fakeClient.Get(ctx, client.ObjectKey{ - Namespace: database.Namespace, - Name: database.Name, - }, database) + err := reconcileDatabase(ctx, fakeClient, r, database) Expect(err).ToNot(HaveOccurred()) Expect(database.Status.Applied).To(HaveValue(BeTrue())) @@ -371,26 +321,11 @@ var _ = Describe("Managed Database status", func() { }) It("marks as failed if the target Database is already being managed", func(ctx SpecContext) { - // The Database obj currently managing "test-database" - currentManager := &apiv1.Database{ - ObjectMeta: metav1.ObjectMeta{ - Name: "current-manager", - Namespace: "default", - }, - Spec: apiv1.DatabaseSpec{ - ClusterRef: corev1.LocalObjectReference{ - Name: cluster.Name, - }, - 
Name: "test-database",
-				Owner:      "app",
-			},
-			Status: apiv1.DatabaseStatus{
-				Applied:            ptr.To(true),
-				ObservedGeneration: 1,
-			},
-		}
+		// Let's force the database to have a past reconciliation
+		database.Status.ObservedGeneration = 2
+		Expect(fakeClient.Status().Update(ctx, database)).To(Succeed())
 
-		// A new Database Object targeting the same "test-database"
+		// A new Database Object targeting the same "db-one"
 		dbDuplicate := &apiv1.Database{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "db-duplicate",
@@ -401,29 +336,19 @@
 				ClusterRef: corev1.LocalObjectReference{
 					Name: cluster.Name,
 				},
-				Name:  "test-database",
+				Name:  "db-one",
 				Owner: "app",
 			},
 		}
 
-		Expect(fakeClient.Create(ctx, currentManager)).To(Succeed())
 		Expect(fakeClient.Create(ctx, dbDuplicate)).To(Succeed())
 
-		// Reconcile and get the updated object
-		_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
-			Namespace: dbDuplicate.Namespace,
-			Name:      dbDuplicate.Name,
-		}})
-		Expect(err).ToNot(HaveOccurred())
-
-		err = fakeClient.Get(ctx, client.ObjectKey{
-			Namespace: dbDuplicate.Namespace,
-			Name:      dbDuplicate.Name,
-		}, dbDuplicate)
+		err := reconcileDatabase(ctx, fakeClient, r, dbDuplicate)
 		Expect(err).ToNot(HaveOccurred())
 
 		expectedError := fmt.Sprintf("%q is already managed by object %q",
-			dbDuplicate.Spec.Name, currentManager.Name)
+			dbDuplicate.Spec.Name, database.Name)
 		Expect(dbDuplicate.Status.Applied).To(HaveValue(BeFalse()))
 		Expect(dbDuplicate.Status.Message).To(ContainSubstring(expectedError))
 		Expect(dbDuplicate.Status.ObservedGeneration).To(BeZero())
@@ -436,20 +361,28 @@
 		}
 		Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed())
 
-		_, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
-			Namespace: database.Namespace,
-			Name:      database.Spec.Name,
-		}})
+		err := reconcileDatabase(ctx, fakeClient, r, database)
 		Expect(err).ToNot(HaveOccurred())
 
-		var updatedDatabase apiv1.Database
-		err = fakeClient.Get(ctx, client.ObjectKey{
-			Namespace: database.Namespace,
-			Name:      database.Name,
-		}, &updatedDatabase)
-		Expect(err).ToNot(HaveOccurred())
-
-		Expect(updatedDatabase.Status.Applied).Should(BeNil())
-		Expect(updatedDatabase.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary"))
+		Expect(database.Status.Applied).Should(BeNil())
+		Expect(database.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary"))
 	})
 })
+
+func reconcileDatabase(
+	ctx context.Context,
+	fakeClient client.Client,
+	r *DatabaseReconciler,
+	database *apiv1.Database,
+) error {
+	GinkgoT().Helper()
+	_, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{
+		Namespace: database.GetNamespace(),
+		Name:      database.GetName(),
+	}})
+	Expect(err).ToNot(HaveOccurred())
+	return fakeClient.Get(ctx, client.ObjectKey{
+		Namespace: database.GetNamespace(),
+		Name:      database.GetName(),
+	}, database)
+}
diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go
index d268367f1e..06fb6dad6a 100644
--- a/internal/management/controller/publication_controller.go
+++ b/internal/management/controller/publication_controller.go
@@ -18,6 +18,7 @@ package controller
 
 import (
 	"context"
+	"database/sql"
 	"fmt"
 	"time"
 
@@ -39,6 +40,7 @@ type PublicationReconciler struct {
 	instance *postgres.Instance
finalizerReconciler *finalizerReconciler[*apiv1.Publication] + getDB func(name string) (*sql.DB, error) } // publicationReconciliationInterval is the time between the @@ -153,7 +155,7 @@ func (r *PublicationReconciler) evaluateDropPublication(ctx context.Context, pub if pub.Spec.ReclaimPolicy != apiv1.PublicationReclaimDelete { return nil } - db, err := r.instance.ConnectionPool().Connection(pub.Spec.DBName) + db, err := r.getDB(pub.Spec.DBName) if err != nil { return fmt.Errorf("while getting DB connection: %w", err) } @@ -169,6 +171,9 @@ func NewPublicationReconciler( pr := &PublicationReconciler{ Client: mgr.GetClient(), instance: instance, + getDB: func(name string) (*sql.DB, error) { + return instance.ConnectionPool().Connection(name) + }, } pr.finalizerReconciler = newFinalizerReconciler( diff --git a/internal/management/controller/publication_controller_sql.go b/internal/management/controller/publication_controller_sql.go index e179e71bf2..0938111885 100644 --- a/internal/management/controller/publication_controller_sql.go +++ b/internal/management/controller/publication_controller_sql.go @@ -28,7 +28,7 @@ import ( ) func (r *PublicationReconciler) alignPublication(ctx context.Context, obj *apiv1.Publication) error { - db, err := r.instance.ConnectionPool().Connection(obj.Spec.DBName) + db, err := r.getDB(obj.Spec.DBName) if err != nil { return fmt.Errorf("while getting DB connection: %w", err) } diff --git a/internal/management/controller/publication_controller_test.go b/internal/management/controller/publication_controller_test.go new file mode 100644 index 0000000000..ea77ba6002 --- /dev/null +++ b/internal/management/controller/publication_controller_test.go @@ -0,0 +1,368 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/jackc/pgx/v5" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +const publicationDetectionQuery = `SELECT count(*) + FROM pg_publication + WHERE pubname = $1` + +var _ = Describe("Managed publication controller tests", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + publication *apiv1.Publication + cluster *apiv1.Cluster + r *PublicationReconciler + fakeClient client.Client + err error + ) + + BeforeEach(func() { + cluster = &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Status: apiv1.ClusterStatus{ + CurrentPrimary: "cluster-example-1", + TargetPrimary: "cluster-example-1", + }, + } + publication = &apiv1.Publication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pub-one", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.PublicationSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + ReclaimPolicy: apiv1.PublicationReclaimDelete, + Name: "pub-all", + DBName: "app", + Target: apiv1.PublicationTarget{ + AllTables: true, + Objects: []apiv1.PublicationTargetObject{ + {TablesInSchema: "public"}, + }, + }, + }, + } + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-example-1"). + WithClusterName("cluster-example") + + fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithObjects(cluster, publication). + WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Publication{}). + Build() + + r = &PublicationReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: pgInstance, + getDB: func(_ string) (*sql.DB, error) { + return db, nil + }, + } + r.finalizerReconciler = newFinalizerReconciler( + fakeClient, + utils.PublicationFinalizerName, + r.evaluateDropPublication, + ) + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + It("adds finalizer and sets status ready on success", func(ctx SpecContext) { + noHits := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name). + WillReturnRows(noHits) + + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE PUBLICATION %s FOR ALL TABLES", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + Expect(publication.Status.Applied).Should(HaveValue(BeTrue())) + Expect(publication.GetStatusMessage()).Should(BeEmpty()) + Expect(publication.GetFinalizers()).NotTo(BeEmpty()) + }) + + It("publication object inherits error after patching", func(ctx SpecContext) { + expectedError := fmt.Errorf("no permission") + oneHit := sqlmock.NewRows([]string{""}).AddRow("1") + dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name). 
+ WillReturnRows(oneHit) + + expectedQuery := fmt.Sprintf("ALTER PUBLICATION %s SET TABLES IN SCHEMA \"public\"", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + Expect(publication.Status.Applied).Should(HaveValue(BeFalse())) + Expect(publication.Status.Message).Should(ContainSubstring(expectedError.Error())) + }) + + When("reclaim policy is delete", func() { + It("on deletion it removes finalizers and drops the Publication", func(ctx SpecContext) { + // Mocking Detect publication + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name). + WillReturnRows(expectedValue) + + // Mocking Create publication + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE PUBLICATION %s FOR ALL TABLES", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + // Mocking Drop Publication + expectedDrop := fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1)) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(publication.GetFinalizers()).NotTo(BeEmpty()) + Expect(publication.Status.Applied).Should(HaveValue(BeTrue())) + Expect(publication.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. + // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + publication.SetGeneration(publication.GetGeneration() + 1) + Expect(fakeClient.Update(ctx, publication)).To(Succeed()) + + // We now look at the behavior when we delete the Publication object + Expect(fakeClient.Delete(ctx, publication)).To(Succeed()) + + err = reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + When("reclaim policy is retain", func() { + It("on deletion it removes finalizers and does NOT drop the Publication", func(ctx SpecContext) { + publication.Spec.ReclaimPolicy = apiv1.PublicationReclaimRetain + Expect(fakeClient.Update(ctx, publication)).To(Succeed()) + + // Mocking Detect publication + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(publicationDetectionQuery).WithArgs(publication.Spec.Name).
+ WillReturnRows(expectedValue) + + // Mocking Create publication + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE PUBLICATION %s FOR ALL TABLES", + pgx.Identifier{publication.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(publication.GetFinalizers()).NotTo(BeEmpty()) + Expect(publication.Status.Applied).Should(HaveValue(BeTrue())) + Expect(publication.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. + // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + publication.SetGeneration(publication.GetGeneration() + 1) + Expect(fakeClient.Update(ctx, publication)).To(Succeed()) + + // We now look at the behavior when we delete the Publication object + Expect(fakeClient.Delete(ctx, publication)).To(Succeed()) + + err = reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) { + // Since the fakeClient has the `cluster-example` cluster, let's reference + // another cluster `cluster-other` that is not found by the fakeClient + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-other-1"). + WithClusterName("cluster-other") + + r = &PublicationReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: pgInstance, + getDB: func(_ string) (*sql.DB, error) { + return db, nil + }, + } + + // Updating the publication object to reference the missing Cluster + publication.Spec.ClusterRef.Name = "cluster-other" + Expect(fakeClient.Update(ctx, publication)).To(Succeed()) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + Expect(publication.Status.Applied).Should(HaveValue(BeFalse())) + Expect(publication.GetStatusMessage()).Should(ContainSubstring( + fmt.Sprintf("%q not found", publication.Spec.ClusterRef.Name))) + }) + + It("skips reconciliation if publication object isn't found (deleted publication)", func(ctx SpecContext) { + // Initialize a new Publication but without creating it in the K8S Cluster + otherPublication := &apiv1.Publication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pub-other", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.PublicationSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "pub-all", + }, + } + + // Reconcile the publication that hasn't been created in the K8S Cluster + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: otherPublication.Namespace, + Name: otherPublication.Name, + }}) + + // Expect the reconciler to exit silently, since the object doesn't exist + Expect(err).ToNot(HaveOccurred()) + Expect(result).Should(BeZero()) + }) + + It("marks as failed if the target publication is already being managed", func(ctx SpecContext) { + // Let's force the publication to have a past reconciliation + publication.Status.ObservedGeneration = 2 + Expect(fakeClient.Status().Update(ctx,
publication)).To(Succeed()) + + // A new Publication Object targeting the same "pub-all" + pubDuplicate := &apiv1.Publication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pub-duplicate", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.PublicationSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "pub-all", + }, + } + + Expect(fakeClient.Create(ctx, pubDuplicate)).To(Succeed()) + + err := reconcilePublication(ctx, fakeClient, r, pubDuplicate) + Expect(err).ToNot(HaveOccurred()) + + expectedError := fmt.Sprintf("%q is already managed by object %q", + pubDuplicate.Spec.Name, publication.Name) + Expect(pubDuplicate.Status.Applied).To(HaveValue(BeFalse())) + Expect(pubDuplicate.Status.Message).To(ContainSubstring(expectedError)) + Expect(pubDuplicate.Status.ObservedGeneration).To(BeZero()) + }) + + It("properly signals a publication is on a replica cluster", func(ctx SpecContext) { + initialCluster := cluster.DeepCopy() + cluster.Spec.ReplicaCluster = &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + } + Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed()) + + err := reconcilePublication(ctx, fakeClient, r, publication) + Expect(err).ToNot(HaveOccurred()) + + Expect(publication.Status.Applied).Should(BeNil()) + Expect(publication.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary")) + }) +}) + +func reconcilePublication( + ctx context.Context, + fakeClient client.Client, + r *PublicationReconciler, + publication *apiv1.Publication, +) error { + GinkgoT().Helper() + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: publication.GetNamespace(), + Name: publication.GetName(), + }}) + Expect(err).ToNot(HaveOccurred()) + return fakeClient.Get(ctx, client.ObjectKey{ + Namespace: publication.GetNamespace(), + Name: publication.GetName(), + }, publication) +} diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go index 5fae540722..4f8d5c7583 100644 --- a/internal/management/controller/subscription_controller.go +++ b/internal/management/controller/subscription_controller.go @@ -18,6 +18,7 @@ package controller import ( "context" + "database/sql" "fmt" "time" @@ -40,6 +41,7 @@ type SubscriptionReconciler struct { instance *postgres.Instance finalizerReconciler *finalizerReconciler[*apiv1.Subscription] + getDB func(name string) (*sql.DB, error) } // subscriptionReconciliationInterval is the time between the @@ -167,7 +169,7 @@ func (r *SubscriptionReconciler) evaluateDropSubscription(ctx context.Context, s return nil } - db, err := r.instance.ConnectionPool().Connection(sub.Spec.DBName) + db, err := r.getDB(sub.Spec.DBName) if err != nil { return fmt.Errorf("while getting DB connection: %w", err) } @@ -179,7 +181,13 @@ func NewSubscriptionReconciler( mgr manager.Manager, instance *postgres.Instance, ) *SubscriptionReconciler { - sr := &SubscriptionReconciler{Client: mgr.GetClient(), instance: instance} + sr := &SubscriptionReconciler{ + Client: mgr.GetClient(), + instance: instance, + getDB: func(name string) (*sql.DB, error) { + return instance.ConnectionPool().Connection(name) + }, + } sr.finalizerReconciler = newFinalizerReconciler( mgr.GetClient(), utils.SubscriptionFinalizerName, diff --git a/internal/management/controller/subscription_controller_sql.go
b/internal/management/controller/subscription_controller_sql.go index 47f9f945df..fcb61bc3ab 100644 --- a/internal/management/controller/subscription_controller_sql.go +++ b/internal/management/controller/subscription_controller_sql.go @@ -32,7 +32,7 @@ func (r *SubscriptionReconciler) alignSubscription( obj *apiv1.Subscription, connString string, ) error { - db, err := r.instance.ConnectionPool().Connection(obj.Spec.DBName) + db, err := r.getDB(obj.Spec.DBName) if err != nil { return fmt.Errorf("while getting DB connection: %w", err) } diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go new file mode 100644 index 0000000000..f6afdc0c4e --- /dev/null +++ b/internal/management/controller/subscription_controller_test.go @@ -0,0 +1,397 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/jackc/pgx/v5" + "github.com/lib/pq" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +const subscriptionDetectionQuery = `SELECT count(*) + FROM pg_subscription + WHERE subname = $1` + +var _ = Describe("Managed subscription controller tests", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + subscription *apiv1.Subscription + cluster *apiv1.Cluster + r *SubscriptionReconciler + fakeClient client.Client + connString string + err error + ) + + BeforeEach(func() { + cluster = &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Status: apiv1.ClusterStatus{ + CurrentPrimary: "cluster-example-1", + TargetPrimary: "cluster-example-1", + }, + Spec: apiv1.ClusterSpec{ + ExternalClusters: apiv1.ExternalClusterList{ + apiv1.ExternalCluster{ + Name: "cluster-other", + ConnectionParameters: map[string]string{ + "host": "localhost", + }, + }, + }, + }, + } + subscription = &apiv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sub-one", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.SubscriptionSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + ReclaimPolicy: apiv1.SubscriptionReclaimDelete, + Name: "sub-one", + DBName: "app", + PublicationName: "pub-all", + PublicationDBName: "app", + ExternalClusterName: "cluster-other", + }, + } + connString, err = getSubscriptionConnectionString(cluster, "cluster-other", "app") + Expect(err).ToNot(HaveOccurred()) + + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-example-1"). + WithClusterName("cluster-example") + + fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithObjects(cluster, subscription). + WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Subscription{}). + Build() + + r = &SubscriptionReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: pgInstance, + getDB: func(_ string) (*sql.DB, error) { + return db, nil + }, + } + r.finalizerReconciler = newFinalizerReconciler( + fakeClient, + utils.SubscriptionFinalizerName, + r.evaluateDropSubscription, + ) + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + It("adds finalizer and sets status ready on success", func(ctx SpecContext) { + noHits := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name). 
+ WillReturnRows(noHits) + + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: subscription.GetNamespace(), + Name: subscription.GetName(), + }}) + Expect(err).ToNot(HaveOccurred()) + err = fakeClient.Get(ctx, client.ObjectKey{ + Namespace: subscription.GetNamespace(), + Name: subscription.GetName(), + }, subscription) + Expect(err).ToNot(HaveOccurred()) + + Expect(subscription.Status.Applied).Should(HaveValue(BeTrue())) + Expect(subscription.GetStatusMessage()).Should(BeEmpty()) + Expect(subscription.GetFinalizers()).NotTo(BeEmpty()) + }) + + It("subscription object inherits error after patching", func(ctx SpecContext) { + expectedError := fmt.Errorf("no permission") + oneHit := sqlmock.NewRows([]string{""}).AddRow("1") + dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name). + WillReturnRows(oneHit) + + expectedQuery := fmt.Sprintf("ALTER SUBSCRIPTION %s SET PUBLICATION %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnError(expectedError) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).ToNot(HaveOccurred()) + + Expect(subscription.Status.Applied).Should(HaveValue(BeFalse())) + Expect(subscription.Status.Message).Should(ContainSubstring(expectedError.Error())) + }) + + When("reclaim policy is delete", func() { + It("on deletion it removes finalizers and drops the subscription", func(ctx SpecContext) { + // Mocking detection of subscriptions + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name). + WillReturnRows(expectedValue) + + // Mocking create subscription + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + // Mocking Drop subscription + expectedDrop := fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + ) + dbMock.ExpectExec(expectedDrop).WillReturnResult(sqlmock.NewResult(0, 1)) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(subscription.GetFinalizers()).NotTo(BeEmpty()) + Expect(subscription.Status.Applied).Should(HaveValue(BeTrue())) + Expect(subscription.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. 
+ // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + subscription.SetGeneration(subscription.GetGeneration() + 1) + Expect(fakeClient.Update(ctx, subscription)).To(Succeed()) + + // We now look at the behavior when we delete the Subscription object + Expect(fakeClient.Delete(ctx, subscription)).To(Succeed()) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + When("reclaim policy is retain", func() { + It("on deletion it removes finalizers and does NOT drop the subscription", func(ctx SpecContext) { + subscription.Spec.ReclaimPolicy = apiv1.SubscriptionReclaimRetain + Expect(fakeClient.Update(ctx, subscription)).To(Succeed()) + + // Mocking Detect subscription + expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") + dbMock.ExpectQuery(subscriptionDetectionQuery).WithArgs(subscription.Spec.Name). + WillReturnRows(expectedValue) + + // Mocking Create subscription + expectedCreate := sqlmock.NewResult(0, 1) + expectedQuery := fmt.Sprintf( + "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s", + pgx.Identifier{subscription.Spec.Name}.Sanitize(), + pq.QuoteLiteral(connString), + pgx.Identifier{subscription.Spec.PublicationName}.Sanitize(), + ) + dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedCreate) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).ToNot(HaveOccurred()) + + // Plain successful reconciliation, finalizers have been created + Expect(subscription.GetFinalizers()).NotTo(BeEmpty()) + Expect(subscription.Status.Applied).Should(HaveValue(BeTrue())) + Expect(subscription.Status.Message).Should(BeEmpty()) + + // The next 2 lines are a hacky bit to make sure the next reconciler + // call doesn't skip on account of Generation == ObservedGeneration. + // See fake.Client known issues with `Generation` + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake@v0.19.0#NewClientBuilder + subscription.SetGeneration(subscription.GetGeneration() + 1) + Expect(fakeClient.Update(ctx, subscription)).To(Succeed()) + + // We now look at the behavior when we delete the Subscription object + Expect(fakeClient.Delete(ctx, subscription)).To(Succeed()) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + }) + + It("fails reconciliation if cluster isn't found (deleted cluster)", func(ctx SpecContext) { + // Since the fakeClient has the `cluster-example` cluster, let's reference + // another cluster `cluster-other` that is not found by the fakeClient + pgInstance := postgres.NewInstance(). + WithNamespace("default"). + WithPodName("cluster-other-1").
+ WithClusterName("cluster-other") + + r = &SubscriptionReconciler{ + Client: fakeClient, + Scheme: schemeBuilder.BuildWithAllKnownScheme(), + instance: pgInstance, + getDB: func(_ string) (*sql.DB, error) { + return db, nil + }, + } + + // Updating the subscription object to reference the newly created Cluster + subscription.Spec.ClusterRef.Name = "cluster-other" + Expect(fakeClient.Update(ctx, subscription)).To(Succeed()) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).ToNot(HaveOccurred()) + + Expect(subscription.Status.Applied).Should(HaveValue(BeFalse())) + Expect(subscription.Status.Message).Should(ContainSubstring( + fmt.Sprintf("%q not found", subscription.Spec.ClusterRef.Name))) + }) + + It("skips reconciliation if subscription object isn't found (deleted subscription)", func(ctx SpecContext) { + // Initialize a new subscription but without creating it in the K8S Cluster + otherSubscription := &apiv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sub-other", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.SubscriptionSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "sub-one", + }, + } + + // Reconcile the subscription that hasn't been created in the K8S Cluster + result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: otherSubscription.Namespace, + Name: otherSubscription.Name, + }}) + + // Expect the reconciler to exit silently, since the object doesn't exist + Expect(err).ToNot(HaveOccurred()) + Expect(result).Should(BeZero()) // nothing to do, since the subscription is being deleted + }) + + It("marks as failed if the target subscription is already being managed", func(ctx SpecContext) { + // Let's force the subscription to have a past reconciliation + subscription.Status.ObservedGeneration = 2 + Expect(fakeClient.Status().Update(ctx, subscription)).To(Succeed()) + + // A new subscription Object targeting the same "sub-one" + subDuplicate := &apiv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sub-duplicate", + Namespace: "default", + Generation: 1, + }, + Spec: apiv1.SubscriptionSpec{ + ClusterRef: corev1.LocalObjectReference{ + Name: cluster.Name, + }, + Name: "sub-one", + PublicationName: "pub-all", + PublicationDBName: "app", + ExternalClusterName: "cluster-other", + }, + } + + // Expect(fakeClient.Create(ctx, currentManager)).To(Succeed()) + Expect(fakeClient.Create(ctx, subDuplicate)).To(Succeed()) + + err = reconcileSubscription(ctx, fakeClient, r, subDuplicate) + Expect(err).ToNot(HaveOccurred()) + + expectedError := fmt.Sprintf("%q is already managed by object %q", + subDuplicate.Spec.Name, subscription.Name) + Expect(subDuplicate.Status.Applied).Should(HaveValue(BeFalse())) + Expect(subDuplicate.Status.Message).Should(ContainSubstring(expectedError)) + }) + + It("properly signals a subscription is on a replica cluster", func(ctx SpecContext) { + initialCluster := cluster.DeepCopy() + cluster.Spec.ReplicaCluster = &apiv1.ReplicaClusterConfiguration{ + Enabled: ptr.To(true), + } + Expect(fakeClient.Patch(ctx, cluster, client.MergeFrom(initialCluster))).To(Succeed()) + + err = reconcileSubscription(ctx, fakeClient, r, subscription) + Expect(err).ToNot(HaveOccurred()) + + Expect(subscription.Status.Applied).Should(BeNil()) + Expect(subscription.Status.Message).Should(ContainSubstring("waiting for the cluster to become primary")) + }) +}) + +func reconcileSubscription( + ctx context.Context, + fakeClient client.Client, + r 
*SubscriptionReconciler, + subscription *apiv1.Subscription, +) error { + GinkgoT().Helper() + _, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{ + Namespace: subscription.GetNamespace(), + Name: subscription.GetName(), + }}) + Expect(err).ToNot(HaveOccurred()) + return fakeClient.Get(ctx, client.ObjectKey{ + Namespace: subscription.GetNamespace(), + Name: subscription.GetName(), + }, subscription) +} From 3e82045b6a5dd00445c2abd62c703b938a7b9710 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:42:22 +0100 Subject: [PATCH 234/836] fix(deps): update kubernetes patches (main) (#6236) https://github.com/kubernetes/utils `6fe5fd8` -> `24370be` https://github.com/kubernetes-sigs/controller-runtime `v0.19.2` -> `v0.19.3` --- go.mod | 5 ++--- go.sum | 10 ++++------ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 980261fede..b325745c1b 100644 --- a/go.mod +++ b/go.mod @@ -45,8 +45,8 @@ require ( k8s.io/apimachinery v0.31.3 k8s.io/cli-runtime v0.31.3 k8s.io/client-go v0.31.3 - k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 - sigs.k8s.io/controller-runtime v0.19.2 + k8s.io/utils v0.0.0-20241210054802-24370beab758 + sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/yaml v1.4.0 ) @@ -57,7 +57,6 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/fatih/color v1.17.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect diff --git a/go.sum b/go.sum index 2140149042..721058d0b3 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,6 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= @@ -292,10 +290,10 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= -k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078 h1:jGnCPejIetjiy2gqaJ5V0NLwTpF4wbQ6cZIItJCSHno= -k8s.io/utils v0.0.0-20241104163129-6fe5fd82f078/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8= -sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 
+sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= From 5fd41abeebf614e3217e73318a9ba95ee1e8ccf3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:46:43 +0100 Subject: [PATCH 235/836] fix(deps): update all non-major go dependencies (main) (#6333) https://github.com/goreleaser/goreleaser `v2.4.8` -> `v2.5.0` https://github.com/grpc-ecosystem/go-grpc-middleware `v2.1.0` -> `v2.2.0` https://github.com/kubernetes-csi/external-snapshotter `v8.0.0` -> `v8.2.0` https://github.com/onsi/gomega `v1.36.0` -> `v1.36.1` golang.org/x/term `v0.26.0` -> `v0.27.0` https://github.com/grpc/grpc-go `v1.68.0` -> `v1.69.0` --- Makefile | 2 +- go.mod | 14 +++++++------- go.sum | 40 ++++++++++++++++++++++++++-------------- 3 files changed, 34 insertions(+), 22 deletions(-) diff --git a/Makefile b/Makefile index 6a80924a5a..c4f7a65e3d 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.5 -GORELEASER_VERSION ?= v2.4.8 +GORELEASER_VERSION ?= v2.5.0 SPELLCHECK_VERSION ?= 0.45.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.38.0 diff --git a/go.mod b/go.mod index b325745c1b..4688667712 100644 --- a/go.mod +++ b/go.mod @@ -17,16 +17,16 @@ require ( github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 github.com/jackc/pgx/v5 v5.7.1 github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.22.0 - github.com/onsi/gomega v1.36.0 + github.com/onsi/gomega v1.36.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 @@ -37,8 +37,8 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/term v0.26.0 - google.golang.org/grpc v1.68.0 + golang.org/x/term v0.27.0 + google.golang.org/grpc v1.69.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.3 k8s.io/apiextensions-apiserver v0.31.3 @@ -107,12 +107,12 @@ require ( golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.27.0 // indirect + golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // 
indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 721058d0b3..d7509f0515 100644 --- a/go.sum +++ b/go.sum @@ -45,6 +45,8 @@ github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8b github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= @@ -81,8 +83,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -110,8 +112,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 h1:mjQG0Vakr2h246kEDR85U8y8ZhPgT3bguTCajRa/jaw= -github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 h1:Q3jQ1NkFqv5o+F8dMmHd8SfEmlcwNeo1immFApntEwE= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= @@ -146,8 +148,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= github.com/onsi/ginkgo/v2 v2.22.0/go.mod 
h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y= -github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -196,6 +198,16 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.starlark.net v0.0.0-20240925182052-1207426daebd h1:S+EMisJOHklQxnS3kqsY8jl2y5aF0FDEdcLnOw3q22E= go.starlark.net v0.0.0-20240925182052-1207426daebd/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -234,10 +246,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= @@ -256,10 +268,10 @@ golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= -google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= +google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From d0964d50ba0c473258e6bba9e6ac61252ca75582 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 18:19:25 +0100 Subject: [PATCH 236/836] chore(deps): update agilepathway/pull-request-label-checker docker tag to v1.6.60 (main) (#6339) --- .github/workflows/require-labels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index e0da0f8728..1bbbfb1d23 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Require labels - uses: docker://agilepathway/pull-request-label-checker:v1.6.56 + uses: docker://agilepathway/pull-request-label-checker:v1.6.60 with: any_of: "ok to merge :ok_hand:" none_of: "do not merge" From aed3c1398b6e25d1e6c8135c26e2ddd0066a28d4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 14:16:33 +0100 Subject: [PATCH 237/836] fix(deps): update github.com/cloudnative-pg/barman-cloud digest to 134c7de (main) (#6331) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4688667712..bf663e9fe4 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/avast/retry-go/v4 v4.6.0 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258 + github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc diff --git a/go.sum b/go.sum index d7509f0515..7a990938f9 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258 h1:B/Wncxl/OXrXJUHHtBCyxE//6FdIxznERfzPMsNHWfw= -github.com/cloudnative-pg/barman-cloud v0.0.0-20241205144020-711113b64258/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= +github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrEa9P/HfA6csNOh0DRlUyeUoKuByV57tLnf2rTIqfU= +github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA= github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs= From e4913b240729e3e703bd915643cfc4e5224701e6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 14:55:28 +0100 Subject: [PATCH 238/836] chore(deps): update kubernetes csi (main) (#6343) https://github.com/kubernetes-csi/external-snapshotter `v8.1.0` -> `v8.2.0` https://github.com/rook/rook `v1.15.6` -> `v1.16.0` --- .github/workflows/continuous-delivery.yml | 4 ++-- hack/setup-cluster.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 04c3cd31ef..9d809c8802 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,8 +37,8 @@ env: GOLANG_VERSION: "1.23.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.25.0" - ROOK_VERSION: "v1.15.6" - EXTERNAL_SNAPSHOTTER_VERSION: "v8.1.0" + ROOK_VERSION: "v1.16.0" + EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" BUILD_PUSH_CACHE_FROM: "" diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 698baa2e3f..a0166bca71 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -27,7 +27,7 @@ fi KIND_NODE_DEFAULT_VERSION=v1.31.2 K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 -EXTERNAL_SNAPSHOTTER_VERSION=v8.1.0 +EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 EXTERNAL_PROVISIONER_VERSION=v5.1.0 EXTERNAL_RESIZER_VERSION=v1.12.0 EXTERNAL_ATTACHER_VERSION=v4.7.0 From e22e7e9fdf7ee3e63d425a428b538fe76b8bbe22 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Wed, 18 Dec 2024 15:27:19 +0100 Subject: [PATCH 239/836] fix: key collision in structured logs (#6324) Inside the logs we found `name` used as the key for a couple of different values; we now assign distinct keys such as `jobName` and `podName`. Closes #6321 Signed-off-by: Jaime Silvela --- internal/controller/cluster_create.go | 2 +- pkg/management/postgres/webserver/client/remote/instance.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 280fe3361d..39029dd44f 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1166,7 +1166,7 @@ func (r *ClusterReconciler) createPrimaryInstance( } contextLogger.Info("Creating new Job", - "name", job.Name, + "jobName",
job.Name, "primary", true) utils.SetOperatorVersion(&job.ObjectMeta, versions.Version) diff --git a/pkg/management/postgres/webserver/client/remote/instance.go b/pkg/management/postgres/webserver/client/remote/instance.go index b83111f850..6c158402fe 100644 --- a/pkg/management/postgres/webserver/client/remote/instance.go +++ b/pkg/management/postgres/webserver/client/remote/instance.go @@ -177,7 +177,7 @@ func (r *instanceClientImpl) GetStatusFromInstances( for idx := range status.Items { if status.Items[idx].Error != nil { log.FromContext(ctx).Info("Cannot extract Pod status", - "name", status.Items[idx].Pod.Name, + "podName", status.Items[idx].Pod.Name, "error", status.Items[idx].Error.Error()) } } From 67cc5473f3ca30f47d12860a01d57d29b2c47039 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 18 Dec 2024 17:47:20 +0100 Subject: [PATCH 240/836] fix: use optimistic locking when updating conditions (#6328) Kubernetes does not implement strategic merge for CRDs, and every JSON merge patch will replace the condition set with a new one. It is possible for a proposed patch to start from a Cluster that is not up-to-date, and in that case, the conditions will be reverted to an older status. This patch fixes this race condition by encapsulating this operation in a merge patch that requires optimistic locking and retrying the conditions update when needed. Fixes: #6317 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- api/v1/cluster_conditions.go | 8 +- internal/controller/backup_controller.go | 30 +++++-- pkg/conditions/conditions.go | 49 ----------- pkg/conditions/doc.go | 19 ----- pkg/management/postgres/backup.go | 24 +++--- pkg/management/postgres/backup_test.go | 1 + pkg/management/postgres/webserver/local.go | 15 ++-- .../postgres/webserver/plugin_backup.go | 21 ++--- .../replicaclusterswitch/reconciler.go | 79 ++++++++++-------- pkg/resources/status/conditions.go | 81 +++++++++++++++++++ pkg/resources/status/phase.go | 77 +++++++++--------- pkg/resources/status/update.go | 73 +++++++++++++++++ 12 files changed, 305 insertions(+), 172 deletions(-) delete mode 100644 pkg/conditions/conditions.go delete mode 100644 pkg/conditions/doc.go create mode 100644 pkg/resources/status/conditions.go create mode 100644 pkg/resources/status/update.go
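Note: the entry point this patch adds is PatchConditionsWithOptimisticLock in pkg/resources/status (see the backup controller changes below). The following is a minimal, illustrative sketch of the pattern, not the exact code of that helper: the conditions are applied to a freshly read Cluster, and the status patch carries the resourceVersion, so a concurrent writer causes a conflict that is retried instead of silently reverting the condition set.

package status

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// patchConditionsWithOptimisticLock is an illustrative sketch mirroring,
// but not reproducing, the helper introduced by this patch
func patchConditionsWithOptimisticLock(
	ctx context.Context,
	c client.Client,
	cluster *apiv1.Cluster,
	conditions ...metav1.Condition,
) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Re-read the Cluster so the patch is computed against the
		// latest resourceVersion, not a stale cached copy
		var current apiv1.Cluster
		if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), &current); err != nil {
			return err
		}

		updated := current.DeepCopy()
		changed := false
		for _, condition := range conditions {
			changed = meta.SetStatusCondition(&updated.Status.Conditions, condition) || changed
		}
		if !changed {
			return nil
		}

		// MergeFromWithOptimisticLock embeds the resourceVersion in the
		// JSON merge patch: a concurrent writer makes the API server
		// reply with a conflict, and RetryOnConflict runs the closure
		// again from a fresh read
		return c.Status().Patch(ctx, updated,
			client.MergeFromWithOptions(&current, client.MergeFromWithOptimisticLock{}))
	})
}

diff --git a/api/v1/cluster_conditions.go b/api/v1/cluster_conditions.go index 9d1e83947a..ae9844632d 100644 --- a/api/v1/cluster_conditions.go +++ b/api/v1/cluster_conditions.go @@ -22,7 +22,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" var ( // BackupSucceededCondition is added to a backup // when it was completed correctly - BackupSucceededCondition = &metav1.Condition{ + BackupSucceededCondition = metav1.Condition{ Type: string(ConditionBackup), Status: metav1.ConditionTrue, Reason: string(ConditionReasonLastBackupSucceeded), @@ -31,7 +31,7 @@ var ( // BackupStartingCondition is added to a backup // when it started - BackupStartingCondition = &metav1.Condition{ + BackupStartingCondition = metav1.Condition{ Type: string(ConditionBackup), Status: metav1.ConditionFalse, Reason: string(ConditionBackupStarted), @@ -40,8 +40,8 @@ var ( // BuildClusterBackupFailedCondition builds // ConditionReasonLastBackupFailed condition - BuildClusterBackupFailedCondition = func(err error) *metav1.Condition { - return &metav1.Condition{ + BuildClusterBackupFailedCondition = func(err error) metav1.Condition { + return metav1.Condition{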
Type: string(ConditionBackup), Status: metav1.ConditionFalse, Reason: string(ConditionReasonLastBackupFailed), diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 4506c08737..29c6aea6f9 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -45,11 +45,11 @@ import ( cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/backup/volumesnapshot" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" + resourcestatus "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -417,7 +417,12 @@ func (r *BackupReconciler) reconcileSnapshotBackup( } } - if errCond := conditions.Patch(ctx, r.Client, cluster, apiv1.BackupStartingCondition); errCond != nil { + if errCond := resourcestatus.PatchConditionsWithOptimisticLock( + ctx, + r.Client, + cluster, + apiv1.BackupStartingCondition, + ); errCond != nil { contextLogger.Error(errCond, "Error while updating backup condition (backup starting)") } @@ -440,7 +445,12 @@ func (r *BackupReconciler) reconcileSnapshotBackup( // and un-fence the Pod contextLogger.Error(err, "while executing snapshot backup") // Update backup status in cluster conditions - if errCond := conditions.Patch(ctx, r.Client, cluster, apiv1.BuildClusterBackupFailedCondition(err)); errCond != nil { + if errCond := resourcestatus.PatchConditionsWithOptimisticLock( + ctx, + r.Client, + cluster, + apiv1.BuildClusterBackupFailedCondition(err), + ); errCond != nil { contextLogger.Error(errCond, "Error while updating backup condition (backup snapshot failed)") } @@ -453,7 +463,12 @@ func (r *BackupReconciler) reconcileSnapshotBackup( return res, nil } - if err := conditions.Patch(ctx, r.Client, cluster, apiv1.BackupSucceededCondition); err != nil { + if err := resourcestatus.PatchConditionsWithOptimisticLock( + ctx, + r.Client, + cluster, + apiv1.BackupSucceededCondition, + ); err != nil { contextLogger.Error(err, "Can't update the cluster with the completed snapshot backup data") } @@ -633,7 +648,12 @@ func startInstanceManagerBackup( status.CommandError = stdout // Update backup status in cluster conditions - if errCond := conditions.Patch(ctx, client, cluster, apiv1.BuildClusterBackupFailedCondition(err)); errCond != nil { + if errCond := resourcestatus.PatchConditionsWithOptimisticLock( + ctx, + client, + cluster, + apiv1.BuildClusterBackupFailedCondition(err), + ); errCond != nil { log.FromContext(ctx).Error(errCond, "Error while updating backup condition (backup failed)") } return postgres.PatchBackupStatusAndRetry(ctx, client, backup) diff --git a/pkg/conditions/conditions.go b/pkg/conditions/conditions.go deleted file mode 100644 index 768ac02d49..0000000000 --- a/pkg/conditions/conditions.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conditions - -import ( - "context" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// Patch will patch a particular condition in cluster status. -func Patch( - ctx context.Context, - c client.Client, - cluster *apiv1.Cluster, - condition *metav1.Condition, -) error { - if cluster == nil || condition == nil { - return nil - } - - existingCluster := cluster.DeepCopy() - if changed := meta.SetStatusCondition(&cluster.Status.Conditions, *condition); changed { - // To avoid conflict using patch instead of update - if err := c.Status().Patch(ctx, cluster, client.MergeFrom(existingCluster)); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/conditions/doc.go b/pkg/conditions/doc.go deleted file mode 100644 index acecc6fc10..0000000000 --- a/pkg/conditions/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -// Package conditions contains functions useful to update the conditions -// on the resources managed by the operator -package conditions diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go index 46ff344b29..f79c4d4a9a 100644 --- a/pkg/management/postgres/backup.go +++ b/pkg/management/postgres/backup.go @@ -32,6 +32,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -41,9 +42,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" // this is needed to correctly open the sql connection with the pgx driver _ "github.com/jackc/pgx/v5/stdlib" @@ -187,12 +188,15 @@ func (b *BackupCommand) run(ctx context.Context) { // add backup failed condition to the cluster if failErr := b.retryWithRefreshedCluster(ctx, func() error { - origCluster := b.Cluster.DeepCopy() - - meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(err)) - - b.Cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) - return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) + return status.PatchWithOptimisticLock( + ctx, + b.Client, + b.Cluster, + func(cluster *apiv1.Cluster) { + meta.SetStatusCondition(&cluster.Status.Conditions, apiv1.BuildClusterBackupFailedCondition(err)) + cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) + }, + ) }); failErr != nil { b.Log.Error(failErr, "while setting cluster condition for failed backup") // We do not terminate here because it's more important to properly handle @@ -210,7 +214,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { // Update backup status in cluster conditions on startup if err := b.retryWithRefreshedCluster(ctx, func() error { - return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) + return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) }); err != nil { b.Log.Error(err, "Error changing backup condition (backup started)") // We do not terminate here because we could still have a good backup @@ -256,7 +260,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { // Update backup status in cluster conditions on backup completion if err := b.retryWithRefreshedCluster(ctx, func() error { - return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) + return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) }); err != nil { b.Log.Error(err, "Can't update the cluster with the completed backup data") } @@ -303,7 +307,7 @@ func (b *BackupCommand) backupMaintenance(ctx context.Context) { data.GetLastSuccessfulBackupTime(), ) - if reflect.DeepEqual(origCluster.Status, b.Cluster.Status) { + if equality.Semantic.DeepEqual(origCluster.Status, b.Cluster.Status) { return nil } return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) diff --git 
a/pkg/management/postgres/backup_test.go b/pkg/management/postgres/backup_test.go index 18307bc791..8ff4796c72 100644 --- a/pkg/management/postgres/backup_test.go +++ b/pkg/management/postgres/backup_test.go @@ -125,6 +125,7 @@ var _ = Describe("testing backup command", func() { Client: fake.NewClientBuilder(). WithScheme(scheme.BuildWithAllKnownScheme()). WithObjects(cluster, backup). + WithStatusSubresource(cluster, backup). Build(), Recorder: &record.FakeRecorder{}, Env: os.Environ(), diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go index 0d15f851ca..27b15db9e1 100644 --- a/pkg/management/postgres/webserver/local.go +++ b/pkg/management/postgres/webserver/local.go @@ -33,9 +33,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -245,9 +245,9 @@ type ArchiveStatusRequest struct { Error string `json:"error,omitempty"` } -func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() *metav1.Condition { +func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() metav1.Condition { if asr.Error != "" { - return &metav1.Condition{ + return metav1.Condition{ Type: string(apiv1.ConditionContinuousArchiving), Status: metav1.ConditionFalse, Reason: string(apiv1.ConditionReasonContinuousArchivingFailing), @@ -255,7 +255,7 @@ func (asr *ArchiveStatusRequest) getContinuousArchivingCondition() *metav1.Condi } } - return &metav1.Condition{ + return metav1.Condition{ Type: string(apiv1.ConditionContinuousArchiving), Status: metav1.ConditionTrue, Reason: string(apiv1.ConditionReasonContinuousArchivingSuccess), @@ -283,7 +283,12 @@ func (ws *localWebserverEndpoints) setWALArchiveStatusCondition(w http.ResponseW return } - if errCond := conditions.Patch(ctx, ws.typedClient, cluster, asr.getContinuousArchivingCondition()); errCond != nil { + if errCond := status.PatchConditionsWithOptimisticLock( + ctx, + ws.typedClient, + cluster, + asr.getContinuousArchivingCondition(), + ); errCond != nil { contextLogger.Error(errCond, "Error changing wal archiving condition", "condition", asr.getContinuousArchivingCondition()) http.Error( diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 5d1ad1562b..2e6f58f5b6 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -32,9 +32,9 @@ import ( pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" - "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" ) // PluginBackupCommand represent a backup command that is being executed @@ -102,7 +102,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { // Update backup status in cluster conditions on startup if err := b.retryWithRefreshedCluster(ctx, func() error { 
- return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) + return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupStartingCondition) }); err != nil { contextLogger.Error(err, "Error changing backup condition (backup started)") // We do not terminate here because we could still have a good backup @@ -152,7 +152,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { // Update backup status in cluster conditions on backup completion if err := b.retryWithRefreshedCluster(ctx, func() error { - return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) + return status.PatchConditionsWithOptimisticLock(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition) }); err != nil { contextLogger.Error(err, "Can't update the cluster with the completed backup data") } @@ -176,12 +176,15 @@ func (b *PluginBackupCommand) markBackupAsFailed(ctx context.Context, failure er // add backup failed condition to the cluster if failErr := b.retryWithRefreshedCluster(ctx, func() error { - origCluster := b.Cluster.DeepCopy() - - meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(failure)) - - b.Cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) - return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster)) + return status.PatchWithOptimisticLock( + ctx, + b.Client, + b.Cluster, + func(cluster *apiv1.Cluster) { + meta.SetStatusCondition(&cluster.Status.Conditions, apiv1.BuildClusterBackupFailedCondition(failure)) + cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) + }, + ) }); failErr != nil { contextLogger.Error(failErr, "while setting cluster condition for failed backup") } diff --git a/pkg/reconciler/replicaclusterswitch/reconciler.go b/pkg/reconciler/replicaclusterswitch/reconciler.go index 15342e9adc..fd185a7e49 100644 --- a/pkg/reconciler/replicaclusterswitch/reconciler.go +++ b/pkg/reconciler/replicaclusterswitch/reconciler.go @@ -31,6 +31,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -89,28 +90,33 @@ func startTransition(ctx context.Context, cli client.Client, cluster *apiv1.Clus return nil, fmt.Errorf("while fencing primary cluster to demote it: %w", err) } - origCluster := cluster.DeepCopy() - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: conditionDesignatedPrimaryTransition, - Status: metav1.ConditionFalse, - Reason: "ReplicaClusterAfterCreation", - Message: "Enabled external cluster after a node was generated", - }) - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: conditionFence, - Status: metav1.ConditionTrue, - Reason: "ReplicaClusterAfterCreation", - Message: "Enabled external cluster after a node was generated", - }) - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: ConditionReplicaClusterSwitch, - Status: metav1.ConditionFalse, - Reason: "ReplicaEnabledSetTrue", - Message: "Starting the Replica cluster transition", - }) - - cluster.Status.SwitchReplicaClusterStatus.InProgress = true - if err := cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)); err != nil { + if err := 
status.PatchWithOptimisticLock( + ctx, + cli, + cluster, + func(cluster *apiv1.Cluster) { + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: conditionDesignatedPrimaryTransition, + Status: metav1.ConditionFalse, + Reason: "ReplicaClusterAfterCreation", + Message: "Enabled external cluster after a node was generated", + }) + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: conditionFence, + Status: metav1.ConditionTrue, + Reason: "ReplicaClusterAfterCreation", + Message: "Enabled external cluster after a node was generated", + }) + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: ConditionReplicaClusterSwitch, + Status: metav1.ConditionFalse, + Reason: "ReplicaEnabledSetTrue", + Message: "Starting the Replica cluster transition", + }) + + cluster.Status.SwitchReplicaClusterStatus.InProgress = true + }, + ); err != nil { return nil, err } @@ -132,18 +138,23 @@ func cleanupTransitionMetadata(ctx context.Context, cli client.Client, cluster * return err } } - origCluster := cluster.DeepCopy() - meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionDesignatedPrimaryTransition) - meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionFence) - meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ - Type: ConditionReplicaClusterSwitch, - Status: metav1.ConditionTrue, - Reason: "ReplicaEnabledSetTrue", - Message: "Completed the Replica cluster transition", - }) - cluster.Status.SwitchReplicaClusterStatus.InProgress = false - - return cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)) + + return status.PatchWithOptimisticLock( + ctx, + cli, + cluster, + func(cluster *apiv1.Cluster) { + meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionDesignatedPrimaryTransition) + meta.RemoveStatusCondition(&cluster.Status.Conditions, conditionFence) + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: ConditionReplicaClusterSwitch, + Status: metav1.ConditionTrue, + Reason: "ReplicaEnabledSetTrue", + Message: "Completed the Replica cluster transition", + }) + cluster.Status.SwitchReplicaClusterStatus.InProgress = false + }, + ) } func reconcileDemotionToken( diff --git a/pkg/resources/status/conditions.go b/pkg/resources/status/conditions.go new file mode 100644 index 0000000000..54b09a056b --- /dev/null +++ b/pkg/resources/status/conditions.go @@ -0,0 +1,81 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// PatchConditionsWithOptimisticLock will update a particular condition in cluster status. +// This function may update the conditions in the passed cluster +// with the latest ones that were found from the API server. 
+// This function is needed because Kubernetes still doesn't support strategic merge +// for CRDs (see https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/). +func PatchConditionsWithOptimisticLock( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + conditions ...metav1.Condition, +) error { + if cluster == nil || len(conditions) == 0 { + return nil + } + + applyConditions := func(cluster *apiv1.Cluster) bool { + changed := false + for _, c := range conditions { + changed = changed || meta.SetStatusCondition(&cluster.Status.Conditions, c) + } + return changed + } + + var currentCluster apiv1.Cluster + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), ¤tCluster); err != nil { + return err + } + + updatedCluster := currentCluster.DeepCopy() + if changed := applyConditions(updatedCluster); !changed { + return nil + } + + if err := c.Status().Patch( + ctx, + updatedCluster, + client.MergeFromWithOptions(¤tCluster, client.MergeFromWithOptimisticLock{}), + ); err != nil { + return err + } + + cluster.Status.Conditions = updatedCluster.Status.Conditions + + return nil + }); err != nil { + return fmt.Errorf("while updating conditions: %w", err) + } + + return nil +} diff --git a/pkg/resources/status/phase.go b/pkg/resources/status/phase.go index 684eefd26c..bac80933c5 100644 --- a/pkg/resources/status/phase.go +++ b/pkg/resources/status/phase.go @@ -18,7 +18,7 @@ package status import ( "context" - "reflect" + "fmt" "github.com/cloudnative-pg/machinery/pkg/log" "k8s.io/apimachinery/pkg/api/meta" @@ -51,47 +51,50 @@ func RegisterPhaseWithOrigCluster( phase string, reason string, ) error { - contextLogger := log.FromContext(ctx) - - // we ensure that the modifiedCluster conditions aren't nil before operating - if modifiedCluster.Status.Conditions == nil { - modifiedCluster.Status.Conditions = []metav1.Condition{} + if err := PatchWithOptimisticLock( + ctx, + cli, + modifiedCluster, + func(cluster *apiv1.Cluster) { + if cluster.Status.Conditions == nil { + cluster.Status.Conditions = []metav1.Condition{} + } + + cluster.Status.Phase = phase + cluster.Status.PhaseReason = reason + + condition := metav1.Condition{ + Type: string(apiv1.ConditionClusterReady), + Status: metav1.ConditionFalse, + Reason: string(apiv1.ClusterIsNotReady), + Message: "Cluster Is Not Ready", + } + + if cluster.Status.Phase == apiv1.PhaseHealthy { + condition = metav1.Condition{ + Type: string(apiv1.ConditionClusterReady), + Status: metav1.ConditionTrue, + Reason: string(apiv1.ClusterReady), + Message: "Cluster is Ready", + } + } + + meta.SetStatusCondition(&cluster.Status.Conditions, condition) + }, + ); err != nil { + return fmt.Errorf("while updating phase: %w", err) } - modifiedCluster.Status.Phase = phase - modifiedCluster.Status.PhaseReason = reason + contextLogger := log.FromContext(ctx) - condition := metav1.Condition{ - Type: string(apiv1.ConditionClusterReady), - Status: metav1.ConditionFalse, - Reason: string(apiv1.ClusterIsNotReady), - Message: "Cluster Is Not Ready", - } + modifiedPhase := modifiedCluster.Status.Phase + origPhase := origCluster.Status.Phase - if modifiedCluster.Status.Phase == apiv1.PhaseHealthy { - condition = metav1.Condition{ - Type: string(apiv1.ConditionClusterReady), - Status: metav1.ConditionTrue, - Reason: string(apiv1.ClusterReady), - Message: "Cluster is Ready", - } + if modifiedPhase != apiv1.PhaseHealthy && origPhase == apiv1.PhaseHealthy { + 
contextLogger.Info("Cluster is not healthy") } - - meta.SetStatusCondition(&modifiedCluster.Status.Conditions, condition) - - if !reflect.DeepEqual(origCluster, modifiedCluster) { - modifiedPhase := modifiedCluster.Status.Phase - origPhase := origCluster.Status.Phase - - if modifiedPhase != apiv1.PhaseHealthy && origPhase == apiv1.PhaseHealthy { - contextLogger.Info("Cluster is not healthy") - } - if modifiedPhase == apiv1.PhaseHealthy && origPhase != apiv1.PhaseHealthy { - contextLogger.Info("Cluster is healthy") - } - if err := cli.Status().Patch(ctx, modifiedCluster, client.MergeFrom(origCluster)); err != nil { - return err - } + if modifiedPhase == apiv1.PhaseHealthy && origPhase != apiv1.PhaseHealthy { + contextLogger.Info("Cluster is healthy") } return nil diff --git a/pkg/resources/status/update.go b/pkg/resources/status/update.go new file mode 100644 index 0000000000..0543292d9e --- /dev/null +++ b/pkg/resources/status/update.go @@ -0,0 +1,73 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// PatchWithOptimisticLock updates the status of the cluster using the passed +// transaction function. 
+// Important: after successfully updating the status, this +// function refreshes it into the passed cluster +func PatchWithOptimisticLock( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + tx func(cluster *apiv1.Cluster), +) error { + if cluster == nil { + return nil + } + + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var currentCluster apiv1.Cluster + if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), ¤tCluster); err != nil { + return err + } + + updatedCluster := currentCluster.DeepCopy() + tx(updatedCluster) + + if equality.Semantic.DeepEqual(currentCluster.Status, updatedCluster.Status) { + return nil + } + + if err := c.Status().Patch( + ctx, + updatedCluster, + client.MergeFromWithOptions(¤tCluster, client.MergeFromWithOptimisticLock{}), + ); err != nil { + return err + } + + cluster.Status = updatedCluster.Status + + return nil + }); err != nil { + return fmt.Errorf("while updating conditions: %w", err) + } + + return nil +} From 55bc13852672cd3543bb0d26cd0cef34004ec588 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 22:32:10 +0100 Subject: [PATCH 241/836] chore(deps): update dependency kubernetes-sigs/kind to v0.26.0 (main) (#6350) --- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 9d809c8802..d29cc443d2 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -36,7 +36,7 @@ on: env: GOLANG_VERSION: "1.23.x" KUBEBUILDER_VERSION: "2.3.1" - KIND_VERSION: "v0.25.0" + KIND_VERSION: "v0.26.0" ROOK_VERSION: "v1.16.0" EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index db7bbb243e..fe8629a5e5 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -19,7 +19,7 @@ env: GOLANG_VERSION: "1.23.x" GOLANGCI_LINT_VERSION: "v1.62.2" KUBEBUILDER_VERSION: "2.3.1" - KIND_VERSION: "v0.25.0" + KIND_VERSION: "v0.26.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" API_DOC_NAME: "cloudnative-pg.v1.md" SLACK_USERNAME: "cnpg-bot" From 141c5846a0b3b94fb0f3df3237cb34f5b990c7df Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 09:31:42 +0100 Subject: [PATCH 242/836] fix(deps): update github.com/cloudnative-pg/cnpg-i digest to cbc4287 (main) (#6332) --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index bf663e9fe4..20e25fbcee 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a - github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 + github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 @@ -38,7 +38,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/term v0.27.0 - 
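
For context, the status helpers introduced in pkg/resources/status above are meant to be driven as in the following minimal sketch: the helper re-reads the Cluster, applies the transaction to the fresh copy, patches with MergeFromWithOptimisticLock so that a stale resourceVersion surfaces as a conflict which RetryOnConflict retries (instead of silently overwriting a concurrent status update), and finally copies the resulting status back into the object the caller passed in. markMaintenance and the Maintenance condition below are illustrative only, not part of the operator:

package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status"
)

// markMaintenance is a hypothetical caller of the new helper. The closure
// mutates only the status of the fresh copy fetched inside
// PatchWithOptimisticLock; the retry loop and the final patch live there.
func markMaintenance(ctx context.Context, c client.Client, cluster *apiv1.Cluster) error {
	return status.PatchWithOptimisticLock(ctx, c, cluster, func(cluster *apiv1.Cluster) {
		meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
			Type:    "Maintenance", // illustrative condition type
			Status:  metav1.ConditionTrue,
			Reason:  "ExampleOnly",
			Message: "status mutated under optimistic locking",
		})
	})
}

This is the same calling convention the backup and replica-cluster-switch code paths adopt elsewhere in this patch.
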
google.golang.org/grpc v1.69.0 + google.golang.org/grpc v1.69.2 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.3 k8s.io/apiextensions-apiserver v0.31.3 @@ -113,7 +113,7 @@ require ( golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 7a990938f9..53e874f1bf 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrEa9P/HfA6csNOh0DRlUyeUoKuByV57tLnf2rTIqfU= github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0 h1:8mrkOCJTFnhbG5j9qS7ZKXHvWek6Tp6rwyVXXQiN4JA= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241105133936-c704f46c20e0/go.mod h1:fAU7ySVzjpt/RZntxWZiWJCjaBJayzIxEnd0NuO7oQc= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee h1:PJc4BpPu0b684BrwWzy0B5W/CSqrnUV+jv3PTrSUx8g= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee/go.mod h1:ahVFn+JzYkFfv7Iwpswu4lsuC9yK7zZupM1ssaIKPFI= github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs= github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -270,10 +270,10 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= -google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ= +google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From ef1669b1addd9a5e9333095c0ff7ac19632f529d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Thu, 19 Dec 2024 10:25:38 +0100 Subject: [PATCH 243/836] 
test(e2e): add coverage for Publication and Subscription features (#6320) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Implement end-to-end tests to verify publication and subscription functionalities * Ensure data replication scenarios are thoroughly tested Closes #6306 Signed-off-by: Niccolò Fei Signed-off-by: Gabriele Quaresima Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Gabriele Quaresima Co-authored-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Jonathan Gonzalez V. --- .github/workflows/continuous-delivery.yml | 8 +- tests/e2e/asserts_test.go | 97 +++---- tests/e2e/cluster_microservice_test.go | 9 +- .../declarative_database_management_test.go | 14 +- .../destination-database.yaml | 1 + .../declarative_pub_sub/source-database.yaml | 1 + tests/e2e/managed_roles_test.go | 80 +++--- tests/e2e/publication_subscription_test.go | 252 +++++++++++++----- tests/labels.go | 6 +- 9 files changed, 289 insertions(+), 179 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index d29cc443d2..7ce21a1d15 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -20,10 +20,10 @@ on: default: '4' feature_type: description: > - Feature Type (disruptive, performance, upgrade, smoke, basic, service-connectivity, self-healing, - backup-restore, snapshot, operator, observability, replication, plugin, postgres-configuration, - pod-scheduling, cluster-metadata, recovery, importing-databases, storage, security, maintenance, - tablespaces) + Feature Type (backup-restore, basic, cluster-metadata, declarative-databases, disruptive, + importing-databases, maintenance, no-openshift, observability, operator, performance, plugin, + pod-scheduling, postgres-configuration, publication-subscription, recovery, replication, + security, self-healing, service-connectivity, smoke, snapshot, storage, tablespaces, upgrade) required: false log_level: description: 'Log level for operator (error, warning, info, debug(default), trace)' diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 8c6d459fec..ccf73d21a9 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -32,7 +32,7 @@ import ( "github.com/thoas/go-funk" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" "k8s.io/utils/strings/slices" @@ -504,54 +504,34 @@ func insertRecordIntoTable(tableName string, value int, conn *sql.DB) { Expect(err).ToNot(HaveOccurred()) } -// AssertDatabaseExists assert if database exists -func AssertDatabaseExists(pod *corev1.Pod, databaseName string, expectedValue bool) { - By(fmt.Sprintf("verifying if database %v exists", databaseName), func() { - query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_database WHERE lower(datname) = lower('%v'));", databaseName) +func QueryMatchExpectationPredicate( + pod *corev1.Pod, + dbname testsUtils.DatabaseName, + query string, + expectedOutput string, +) func(g Gomega) { + return func(g Gomega) { + // executor stdout, stderr, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ - Namespace: pod.Namespace, - PodName: pod.Name, - }, - testsUtils.PostgresDBName, - query) + testsUtils.PodLocator{Namespace: pod.Namespace, PodName: pod.Name}, + dbname, + query, + ) if 
err != nil { GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr) } - Expect(err).ToNot(HaveOccurred()) - - if expectedValue { - Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t")) - } else { - Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f")) - } - }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo(expectedOutput), + fmt.Sprintf("expected query %q to return %q", query, expectedOutput)) + } } -// AssertUserExists assert if user exists -func AssertUserExists(pod *corev1.Pod, userName string, expectedValue bool) { - By(fmt.Sprintf("verifying if user %v exists", userName), func() { - query := fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_roles WHERE lower(rolname) = lower('%v'));", userName) - Eventually(func(g Gomega) { - stdout, stderr, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ - Namespace: pod.Namespace, - PodName: pod.Name, - }, - testsUtils.PostgresDBName, - query) - if err != nil { - GinkgoWriter.Printf("stdout: %v\nstderr: %v", stdout, stderr) - } - g.Expect(err).ToNot(HaveOccurred()) +func roleExistsQuery(roleName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_roles WHERE rolname='%v')", roleName) +} - if expectedValue { - g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("t")) - } else { - g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo("f")) - } - }, 60).Should(Succeed()) - }) +func databaseExistsQuery(dbName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname='%v')", dbName) } // AssertDataExpectedCount verifies that an expected amount of rows exists on the table @@ -832,7 +812,7 @@ func AssertScheduledBackupsAreScheduled(namespace string, backupYAMLPath string, Name: scheduledBackupName, } - Eventually(func() (*v1.Time, error) { + Eventually(func() (*metav1.Time, error) { scheduledBackup := &apiv1.ScheduledBackup{} err := env.Client.Get(env.Ctx, scheduledBackupNamespacedName, scheduledBackup) @@ -891,11 +871,6 @@ func getScheduledBackupCompleteBackupsCount(namespace string, scheduledBackupNam // AssertPgRecoveryMode verifies if the target pod recovery mode is enabled or disabled func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) { By(fmt.Sprintf("verifying that postgres recovery mode is %v", expectedValue), func() { - stringExpectedValue := "f" - if expectedValue { - stringExpectedValue = "t" - } - Eventually(func() (string, error) { stdOut, stdErr, err := env.ExecQueryInInstancePod( testsUtils.PodLocator{ @@ -908,10 +883,18 @@ func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) { GinkgoWriter.Printf("stdout: %v\ntderr: %v\n", stdOut, stdErr) } return strings.Trim(stdOut, "\n"), err - }, 300, 10).Should(BeEquivalentTo(stringExpectedValue)) + }, 300, 10).Should(BeEquivalentTo(boolPGOutput(expectedValue))) }) } +func boolPGOutput(expectedValue bool) string { + stringExpectedValue := "f" + if expectedValue { + stringExpectedValue = "t" + } + return stringExpectedValue +} + // AssertReplicaModeCluster checks that, after inserting some data in a source cluster, // a replica cluster can be bootstrapped using pg_basebackup and is properly replicating // from the source cluster @@ -991,8 +974,10 @@ func AssertReplicaModeCluster( // verify the replica database created followed the source database, rather than // default to the "app" db and user By("checking that in replica cluster there is no database app and user app", func() { - AssertDatabaseExists(primaryReplicaCluster, "app", false) - 
AssertUserExists(primaryReplicaCluster, "app", false) + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName, + databaseExistsQuery("app"), "f"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName, + roleExistsQuery("app"), "f"), 30).Should(Succeed()) }) } } @@ -1072,8 +1057,10 @@ func AssertDetachReplicaModeCluster( By("verifying the replica database doesn't exist in the replica cluster", func() { // Application database configuration is skipped for replica clusters, // so we expect these to not be present - AssertDatabaseExists(primaryReplicaCluster, replicaDatabaseName, false) - AssertUserExists(primaryReplicaCluster, replicaUserName, false) + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName, + databaseExistsQuery(replicaDatabaseName), "f"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName, + roleExistsQuery(replicaUserName), "f"), 30).Should(Succeed()) }) By("writing some new data to the source cluster", func() { @@ -1684,7 +1671,7 @@ func AssertScheduledBackupsImmediate(namespace, backupYAMLPath, scheduledBackupN Namespace: namespace, Name: scheduledBackupName, } - Eventually(func() (*v1.Time, error) { + Eventually(func() (*metav1.Time, error) { scheduledBackup := &apiv1.ScheduledBackup{} err = env.Client.Get(env.Ctx, scheduledBackupNamespacedName, scheduledBackup) @@ -2605,7 +2592,7 @@ func AssertBackupConditionTimestampChangedInClusterStatus( namespace, clusterName string, clusterConditionType apiv1.ClusterConditionType, - lastTransactionTimeStamp *v1.Time, + lastTransactionTimeStamp *metav1.Time, ) { By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { Eventually(func() (bool, error) { diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index 6019086aa3..d957e1976f 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -261,7 +261,8 @@ func assertTableAndDataOnImportedCluster( }) By("verifying the user named 'micro' on source is not in imported database", func() { - AssertUserExists(pod, "micro", false) + Eventually(QueryMatchExpectationPredicate(pod, testsUtils.PostgresDBName, + roleExistsQuery("micro"), "f"), 30).Should(Succeed()) }) }) } @@ -330,8 +331,10 @@ func assertImportRenamesSelectedDatabase( importedPrimaryPod, err := env.GetClusterPrimary(namespace, importedClusterName) Expect(err).ToNot(HaveOccurred()) - AssertUserExists(importedPrimaryPod, "db2", false) - AssertUserExists(importedPrimaryPod, "app", true) + Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, testsUtils.PostgresDBName, + roleExistsQuery("db2"), "f"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, testsUtils.PostgresDBName, + roleExistsQuery("app"), "t"), 30).Should(Succeed()) }) By("cleaning up the clusters", func() { diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index a7f9c574eb..594bba356c 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -24,7 +24,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + testsUtils 
"github.com/cloudnative-pg/cloudnative-pg/tests/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -75,7 +75,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate) Eventually(func(g Gomega) { stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + testsUtils.PodLocator{ Namespace: namespace, PodName: primaryPod, }, @@ -119,20 +119,22 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertDatabaseExists(primaryPodInfo, dbname, true) + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName, + databaseExistsQuery(dbname), "t"), 30).Should(Succeed()) assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, database) }) By("removing the Database object", func() { - Expect(utils.DeleteObject(env, &database)).To(Succeed()) + Expect(testsUtils.DeleteObject(env, &database)).To(Succeed()) }) By("verifying the retention policy in the postgres database", func() { primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertDatabaseExists(primaryPodInfo, dbname, retainOnDeletion) + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName, + databaseExistsQuery(dbname), boolPGOutput(retainOnDeletion)), 30).Should(Succeed()) }) } @@ -193,7 +195,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) By("deleting the namespace and making sure it succeeds before timeout", func() { - err := env.DeleteNamespaceAndWait(namespace, 60) + err := env.DeleteNamespaceAndWait(namespace, 120) Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml index 2a6e122647..d4deace971 100644 --- a/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml +++ b/tests/e2e/fixtures/declarative_pub_sub/destination-database.yaml @@ -5,5 +5,6 @@ metadata: spec: name: declarative owner: app + databaseReclaimPolicy: delete cluster: name: destination-cluster diff --git a/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml index 80d5a4cf27..4ebcae63ee 100644 --- a/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml +++ b/tests/e2e/fixtures/declarative_pub_sub/source-database.yaml @@ -5,5 +5,6 @@ metadata: spec: name: declarative owner: app + databaseReclaimPolicy: delete cluster: name: source-cluster diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index fc5dd5f314..64fa7bd753 100644 --- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -24,13 +24,13 @@ import ( "github.com/lib/pq" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -92,11 +92,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic ) mem ON member = oid WHERE rolname =` + pq.QuoteLiteral(roleName) stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + testsUtils.PodLocator{ Namespace: namespace, PodName: primaryPod, }, - utils.PostgresDBName, + testsUtils.PostgresDBName, query) if err != nil { return []string{ERROR} @@ -112,11 +112,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) Eventually(func() string { stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + testsUtils.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, }, - utils.PostgresDBName, + testsUtils.PostgresDBName, query) if err != nil { return "" @@ -139,10 +139,14 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertUserExists(primaryPod, username, true) - AssertUserExists(primaryPod, userWithPerpetualPass, true) - AssertUserExists(primaryPod, userWithHashedPassword, true) - AssertUserExists(primaryPod, unrealizableUser, false) + Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + roleExistsQuery(username), "t"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + roleExistsQuery(userWithPerpetualPass), "t"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + roleExistsQuery(userWithHashedPassword), "t"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + roleExistsQuery(unrealizableUser), "f"), 30).Should(Succeed()) query := fmt.Sprintf("SELECT true FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ @@ -152,11 +156,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic for _, q := range []string{query, query2} { stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + testsUtils.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - utils.PostgresDBName, + testsUtils.PostgresDBName, q) Expect(err).ToNot(HaveOccurred()) Expect(stdout).To(Equal("t\n")) @@ -164,17 +168,20 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verifying connectivity of new managed role", func() { - rwService := utils.GetReadWriteServiceName(clusterName) + rwService := testsUtils.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(namespace, rwService, utils.PostgresDBName, username, password, env) - AssertConnection(namespace, rwService, utils.PostgresDBName, userWithHashedPassword, userWithHashedPassword, env) + AssertConnection(namespace, rwService, testsUtils.PostgresDBName, + username, password, env) + AssertConnection(namespace, rwService, testsUtils.PostgresDBName, + userWithHashedPassword, userWithHashedPassword, env) }) By("ensuring the app role has been granted createdb in the managed stanza", func() { primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertUserExists(primaryPodInfo, appUsername, true) + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, 
testsUtils.PostgresDBName, + roleExistsQuery(appUsername), "t"), 30).Should(Succeed()) query := fmt.Sprintf("SELECT rolcreatedb and rolvaliduntil='infinity' "+ "FROM pg_roles WHERE rolname='%s'", appUsername) @@ -186,7 +193,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).NotTo(HaveOccurred()) appUserSecret := corev1.Secret{} - err = utils.GetObject( + err = testsUtils.GetObject( env, types.NamespacedName{Name: cluster.GetApplicationSecretName(), Namespace: namespace}, &appUserSecret, @@ -194,9 +201,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).NotTo(HaveOccurred()) pass := string(appUserSecret.Data["password"]) - rwService := utils.GetReadWriteServiceName(clusterName) + rwService := testsUtils.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(namespace, rwService, utils.PostgresDBName, appUsername, pass, env) + AssertConnection(namespace, rwService, testsUtils.PostgresDBName, appUsername, pass, env) }) By("Verify show unrealizable role configurations in the status", func() { @@ -220,7 +227,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic expectedCreateDB := false expectedCreateRole := true expectedConnLmt := int64(10) - rwService := utils.GetReadWriteServiceName(clusterName) + rwService := testsUtils.GetReadWriteServiceName(clusterName) By("updating role attribute in spec", func() { cluster, err := env.GetCluster(namespace, clusterName) @@ -243,8 +250,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("the connection should fail since we disabled the login", func() { - forwardConn, conn, err := utils.ForwardPSQLServiceConnection(env, namespace, rwService, - utils.PostgresDBName, username, password) + forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, rwService, + testsUtils.PostgresDBName, username, password) defer func() { _ = conn.Close() forwardConn.Close() @@ -274,9 +281,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("the connectivity should be success again", func() { - rwService := utils.GetReadWriteServiceName(clusterName) + rwService := testsUtils.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(namespace, rwService, utils.PostgresDBName, username, password, env) + AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, password, env) }) }) @@ -370,7 +377,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(0)) - AssertUserExists(primaryPod, unrealizableUser, true) + Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed()) }) By("Add role in InRole for role new_role and verify in database", func() { @@ -431,7 +439,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) // user not changed - AssertUserExists(primaryPod, unrealizableUser, true) + Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed()) 
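
A side note on the pattern these test changes converge on: Gomega's Eventually accepts a function taking a Gomega argument and re-invokes it, assertions included, until they all pass or the timeout (here 30 seconds) expires, which is what lets QueryMatchExpectationPredicate replace the one-shot AssertUserExists/AssertDatabaseExists helpers. A minimal self-contained sketch of the same idea, where lookupRole is a hypothetical stand-in for ExecQueryInInstancePod:

package sketch

import (
	"fmt"
	"strings"

	. "github.com/onsi/gomega"
)

// roleExistsPredicate builds a retryable assertion: Eventually calls it
// repeatedly, and an attempt only counts as a success once every g.Expect
// inside has held.
func roleExistsPredicate(lookupRole func(sql string) (string, error), role string) func(g Gomega) {
	return func(g Gomega) {
		out, err := lookupRole(fmt.Sprintf(
			"SELECT EXISTS(SELECT 1 FROM pg_roles WHERE rolname='%s')", role))
		g.Expect(err).ToNot(HaveOccurred())
		// PostgreSQL renders booleans as "t"/"f", hence the string comparison
		g.Expect(strings.TrimSpace(out)).To(BeEquivalentTo("t"))
	}
}

Wired up as Eventually(roleExistsPredicate(lookupRole, "app"), 30).Should(Succeed()), a transient mismatch, say a role the operator has not reconciled yet, is retried instead of failing the spec on the first attempt.
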
Eventually(func() int { cluster, err := env.GetCluster(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) @@ -464,9 +473,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify connectivity using changed password in secret", func() { - rwService := utils.GetReadWriteServiceName(clusterName) + rwService := testsUtils.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(namespace, rwService, utils.PostgresDBName, username, newPassword, env) + AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, newPassword, env) }) By("Update password in database", func() { @@ -474,18 +483,18 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic username, pq.QuoteLiteral(newPassword)) _, _, err = env.ExecQueryInInstancePod( - utils.PodLocator{ + testsUtils.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, }, - utils.PostgresDBName, + testsUtils.PostgresDBName, query) Expect(err).ToNot(HaveOccurred()) }) By("Verify password in secrets is still valid", func() { - rwService := utils.GetReadWriteServiceName(clusterName) - AssertConnection(namespace, rwService, utils.PostgresDBName, username, newPassword, env) + rwService := testsUtils.GetReadWriteServiceName(clusterName) + AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, newPassword, env) }) }) @@ -497,12 +506,12 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic updated := cluster.DeepCopy() for i, r := range updated.Spec.Managed.Roles { if r.Name == newUserName { - updated.Spec.Managed.Roles[i].ValidUntil = &v1.Time{} + updated.Spec.Managed.Roles[i].ValidUntil = &metav1.Time{} } if r.Name == username { tt, err := time.Parse(time.RFC3339Nano, newValidUntilString) Expect(err).ToNot(HaveOccurred()) - nt := v1.NewTime(tt) + nt := metav1.NewTime(tt) updated.Spec.Managed.Roles[i].ValidUntil = &nt } } @@ -544,7 +553,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic By("Verify new_role not existed in db", func() { primaryPod, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertUserExists(primaryPod, newUserName, false) + Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + roleExistsQuery(newUserName), "f"), 30).Should(Succeed()) }) }) }) diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go index 3133bd3ef7..0ffa10d918 100644 --- a/tests/e2e/publication_subscription_test.go +++ b/tests/e2e/publication_subscription_test.go @@ -24,7 +24,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -33,7 +33,7 @@ import ( // - spinning up a cluster, apply a declarative publication/subscription on it // Set of tests in which we use the declarative publication and subscription CRDs on an existing cluster -var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePubSub), func() { +var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSubscription), func() { const ( sourceClusterManifest = fixturesDir + "/declarative_pub_sub/source-cluster.yaml.template" destinationClusterManifest = fixturesDir + "/declarative_pub_sub/destination-cluster.yaml.template" @@ -54,13 +54,12 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub const ( namespacePrefix = "declarative-pub-sub" dbname = "declarative" + subName = "sub" + pubName = "pub" tableName = "test" ) var ( sourceClusterName, destinationClusterName, namespace string - databaseObjectName, pubObjectName, subObjectName string - pub *apiv1.Publication - sub *apiv1.Subscription err error ) @@ -84,8 +83,51 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub }) }) - assertCreateDatabase := func(namespace, clusterName, databaseManifest, databaseName string) { - databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest) + AfterEach(func() { + // We want to reuse the same source and destination Cluster, so + // we need to drop each Postgres object that has been created. + // We need to make sure that publication/subscription have been removed before + // attempting to drop the database, otherwise the DROP DATABASE will fail because + // there's an active logical replication slot. + destPrimaryPod, err := env.GetClusterPrimary(namespace, destinationClusterName) + Expect(err).ToNot(HaveOccurred()) + _, _, err = env.EventuallyExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: destPrimaryPod.Namespace, + PodName: destPrimaryPod.Name, + }, + dbname, + fmt.Sprintf("DROP SUBSCRIPTION IF EXISTS %s", subName), + RetryTimeout, + PollingTime, + ) + Expect(err).ToNot(HaveOccurred()) + + sourcePrimaryPod, err := env.GetClusterPrimary(namespace, sourceClusterName) + Expect(err).ToNot(HaveOccurred()) + _, _, err = env.EventuallyExecQueryInInstancePod( + testsUtils.PodLocator{ + Namespace: sourcePrimaryPod.Namespace, + PodName: sourcePrimaryPod.Name, + }, + dbname, + fmt.Sprintf("DROP PUBLICATION IF EXISTS %s", pubName), + RetryTimeout, + PollingTime, + ) + Expect(err).ToNot(HaveOccurred()) + + Expect(DeleteResourcesFromFile(namespace, destinationDatabaseManifest)).To(Succeed()) + Expect(DeleteResourcesFromFile(namespace, sourceDatabaseManifest)).To(Succeed()) + Eventually(QueryMatchExpectationPredicate(sourcePrimaryPod, testsUtils.PostgresDBName, + databaseExistsQuery(dbname), "f"), 30).Should(Succeed()) + Eventually(QueryMatchExpectationPredicate(destPrimaryPod, testsUtils.PostgresDBName, + databaseExistsQuery(dbname), "f"), 30).Should(Succeed()) + }) + + assertCreateDatabase := func(namespace, clusterName, databaseManifest string) { + databaseObject := &apiv1.Database{} + databaseObjectName, err := env.GetResourceNameFromYAML(databaseManifest) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("applying the %s Database CRD manifest", databaseObjectName), func() { @@ -93,7 +135,6 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub }) By(fmt.Sprintf("ensuring the %s Database CRD succeeded reconciliation", databaseObjectName), func() { - databaseObject := &apiv1.Database{} 
databaseNamespacedName := types.NamespacedName{ Namespace: namespace, Name: databaseObjectName, @@ -106,81 +147,33 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) - By(fmt.Sprintf("verifying the %s database has been created", databaseName), func() { + By(fmt.Sprintf("verifying the %s database has been created", databaseObject.Spec.Name), func() { primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertDatabaseExists(primaryPodInfo, databaseName, true) + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName, + databaseExistsQuery(databaseObject.Spec.Name), "t"), 30).Should(Succeed()) }) } - assertPublicationExists := func(namespace, primaryPod string, pub *apiv1.Publication) { - query := fmt.Sprintf("select count(*) from pg_publication where pubname = '%s'", - pub.Spec.Name) - Eventually(func(g Gomega) { - stdout, _, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ - Namespace: namespace, - PodName: primaryPod, - }, - dbname, - query) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(stdout).Should(ContainSubstring("1"), "expected publication not found") - }, 30).Should(Succeed()) - } - - assertSubscriptionExists := func(namespace, primaryPod string, sub *apiv1.Subscription) { - query := fmt.Sprintf("select count(*) from pg_subscription where subname = '%s'", - sub.Spec.Name) - Eventually(func(g Gomega) { - stdout, _, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ - Namespace: namespace, - PodName: primaryPod, - }, - dbname, - query) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(stdout).Should(ContainSubstring("1"), "expected subscription not found") - }, 30).Should(Succeed()) - } - - It("can perform logical replication", func() { - assertCreateDatabase(namespace, sourceClusterName, sourceDatabaseManifest, dbname) - - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: sourceClusterName, - DatabaseName: dbname, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - assertCreateDatabase(namespace, destinationClusterName, destinationDatabaseManifest, dbname) - - By("creating an empty table inside the destination database", func() { - query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName) - _, err = testUtils.RunExecOverForward(env, namespace, destinationClusterName, dbname, - apiv1.ApplicationUserSecretSuffix, query) - Expect(err).ToNot(HaveOccurred()) - }) + // nolint:dupl + assertCreatePublication := func(namespace, clusterName, publicationManifest string) { + pubObjectName, err := env.GetResourceNameFromYAML(publicationManifest) + Expect(err).NotTo(HaveOccurred()) By("applying Publication CRD manifest", func() { - CreateResourceFromFile(namespace, pubManifest) - pubObjectName, err = env.GetResourceNameFromYAML(pubManifest) - Expect(err).NotTo(HaveOccurred()) + CreateResourceFromFile(namespace, publicationManifest) }) By("ensuring the Publication CRD succeeded reconciliation", func() { // get publication object - pub = &apiv1.Publication{} pubNamespacedName := types.NamespacedName{ Namespace: namespace, Name: pubObjectName, } Eventually(func(g Gomega) { + pub := &apiv1.Publication{} err := env.Client.Get(env.Ctx, pubNamespacedName, pub) Expect(err).ToNot(HaveOccurred()) g.Expect(pub.Status.Applied).Should(HaveValue(BeTrue())) @@ -188,27 +181,32 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub }) 
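
A Gomega detail in the reconciliation checks above and below: Status.Applied on these CRDs is a bool pointer that stays nil until the reconciler reports back, which is why the assertions use HaveValue(BeTrue()) rather than BeTrue(). HaveValue dereferences the pointer before matching and simply fails while it is still nil, so the enclosing Eventually keeps polling. A standalone illustration (the fail handler here just prints, purely for demonstration):

package main

import (
	"fmt"

	"github.com/onsi/gomega"
)

func main() {
	g := gomega.NewGomega(func(message string, _ ...int) { fmt.Println(message) })

	var applied *bool // mimics Status.Applied before reconciliation
	g.Expect(applied).ToNot(gomega.HaveValue(gomega.BeTrue())) // nil never matches

	v := true
	applied = &v // the reconciler has applied the resource
	g.Expect(applied).To(gomega.HaveValue(gomega.BeTrue())) // dereferences, then compares
}
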
By("verifying new publication has been created", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, sourceClusterName) + primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - assertPublicationExists(namespace, primaryPodInfo.Name, pub) + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, + publicationExistsQuery(pubName), "t"), 30).Should(Succeed()) }) + } + + // nolint:dupl + assertCreateSubscription := func(namespace, clusterName, subscriptionManifest string) { + subObjectName, err := env.GetResourceNameFromYAML(subscriptionManifest) + Expect(err).NotTo(HaveOccurred()) By("applying Subscription CRD manifest", func() { - CreateResourceFromFile(namespace, subManifest) - subObjectName, err = env.GetResourceNameFromYAML(subManifest) - Expect(err).NotTo(HaveOccurred()) + CreateResourceFromFile(namespace, subscriptionManifest) }) By("ensuring the Subscription CRD succeeded reconciliation", func() { // get subscription object - sub = &apiv1.Subscription{} pubNamespacedName := types.NamespacedName{ Namespace: namespace, Name: subObjectName, } Eventually(func(g Gomega) { + sub := &apiv1.Subscription{} err := env.Client.Get(env.Ctx, pubNamespacedName, sub) Expect(err).ToNot(HaveOccurred()) g.Expect(sub.Status.Applied).Should(HaveValue(BeTrue())) @@ -216,10 +214,77 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub }) By("verifying new subscription has been created", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, destinationClusterName) + primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - assertSubscriptionExists(namespace, primaryPodInfo.Name, sub) + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, + subscriptionExistsQuery(subName), "t"), 30).Should(Succeed()) + }) + } + + assertTestPubSub := func(retainOnDeletion bool) { + assertCreateDatabase(namespace, sourceClusterName, sourceDatabaseManifest) + + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: sourceClusterName, + DatabaseName: dbname, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + assertCreateDatabase(namespace, destinationClusterName, destinationDatabaseManifest) + + By("creating an empty table inside the destination database", func() { + query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName) + _, err = testsUtils.RunExecOverForward(env, namespace, destinationClusterName, dbname, + apiv1.ApplicationUserSecretSuffix, query) + Expect(err).ToNot(HaveOccurred()) + }) + + assertCreatePublication(namespace, sourceClusterName, pubManifest) + assertCreateSubscription(namespace, destinationClusterName, subManifest) + + var ( + publication *apiv1.Publication + subscription *apiv1.Subscription + ) + By("setting the reclaimPolicy", func() { + publicationReclaimPolicy := apiv1.PublicationReclaimDelete + subscriptionReclaimPolicy := apiv1.SubscriptionReclaimDelete + if retainOnDeletion { + publicationReclaimPolicy = apiv1.PublicationReclaimRetain + subscriptionReclaimPolicy = apiv1.SubscriptionReclaimRetain + } + // Get the object names + pubObjectName, err := env.GetResourceNameFromYAML(pubManifest) + Expect(err).NotTo(HaveOccurred()) + subObjectName, err := env.GetResourceNameFromYAML(subManifest) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func(g Gomega) { + var pub apiv1.Publication + err = testsUtils.GetObject( + env, + types.NamespacedName{Namespace: namespace, 
Name: pubObjectName}, + &pub, + ) + g.Expect(err).ToNot(HaveOccurred()) + publication.Spec.ReclaimPolicy = publicationReclaimPolicy + err = env.Client.Update(env.Ctx, publication) + g.Expect(err).ToNot(HaveOccurred()) + + var sub apiv1.Subscription + err = testsUtils.GetObject( + env, + types.NamespacedName{Namespace: namespace, Name: subObjectName}, + &sub, + ) + g.Expect(err).ToNot(HaveOccurred()) + subscription.Spec.ReclaimPolicy = subscriptionReclaimPolicy + err = env.Client.Update(env.Ctx, subscription) + g.Expect(err).ToNot(HaveOccurred()) + }, 60, 5).Should(Succeed()) }) By("checking that the data is present inside the destination cluster database", func() { @@ -231,6 +296,47 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelDeclarativePub } AssertDataExpectedCount(env, tableLocator, 2) }) + + By("removing the objects", func() { + Expect(testsUtils.DeleteObject(env, publication)).To(Succeed()) + Expect(testsUtils.DeleteObject(env, subscription)).To(Succeed()) + }) + + By("verifying the publication reclaim policy outcome", func() { + primaryPodInfo, err := env.GetClusterPrimary(namespace, sourceClusterName) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, + publicationExistsQuery(pubName), boolPGOutput(retainOnDeletion)), 30).Should(Succeed()) + }) + + By("verifying the subscription reclaim policy outcome", func() { + primaryPodInfo, err := env.GetClusterPrimary(namespace, destinationClusterName) + Expect(err).ToNot(HaveOccurred()) + + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, + subscriptionExistsQuery(subName), boolPGOutput(retainOnDeletion)), 30).Should(Succeed()) + }) + } + + When("Reclaim policy is set to delete", func() { + It("can manage Publication and Subscription and delete them in Postgres", func() { + assertTestPubSub(false) + }) + }) + + When("Reclaim policy is set to retain", func() { + It("can manage Publication and Subscription and release it", func() { + assertTestPubSub(true) + }) }) }) }) + +func publicationExistsQuery(pubName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_publication WHERE pubname='%s')", pubName) +} + +func subscriptionExistsQuery(subName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_subscription WHERE subname='%s')", subName) +} diff --git a/tests/labels.go b/tests/labels.go index 98649f2be2..ee37925343 100644 --- a/tests/labels.go +++ b/tests/labels.go @@ -32,9 +32,6 @@ const ( // LabelDeclarativeDatabases is a label for selecting the declarative databases test LabelDeclarativeDatabases = "declarative-databases" - // LabelDeclarativePubSub is a label for selecting the publication / subscription test - LabelDeclarativePubSub = "publication-subscription" - // LabelDisruptive is the string for labelling disruptive tests LabelDisruptive = "disruptive" @@ -65,6 +62,9 @@ const ( // LabelPostgresConfiguration is a label for selecting postgres-configuration test LabelPostgresConfiguration = "postgres-configuration" + // LabelPublicationSubscription is a label for selecting the publication / subscription test + LabelPublicationSubscription = "publication-subscription" + // LabelRecovery is a label for selecting recovery tests LabelRecovery = "recovery" From 7eabd38fb70adfaa4c6feeb7b3aa996da8aeef4b Mon Sep 17 00:00:00 2001 From: Peggie Date: Thu, 19 Dec 2024 10:38:59 +0100 Subject: [PATCH 244/836] feat: Public Cloud K8S versions update (#6316) Update the versions used to test the operator on public cloud 
providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: public-cloud-k8s-versions-check --- .github/kind_versions.json | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/kind_versions.json b/.github/kind_versions.json index 85547c7125..b39d642e5d 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,7 +1,8 @@ [ - "v1.31.2", - "v1.30.6", - "v1.29.10", + "v1.32.0", + "v1.31.4", + "v1.30.8", + "v1.29.12", "v1.28.15", "v1.27.16" ] From 2cce9742abbbf8133f24435d6e5436b94185b1fa Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 11:05:49 +0100 Subject: [PATCH 245/836] chore(deps): update kindest/node docker tag to v1.32.0 (main) (#6365) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index e795c4a4dc..c28579750c 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.31.2 +KIND_NODE_DEFAULT_VERSION=v1.32.0 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index a0166bca71..e668d5097e 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.31.2 +KIND_NODE_DEFAULT_VERSION=v1.32.0 K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 From 0be01557b747745e93c60375c6b9c58f4c2637cc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 11:35:08 +0100 Subject: [PATCH 246/836] chore(deps): update helm/kind-action action to v1.11.0 (main) (#6364) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index fe8629a5e5..5b15566bba 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -829,7 +829,7 @@ jobs: uses: actions/checkout@v4 - name: Setting up KinD cluster - uses: helm/kind-action@v1.10.0 + uses: helm/kind-action@v1.11.0 with: wait: "600s" version: ${{ env.KIND_VERSION }} From 396e3bd62a1b53ed29dd03a167b003f1d69e90cb Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 14:12:37 +0100 Subject: [PATCH 247/836] fix(deps): update github.com/cloudnative-pg/machinery digest to 2807bc8 (main) (#6338) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 20e25fbcee..0467d2e463 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee - github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 + github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index 53e874f1bf..ded4172b34 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrE github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee h1:PJc4BpPu0b684BrwWzy0B5W/CSqrnUV+jv3PTrSUx8g= github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee/go.mod h1:ahVFn+JzYkFfv7Iwpswu4lsuC9yK7zZupM1ssaIKPFI= -github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61 h1:KzazCP/OVbCPAkhhg9hLLNzLyAHcYzxA3U3wsyLDWbs= -github.com/cloudnative-pg/machinery v0.0.0-20241122084004-33b997fc6c61/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU= +github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d h1:v9IgiRYa7r+KCUxl5lCyUXdhsefZ90engPSMNLBqYmc= +github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= From c9abaf65816b66616512a56a3ff16442a2159274 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 19 Dec 2024 14:34:46 +0100 Subject: [PATCH 248/836] test(e2e): fix a panic in publication and subscription test (#6378) The problem was introduced in #6320 Signed-off-by: Marco Nenciarini --- tests/e2e/publication_subscription_test.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go index 0ffa10d918..e6dccd6e66 100644 --- a/tests/e2e/publication_subscription_test.go +++ b/tests/e2e/publication_subscription_test.go @@ -246,8 +246,8 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub assertCreateSubscription(namespace, destinationClusterName, subManifest) var ( - publication *apiv1.Publication - subscription *apiv1.Subscription + publication apiv1.Publication + subscription apiv1.Subscription ) By("setting the reclaimPolicy", func() { publicationReclaimPolicy := apiv1.PublicationReclaimDelete @@ -263,26 +263,24 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub Expect(err).NotTo(HaveOccurred()) Eventually(func(g Gomega) { - var pub apiv1.Publication err = testsUtils.GetObject( env, types.NamespacedName{Namespace: namespace, Name: pubObjectName}, - &pub, + &publication, ) g.Expect(err).ToNot(HaveOccurred()) publication.Spec.ReclaimPolicy = publicationReclaimPolicy - err = env.Client.Update(env.Ctx, publication) + err = env.Client.Update(env.Ctx, &publication) g.Expect(err).ToNot(HaveOccurred()) - var sub apiv1.Subscription err = testsUtils.GetObject( env, types.NamespacedName{Namespace: namespace, Name: subObjectName}, - &sub, + &subscription, ) g.Expect(err).ToNot(HaveOccurred()) subscription.Spec.ReclaimPolicy = subscriptionReclaimPolicy - err = env.Client.Update(env.Ctx, subscription) + err = env.Client.Update(env.Ctx, &subscription) g.Expect(err).ToNot(HaveOccurred()) }, 60, 5).Should(Succeed()) }) @@ -298,8 +296,8 @@ var _ = Describe("Publication and Subscription", 
Label(tests.LabelPublicationSub }) By("removing the objects", func() { - Expect(testsUtils.DeleteObject(env, publication)).To(Succeed()) - Expect(testsUtils.DeleteObject(env, subscription)).To(Succeed()) + Expect(testsUtils.DeleteObject(env, &publication)).To(Succeed()) + Expect(testsUtils.DeleteObject(env, &subscription)).To(Succeed()) }) By("verifying the publication reclaim policy outcome", func() { From c59451ab820399cba5e8d1914fe6fe9651c0c16f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 16:04:55 +0100 Subject: [PATCH 249/836] fix(deps): update kubernetes packages to v0.32.0 (main) (#6354) https://github.com/kubernetes/api `v0.31.3` -> `v0.32.0` https://github.com/kubernetes/apiextensions-apiserver `v0.31.3` -> `v0.32.0` https://github.com/kubernetes/apimachinery `v0.31.3` -> `v0.32.0` https://github.com/kubernetes/cli-runtime `v0.31.3` -> `v0.32.0` https://github.com/kubernetes/client-go `v0.31.3` -> `v0.32.0` Signed-off-by: Jonathan Gonzalez V. Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Jonathan Gonzalez V. --- .../bases/postgresql.cnpg.io_clusters.yaml | 2 +- .../crd/bases/postgresql.cnpg.io_poolers.yaml | 340 ++++++++++++------ go.mod | 30 +- go.sum | 57 ++- 4 files changed, 271 insertions(+), 158 deletions(-) diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index e185082fa7..1057f16c99 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3633,7 +3633,7 @@ spec: not set, the implementation will apply its default routing strategy. If set to "PreferClose", implementations should prioritize endpoints that are topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + This is a beta field and requires enabling ServiceTrafficDistribution feature. type: string type: description: |- diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index ac283c038f..6039e1e5ea 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -707,7 +707,7 @@ spec: not set, the implementation will apply its default routing strategy. If set to "PreferClose", implementations should prioritize endpoints that are topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + This is a beta field and requires enabling ServiceTrafficDistribution feature. type: string type: description: |- @@ -1958,7 +1958,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -1973,7 +1974,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -2024,8 +2025,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. 
properties: seconds: description: Seconds is the number of seconds @@ -2038,8 +2039,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -2071,7 +2072,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -2086,7 +2088,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -2137,8 +2139,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -2151,8 +2153,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -2180,7 +2182,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -2201,8 +2204,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -2221,7 +2223,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -2289,7 +2291,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -2395,7 +2397,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -2416,8 +2419,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -2436,7 +2438,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. 
properties: host: @@ -2504,7 +2506,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -2854,7 +2856,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -2875,8 +2878,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -2895,7 +2897,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -2963,7 +2965,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -3178,9 +3180,13 @@ spec: options of a pod. properties: name: - description: Required. + description: |- + Name is this DNS resolver option's name. + Required. type: string value: + description: Value is this DNS resolver option's + value. type: string type: object type: array @@ -3463,7 +3469,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -3478,7 +3485,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -3529,8 +3536,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -3543,8 +3550,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -3576,7 +3583,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -3591,7 +3599,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -3642,8 +3650,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. 
properties: seconds: description: Seconds is the number of seconds @@ -3656,8 +3664,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -3681,7 +3689,8 @@ spec: description: Probes are not allowed for ephemeral containers. properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -3702,8 +3711,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -3722,7 +3730,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -3790,7 +3798,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -3884,7 +3892,8 @@ spec: description: Probes are not allowed for ephemeral containers. properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -3905,8 +3914,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -3925,7 +3933,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -3993,7 +4001,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -4323,7 +4331,8 @@ spec: description: Probes are not allowed for ephemeral containers. properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -4344,8 +4353,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -4364,7 +4372,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -4432,7 +4440,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -4971,7 +4979,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. 
properties: command: description: |- @@ -4986,7 +4995,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5037,8 +5046,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -5051,8 +5060,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -5084,7 +5093,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -5099,7 +5109,7 @@ spec: x-kubernetes-list-type: atomic type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5150,8 +5160,8 @@ spec: - port type: object sleep: - description: Sleep represents the duration that - the container should sleep before being terminated. + description: Sleep represents a duration that + the container should sleep. properties: seconds: description: Seconds is the number of seconds @@ -5164,8 +5174,8 @@ spec: tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for the backward compatibility. There are no validation of this field and - lifecycle hooks will fail in runtime when tcp handler is specified. + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. properties: host: description: 'Optional: Host name to connect @@ -5193,7 +5203,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -5214,8 +5225,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -5234,7 +5244,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5302,7 +5312,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -5408,7 +5418,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. 
properties: command: description: |- @@ -5429,8 +5440,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -5449,7 +5459,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5517,7 +5527,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -5867,7 +5877,8 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: - description: Exec specifies the action to take. + description: Exec specifies a command to execute + in the container. properties: command: description: |- @@ -5888,8 +5899,7 @@ spec: format: int32 type: integer grpc: - description: GRPC specifies an action involving - a GRPC port. + description: GRPC specifies a GRPC HealthCheckRequest. properties: port: description: Port number of the gRPC service. @@ -5908,7 +5918,7 @@ spec: - port type: object httpGet: - description: HTTPGet specifies the http request + description: HTTPGet specifies an HTTP GET request to perform. properties: host: @@ -5976,7 +5986,7 @@ spec: format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving + description: TCPSocket specifies a connection to a TCP port. properties: host: @@ -6343,6 +6353,74 @@ spec: x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object restartPolicy: description: |- Restart policy for all containers within the pod. @@ -6467,6 +6545,32 @@ spec: Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string seLinuxOptions: description: |- The SELinux context to be applied to all containers. @@ -6873,6 +6977,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -6904,8 +7010,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk - mount on the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. 
properties: cachingMode: description: 'cachingMode is the Host Caching mode: @@ -6944,8 +7052,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service - mount on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. properties: readOnly: description: |- @@ -6964,8 +7074,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the - host that shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -7018,6 +7129,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -7129,7 +7242,7 @@ spec: csi: description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external - CSI drivers (Beta feature). + CSI drivers. properties: driver: description: |- @@ -7600,6 +7713,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to @@ -7645,9 +7759,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached - to a kubelet's host machine. This depends on the Flocker - control service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -7663,6 +7777,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -7698,7 +7814,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -7722,6 +7838,7 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: @@ -7931,9 +8048,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host - machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -7949,8 +8066,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume - attached and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -8321,8 +8441,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the - host that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -8361,6 +8482,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: @@ -8433,8 +8555,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent - volume attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -8567,8 +8690,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume - attached and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -8613,8 +8737,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume - attached and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. 
properties: fsType: description: |- diff --git a/go.mod b/go.mod index 0467d2e463..f0ba746aaa 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/cloudnative-pg/cloudnative-pg -go 1.23 +go 1.23.0 -toolchain go1.23.3 +toolchain go1.23.4 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 @@ -40,11 +40,11 @@ require ( golang.org/x/term v0.27.0 google.golang.org/grpc v1.69.2 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.31.3 - k8s.io/apiextensions-apiserver v0.31.3 - k8s.io/apimachinery v0.31.3 - k8s.io/cli-runtime v0.31.3 - k8s.io/client-go v0.31.3 + k8s.io/api v0.32.0 + k8s.io/apiextensions-apiserver v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/cli-runtime v0.32.0 + k8s.io/client-go v0.32.0 k8s.io/utils v0.0.0-20241210054802-24370beab758 sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/yaml v1.4.0 @@ -65,7 +65,6 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect @@ -75,7 +74,6 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect @@ -86,7 +84,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -101,7 +99,6 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.starlark.net v0.0.0-20240925182052-1207426daebd // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.30.0 // indirect @@ -116,11 +113,10 @@ require ( google.golang.org/protobuf v1.36.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.17.3 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/kustomize/api v0.18.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect ) diff --git a/go.sum b/go.sum index ded4172b34..3f3e51ffa4 100644 --- a/go.sum +++ b/go.sum @@ -59,8 +59,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= @@ -85,8 +83,6 @@ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJr github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -131,8 +127,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= -github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -183,8 +179,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stern/stern v1.31.0 h1:kKHVgEmIgqbC6/sFZahUeU9TbxDH+0l3l5/ornLlQLs= github.com/stern/stern v1.31.0/go.mod h1:BfAeaPQhkMhQPTaFV81pS8YWCBmxg6IBL8fPGalt0qY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -208,8 +204,6 @@ go.opentelemetry.io/otel/sdk/metric v1.31.0 
h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4Jjx go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.starlark.net v0.0.0-20240925182052-1207426daebd h1:S+EMisJOHklQxnS3kqsY8jl2y5aF0FDEdcLnOw3q22E= -go.starlark.net v0.0.0-20240925182052-1207426daebd/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -282,37 +276,34 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWM gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= -k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= -k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE= -k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4= -k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= -k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/cli-runtime v0.31.3 h1:fEQD9Xokir78y7pVK/fCJN090/iYNrLHpFbGU4ul9TI= -k8s.io/cli-runtime v0.31.3/go.mod h1:Q2jkyTpl+f6AtodQvgDI8io3jrfr+Z0LyQBPJJ2Btq8= -k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= -k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= +k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c= +k8s.io/cli-runtime v0.32.0/go.mod h1:Mai8ht2+esoDRK5hr861KRy6z0zHsSTYttNVJXgP3YQ= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= -k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= 
+k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= -sigs.k8s.io/kustomize/api v0.17.3/go.mod h1:TuDH4mdx7jTfK61SQ/j1QZM/QWR+5rmEiNjvYlhzFhc= -sigs.k8s.io/kustomize/kyaml v0.17.2 h1:+AzvoJUY0kq4QAhH/ydPHHMRLijtUKiyVyh7fOSshr0= -sigs.k8s.io/kustomize/kyaml v0.17.2/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= +sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= From bc55791b7ff05b322ad6ea240d0b67d6d471c1ff Mon Sep 17 00:00:00 2001 From: Gabriele Quaresima Date: Thu, 19 Dec 2024 16:35:17 +0100 Subject: [PATCH 250/836] fix(roles): properly quote inRoles in SQL statements (#6346) This patch fixes an issue where the `inRoles` parameter was not properly quoted in SQL statements, which could cause syntax errors if the role name contains special characters. 
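For readers who want the quoting technique in isolation, here is a minimal, self-contained Go sketch of the approach this patch adopts; the pgx/v5 import path and the sample role names are illustrative assumptions, not taken from this patch.

    package main

    import (
        "fmt"
        "strings"

        "github.com/jackc/pgx/v5"
    )

    // buildInRoleClause quotes every role name with pgx.Identifier.Sanitize(),
    // which wraps the identifier in double quotes and doubles any embedded
    // double quote, so names containing spaces, quotes, or mixed case cannot
    // break (or inject into) the generated SQL.
    func buildInRoleClause(inRoles []string) string {
        quoted := make([]string, len(inRoles))
        for i, role := range inRoles {
            quoted[i] = pgx.Identifier{role}.Sanitize()
        }
        return fmt.Sprintf(" IN ROLE %s ", strings.Join(quoted, ","))
    }

    func main() {
        // Prints (with surrounding spaces): IN ROLE "pg_monitoring","bad""role"
        fmt.Println(buildInRoleClause([]string{"pg_monitoring", `bad"role`}))
    }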
Closes #6337

Signed-off-by: wolfox
---
 internal/management/controller/roles/postgres.go      | 8 +++++++-
 internal/management/controller/roles/postgres_test.go | 8 ++++----
 2 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go
index eb1dcf913c..26c909d4b2 100644
--- a/internal/management/controller/roles/postgres.go
+++ b/internal/management/controller/roles/postgres.go
@@ -301,7 +301,13 @@ func GetParentRoles(ctx context.Context, db *sql.DB, role DatabaseRole) ([]strin
 
 func appendInRoleOptions(role DatabaseRole, query *strings.Builder) {
 	if len(role.InRoles) > 0 {
-		query.WriteString(fmt.Sprintf(" IN ROLE %s ", strings.Join(role.InRoles, ",")))
+		quotedInRoles := make([]string, len(role.InRoles))
+
+		for i, inRole := range role.InRoles {
+			quotedInRoles[i] = pgx.Identifier{inRole}.Sanitize()
+		}
+
+		query.WriteString(fmt.Sprintf(" IN ROLE %s ", strings.Join(quotedInRoles, ",")))
 	}
 }
 
diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go
index 01f3dd1dc9..4357f62f0c 100644
--- a/internal/management/controller/roles/postgres_test.go
+++ b/internal/management/controller/roles/postgres_test.go
@@ -104,22 +104,22 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
 	}
 	wantedRoleExpectedCrtStmt := fmt.Sprintf(
 		"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
-			"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring VALID UNTIL '2100-01-01 00:00:00Z'",
+			"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" VALID UNTIL '2100-01-01 00:00:00Z'",
 		wantedRole.Name)
 
 	wantedRoleWithPassExpectedCrtStmt := fmt.Sprintf(
 		"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
-			"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD 'myPassword' VALID UNTIL '2100-01-01 00:00:00Z'",
+			"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD 'myPassword' VALID UNTIL '2100-01-01 00:00:00Z'",
 		wantedRole.Name)
 
 	wantedRoleWithoutValidUntilExpectedCrtStmt := fmt.Sprintf(
 		"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
-			"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD 'myPassword'",
+			"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD 'myPassword'",
 		wantedRole.Name)
 
 	wantedRoleWithPassDeletionExpectedCrtStmt := fmt.Sprintf(
 		"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
-			"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD NULL VALID UNTIL '2100-01-01 00:00:00Z'",
+			"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE \"pg_monitoring\" PASSWORD NULL VALID UNTIL '2100-01-01 00:00:00Z'",
 		wantedRole.Name)
 	wantedRoleWithDefaultConnectionLimitExpectedCrtStmt := fmt.Sprintf(
 		"CREATE ROLE \"%s\" NOBYPASSRLS NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION "+

From b897b44fc3e6f9afe0b19654724cd9d9d4888fcc Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 19 Dec 2024 17:35:11 +0100
Subject: [PATCH 251/836] chore: add missing labels in container images (#6377)

The operator image was missing the "maintainer" label needed for
certifications and standards; it was relying on inheriting that label
from the parent image.

Signed-off-by: Jonathan Gonzalez V.
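A quick way to confirm that the new labels actually land on the built image is to inspect them after a local build. A minimal check; the image tag is an illustrative assumption:

    docker build -t cnpg-operator:labels-check .
    # Both labels should print "CloudNativePG Contributors."
    docker inspect \
      --format '{{ index .Config.Labels "maintainer" }} / {{ index .Config.Labels "vendor" }}' \
      cnpg-operator:labels-check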
--- Dockerfile | 6 ++++-- Dockerfile-ubi8 | 6 ++++-- Dockerfile-ubi9 | 6 ++++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index c96d232364..e6d787a93b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,14 +12,16 @@ ARG VERSION="dev" ARG TARGETARCH ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." + DESCRIPTION="This Docker image contains CloudNativePG Operator." \ + MAINTAINER="CloudNativePG Contributors." LABEL summary="$SUMMARY" \ description="$DESCRIPTION" \ io.k8s.display-name="$SUMMARY" \ io.k8s.description="$DESCRIPTION" \ name="CloudNativePG Operator" \ - vendor="CloudNativePG Contributors" \ + vendor="$MAINTAINER" \ + maintainer="$MAINTAINER" \ url="https://cloudnative-pg.io/" \ version="$VERSION" \ release="1" diff --git a/Dockerfile-ubi8 b/Dockerfile-ubi8 index 4c2712de72..1aea9e40ac 100644 --- a/Dockerfile-ubi8 +++ b/Dockerfile-ubi8 @@ -3,14 +3,16 @@ ARG VERSION="dev" ARG TARGETARCH ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." + DESCRIPTION="This Docker image contains CloudNativePG Operator." \ + MAINTAINER="CloudNativePG Contributors." LABEL summary="$SUMMARY" \ description="$DESCRIPTION" \ io.k8s.display-name="$SUMMARY" \ io.k8s.description="$DESCRIPTION" \ name="CloudNativePG Operator" \ - vendor="CloudNativePG Contributors" \ + vendor="$MAINTAINER" \ + maintainer="$MAINTAINER" \ url="https://cloudnative-pg.io/" \ version="$VERSION" \ release="1" diff --git a/Dockerfile-ubi9 b/Dockerfile-ubi9 index 74409e03ca..0d846d91b0 100644 --- a/Dockerfile-ubi9 +++ b/Dockerfile-ubi9 @@ -3,14 +3,16 @@ ARG VERSION="dev" ARG TARGETARCH ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." + DESCRIPTION="This Docker image contains CloudNativePG Operator." \ + MAINTAINER="CloudNativePG Contributors." LABEL summary="$SUMMARY" \ description="$DESCRIPTION" \ io.k8s.display-name="$SUMMARY" \ io.k8s.description="$DESCRIPTION" \ name="CloudNativePG Operator" \ - vendor="CloudNativePG Contributors" \ + vendor="$MAINTAINER" \ + maintainer="$MAINTAINER" \ url="https://cloudnative-pg.io/" \ version="$VERSION" \ release="1" From 60ff2ccce515eaebf666c7374e589894213cc561 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 09:41:54 +0100 Subject: [PATCH 252/836] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.79.2 (main) (#6379) https://github.com/prometheus-operator/prometheus-operator v0.78.2` -> `v0.79.2` golang.org/x/crypto `v0.28.0` -> `v0.30.0` golang.org/x/net `v0.30.0` -> `v0.32.0` golang.org/x/sync `v0.8.0` -> `v0.10.0` golang.org/x/text `v0.19.0` -> `v0.21.0` sigs.k8s.io/json `v0.0.0-20241010143419-9aa6b5e7a4b3` -> `v0.0.0-20241014173422-cfa47c3a1cc8` sigs.k8s.io/structured-merge-diff/v4 `v4.4.2` -> `v4.5.0` Signed-off-by: Jonathan Gonzalez V. Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Jonathan Gonzalez V. 
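One note on the prometheus-operator bump, visible in the PodMonitor changes below: `PodMetricsEndpoint.Port` is a `*string` in monitoring v0.79 (it was a plain `string` in v0.78), so callers now take the address of a named variable. A minimal sketch of the adapted usage:

```go
package main

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func main() {
	// Port is a pointer field in monitoring v0.79, so a named variable is needed.
	metricsPort := "metrics"
	endpoint := monitoringv1.PodMetricsEndpoint{Port: &metricsPort}
	fmt.Println(*endpoint.Port) // prints: metrics
}
```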
--- go.mod | 14 ++++++------- go.sum | 28 +++++++++++++------------- pkg/specs/pgbouncer/podmonitor.go | 3 ++- pkg/specs/pgbouncer/podmonitor_test.go | 2 +- pkg/specs/podmonitor.go | 3 ++- pkg/specs/podmonitor_test.go | 5 +++-- 6 files changed, 29 insertions(+), 26 deletions(-) diff --git a/go.mod b/go.mod index f0ba746aaa..fe86702085 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 @@ -99,13 +99,13 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - golang.org/x/crypto v0.28.0 // indirect + golang.org/x/crypto v0.30.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect - golang.org/x/net v0.30.0 // indirect + golang.org/x/net v0.32.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect + golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -115,8 +115,8 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kustomize/api v0.18.0 // indirect sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect ) diff --git a/go.sum b/go.sum index 3f3e51ffa4..e9a085e58a 100644 --- a/go.sum +++ b/go.sum @@ -153,8 +153,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 h1:SyoVBXD/r0PntR1rprb90ClI32FSUNOCWqqTatnipHM= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 h1:DGv150w4UyxnjNHlkCw85R3+lspOxegtdnbpP2vKRrk= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2/go.mod h1:AVMP4QEW8xuGWnxaWSpI3kKjP9fDA31nO68zsyREJZA= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -215,8 +215,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -225,15 +225,15 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -246,8 +246,8 @@ golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -297,13 +297,13 @@ k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJ k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/specs/pgbouncer/podmonitor.go b/pkg/specs/pgbouncer/podmonitor.go index 579a8006d6..ff7962e3b9 100644 --- a/pkg/specs/pgbouncer/podmonitor.go +++ b/pkg/specs/pgbouncer/podmonitor.go @@ -51,8 +51,9 @@ func (c PoolerPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor { utils.SetAsOwnedBy(&meta, c.pooler.ObjectMeta, c.pooler.TypeMeta) + metricsPort := "metrics" endpoint := monitoringv1.PodMetricsEndpoint{ - Port: "metrics", + Port: &metricsPort, } if c.pooler.Spec.Monitoring != nil { diff --git a/pkg/specs/pgbouncer/podmonitor_test.go b/pkg/specs/pgbouncer/podmonitor_test.go index 8643955ff5..66a16fd644 100644 --- a/pkg/specs/pgbouncer/podmonitor_test.go +++ b/pkg/specs/pgbouncer/podmonitor_test.go @@ -79,7 +79,7 @@ var _ = Describe("PoolerPodMonitorManager", func() { })) Expect(podMonitor.Spec.PodMetricsEndpoints).To(HaveLen(1)) - Expect(podMonitor.Spec.PodMetricsEndpoints[0].Port).To(Equal("metrics")) + Expect(*podMonitor.Spec.PodMetricsEndpoints[0].Port).To(Equal("metrics")) }) }) diff --git a/pkg/specs/podmonitor.go b/pkg/specs/podmonitor.go index 25770e3684..4a7ab863ef 100644 --- a/pkg/specs/podmonitor.go +++ b/pkg/specs/podmonitor.go @@ -44,8 +44,9 @@ func (c ClusterPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor { } c.cluster.SetInheritedDataAndOwnership(&meta) + metricsPort := "metrics" endpoint := monitoringv1.PodMetricsEndpoint{ - Port: "metrics", + Port: &metricsPort, } if c.cluster.IsMetricsTLSEnabled() { diff --git a/pkg/specs/podmonitor_test.go b/pkg/specs/podmonitor_test.go index 6c486e1808..b043eb5fe5 100644 --- a/pkg/specs/podmonitor_test.go +++ b/pkg/specs/podmonitor_test.go @@ -35,6 +35,7 @@ var _ = Describe("PodMonitor test", func() { clusterName = "test" clusterNamespace = 
"test-namespace" ) + metricsPort := "metrics" assertPodMonitorCorrect := func(cluster *apiv1.Cluster, expectedEndpoint monitoringv1.PodMetricsEndpoint) { getMetricRelabelings := func() []monitoringv1.RelabelConfig { @@ -121,7 +122,7 @@ var _ = Describe("PodMonitor test", func() { }, } - expectedEndpoint := monitoringv1.PodMetricsEndpoint{Port: "metrics"} + expectedEndpoint := monitoringv1.PodMetricsEndpoint{Port: &metricsPort} assertPodMonitorCorrect(&cluster, expectedEndpoint) }) @@ -142,7 +143,7 @@ var _ = Describe("PodMonitor test", func() { } expectedEndpoint := monitoringv1.PodMetricsEndpoint{ - Port: "metrics", + Port: &metricsPort, Scheme: "https", TLSConfig: &monitoringv1.SafeTLSConfig{ CA: monitoringv1.SecretOrConfigMap{ From d90792f87a36e1f884fd3e98eb2b6763e9a36b90 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 11:47:34 +0100 Subject: [PATCH 253/836] chore(deps): update operator framework (main) (#6370) https://github.com/operator-framework/operator-registry `v1.48.0` -> `v1.49.0` https://github.com/redhat-openshift-ecosystem/openshift-preflight `1.10.2` -> `1.11.1` --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index c4f7a65e3d..a80d95dfb7 100644 --- a/Makefile +++ b/Makefile @@ -47,8 +47,8 @@ GORELEASER_VERSION ?= v2.5.0 SPELLCHECK_VERSION ?= 0.45.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.38.0 -OPM_VERSION ?= v1.48.0 -PREFLIGHT_VERSION ?= 1.10.2 +OPM_VERSION ?= v1.49.0 +PREFLIGHT_VERSION ?= 1.11.1 OPENSHIFT_VERSIONS ?= v4.12-v4.18 ARCH ?= amd64 From 27fb8a6662dead86aec744f5ba7dc4268f15dd40 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 14:03:16 +0100 Subject: [PATCH 254/836] fix(deps): update module github.com/onsi/ginkgo/v2 to v2.22.1 (main) (#6391) --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index fe86702085..7aad95057a 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/ginkgo/v2 v2.22.1 github.com/onsi/gomega v1.36.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 github.com/prometheus/client_golang v1.20.5 @@ -70,7 +70,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect @@ -107,7 +107,7 @@ require ( golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/protobuf v1.36.0 // indirect diff --git a/go.sum b/go.sum index e9a085e58a..096950aade 100644 --- a/go.sum +++ b/go.sum @@ -71,8 +71,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz 
v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -142,8 +142,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= -github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= +github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -254,8 +254,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 6d2e5aa6e901a149fb4ac9b7994c1e62a4c1eb4f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 16:33:30 +0100 Subject: [PATCH 255/836] chore(deps): update dependency kubernetes-csi/external-attacher to v4.8.0 (main) (#6392) --- hack/setup-cluster.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 
e668d5097e..7bd5e66072 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -30,7 +30,7 @@ CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 EXTERNAL_PROVISIONER_VERSION=v5.1.0 EXTERNAL_RESIZER_VERSION=v1.12.0 -EXTERNAL_ATTACHER_VERSION=v4.7.0 +EXTERNAL_ATTACHER_VERSION=v4.8.0 K8S_VERSION=${K8S_VERSION-} KUBECTL_VERSION=${KUBECTL_VERSION-} CSI_DRIVER_HOST_PATH_VERSION=${CSI_DRIVER_HOST_PATH_VERSION:-$CSI_DRIVER_HOST_PATH_DEFAULT_VERSION} From 98541cd3fe578fc37ffc4149c997ef61d08503fd Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 20 Dec 2024 18:08:44 +0100 Subject: [PATCH 256/836] chore: add missing kinds to `groupversion_info` (#6390) This patch adds the missing kinds to the groupversion_info file. This can be useful when referencing CRDs `kind` value. Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- api/v1/groupversion_info.go | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go index 44bab3db5b..bb665cb83b 100644 --- a/api/v1/groupversion_info.go +++ b/api/v1/groupversion_info.go @@ -24,24 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/scheme" ) -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"} - - // ClusterGVK is the triple to reach Cluster resources in k8s - ClusterGVK = schema.GroupVersionResource{ - Group: GroupVersion.Group, - Version: GroupVersion.Version, - Resource: "clusters", - } - - // PoolerGVK is the triple to reach Pooler resources in k8s - PoolerGVK = schema.GroupVersionResource{ - Group: GroupVersion.Group, - Version: GroupVersion.Version, - Resource: "poolers", - } - +const ( // ClusterKind is the kind name of Clusters ClusterKind = "Cluster" @@ -57,6 +40,20 @@ var ( // ClusterImageCatalogKind is the kind name of the cluster-wide image catalogs ClusterImageCatalogKind = "ClusterImageCatalog" + // PublicationKind is the kind name of publications + PublicationKind = "Publication" + + // SubscriptionKind is the kind name of subscriptions + SubscriptionKind = "Subscription" + + // DatabaseKind is the kind name of databases + DatabaseKind = "Database" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"} + // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} From 74507bb789923259f00c1c5ca15d140458e30fde Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Sun, 22 Dec 2024 11:15:23 +0100 Subject: [PATCH 257/836] feat: check the number of spec.schedule fields (#5396) The `schedule` field of `scheduledbackup.spec` can have 5 or 6 parameters as defined by the [Go cron package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format) This syntax may be misleading when using just 5 parameters, as the `seconds` field is not included. This patch improves the webhook to raise a warning when a schedule specification has just 5 fields while retaining compatibility. 
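A short sketch of the ambiguity, using the same `cron.Parse` call as the webhook: a five-field crontab-style schedule still parses in the Go cron format, but its first field is read as seconds, so "0 0 * * *" fires every hour on the hour rather than daily at midnight. Counting the fields makes the warning cheap to raise:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/robfig/cron"
)

func main() {
	// Intended as crontab "daily at midnight", but in the six-field Go cron
	// format this reads second=0, minute=0, every hour/day/month: hourly.
	schedule := "0 0 * * *"

	if _, err := cron.Parse(schedule); err != nil {
		fmt.Println("invalid schedule:", err)
		return
	}
	if len(strings.Fields(schedule)) != 6 {
		fmt.Println("warning: schedule may not have the right number of arguments (six expected)")
	}
}
```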
Closes #5380 Signed-off-by: Pierrick Signed-off-by: Leonardo Cecchi Signed-off-by: Marco Nenciarini Co-authored-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini --- api/v1/scheduledbackup_funcs_test.go | 6 ++++-- api/v1/scheduledbackup_webhook.go | 20 ++++++++++++++------ api/v1/scheduledbackup_webhook_test.go | 25 +++++++++++++++++++++---- 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/api/v1/scheduledbackup_funcs_test.go b/api/v1/scheduledbackup_funcs_test.go index 9ef98a3692..d4da0915ea 100644 --- a/api/v1/scheduledbackup_funcs_test.go +++ b/api/v1/scheduledbackup_funcs_test.go @@ -77,7 +77,8 @@ var _ = Describe("Scheduled backup", func() { Schedule: "* * * * * *", }, } - result := scheduledBackup.validate() + warnings, result := scheduledBackup.validate() + Expect(warnings).To(BeEmpty()) Expect(result).To(HaveLen(1)) Expect(result[0].Field).To(Equal("spec.online")) }) @@ -90,7 +91,8 @@ var _ = Describe("Scheduled backup", func() { Schedule: "* * * * * *", }, } - result := scheduledBackup.validate() + warnings, result := scheduledBackup.validate() + Expect(warnings).To(BeEmpty()) Expect(result).To(HaveLen(1)) Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) }) diff --git a/api/v1/scheduledbackup_webhook.go b/api/v1/scheduledbackup_webhook.go index e1aebeedf5..30be039614 100644 --- a/api/v1/scheduledbackup_webhook.go +++ b/api/v1/scheduledbackup_webhook.go @@ -17,6 +17,8 @@ limitations under the License. package v1 import ( + "strings" + "github.com/cloudnative-pg/machinery/pkg/log" "github.com/robfig/cron" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -56,13 +58,11 @@ var _ webhook.Validator = &ScheduledBackup{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type func (r *ScheduledBackup) ValidateCreate() (admission.Warnings, error) { - var allErrs field.ErrorList scheduledBackupLog.Info("validate create", "name", r.Name, "namespace", r.Namespace) - allErrs = append(allErrs, r.validate()...) 
- + warnings, allErrs := r.validate() if len(allErrs) == 0 { - return nil, nil + return warnings, nil } return nil, apierrors.NewInvalid( @@ -82,15 +82,23 @@ func (r *ScheduledBackup) ValidateDelete() (admission.Warnings, error) { return nil, nil } -func (r *ScheduledBackup) validate() field.ErrorList { +func (r *ScheduledBackup) validate() (admission.Warnings, field.ErrorList) { var result field.ErrorList + var warnings admission.Warnings if _, err := cron.Parse(r.GetSchedule()); err != nil { result = append(result, field.Invalid( field.NewPath("spec", "schedule"), r.Spec.Schedule, err.Error())) + } else if len(strings.Fields(r.Spec.Schedule)) != 6 { + warnings = append( + warnings, + "Schedule parameter may not have the right number of arguments "+ + "(usually six arguments are needed)", + ) } + if r.Spec.Method == BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { result = append(result, field.Invalid( field.NewPath("spec", "method"), @@ -118,5 +126,5 @@ func (r *ScheduledBackup) validate() field.ErrorList { )) } - return result + return warnings, result } diff --git a/api/v1/scheduledbackup_webhook_test.go b/api/v1/scheduledbackup_webhook_test.go index b31e954741..0ef5043a97 100644 --- a/api/v1/scheduledbackup_webhook_test.go +++ b/api/v1/scheduledbackup_webhook_test.go @@ -31,7 +31,20 @@ var _ = Describe("Validate schedule", func() { }, } - result := schedule.validate() + warnings, result := schedule.validate() + Expect(warnings).To(BeEmpty()) + Expect(result).To(BeEmpty()) + }) + + It("warn the user if the schedule has a wrong number of arguments", func() { + schedule := &ScheduledBackup{ + Spec: ScheduledBackupSpec{ + Schedule: "1 2 3 4 5", + }, + } + + warnings, result := schedule.validate() + Expect(warnings).To(HaveLen(1)) Expect(result).To(BeEmpty()) }) @@ -42,7 +55,8 @@ var _ = Describe("Validate schedule", func() { }, } - result := schedule.validate() + warnings, result := schedule.validate() + Expect(warnings).To(BeEmpty()) Expect(result).To(HaveLen(1)) }) @@ -54,7 +68,9 @@ var _ = Describe("Validate schedule", func() { }, } utils.SetVolumeSnapshot(true) - result := schedule.validate() + + warnings, result := schedule.validate() + Expect(warnings).To(BeEmpty()) Expect(result).To(BeEmpty()) }) @@ -66,7 +82,8 @@ var _ = Describe("Validate schedule", func() { }, } utils.SetVolumeSnapshot(false) - result := schedule.validate() + warnings, result := schedule.validate() + Expect(warnings).To(BeEmpty()) Expect(result).To(HaveLen(1)) Expect(result[0].Field).To(Equal("spec.method")) }) From c76557213d92f8b7311a0bc72d36dc505e1cf79d Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Sun, 22 Dec 2024 16:48:13 +0100 Subject: [PATCH 258/836] docs: fix genref config (#6422) Signed-off-by: Marco Nenciarini --- docs/config.yaml | 18 +++++++++-------- docs/src/cloudnative-pg.v1.md | 20 +++++++++++++------ ...cluster-example-with-backup-scaleway.yaml} | 0 3 files changed, 24 insertions(+), 14 deletions(-) rename docs/src/samples/{cluster-exemple-with-backup-scaleway.yaml => cluster-example-with-backup-scaleway.yaml} (100%) diff --git a/docs/config.yaml b/docs/config.yaml index 54ecf6e949..7b260f3bc8 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -32,14 +32,16 @@ externalPackages: hideTypePatterns: - "ParseError$" - # We cannot exclude all `List$` because we declare PluginConfigurationList - - "BackupList$" - - "ClusterList$" - - "ClusterImageCatalogList$" - - "DatabaseList$" - - "ImageCatalogList$" - - "PoolerList$" - - "ScheduledBackupList$" + # We cannot exclude all 
`List$` because we declare PluginConfigurationList and ExternalClusterList + - "\\.BackupList$" + - "\\.ClusterList$" + - "\\.ClusterImageCatalogList$" + - "\\.DatabaseList$" + - "\\.ImageCatalogList$" + - "\\.PoolerList$" + - "\\.ScheduledBackupList$" + - "\\.PublicationList$" + - "\\.SubscriptionList$" markdownDisabled: false diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 48b53866ef..fa9d2c9242 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -229,9 +229,6 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- ## Publication {#postgresql-cnpg-io-v1-Publication} -**Appears in:** - -

Publication is the Schema for the publications API

@@ -303,9 +300,6 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- ## Subscription {#postgresql-cnpg-io-v1-Subscription} -**Appears in:** - -

Subscription is the Schema for the subscriptions API

@@ -2712,6 +2706,20 @@ storage

Database spec field documentation (descriptions before and after):
cluster
-The corresponding cluster
+The name of the PostgreSQL cluster hosting the database.
ensure (EnsureOption)
-Ensure the PostgreSQL database is present or absent - defaults to "present"
+Ensure the PostgreSQL database is present or absent - defaults to "present".
name [Required] (string)
-The name inside PostgreSQL
+The name of the database to create inside PostgreSQL. This setting cannot be changed.
owner [Required] (string)
-The owner
+Maps to the OWNER parameter of CREATE DATABASE. Maps to the OWNER TO command of ALTER DATABASE. The role name of the user who owns the database inside PostgreSQL.
template (string)
-The name of the template from which to create the new database
+Maps to the TEMPLATE parameter of CREATE DATABASE. This setting cannot be changed. The name of the template from which to create this database.
encoding (string)
-The encoding (cannot be changed)
+Maps to the ENCODING parameter of CREATE DATABASE. This setting cannot be changed. Character set encoding to use in the database.
locale (string)
-The locale (cannot be changed) Sets the default collation order and character classification in the new database.
+Maps to the LOCALE parameter of CREATE DATABASE. This setting cannot be changed. Sets the default collation order and character classification in the new database.
localeProvider (string)
-The LOCALE_PROVIDER (cannot be changed) This option sets the locale provider for databases created in the new cluster. Available from PostgreSQL 16.
+Maps to the LOCALE_PROVIDER parameter of CREATE DATABASE. This setting cannot be changed. This option sets the locale provider for databases created in the new cluster. Available from PostgreSQL 16.
localeCollate (string)
-The LC_COLLATE (cannot be changed)
+Maps to the LC_COLLATE parameter of CREATE DATABASE. This setting cannot be changed.
localeCType (string)
-The LC_CTYPE (cannot be changed)
+Maps to the LC_CTYPE parameter of CREATE DATABASE. This setting cannot be changed.
icuLocale (string)
-The ICU_LOCALE (cannot be changed) Specifies the ICU locale when the ICU provider is used. This option requires localeProvider to be set to icu. Available from PostgreSQL 15.
+Maps to the ICU_LOCALE parameter of CREATE DATABASE. This setting cannot be changed. Specifies the ICU locale when the ICU provider is used. This option requires localeProvider to be set to icu. Available from PostgreSQL 15.
icuRules (string)
-The ICU_RULES (cannot be changed) Specifies additional collation rules to customize the behavior of the default collation. This option requires localeProvider to be set to icu. Available from PostgreSQL 16.
+Maps to the ICU_RULES parameter of CREATE DATABASE. This setting cannot be changed. Specifies additional collation rules to customize the behavior of the default collation. This option requires localeProvider to be set to icu. Available from PostgreSQL 16.
builtinLocale (string)
-The BUILTIN_LOCALE (cannot be changed) Specifies the locale name when the builtin provider is used. This option requires localeProvider to be set to builtin. Available from PostgreSQL 17.
+Maps to the BUILTIN_LOCALE parameter of CREATE DATABASE. This setting cannot be changed. Specifies the locale name when the builtin provider is used. This option requires localeProvider to be set to builtin. Available from PostgreSQL 17.
collationVersion (string)
-The COLLATION_VERSION (cannot be changed)
+Maps to the COLLATION_VERSION parameter of CREATE DATABASE. This setting cannot be changed.
isTemplate (bool)
-True when the database is a template
+Maps to the IS_TEMPLATE parameter of CREATE DATABASE and ALTER DATABASE. If true, this database is considered a template and can be cloned by any user with CREATEDB privileges.
allowConnections (bool)
-True when connections to this database are allowed
+Maps to the ALLOW_CONNECTIONS parameter of CREATE DATABASE and ALTER DATABASE. If false then no one can connect to this database.
connectionLimit (int)
-Connection limit, -1 means no limit and -2 means the database is not valid
+Maps to the CONNECTION LIMIT clause of CREATE DATABASE and ALTER DATABASE. How many concurrent connections can be made to this database. -1 (the default) means no limit.
tablespace (string)
-The default tablespace of this database
+Maps to the TABLESPACE parameter of CREATE DATABASE. Maps to the SET TABLESPACE command of ALTER DATABASE. The name of the tablespace (in PostgreSQL) that will be associated with the new database. This tablespace will be the default tablespace used for objects created in this database.
databaseReclaimPolicy (DatabaseReclaimPolicy)
-The policy for end-of-life maintenance of this database
+The policy for end-of-life maintenance of this database.
+## ExternalClusterList {#postgresql-cnpg-io-v1-ExternalClusterList} + +(Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.ExternalCluster`) + +**Appears in:** + +- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) + + +

ExternalClusterList is a list of external clusters

+ + + + ## ImageCatalogRef {#postgresql-cnpg-io-v1-ImageCatalogRef} diff --git a/docs/src/samples/cluster-exemple-with-backup-scaleway.yaml b/docs/src/samples/cluster-example-with-backup-scaleway.yaml similarity index 100% rename from docs/src/samples/cluster-exemple-with-backup-scaleway.yaml rename to docs/src/samples/cluster-example-with-backup-scaleway.yaml From bef05706e1757634882b5a22dc7b4b41da650c20 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 22 Dec 2024 21:28:28 +0100 Subject: [PATCH 259/836] fix(deps): update module github.com/jackc/pgx/v5 to v5.7.2 (main) (#6416) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7aad95057a..78f66c0b64 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/go-logr/logr v1.4.2 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 - github.com/jackc/pgx/v5 v5.7.1 + github.com/jackc/pgx/v5 v5.7.2 github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 @@ -99,7 +99,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - golang.org/x/crypto v0.30.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.32.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect diff --git a/go.sum b/go.sum index 096950aade..8c96643b6a 100644 --- a/go.sum +++ b/go.sum @@ -89,8 +89,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= -github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= +github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -215,8 +215,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 
h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= From 4334d111dd56355b5a405faa1759cc662cf2a28b Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 23 Dec 2024 10:32:35 +0100 Subject: [PATCH 260/836] ci: stop testing Postgres 12 (#6425) Signed-off-by: Marco Nenciarini --- .github/pg_versions.json | 4 ---- .github/postgres-versions-update.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 119882aec3..a6a9696f2b 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -18,9 +18,5 @@ "13": [ "13.18", "13.17" - ], - "12": [ - "12.22", - "12.21" ] } \ No newline at end of file diff --git a/.github/postgres-versions-update.py b/.github/postgres-versions-update.py index 25ce0402d1..92e5219bef 100644 --- a/.github/postgres-versions-update.py +++ b/.github/postgres-versions-update.py @@ -21,7 +21,7 @@ from packaging import version from subprocess import check_output -min_supported_major = 12 +min_supported_major = 13 pg_repo_name = "cloudnative-pg/postgresql" pg_version_re = re.compile(r"^(\d+)(?:\.\d+|beta\d+|rc\d+|alpha\d+)(-\d+)?$") From ed9105de879f770cc3d7484a8130386b7301516e Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 23 Dec 2024 10:46:12 +0100 Subject: [PATCH 261/836] chore: remove ExternalClusterList type (#6426) Our documentation generator is not able to handle that type alias, so let's get rid of it. Closes: #6427 Signed-off-by: Leonardo Cecchi Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 1 - api/v1/cluster_funcs.go | 8 +- api/v1/cluster_types.go | 5 +- api/v1/zz_generated.deepcopy.go | 23 +----- docs/config.yaml | 2 +- docs/src/cloudnative-pg.v1.md | 81 +++++++++++++++++-- internal/cmd/manager/walrestore/cmd.go | 5 +- internal/controller/cluster_controller.go | 5 +- .../subscription_controller_test.go | 4 +- 9 files changed, 93 insertions(+), 41 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 42d7a6fa5b..47260d5468 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -155,7 +155,6 @@ EphemeralVolumeSource EphemeralVolumesSizeLimit EphemeralVolumesSizeLimitConfiguration ExternalCluster -ExternalClusterList FQDN Fei Filesystem diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 3ffba1a3da..b0345cdc16 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -82,13 +82,13 @@ func (pluginList PluginConfigurationList) GetEnabledPluginNames() (result []stri return pluginNames } -// GetEnabledPluginNames gets the name of the plugins that are +// GetExternalClustersEnabledPluginNames gets the name of the plugins that are // involved in the reconciliation of this external cluster list. This // list is usually composed by the plugins that need to be active to // recover data from the external clusters. 
-func (externalClusterList ExternalClusterList) GetEnabledPluginNames() (result []string) { - pluginNames := make([]string, 0, len(externalClusterList)) - for _, externalCluster := range externalClusterList { +func GetExternalClustersEnabledPluginNames(externalClusters []ExternalCluster) (result []string) { + pluginNames := make([]string, 0, len(externalClusters)) + for _, externalCluster := range externalClusters { if externalCluster.PluginConfiguration != nil { pluginNames = append(pluginNames, externalCluster.PluginConfiguration.Name) } diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 2814f18cb6..ae6e6a181e 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -422,7 +422,7 @@ type ClusterSpec struct { // The list of external clusters which are used in the configuration // +optional - ExternalClusters ExternalClusterList `json:"externalClusters,omitempty"` + ExternalClusters []ExternalCluster `json:"externalClusters,omitempty"` // The instances' log level, one of the following values: error, warning, info (default), debug, trace // +kubebuilder:default:=info @@ -2060,9 +2060,6 @@ type ClusterMonitoringTLSConfiguration struct { Enabled bool `json:"enabled,omitempty"` } -// ExternalClusterList is a list of external clusters -type ExternalClusterList []ExternalCluster - // ExternalCluster represents the connection parameters to an // external cluster which is used in the other sections of the configuration type ExternalCluster struct { diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index fbaec944e2..8c6a2a71e6 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -790,7 +790,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { } if in.ExternalClusters != nil { in, out := &in.ExternalClusters, &out.ExternalClusters - *out = make(ExternalClusterList, len(*in)) + *out = make([]ExternalCluster, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1258,27 +1258,6 @@ func (in *ExternalCluster) DeepCopy() *ExternalCluster { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ExternalClusterList) DeepCopyInto(out *ExternalClusterList) { - { - in := &in - *out = make(ExternalClusterList, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalClusterList. -func (in ExternalClusterList) DeepCopy() ExternalClusterList { - if in == nil { - return nil - } - out := new(ExternalClusterList) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ImageCatalog) DeepCopyInto(out *ImageCatalog) { *out = *in diff --git a/docs/config.yaml b/docs/config.yaml index 7b260f3bc8..9717ffa456 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -32,7 +32,7 @@ externalPackages: hideTypePatterns: - "ParseError$" - # We cannot exclude all `List$` because we declare PluginConfigurationList and ExternalClusterList + # We cannot exclude all `List$` because we declare PluginConfigurationList - "\\.BackupList$" - "\\.ClusterList$" - "\\.ClusterImageCatalogList$" diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index fa9d2c9242..3db291f4cd 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -1845,7 +1845,7 @@ it can be with a switchover (switchover) or in-place (restart externalClusters
-ExternalClusterList +[]ExternalCluster

The list of external clusters which are used in the configuration

@@ -2706,19 +2706,89 @@ storage

-## ExternalClusterList {#postgresql-cnpg-io-v1-ExternalClusterList} +## ExternalCluster {#postgresql-cnpg-io-v1-ExternalCluster} -(Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.ExternalCluster`) **Appears in:** - [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) -

ExternalClusterList is a list of external clusters

- +

ExternalCluster represents the connection parameters to an +external cluster which is used in the other sections of the configuration

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ExternalCluster fields (all added):
+name [Required] (string): The server name, required
+connectionParameters (map[string]string): The list of connection parameters, such as dbname, host, username, etc
+sslCert (core/v1.SecretKeySelector): The reference to an SSL certificate to be used to connect to this instance
+sslKey (core/v1.SecretKeySelector): The reference to an SSL private key to be used to connect to this instance
+sslRootCert (core/v1.SecretKeySelector): The reference to an SSL CA public key to be used to connect to this instance
+password (core/v1.SecretKeySelector): The reference to the password to be used to connect to the server. If a password is provided, CloudNativePG creates a PostgreSQL passfile at /controller/external/NAME/pass (where "NAME" is the cluster's name). This passfile is automatically referenced in the connection string when establishing a connection to the remote PostgreSQL server from the current PostgreSQL Cluster. This ensures secure and efficient password management for external clusters.
+barmanObjectStore (github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration): The configuration for the barman-cloud tool suite
+plugin [Required] (PluginConfiguration): The configuration of the plugin that is taking care of WAL archiving and backups for this external cluster
## ImageCatalogRef {#postgresql-cnpg-io-v1-ImageCatalogRef} @@ -3616,6 +3686,7 @@ the operator calls PgBouncer's PAUSE and RESUME comman **Appears in:** +- [ExternalCluster](#postgresql-cnpg-io-v1-ExternalCluster)

PluginConfiguration specifies a plugin that need to be loaded for this diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index 4accc2a023..8b1835a271 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -262,7 +262,10 @@ func restoreWALViaPlugins( availablePluginNamesSet := stringset.From(availablePluginNames) enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames() - enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...) + enabledPluginNames = append( + enabledPluginNames, + apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)..., + ) enabledPluginNamesSet := stringset.From(enabledPluginNames) client, err := pluginClient.WithPlugins( diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 6194505985..958f98f1bd 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -176,7 +176,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct // Load the plugins required to bootstrap and reconcile this cluster enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames() - enabledPluginNames = append(enabledPluginNames, cluster.Spec.ExternalClusters.GetEnabledPluginNames()...) + enabledPluginNames = append( + enabledPluginNames, + apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)..., + ) pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second) defer cancelPluginLoading() diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go index f6afdc0c4e..f699324805 100644 --- a/internal/management/controller/subscription_controller_test.go +++ b/internal/management/controller/subscription_controller_test.go @@ -69,8 +69,8 @@ var _ = Describe("Managed subscription controller tests", func() { TargetPrimary: "cluster-example-1", }, Spec: apiv1.ClusterSpec{ - ExternalClusters: apiv1.ExternalClusterList{ - apiv1.ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ + { Name: "cluster-other", ConnectionParameters: map[string]string{ "host": "localhost", From 7f8260913ca1f306e81bf3131e46c7fe5ca5cc01 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 23 Dec 2024 11:20:01 +0100 Subject: [PATCH 262/836] chore: remove PluginConfigurationList type (#6431) Our documentation generator is not able to cope with that, so let's remove it. 
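The refactor follows the same shape as the ExternalClusterList removal: the method on the named slice alias becomes a package-level function over the plain slice, so the alias can be dropped from the API package. In outline (a sketch assuming the surrounding apiv1 types, where PluginConfiguration carries a Name field and an IsEnabled method):

```go
package v1

// Before: func (pluginList PluginConfigurationList) GetEnabledPluginNames() []string
// After: a free function over []PluginConfiguration; the alias type is deleted.
func GetPluginConfigurationEnabledPluginNames(pluginList []PluginConfiguration) []string {
	pluginNames := make([]string, 0, len(pluginList))
	for _, pluginDeclaration := range pluginList {
		if pluginDeclaration.IsEnabled() {
			pluginNames = append(pluginNames, pluginDeclaration.Name)
		}
	}
	return pluginNames
}
```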
Closes: #6430 Signed-off-by: Leonardo Cecchi --- .wordlist-en-custom.txt | 1 - api/v1/cluster_funcs.go | 4 ++-- api/v1/cluster_types.go | 6 +---- api/v1/zz_generated.deepcopy.go | 23 +------------------ docs/config.yaml | 1 - docs/src/cloudnative-pg.v1.md | 19 +++------------ internal/cmd/manager/walrestore/cmd.go | 2 +- internal/controller/backup_controller.go | 6 ++++- internal/controller/cluster_controller.go | 2 +- pkg/management/postgres/archiver/archiver.go | 3 ++- .../postgres/webserver/plugin_backup.go | 6 ++++- 11 files changed, 21 insertions(+), 52 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 47260d5468..3d1524d0ad 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -304,7 +304,6 @@ PgBouncerSecrets PgBouncerSecretsVersions PgBouncerSpec Philippe -PluginConfigurationList PluginStatus PoLA PodAffinity diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index b0345cdc16..a116aee695 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -70,9 +70,9 @@ func (o OnlineConfiguration) GetImmediateCheckpoint() bool { return *o.ImmediateCheckpoint } -// GetEnabledPluginNames gets the name of the plugins that are involved +// GetPluginConfigurationEnabledPluginNames gets the name of the plugins that are involved // in the reconciliation of this cluster -func (pluginList PluginConfigurationList) GetEnabledPluginNames() (result []string) { +func GetPluginConfigurationEnabledPluginNames(pluginList []PluginConfiguration) (result []string) { pluginNames := make([]string, 0, len(pluginList)) for _, pluginDeclaration := range pluginList { if pluginDeclaration.IsEnabled() { diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index ae6e6a181e..57dae17184 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -475,7 +475,7 @@ type ClusterSpec struct { // The plugins configuration, containing // any plugin to be loaded with the corresponding configuration // +optional - Plugins PluginConfigurationList `json:"plugins,omitempty"` + Plugins []PluginConfiguration `json:"plugins,omitempty"` // The configuration of the probes to be injected // in the PostgreSQL Pods. @@ -534,10 +534,6 @@ type Probe struct { TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } -// PluginConfigurationList represent a set of plugin with their -// configuration parameters -type PluginConfigurationList []PluginConfiguration - const ( // PhaseSwitchover when a cluster is changing the primary node PhaseSwitchover = "Switchover in progress" diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 8c6a2a71e6..b4b9d5b295 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -838,7 +838,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { } if in.Plugins != nil { in, out := &in.Plugins, &out.Plugins - *out = make(PluginConfigurationList, len(*in)) + *out = make([]PluginConfiguration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1860,27 +1860,6 @@ func (in *PluginConfiguration) DeepCopy() *PluginConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in PluginConfigurationList) DeepCopyInto(out *PluginConfigurationList) { - { - in := &in - *out = make(PluginConfigurationList, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfigurationList. -func (in PluginConfigurationList) DeepCopy() PluginConfigurationList { - if in == nil { - return nil - } - out := new(PluginConfigurationList) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PluginStatus) DeepCopyInto(out *PluginStatus) { *out = *in diff --git a/docs/config.yaml b/docs/config.yaml index 9717ffa456..94e4522c2a 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -32,7 +32,6 @@ externalPackages: hideTypePatterns: - "ParseError$" - # We cannot exclude all `List$` because we declare PluginConfigurationList - "\\.BackupList$" - "\\.ClusterList$" - "\\.ClusterImageCatalogList$" diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 3db291f4cd..1734270290 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -1919,7 +1919,7 @@ development/staging purposes.

plugins
-PluginConfigurationList +[]PluginConfiguration

The plugins configuration, containing @@ -3686,6 +3686,8 @@ the operator calls PgBouncer's PAUSE and RESUME comman **Appears in:** +- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) + - [ExternalCluster](#postgresql-cnpg-io-v1-ExternalCluster) @@ -3720,21 +3722,6 @@ cluster to be reconciled

-## PluginConfigurationList {#postgresql-cnpg-io-v1-PluginConfigurationList} - -(Alias of `[]github.com/cloudnative-pg/cloudnative-pg/api/v1.PluginConfiguration`) - -**Appears in:** - -- [ClusterSpec](#postgresql-cnpg-io-v1-ClusterSpec) - - -

PluginConfigurationList represent a set of plugin with their -configuration parameters

- - - - ## PluginStatus {#postgresql-cnpg-io-v1-PluginStatus} diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index 8b1835a271..403dedb063 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -261,7 +261,7 @@ func restoreWALViaPlugins( availablePluginNamesSet := stringset.From(availablePluginNames) - enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames() + enabledPluginNames := apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) enabledPluginNames = append( enabledPluginNames, apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)..., diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 29c6aea6f9..5b5906bccd 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -136,7 +136,11 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } // Load the required plugins - pluginClient, err := cnpgiClient.WithPlugins(ctx, r.Plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + pluginClient, err := cnpgiClient.WithPlugins( + ctx, + r.Plugins, + apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)..., + ) if err != nil { contextLogger.Error(err, "Error loading plugins, retrying") return ctrl.Result{}, err diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 958f98f1bd..bc46d91bae 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -175,7 +175,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct ctx = cluster.SetInContext(ctx) // Load the plugins required to bootstrap and reconcile this cluster - enabledPluginNames := cluster.Spec.Plugins.GetEnabledPluginNames() + enabledPluginNames := apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) enabledPluginNames = append( enabledPluginNames, apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)..., diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go index ccf24efd36..9e6feed0a8 100644 --- a/pkg/management/postgres/archiver/archiver.go +++ b/pkg/management/postgres/archiver/archiver.go @@ -264,7 +264,8 @@ func archiveWALViaPlugins( defer plugins.Close() availablePluginNamesSet := stringset.From(availablePluginNames) - enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames()) + enabledPluginNamesSet := stringset.From( + apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)) client, err := pluginClient.WithPlugins( ctx, diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 2e6f58f5b6..f694f58716 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -90,7 +90,11 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { } defer plugins.Close() - cli, err := pluginClient.WithPlugins(ctx, plugins, b.Cluster.Spec.Plugins.GetEnabledPluginNames()...) 
+ cli, err := pluginClient.WithPlugins( + ctx, + plugins, + apiv1.GetPluginConfigurationEnabledPluginNames(b.Cluster.Spec.Plugins)..., + ) if err != nil { b.markBackupAsFailed(ctx, err) return From 9ae7634e72a222d18eec8cdbea672a1c79eca914 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 23 Dec 2024 11:44:57 +0100 Subject: [PATCH 263/836] docs: Release notes for 1.25.0, 1.24.2, 1.23.6 (#6424) Closes #6420 Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 1 + docs/src/backup.md | 5 +++++ docs/src/preview_version.md | 2 ++ docs/src/recovery.md | 7 ++++++- docs/src/release_notes.md | 5 +++-- docs/src/release_notes/v1.25.md | 27 ++++++++++++++++++++++++--- docs/src/supported_releases.md | 20 ++++++++++---------- 7 files changed, 51 insertions(+), 16 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 3d1524d0ad..16a65fde9c 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -964,6 +964,7 @@ minKubeVersion minSyncReplicas minikube minio +misconfigurations mmap monitoringconfiguration mountPath diff --git a/docs/src/backup.md b/docs/src/backup.md index 4c3f8cb172..cffda0e7fa 100644 --- a/docs/src/backup.md +++ b/docs/src/backup.md @@ -41,6 +41,11 @@ On the other hand, CloudNativePG supports two ways to store physical base backup the supported [Container Storage Interface (CSI) drivers](https://kubernetes-csi.github.io/docs/drivers.html) that provide snapshotting capabilities. +!!! Info + Starting with version 1.25, CloudNativePG includes experimental support for + backup and recovery using plugins, such as the + [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud). + ## WAL archive The WAL archive in PostgreSQL is at the heart of **continuous backup**, and it diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index 8f354f67ae..d2e2702a9f 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -35,6 +35,7 @@ are not backwards compatible and could be removed entirely. There are currently no preview versions available. + diff --git a/docs/src/recovery.md b/docs/src/recovery.md index 1e0a9b931a..e53db596ba 100644 --- a/docs/src/recovery.md +++ b/docs/src/recovery.md @@ -23,6 +23,11 @@ WAL files are pulled from the defined *recovery object store*. Base backups can be taken either on object stores or using volume snapshots. +!!! Info + Starting with version 1.25, CloudNativePG includes experimental support for + backup and recovery using plugins, such as the + [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud). + You can achieve recovery from a *recovery object store* in two ways: - We recommend using a recovery object store, that is, a backup of another cluster @@ -64,7 +69,7 @@ metadata: name: cluster-restore spec: [...] - + superuserSecret: name: superuser-secret diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md index fe2a723507..71c503fb91 100644 --- a/docs/src/release_notes.md +++ b/docs/src/release_notes.md @@ -2,15 +2,16 @@ History of user-visible changes for CloudNativePG, classified for each minor release. -- [CloudNativePG 1.25 - Release Candidate](release_notes/v1.25.md) + +- [CloudNativePG 1.25](release_notes/v1.25.md) - [CloudNativePG 1.24](release_notes/v1.24.md) -- [CloudNativePG 1.23](release_notes/v1.23.md) For information on the community support policy for CloudNativePG, please refer to ["Supported releases"](supported_releases.md). 
Older releases: +- [CloudNativePG 1.23](release_notes/v1.23.md) - [CloudNativePG 1.22](release_notes/old/v1.22.md) - [CloudNativePG 1.21](release_notes/old/v1.21.md) - [CloudNativePG 1.20](release_notes/old/v1.20.md) diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md index 4996532171..0e8ca6b85a 100644 --- a/docs/src/release_notes/v1.25.md +++ b/docs/src/release_notes/v1.25.md @@ -6,9 +6,9 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.25) on the release branch in GitHub. -## Version 1.25.0-rc1 +## Version 1.25.0 -**Release Date:** December 9, 2024 +**Release Date:** December 23, 2024 ### Features @@ -20,6 +20,17 @@ on the release branch in GitHub. for declarative management of PostgreSQL logical replication. These simplify replication setup and facilitate online migrations to CloudNativePG. (#5329) +- **Experimental Support for CNPG-I**: Introducing CNPG-I (CloudNativePG + Interface), a standardized framework designed to extend CloudNativePG + functionality through third-party plugins and foster the growth of the CNPG + ecosystem. + The [Barman Cloud Plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) serves as a live + example, illustrating how plugins can be developed to enhance backup and + recovery workflows. Although CNPG-I support is currently experimental, it + offers a powerful approach to extending CloudNativePG without modifying the + operator’s core code—akin to PostgreSQL extensions. We welcome community + feedback and contributions to shape this exciting new capability. + ### Enhancements - Add the `dataDurability` option to the `.spec.postgresql.synchronous` stanza, @@ -34,6 +45,8 @@ on the release branch in GitHub. larger deployments out of the box. (#5678) - Add the `cnpg.io/userType` label to secrets generated for predefined users, specifically `superuser` and `app`. (#4392) +- Improved validation for the `spec.schedule` field in ScheduledBackups, + raising warnings for potential misconfigurations. (#5396) - `cnpg` plugin: - Enhance the `backup` command to support plugins. (#6045) - Honor the `User-Agent` header in HTTP requests with the API server. (#6153) @@ -48,6 +61,8 @@ on the release branch in GitHub. all previously generated `PersistentVolumeClaims` are missing. (#6170) - Fix the parsing of the `synchronous_standby_names` GUC when `.spec.postgresql.synchronous.method` is set to `first`. (#5955) +- Resolved a potential race condition when patching certain conditions + in CRD statuses, improving reliability in concurrent updates. (#6328) - Correct role changes to apply at the transaction level instead of the database context. (#6064) - Remove the `primary_slot_name` definition from the `override.conf` file on @@ -57,13 +72,19 @@ on the release branch in GitHub. from within the container. (#6247) - Remove unnecessary updates to the Cluster status when verifying changes in the image catalog. (#6277) +- Prevent panic during recovery from an external server without proper backup + configuration. (#6300) +- Resolved a key collision issue in structured logs, where the name field was + inconsistently used to log two distinct values. (#6324) +- Ensure proper quoting of the inRoles field in SQL statements to prevent + syntax errors in generated SQL during role management. (#6346) - `cnpg` plugin: - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257) - Avoid displaying physical backups block when empty with `status` command. 
(#5998)

### Supported Versions

-- **Kubernetes**: 1.31, 1.30, and 1.29
+- **Kubernetes**: 1.32, 1.31, 1.30, and 1.29
- **PostgreSQL**: 17, 16, 15, 14, and 13
 - Default image: PostgreSQL 17.2
 - Officially dropped support for PostgreSQL 12
diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md
index afdffb9ebd..446ed26f99 100644
--- a/docs/src/supported_releases.md
+++ b/docs/src/supported_releases.md
@@ -80,11 +80,11 @@ Git tags for versions are prefixed with `v`.

 ## Support status of CloudNativePG releases

-| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
-|-----------------|----------------------|----------------|---------------------|-------------------------------|---------------------------|-----------------------------|
-| 1.25.x | No (RC) | Dec XX, 2024 | ~ May/Jun, 2025 | 1.29, 1.30, 1.31, 1.32 (!) | 1.27, 1.28 | 13 - 17 |
-| 1.24.x | Yes | Aug 22, 2024 | Feb XX, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 |
-| main | No, development only | | | | | 13 - 17 |
+| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
+|-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------|
+| 1.25.x | Yes | Dec 23, 2024 | ~ May/Jun, 2025 | 1.29, 1.30, 1.31, 1.32 | 1.27, 1.28 | 13 - 17 |
+| 1.24.x | Yes | Aug 22, 2024 | Mar 23, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 |
+| main | No, development only | | | | | 13 - 17 |



@@ -119,11 +119,11 @@ version of PostgreSQL, we might not be able to help you.

 ## Upcoming releases

-| Version | Release date | End of life |
-|-----------------|-----------------------|---------------------------|
-| 1.25.0 | Dec, 2024 | May/Jun, 2025 |
-| 1.26.0 | Mar, 2025 | Aug/Sep, 2025 |
-| 1.27.0 | Jun, 2025 | Dec, 2025 |
+| Version | Release date | End of life |
+|---------|--------------|---------------|
+| 1.26.0 | Mar, 2025 | Aug/Sep, 2025 |
+| 1.27.0 | Jun, 2025 | Dec, 2025 |
+| 1.28.0 | Sep, 2025 | Mar/Apr, 2026 |

 !!! Note
     Feature freeze occurs 1-2 weeks before the release, at which point a

From 55a3137eabef3daf1ca4609121087dd6723c43e1 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Mon, 23 Dec 2024 11:59:52 +0100
Subject: [PATCH 264/836] docs: Release notes for 1.24.2 and 1.23.6 (#6438)

Relates #6420

Signed-off-by: Gabriele Bartolini
---
 docs/src/release_notes/v1.23.md | 51 +++++++++++++++++++++++++++++++++
 docs/src/release_notes/v1.24.md | 46 +++++++++++++++++++++++++++++
 2 files changed, 97 insertions(+)

diff --git a/docs/src/release_notes/v1.23.md b/docs/src/release_notes/v1.23.md
index 8aaf2f773f..1247a53955 100644
--- a/docs/src/release_notes/v1.23.md
+++ b/docs/src/release_notes/v1.23.md
@@ -6,6 +6,57 @@ For a complete list of changes, please refer to the
 [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.23)
 on the release branch in GitHub.

+## Version 1.23.6
+
+**Release Date:** December 23, 2024
+
+!!! Warning
+    This is the final release in the 1.23.x series.
+    Users are strongly encouraged to upgrade to a newer minor version, as 1.23
+    is no longer supported.
+
+### Enhancements
+
+- Enable customization of startup, liveness, and readiness probes through the
+  `.spec.probes` stanza.
(#6266) +- Add the `cnpg.io/userType` label to secrets generated for predefined users, + specifically `superuser` and `app`. (#4392) +- Improved validation for the `spec.schedule` field in ScheduledBackups, + raising warnings for potential misconfigurations. (#5396) +- `cnpg` plugin: + - Honor the `User-Agent` header in HTTP requests with the API server. (#6153) + +### Bug Fixes + +- Ensure the former primary flushes its WAL file queue to the archive before + re-synchronizing as a replica, reducing recovery times and enhancing data + consistency during failovers. (#6141) +- Clean the WAL volume along with the `PGDATA` volume during bootstrap. (#6265) +- Update the operator to set the cluster phase to `Unrecoverable` when + all previously generated `PersistentVolumeClaims` are missing. (#6170) +- Fix the parsing of the `synchronous_standby_names` GUC when + `.spec.postgresql.synchronous.method` is set to `first`. (#5955) +- Resolved a potential race condition when patching certain conditions + in CRD statuses, improving reliability in concurrent updates. (#6328) +- Correct role changes to apply at the transaction level instead of the + database context. (#6064) +- Remove the `primary_slot_name` definition from the `override.conf` file on + the primary to ensure it is always empty. (#6219) +- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods + to enable seamless access to the `pgbouncer` virtual database using `psql` + from within the container. (#6247) +- Remove unnecessary updates to the Cluster status when verifying changes in + the image catalog. (#6277) +- Prevent panic during recovery from an external server without proper backup + configuration. (#6300) +- Resolved a key collision issue in structured logs, where the name field was + inconsistently used to log two distinct values. (#6324) +- Ensure proper quoting of the inRoles field in SQL statements to prevent + syntax errors in generated SQL during role management. (#6346) +- `cnpg` plugin: + - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257) + - Avoid displaying physical backups block when empty with `status` command. (#5998) + ## Version 1.23.5 **Release date:** Oct 16, 2024 diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md index 78182f8180..fdb59d023b 100644 --- a/docs/src/release_notes/v1.24.md +++ b/docs/src/release_notes/v1.24.md @@ -6,6 +6,52 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.24) on the release branch in GitHub. +## Version 1.24.2 + +**Release Date:** December 23, 2024 + +### Enhancements + +- Enable customization of startup, liveness, and readiness probes through the + `.spec.probes` stanza. (#6266) +- Add the `cnpg.io/userType` label to secrets generated for predefined users, + specifically `superuser` and `app`. (#4392) +- Improved validation for the `spec.schedule` field in ScheduledBackups, + raising warnings for potential misconfigurations. (#5396) +- `cnpg` plugin: + - Honor the `User-Agent` header in HTTP requests with the API server. (#6153) + +### Bug Fixes + +- Ensure the former primary flushes its WAL file queue to the archive before + re-synchronizing as a replica, reducing recovery times and enhancing data + consistency during failovers. (#6141) +- Clean the WAL volume along with the `PGDATA` volume during bootstrap. 
(#6265) +- Update the operator to set the cluster phase to `Unrecoverable` when + all previously generated `PersistentVolumeClaims` are missing. (#6170) +- Fix the parsing of the `synchronous_standby_names` GUC when + `.spec.postgresql.synchronous.method` is set to `first`. (#5955) +- Resolved a potential race condition when patching certain conditions + in CRD statuses, improving reliability in concurrent updates. (#6328) +- Correct role changes to apply at the transaction level instead of the + database context. (#6064) +- Remove the `primary_slot_name` definition from the `override.conf` file on + the primary to ensure it is always empty. (#6219) +- Configure libpq environment variables, including `PGHOST`, in PgBouncer pods + to enable seamless access to the `pgbouncer` virtual database using `psql` + from within the container. (#6247) +- Remove unnecessary updates to the Cluster status when verifying changes in + the image catalog. (#6277) +- Prevent panic during recovery from an external server without proper backup + configuration. (#6300) +- Resolved a key collision issue in structured logs, where the name field was + inconsistently used to log two distinct values. (#6324) +- Ensure proper quoting of the inRoles field in SQL statements to prevent + syntax errors in generated SQL during role management. (#6346) +- `cnpg` plugin: + - Ensure the `kubectl` context is properly passed in the `psql` command. (#6257) + - Avoid displaying physical backups block when empty with `status` command. (#5998) + ## Version 1.24.1 **Release date:** Oct 16, 2024 From fb81a49d909db8616a8a47aab94e05a62ca6d50b Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Mon, 23 Dec 2024 14:11:51 +0100 Subject: [PATCH 265/836] docs: upgrade info to 1.25.0 (#6442) Signed-off-by: Gabriele Bartolini --- docs/src/installation_upgrade.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 182ae94d44..cbc2580640 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -251,23 +251,24 @@ When versions are not directly upgradable, the old version needs to be removed before installing the new one. This won't affect user data but only the operator itself. - - -### Upgrading to 1.24 from a previous minor version !!! Warning Every time you are upgrading to a higher minor release, make sure you go through the release notes and upgrade instructions of all the intermediate minor releases. For example, if you want to move - from 1.22.x to 1.24, make sure you go through the release notes - and upgrade instructions for 1.23 and 1.24. + from 1.23.x to 1.25, make sure you go through the release notes + and upgrade instructions for 1.24 and 1.25. + +No changes to existing 1.24 cluster configurations are required when upgrading +to 1.25. 
+ +### Upgrading to 1.24 from a previous minor version #### From Replica Clusters to Distributed Topology From bad5a251642655399eca392abf5d981668fbd8cc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 23 Dec 2024 14:18:46 +0100 Subject: [PATCH 266/836] Version tag to 1.25.0 (#6443) Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- docs/src/installation_upgrade.md | 4 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.25.0.yaml | 17771 +++++++++++++++++++++++++++++ 4 files changed, 17791 insertions(+), 20 deletions(-) create mode 100644 releases/cnpg-1.25.0.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index cbc2580640..5acbdbd854 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -7,12 +7,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.25.0-rc1.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.0.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.25.0-rc1.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.0.yaml ``` You can verify that with: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 45801e223c..d001a3397e 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -30,11 +30,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.25.0-rc1 release of the plugin, for an Intel based +For example, let's install the 1.25.0 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0-rc1/kubectl-cnpg_1.25.0-rc1_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0/kubectl-cnpg_1.25.0_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -45,17 +45,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.25.0-rc1) ... -Setting up cnpg (1.25.0-rc1) ... +Unpacking cnpg (1.25.0) ... +Setting up cnpg (1.25.0) ... ``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.25.0-rc1 release for an +As in the example for `.rpm` packages, let's install the 1.25.0 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0-rc1/kubectl-cnpg_1.25.0-rc1_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0/kubectl-cnpg_1.25.0_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -69,7 +69,7 @@ Dependencies resolved. 
Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.25.0-rc1-1 @commandline 20 M + cnpg x86_64 1.25.0-1 @commandline 20 M Transaction Summary ==================================================================================================== @@ -277,9 +277,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.25.0-rc1 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.25.0 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -333,9 +333,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.25.0-rc1 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.0-rc1 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.25.0 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -558,12 +558,12 @@ Archive: report_operator_.zip ```output ====== Begin of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0-rc1","build":{"Version":"1.25.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0","build":{"Version":"1.25.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0-rc1","build":{"Version":"1.25.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0","build":{"Version":"1.25.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 91f8dcc30e..c4b1c95414 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -20,13 +20,13 @@ package versions const ( // Version is the version of the operator - Version = "1.25.0-rc1" + Version = "1.25.0" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.2" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0-rc1" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0" ) // BuildInfo is a struct containing all the info about the build @@ -36,7 +36,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.25.0-rc1" + buildVersion = "1.25.0" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.25.0.yaml b/releases/cnpg-1.25.0.yaml new file mode 100644 index 0000000000..cbdfc4162b --- /dev/null +++ b/releases/cnpg-1.25.0.yaml @@ -0,0 +1,17771 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: Backup is the Schema for the backups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this 
representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. 
+ `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is tho role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
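+ # A hedged sketch, not produced by controller-gen: the anti-affinity
+ # fields above might be combined in a Cluster spec as follows; the
+ # "app: my-workload" selector value is a hypothetical assumption.
+ #
+ #   affinity:
+ #     enablePodAntiAffinity: true
+ #     additionalPodAntiAffinity:
+ #       preferredDuringSchedulingIgnoredDuringExecution:
+ #       - weight: 100
+ #         podAffinityTerm:
+ #           topologyKey: kubernetes.io/hostname
+ #           labelSelector:
+ #             matchLabels:
+ #               app: my-workload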
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the
+ corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100. 
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a map of key-value pairs used to define the nodes on which
+ the pods can run. 
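+ # A hedged sketch, not produced by controller-gen: a hard node affinity
+ # rule using the fields above; the zone key and values are hypothetical.
+ #
+ #   affinity:
+ #     nodeAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         nodeSelectorTerms:
+ #         - matchExpressions:
+ #           - key: topology.kubernetes.io/zone
+ #             operator: In
+ #             values: ["eu-west-1a", "eu-west-1b"]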
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See k8s documentation
+ for more info on that
+ type: string
+ type: object
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ providing explicitly the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent. 
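+ # A hedged sketch, not produced by controller-gen: the scheduling fields
+ # documented above (podAntiAffinityType, topologyKey, nodeSelector,
+ # tolerations); the "dedicated" taint key and label values are assumptions.
+ #
+ #   affinity:
+ #     podAntiAffinityType: required
+ #     topologyKey: kubernetes.io/hostname
+ #     nodeSelector:
+ #       workload: postgres
+ #     tolerations:
+ #     - key: dedicated
+ #       operator: Equal
+ #       value: postgres
+ #       effect: NoSchedule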
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to backup the data files
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent. 
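+ # A hedged sketch, not produced by controller-gen: a minimal
+ # barmanObjectStore stanza using the data/destinationPath fields above;
+ # the bucket name is a hypothetical placeholder.
+ #
+ #   backup:
+ #     barmanObjectStore:
+ #       destinationPath: s3://my-bucket/backups
+ #       data:
+ #         compression: gzip
+ #         jobs: 2
+ #         immediateCheckpoint: true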
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, it is presumed to be running inside a GKE environment;
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key-value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ providing explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key-value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. 
Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are empty string, which will default to `prefer-standby` policy,
+ `primary` to have backups run always on primary instances, `prefer-standby`
+ to have backups run preferably on the most updated standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations are key-value pairs that will be added
+ to .metadata.annotations snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels are key-value pairs that will be added
+ to .metadata.labels snapshot resources. 
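+ # A hedged sketch, not produced by controller-gen: WAL archiving tuning
+ # plus a retention policy, per the fields above; the bucket path is a
+ # hypothetical placeholder.
+ #
+ #   backup:
+ #     retentionPolicy: "30d"
+ #     target: prefer-standby
+ #     barmanObjectStore:
+ #       destinationPath: s3://my-bucket/backups
+ #       wal:
+ #         compression: gzip
+ #         maxParallel: 8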
+ type: object
+ online:
+ default: true
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ type: boolean
+ onlineConfiguration:
+ default:
+ immediateCheckpoint: false
+ waitForArchive: true
+ description: Configuration parameters to control the online/hot
+ backup with volume snapshots
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ snapshotOwnerReference:
+ default: none
+ description: SnapshotOwnerReference indicates the type of
+ owner reference the snapshot should have
+ enum:
+ - none
+ - cluster
+ - backup
+ type: string
+ tablespaceClassName:
+ additionalProperties:
+ type: string
+ description: |-
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+ Defaults to the PGDATA Snapshot Class, if set.
+ type: object
+ walClassName:
+ description: WalClassName specifies the Snapshot Class to
+ be used for the PG_WAL PersistentVolumeClaim.
+ type: string
+ type: object
+ type: object
+ bootstrap:
+ description: Instructions to bootstrap this cluster
+ properties:
+ initdb:
+ description: Bootstrap the cluster via initdb
+ properties:
+ builtinLocale:
+ description: |-
+ Specifies the locale name when the builtin provider is used.
+ This option requires `localeProvider` to be set to `builtin`.
+ Available from PostgreSQL 17.
+ type: string
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default:`UTF8`)
+ type: string
+ icuLocale:
+ description: |-
+ Specifies the ICU locale when the ICU provider is used.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
+ type: string
+ icuRules:
+ description: |-
+ Specifies additional collation rules to customize the behavior of the default collation.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 16. 
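+ # A hedged sketch, not produced by controller-gen: an online volume
+ # snapshot backup configuration per the fields above; the snapshot
+ # class name is a hypothetical placeholder.
+ #
+ #   backup:
+ #     volumeSnapshot:
+ #       className: csi-snapclass
+ #       online: true
+ #       onlineConfiguration:
+ #         immediateCheckpoint: false
+ #         waitForArchive: true
+ #       snapshotOwnerReference: cluster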
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ pgDumpExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ pgRestoreExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
+ locale:
+ description: Sets the default collation order and character
+ classification in the new database.
+ type: string
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default:`C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default:`C`)
+ type: string
+ localeProvider:
+ description: |-
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations,
+ please use the explicitly provided parameters instead.
+ If defined, explicit values will be ignored.
+ items:
+ type: string
+ type: array
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ postInitApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitApplicationSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the application database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps. 
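+ # A hedged sketch, not produced by controller-gen: importing one
+ # database from another instance with the `import` stanza above;
+ # "cluster-old" is a hypothetical external cluster name.
+ #
+ #   bootstrap:
+ #     initdb:
+ #       import:
+ #         type: microservice
+ #         databases: ["app"]
+ #         source:
+ #           externalCluster: cluster-old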
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
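+ # A hedged sketch, not produced by controller-gen: post-init SQL hooks
+ # per the fields above; the secret name and key are hypothetical.
+ #
+ #   bootstrap:
+ #     initdb:
+ #       postInitSQL:
+ #       - CREATE EXTENSION IF NOT EXISTS pg_stat_statements
+ #       postInitApplicationSQLRefs:
+ #         secretRefs:
+ #         - name: app-schema
+ #           key: schema.sql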
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
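+ # A hedged sketch, not produced by controller-gen: point-in-time
+ # recovery driven by the recoveryTarget fields above; the source name
+ # and timestamp are hypothetical.
+ #
+ #   bootstrap:
+ #     recovery:
+ #       source: cluster-origin
+ #       recoveryTarget:
+ #         targetTime: "2024-09-01T10:00:00Z"
+ #         exclusive: false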
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
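+ # A hedged sketch, not produced by controller-gen: user-provided TLS
+ # material wired through the certificates fields above; all secret
+ # names are hypothetical.
+ #
+ #   certificates:
+ #     serverCASecret: my-server-ca
+ #     serverTLSSecret: my-server-tls
+ #     clientCASecret: my-client-ca
+ #     replicationTLSSecret: my-replication-tls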
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
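+ # A hedged sketch, not produced by controller-gen: passing environment
+ # variables to the instance pods via env/envFrom; the ConfigMap name is
+ # a hypothetical placeholder.
+ #
+ #   env:
+ #   - name: TZ
+ #     value: Europe/Rome
+ #   envFrom:
+ #   - configMapRef:
+ #       name: extra-pg-env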
+                  The pod in which this EphemeralVolumeSource is embedded will be the
+                  owner of the PVC, i.e. the PVC will be deleted together with the
+                  pod. The name of the PVC will be `<pod name>-<volume name>` where
+                  `<volume name>` is the name from the `PodSpec.Volumes` array
+                  entry. Pod validation will reject the pod if the concatenated name
+                  is not valid for a PVC (for example, too long).
+
+                  An existing PVC with that name that is not owned by the pod
+                  will *not* be used for the pod to avoid using an unrelated
+                  volume by mistake. Starting the pod is then blocked until
+                  the unrelated PVC is removed. If such a pre-created PVC is
+                  meant to be used by the pod, the PVC has to be updated with an
+                  owner reference to the pod once the pod exists. Normally
+                  this should not be necessary, but it may be useful when
+                  manually reconstructing a broken cluster.
+
+                  This field is read-only and no changes will be made by Kubernetes
+                  to the PVC after it has been created.
+
+                  Required, must not be nil.
+                properties:
+                  metadata:
+                    description: |-
+                      May contain labels and annotations that will be copied into the PVC
+                      when creating it. No other fields are allowed and will be rejected during
+                      validation.
+                    type: object
+                  spec:
+                    description: |-
+                      The specification for the PersistentVolumeClaim. The entire content is
+                      copied unchanged into the PVC that gets created from this
+                      template. The same fields as in a PersistentVolumeClaim
+                      are also valid here.
+                    properties:
+                      accessModes:
+                        description: |-
+                          accessModes contains the desired access modes the volume should have.
+                          More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      dataSource:
+                        description: |-
+                          dataSource field can be used to specify either:
+                          * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                          * An existing PVC (PersistentVolumeClaim)
+                          If the provisioner or an external controller can support the specified data source,
+                          it will create a new volume based on the contents of the specified data source.
+                          When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+                          and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+                          If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+                        properties:
+                          apiGroup:
+                            description: |-
+                              APIGroup is the group for the resource being referenced.
+                              If APIGroup is not specified, the specified Kind must be in the core API group.
+                              For any other third-party types, APIGroup is required.
+                            type: string
+                          kind:
+                            description: Kind is the type of resource being referenced
+                            type: string
+                          name:
+                            description: Name is the name of resource being referenced
+                            type: string
+                        required:
+                          - kind
+                          - name
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      dataSourceRef:
+                        description: |-
+                          dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+                          volume is desired. This may be any object from a non-empty API group (non
+                          core object) or a PersistentVolumeClaim object.
+                          When this field is specified, volume binding will only succeed if the type of
+                          the specified object matches some installed volume populator or dynamic
+                          provisioner.
+                          This field will replace the functionality of the dataSource field and as such
+                          if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+                        properties:
+                          matchExpressions:
+                            description: matchExpressions is a list of label selector
+                              requirements. The requirements are ANDed.
+                            items:
+                              description: |-
+                                A label selector requirement is a selector that contains values, a key, and an operator that
+                                relates the key and values.
+                              properties:
+                                key:
+                                  description: key is the label key that the selector
+                                    applies to.
+                                  type: string
+                                operator:
+                                  description: |-
+                                    operator represents a key's relationship to a set of values.
+                                    Valid operators are In, NotIn, Exists and DoesNotExist.
+                                  type: string
+                                values:
+                                  description: |-
+                                    values is an array of string values. If the operator is In or NotIn,
+                                    the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                    the values array must be empty. This array is replaced during a strategic
+                                    merge patch.
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                              required:
+                                - key
+                                - operator
+                              type: object
+                            type: array
+                            x-kubernetes-list-type: atomic
+                          matchLabels:
+                            additionalProperties:
+                              type: string
+                            description: |-
+                              matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                              map is equivalent to an element of matchExpressions, whose key field is "key", the
+                              operator is "In", and the values array contains only "value". The requirements are ANDed.
+                            type: object
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      storageClassName:
+                        description: |-
+                          storageClassName is the name of the StorageClass required by the claim.
+                          More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+                        type: string
+                      volumeAttributesClassName:
+                        description: |-
+                          volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+                          If specified, the CSI driver will create or update the volume with the attributes defined
+                          in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+                          it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+                          will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+                          If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+                          will be set by the persistentvolume controller if it exists.
+                          If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                          set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                          exists.
+                          More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                          (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+                        type: string
+                      volumeMode:
+                        description: |-
+                          volumeMode defines what type of volume is required by the claim.
+                          Value of Filesystem is implied when not included in claim spec.
+                        type: string
+                      volumeName:
+                        description: volumeName is the binding reference to the
+                          PersistentVolume backing this claim.
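+                        # NOTE (editorial sketch, not part of the generated schema): a
+                        # minimal `ephemeralVolumeSource` built from the volumeClaimTemplate
+                        # fields above; the storage class name and size are assumptions:
+                        #
+                        #   spec:
+                        #     ephemeralVolumeSource:
+                        #       volumeClaimTemplate:
+                        #         spec:
+                        #           accessModes: ["ReadWriteOnce"]
+                        #           storageClassName: standard
+                        #           resources:
+                        #             requests:
+                        #               storage: 1Gi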
+                        type: string
+                    type: object
+                  required:
+                    - spec
+                  type: object
+              type: object
+          ephemeralVolumesSizeLimit:
+            description: |-
+              EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+              volumes
+            properties:
+              shm:
+                anyOf:
+                  - type: integer
+                  - type: string
+                description: Shm is the size limit of the shared memory volume
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+              temporaryData:
+                anyOf:
+                  - type: integer
+                  - type: string
+                description: TemporaryData is the size limit of the temporary
+                  data volume
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
+            type: object
+          externalClusters:
+            description: The list of external clusters which are used in the configuration
+            items:
+              description: |-
+                ExternalCluster represents the connection parameters to an
+                external cluster which is used in the other sections of the configuration
+              properties:
+                barmanObjectStore:
+                  description: The configuration for the barman-cloud tool suite
+                  properties:
+                    azureCredentials:
+                      description: The credentials to use to upload data to Azure
+                        Blob Storage
+                      properties:
+                        connectionString:
+                          description: The connection string to be used
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                        inheritFromAzureAD:
+                          description: Use the Azure AD based authentication without
+                            explicitly providing the keys.
+                          type: boolean
+                        storageAccount:
+                          description: The storage account where data will be uploaded
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                        storageKey:
+                          description: |-
+                            The storage account key to be used in conjunction
+                            with the storage account name
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                        storageSasToken:
+                          description: |-
+                            A shared-access-signature to be used in conjunction with
+                            the storage account name
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                      type: object
+                    data:
+                      description: |-
+                        The configuration to be used to backup the data files
+                        When not defined, base backup files will be stored uncompressed and may
+                        be unencrypted in the object store, according to the bucket default
+                        policy.
+                      properties:
+                        additionalCommandArgs:
+                          description: |-
+                            AdditionalCommandArgs represents additional arguments that can be appended
+                            to the 'barman-cloud-backup' command-line invocation. These arguments
+                            provide flexibility to customize the backup process further according to
+                            specific requirements or configurations.
+
+                            Example:
+                            In a scenario where specialized backup options are required, such as setting
+                            a specific timeout or defining custom behavior, users can use this field
+                            to specify additional command arguments.
+
+                            Note:
+                            It's essential to ensure that the provided arguments are valid and supported
+                            by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+                            behavior during execution.
+                          items:
+                            type: string
+                          type: array
+                        compression:
+                          description: |-
+                            Compress a backup file (a tar file per tablespace) while streaming it
+                            to the object store. Available options are empty string (no
+                            compression, default), `gzip`, `bzip2` or `snappy`.
+                          enum:
+                            - gzip
+                            - bzip2
+                            - snappy
+                          type: string
+                        encryption:
+                          description: |-
+                            Whether to force the encryption of files (if the bucket is
+                            not already configured for that).
+                            Allowed options are empty string (use the bucket policy, default),
+                            `AES256` and `aws:kms`
+                          enum:
+                            - AES256
+                            - aws:kms
+                          type: string
+                        immediateCheckpoint:
+                          description: |-
+                            Control whether the I/O workload for the backup initial checkpoint will
+                            be limited, according to the `checkpoint_completion_target` setting on
+                            the PostgreSQL server. If set to true, an immediate checkpoint will be
+                            used, meaning PostgreSQL will complete the checkpoint as soon as
+                            possible. `false` by default.
+                          type: boolean
+                        jobs:
+                          description: |-
+                            The number of parallel jobs to be used to upload the backup, defaults
+                            to 2
+                          format: int32
+                          minimum: 1
+                          type: integer
+                      type: object
+                    destinationPath:
+                      description: |-
+                        The path where to store the backup (i.e. s3://bucket/path/to/folder);
+                        this path, with different destination folders, will be used for WALs
+                        and for data
+                      minLength: 1
+                      type: string
+                    endpointCA:
+                      description: |-
+                        EndpointCA stores the CA bundle of the barman endpoint.
+                        Useful when using self-signed certificates to avoid
+                        errors with certificate issuer and barman-cloud-wal-archive
+                      properties:
+                        key:
+                          description: The key to select
+                          type: string
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                        - key
+                        - name
+                      type: object
+                    endpointURL:
+                      description: |-
+                        Endpoint to be used to upload data to the cloud,
+                        overriding the automatic endpoint discovery
+                      type: string
+                    googleCredentials:
+                      description: The credentials to use to upload data to Google
+                        Cloud Storage
+                      properties:
+                        applicationCredentials:
+                          description: The secret containing the Google Cloud
+                            Storage JSON file with the credentials
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                        gkeEnvironment:
+                          description: |-
+                            If set to true, will presume that it's running inside a GKE environment,
+                            defaults to false.
+                          type: boolean
+                      type: object
+                    historyTags:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        HistoryTags is a list of key value pairs that will be passed to the
+                        Barman --history-tags option.
+                      type: object
+                    s3Credentials:
+                      description: The credentials to use to upload data to S3
+                      properties:
+                        accessKeyId:
+                          description: The reference to the access key id
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                        inheritFromIAMRole:
+                          description: Use the role based authentication without
+                            explicitly providing the keys.
+                          type: boolean
+                        region:
+                          description: The reference to the secret containing
+                            the region name
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                        secretAccessKey:
+                          description: The reference to the secret access key
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                        sessionToken:
+                          description: The reference to the session key
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                            - key
+                            - name
+                          type: object
+                      type: object
+                    serverName:
+                      description: |-
+                        The server name on S3, the cluster name is used if this
+                        parameter is omitted
+                      type: string
+                    tags:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Tags is a list of key value pairs that will be passed to the
+                        Barman --tags option.
+                      type: object
+                    wal:
+                      description: |-
+                        The configuration for the backup of the WAL stream.
+                        When not defined, WAL files will be stored uncompressed and may be
+                        unencrypted in the object store, according to the bucket default policy.
+                      properties:
+                        archiveAdditionalCommandArgs:
+                          description: |-
+                            Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+                            command-line invocation. These arguments provide flexibility to customize
+                            the WAL archive process further, according to specific requirements or configurations.
+
+                            Example:
+                            In a scenario where specialized backup options are required, such as setting
+                            a specific timeout or defining custom behavior, users can use this field
+                            to specify additional command arguments.
+
+                            Note:
+                            It's essential to ensure that the provided arguments are valid and supported
+                            by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+                            behavior during execution.
+                          items:
+                            type: string
+                          type: array
+                        compression:
+                          description: |-
+                            Compress a WAL file before sending it to the object store. Available
+                            options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+                          enum:
+                            - gzip
+                            - bzip2
+                            - snappy
+                          type: string
+                        encryption:
+                          description: |-
+                            Whether to force the encryption of files (if the bucket is
+                            not already configured for that).
+                            Allowed options are empty string (use the bucket policy, default),
+                            `AES256` and `aws:kms`
+                          enum:
+                            - AES256
+                            - aws:kms
+                          type: string
+                        maxParallel:
+                          description: |-
+                            Number of WAL files to be either archived in parallel (when the
+                            PostgreSQL instance is archiving to a backup object store) or
+                            restored in parallel (when a PostgreSQL standby is fetching WAL
+                            files from a recovery object store). If not specified, WAL files
+                            will be processed one at a time. It accepts a positive integer as a
+                            value - with 1 being the minimum accepted value.
+                          minimum: 1
+                          type: integer
+                        restoreAdditionalCommandArgs:
+                          description: |-
+                            Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                            command-line invocation. These arguments provide flexibility to customize
+                            the WAL restore process further, according to specific requirements or configurations.
+
+                            Example:
+                            In a scenario where specialized backup options are required, such as setting
+                            a specific timeout or defining custom behavior, users can use this field
+                            to specify additional command arguments.
+
+                            Note:
+                            It's essential to ensure that the provided arguments are valid and supported
+                            by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                            behavior during execution.
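+                          # NOTE (editorial sketch, not part of the generated schema): an
+                          # external cluster backed by a Barman object store, using the
+                          # fields above; the cluster name and bucket path are assumptions:
+                          #
+                          #   spec:
+                          #     externalClusters:
+                          #       - name: origin
+                          #         barmanObjectStore:
+                          #           destinationPath: s3://backups/origin
+                          #           s3Credentials:
+                          #             inheritFromIAMRole: true
+                          #           wal:
+                          #             compression: gzip
+                          #             maxParallel: 4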
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      type: string
+                    optional:
+                      description: Specify whether the Secret or its key must
+                        be defined
+                      type: boolean
+                  required:
+                    - key
+                  type: object
+                  x-kubernetes-map-type: atomic
+                sslRootCert:
+                  description: |-
+                    The reference to an SSL CA public key to be used to connect to this
+                    instance
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must
+                        be a valid secret key.
+                      type: string
+                    name:
+                      default: ""
+                      description: |-
+                        Name of the referent.
+                        This field is effectively required, but due to backwards compatibility is
+                        allowed to be empty. Instances of this type with an empty value here are
+                        almost certainly wrong.
+                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                      type: string
+                    optional:
+                      description: Specify whether the Secret or its key must
+                        be defined
+                      type: boolean
+                  required:
+                    - key
+                  type: object
+                  x-kubernetes-map-type: atomic
+              required:
+                - name
+              type: object
+            type: array
+          failoverDelay:
+            default: 0
+            description: |-
+              The amount of time (in seconds) to wait before triggering a failover
+              after the primary PostgreSQL instance in the cluster was detected
+              to be unhealthy
+            format: int32
+            type: integer
+          imageCatalogRef:
+            description: Defines the major PostgreSQL version we want to use within
+              an ImageCatalog
+            properties:
+              apiGroup:
+                description: |-
+                  APIGroup is the group for the resource being referenced.
+                  If APIGroup is not specified, the specified Kind must be in the core API group.
+                  For any other third-party types, APIGroup is required.
+                type: string
+              kind:
+                description: Kind is the type of resource being referenced
+                type: string
+              major:
+                description: The major version of PostgreSQL we want to use from
+                  the ImageCatalog
+                type: integer
+                x-kubernetes-validations:
+                  - message: Major is immutable
+                    rule: self == oldSelf
+              name:
+                description: Name is the name of resource being referenced
+                type: string
+            required:
+              - kind
+              - major
+              - name
+            type: object
+            x-kubernetes-map-type: atomic
+            x-kubernetes-validations:
+              - message: Only image catalogs are supported
+                rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+              - message: Only image catalogs are supported
+                rule: self.apiGroup == 'postgresql.cnpg.io'
+          imageName:
+            description: |-
+              Name of the container image, supporting both tags (`<image>:<tag>`)
+              and digests for deterministic and repeatable deployments
+              (`<image>:<tag>@sha256:<digestValue>`)
+            type: string
+          imagePullPolicy:
+            description: |-
+              Image pull policy.
+              One of `Always`, `Never` or `IfNotPresent`.
+              If not defined, it defaults to `IfNotPresent`.
+              Cannot be updated.
+              More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+            type: string
+          imagePullSecrets:
+            description: The list of pull secrets to be used to pull the images
+            items:
+              description: |-
+                LocalObjectReference contains enough information to let you locate a
+                local object with a known type inside the same namespace
+              properties:
+                name:
+                  description: Name of the referent.
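+                  # NOTE (editorial sketch, not part of the generated schema): selecting
+                  # the PostgreSQL major version through the `imageCatalogRef` field
+                  # above; the catalog name is an assumption:
+                  #
+                  #   spec:
+                  #     imageCatalogRef:
+                  #       apiGroup: postgresql.cnpg.io
+                  #       kind: ImageCatalog
+                  #       name: postgresql
+                  #       major: 16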
+                  type: string
+              required:
+                - name
+              type: object
+            type: array
+          inheritedMetadata:
+            description: Metadata that will be inherited by all objects related
+              to the Cluster
+            properties:
+              annotations:
+                additionalProperties:
+                  type: string
+                type: object
+              labels:
+                additionalProperties:
+                  type: string
+                type: object
+            type: object
+          instances:
+            default: 1
+            description: Number of instances required in the cluster
+            minimum: 1
+            type: integer
+          livenessProbeTimeout:
+            description: |-
+              LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+              to successfully respond to the liveness probe (default 30).
+              The Liveness probe failure threshold is derived from this value using the formula:
+              ceiling(livenessProbeTimeout / 10).
+            format: int32
+            type: integer
+          logLevel:
+            default: info
+            description: 'The instances'' log level, one of the following values:
+              error, warning, info (default), debug, trace'
+            enum:
+              - error
+              - warning
+              - info
+              - debug
+              - trace
+            type: string
+          managed:
+            description: The configuration that is used by the portions of PostgreSQL
+              that are managed by the instance manager
+            properties:
+              roles:
+                description: Database roles managed by the `Cluster`
+                items:
+                  description: |-
+                    RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+                    with the additional field Ensure specifying whether to ensure the presence or
+                    absence of the role in the database
+
+                    The defaults of the CREATE ROLE command are applied
+                    Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+                  properties:
+                    bypassrls:
+                      description: |-
+                        Whether a role bypasses every row-level security (RLS) policy.
+                        Default is `false`.
+                      type: boolean
+                    comment:
+                      description: Description of the role
+                      type: string
+                    connectionLimit:
+                      default: -1
+                      description: |-
+                        If the role can log in, this specifies how many concurrent
+                        connections the role can make. `-1` (the default) means no limit.
+                      format: int64
+                      type: integer
+                    createdb:
+                      description: |-
+                        When set to `true`, the role being defined will be allowed to create
+                        new databases. Specifying `false` (default) will deny a role the
+                        ability to create databases.
+                      type: boolean
+                    createrole:
+                      description: |-
+                        Whether the role will be permitted to create, alter, drop, comment
+                        on, change the security label for, and grant or revoke membership in
+                        other roles. Default is `false`.
+                      type: boolean
+                    disablePassword:
+                      description: DisablePassword indicates that a role's password
+                        should be set to NULL in Postgres
+                      type: boolean
+                    ensure:
+                      default: present
+                      description: Ensure the role is `present` or `absent` -
+                        defaults to "present"
+                      enum:
+                        - present
+                        - absent
+                      type: string
+                    inRoles:
+                      description: |-
+                        List of one or more existing roles to which this role will be
+                        immediately added as a new member. Default empty.
+                      items:
+                        type: string
+                      type: array
+                    inherit:
+                      default: true
+                      description: |-
+                        Whether a role "inherits" the privileges of roles it is a member of.
+                        Default is `true`.
+                      type: boolean
+                    login:
+                      description: |-
+                        Whether the role is allowed to log in. A role having the `login`
+                        attribute can be thought of as a user. Roles without this attribute
+                        are useful for managing database privileges, but are not users in
+                        the usual sense of the word. Default is `false`.
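+                      # NOTE (editorial sketch, not part of the generated schema): a
+                      # declarative role using the attributes above; the role name,
+                      # member role, and secret name are assumptions:
+                      #
+                      #   spec:
+                      #     managed:
+                      #       roles:
+                      #         - name: app_reader
+                      #           ensure: present
+                      #           login: true
+                      #           inRoles:
+                      #             - pg_read_all_data
+                      #           passwordSecret:
+                      #             name: app-reader-password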
+                      type: boolean
+                    name:
+                      description: Name of the role
+                      type: string
+                    passwordSecret:
+                      description: |-
+                        Secret containing the password of the role (if present)
+                        If null, the password will be ignored unless DisablePassword is set
+                      properties:
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                        - name
+                      type: object
+                    replication:
+                      description: |-
+                        Whether a role is a replication role. A role must have this
+                        attribute (or be a superuser) in order to be able to connect to the
+                        server in replication mode (physical or logical replication) and in
+                        order to be able to create or drop replication slots. A role having
+                        the `replication` attribute is a very highly privileged role, and
+                        should only be used on roles actually used for replication. Default
+                        is `false`.
+                      type: boolean
+                    superuser:
+                      description: |-
+                        Whether the role is a `superuser` who can override all access
+                        restrictions within the database - superuser status is dangerous and
+                        should be used only when really needed. You must yourself be a
+                        superuser to create a new superuser. Default is `false`.
+                      type: boolean
+                    validUntil:
+                      description: |-
+                        Date and time after which the role's password is no longer valid.
+                        When omitted, the password will never expire (default).
+                      format: date-time
+                      type: string
+                  required:
+                    - name
+                  type: object
+                type: array
+              services:
+                description: Services managed by the `Cluster`
+                properties:
+                  additional:
+                    description: Additional is a list of additional managed services
+                      specified by the user.
+                    items:
+                      description: |-
+                        ManagedService represents a specific service managed by the cluster.
+                        It includes the type of service and its associated template specification.
+                      properties:
+                        selectorType:
+                          description: |-
+                            SelectorType specifies the type of selectors that the service will have.
+                            Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+                          enum:
+                            - rw
+                            - r
+                            - ro
+                          type: string
+                        serviceTemplate:
+                          description: ServiceTemplate is the template specification
+                            for the service.
+                          properties:
+                            metadata:
+                              description: |-
+                                Standard object's metadata.
+                                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                              properties:
+                                annotations:
+                                  additionalProperties:
+                                    type: string
+                                  description: |-
+                                    Annotations is an unstructured key value map stored with a resource that may be
+                                    set by external tools to store and retrieve arbitrary metadata. They are not
+                                    queryable and should be preserved when modifying objects.
+                                    More info: http://kubernetes.io/docs/user-guide/annotations
+                                  type: object
+                                labels:
+                                  additionalProperties:
+                                    type: string
+                                  description: |-
+                                    Map of string keys and values that can be used to organize and categorize
+                                    (scope and select) objects. May match selectors of replication controllers
+                                    and services.
+                                    More info: http://kubernetes.io/docs/user-guide/labels
+                                  type: object
+                                name:
+                                  description: The name of the resource. Only
+                                    supported for certain types
+                                  type: string
+                              type: object
+                            spec:
+                              description: |-
+                                Specification of the desired behavior of the service.
+                                More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                              properties:
+                                allocateLoadBalancerNodePorts:
+                                  description: |-
+                                    allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                                    allocated for services with type LoadBalancer. Default is "true". 
It
+                                    may be set to "false" if the cluster load-balancer does not rely on
+                                    NodePorts. If the caller requests specific NodePorts (by specifying a
+                                    value), those requests will be respected, regardless of this field.
+                                    This field may only be set for services with type LoadBalancer and will
+                                    be cleared if the type is changed to any other type.
+                                  type: boolean
+                                clusterIP:
+                                  description: |-
+                                    clusterIP is the IP address of the service and is usually assigned
+                                    randomly. If an address is specified manually, is in-range (as per
+                                    system configuration), and is not in use, it will be allocated to the
+                                    service; otherwise creation of the service will fail. This field may not
+                                    be changed through updates unless the type field is also being changed
+                                    to ExternalName (which requires this field to be blank) or the type
+                                    field is being changed from ExternalName (in which case this field may
+                                    optionally be specified, as described above). Valid values are "None",
+                                    empty string (""), or a valid IP address. Setting this to "None" makes a
+                                    "headless service" (no virtual IP), which is useful when direct endpoint
+                                    connections are preferred and proxying is not required. Only applies to
+                                    types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                                    when creating a Service of type ExternalName, creation will fail. This
+                                    field will be wiped when updating a Service to type ExternalName.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  type: string
+                                clusterIPs:
+                                  description: |-
+                                    ClusterIPs is a list of IP addresses assigned to this service, and are
+                                    usually assigned randomly. If an address is specified manually, is
+                                    in-range (as per system configuration), and is not in use, it will be
+                                    allocated to the service; otherwise creation of the service will fail.
+                                    This field may not be changed through updates unless the type field is
+                                    also being changed to ExternalName (which requires this field to be
+                                    empty) or the type field is being changed from ExternalName (in which
+                                    case this field may optionally be specified, as described above). Valid
+                                    values are "None", empty string (""), or a valid IP address. Setting
+                                    this to "None" makes a "headless service" (no virtual IP), which is
+                                    useful when direct endpoint connections are preferred and proxying is
+                                    not required. Only applies to types ClusterIP, NodePort, and
+                                    LoadBalancer. If this field is specified when creating a Service of type
+                                    ExternalName, creation will fail. This field will be wiped when updating
+                                    a Service to type ExternalName. If this field is not specified, it will
+                                    be initialized from the clusterIP field. If this field is specified,
+                                    clients must ensure that clusterIPs[0] and clusterIP have the same
+                                    value.
+
+                                    This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                                    These IPs must correspond to the values of the ipFamilies field. Both
+                                    clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                externalIPs:
+                                  description: |-
+                                    externalIPs is a list of IP addresses for which nodes in the cluster
+                                    will also accept traffic for this service. These IPs are not managed by
+                                    Kubernetes. The user is responsible for ensuring that traffic arrives
+                                    at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+                                    This field will be wiped when updating a Service to type ExternalName.
+
+                                    This field may hold a maximum of two entries (dual-stack families, in
+                                    either order). These families must correspond to the values of the
+                                    clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                    governed by the ipFamilyPolicy field.
+                                  items:
+                                    description: |-
+                                      IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                      to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                ipFamilyPolicy:
+                                  description: |-
+                                    IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                    this Service. If there is no value provided, then this field will be set
+                                    to SingleStack. Services can be "SingleStack" (a single IP family),
+                                    "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                    a single IP family on single-stack clusters), or "RequireDualStack"
+                                    (two IP families on dual-stack configured clusters, otherwise fail). The
+                                    ipFamilies and clusterIPs fields depend on the value of this field. This
+                                    field will be wiped when updating a service to type ExternalName.
+                                  type: string
+                                loadBalancerClass:
+                                  description: |-
+                                    loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                    If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                    e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                    This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                    balancer implementation is used, today this is typically done through the cloud provider integration,
+                                    but should apply for any default implementation. If set, it is assumed that a load balancer
+                                    implementation is watching for Services with a matching class. Any default load balancer
+                                    implementation (e.g. cloud providers) should ignore Services that set this field.
+                                    This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                    Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                  type: string
+                                loadBalancerIP:
+                                  description: |-
+                                    Only applies to Service Type: LoadBalancer.
+                                    This feature depends on whether the underlying cloud-provider supports specifying
+                                    the loadBalancerIP when a load balancer is created.
+                                    This field will be ignored if the cloud-provider does not support the feature.
+                                    Deprecated: This field was under-specified and its meaning varies across implementations.
+                                    Using it is non-portable and it may not support dual-stack.
+                                    Users are encouraged to use implementation-specific annotations when available.
+                                  type: string
+                                loadBalancerSourceRanges:
+                                  description: |-
+                                    If specified and supported by the platform, traffic through the cloud-provider
+                                    load-balancer will be restricted to the specified client IPs. This field will be
+                                    ignored if the cloud-provider does not support the feature.
+                                    More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                ports:
+                                  description: |-
+                                    The list of ports that are exposed by this service.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  items:
+                                    description: ServicePort contains information
+                                      on service's port.
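+                                    # NOTE (editorial sketch, not part of the generated
+                                    # schema): an additional read-only service exposed via
+                                    # LoadBalancer using the fields above; the service name
+                                    # is an assumption:
+                                    #
+                                    #   spec:
+                                    #     managed:
+                                    #       services:
+                                    #         additional:
+                                    #           - selectorType: ro
+                                    #             serviceTemplate:
+                                    #               metadata:
+                                    #                 name: cluster-example-ro-lb
+                                    #               spec:
+                                    #                 type: LoadBalancer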
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                                  type: string
+                              type: object
+                          type: object
+                        updateStrategy:
+                          default: patch
+                          description: UpdateStrategy describes how the service
+                            differences should be reconciled
+                          enum:
+                            - patch
+                            - replace
+                          type: string
+                      required:
+                        - selectorType
+                        - serviceTemplate
+                      type: object
+                    type: array
+                  disabledDefaultServices:
+                    description: |-
+                      DisabledDefaultServices is a list of service types that are disabled by default.
+                      Valid values are "r" and "ro", representing read and read-only services.
+                    items:
+                      description: |-
+                        ServiceSelectorType describes a valid value for generating the service selectors.
+                        It indicates which type of service the selector applies to, such as read-write, read, or read-only
+                      enum:
+                        - rw
+                        - r
+                        - ro
+                      type: string
+                    type: array
+                type: object
+            type: object
+          maxSyncReplicas:
+            default: 0
+            description: |-
+              The target value for the synchronous replication quorum, which can be
+              decreased if the number of ready standbys is lower than this.
+              Undefined or 0 disables synchronous replication.
+            minimum: 0
+            type: integer
+          minSyncReplicas:
+            default: 0
+            description: |-
+              Minimum number of instances required in synchronous replication with the
+              primary. Undefined or 0 allows writes to complete when no standby is
+              available.
+            minimum: 0
+            type: integer
+          monitoring:
+            description: The configuration of the monitoring infrastructure of
+              this cluster
+            properties:
+              customQueriesConfigMap:
+                description: The list of config maps containing the custom queries
+                items:
+                  description: |-
+                    ConfigMapKeySelector contains enough information to let you locate
+                    the key of a ConfigMap
+                  properties:
+                    key:
+                      description: The key to select
+                      type: string
+                    name:
+                      description: Name of the referent.
+                      type: string
+                  required:
+                    - key
+                    - name
+                  type: object
+                type: array
+              customQueriesSecret:
+                description: The list of secrets containing the custom queries
+                items:
+                  description: |-
+                    SecretKeySelector contains enough information to let you locate
+                    the key of a Secret
+                  properties:
+                    key:
+                      description: The key to select
+                      type: string
+                    name:
+                      description: Name of the referent.
+                      type: string
+                  required:
+                    - key
+                    - name
+                  type: object
+                type: array
+              disableDefaultQueries:
+                default: false
+                description: |-
+                  Whether the default queries should be injected.
+                  Set it to `true` if you don't want to inject default queries into the cluster.
+                  Default: false.
+                type: boolean
+              enablePodMonitor:
+                default: false
+                description: Enable or disable the `PodMonitor`
+                type: boolean
+              podMonitorMetricRelabelings:
+                description: The list of metric relabelings for the `PodMonitor`.
+                  Applied to samples before ingestion.
+                items:
+                  description: |-
+                    RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+                    scraped samples and remote write samples.
+
+                    More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+                  properties:
+                    action:
+                      default: replace
+                      description: |-
+                        Action to perform based on the regex matching.
+
+                        `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+                        `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
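+                        # NOTE (editorial sketch, not part of the generated schema):
+                        # enabling the PodMonitor and dropping one metric family with the
+                        # relabeling fields above; the metric name pattern is an assumption:
+                        #
+                        #   spec:
+                        #     monitoring:
+                        #       enablePodMonitor: true
+                        #       podMonitorMetricRelabelings:
+                        #         - action: drop
+                        #           sourceLabels:
+                        #             - __name__
+                        #           regex: go_gc_.*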
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + default: required + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. + type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. 
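+          # Illustrative sketch only, not part of the generated schema: for an
+          # assumed three-instance cluster named "cluster-example" with
+          # number: 1, method "any" maps to a quorum-based PostgreSQL setting
+          # along the lines of
+          #   synchronous_standby_names = 'ANY 1 ("cluster-example-2", "cluster-example-3")'
+          # while method "first" maps to a priority-based setting such as
+          #   synchronous_standby_names = 'FIRST 1 ("cluster-example-2", "cluster-example-3")'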
+ enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. 
If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
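+          # Illustrative sketch only (the container name "postgres" and the
+          # target path are assumptions, not part of the generated schema): a
+          # downwardAPI item projecting the container memory limit as a file:
+          #   items:
+          #   - path: "limits/memory"
+          #     resourceFieldRef:
+          #       containerName: postgres
+          #       resource: limits.memory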
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. 
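+          # Illustrative example only (assumes a cluster named "cluster-example"
+          # and the default prefix): the primary would carry one HA slot per
+          # standby, named along the lines of _cnpg_cluster_example_2 and
+          # _cnpg_cluster_example_3, since slot names are restricted to the
+          # [0-9a-z_] alphabet.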
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
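+ # Example (illustrative only, not part of the schema): a minimal sketch of a
+ # zone-level spread constraint for a Cluster, combining the maxSkew,
+ # topologyKey and whenUnsatisfiable fields described above; the cnpg.io/cluster
+ # pod label and the cluster name "cluster-example" are assumptions.
+ #
+ # spec:
+ #   topologySpreadConstraints:
+ #     - maxSkew: 1
+ #       topologyKey: topology.kubernetes.io/zone
+ #       whenUnsatisfiable: DoNotSchedule
+ #       labelSelector:
+ #         matchLabels:
+ #           cnpg.io/cluster: cluster-example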
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
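+ # Example (illustrative only): a pvcTemplate that pre-populates the volume from
+ # an existing VolumeSnapshot through dataSourceRef, as described above; the
+ # snapshot name "wal-snap" is an assumption, while snapshot.storage.k8s.io is
+ # the upstream API group for VolumeSnapshot objects.
+ #
+ #   pvcTemplate:
+ #     accessModes: ["ReadWriteOnce"]
+ #     resources:
+ #       requests:
+ #         storage: 2Gi
+ #     dataSourceRef:
+ #       apiGroup: snapshot.storage.k8s.io
+ #       kind: VolumeSnapshot
+ #       name: wal-snap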
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + azurePVCUpdateEnabled: + description: AzurePVCUpdateEnabled shows if the PVC online upgrade + is enabled for this cluster + type: boolean + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. 
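+ # Example (illustrative only): the shorthand storage fields described in the
+ # storage and walStorage sections above, used instead of a full pvcTemplate;
+ # the storage class name "standard" is an assumption.
+ #
+ # spec:
+ #   instances: 3
+ #   storage:
+ #     size: 10Gi
+ #     storageClass: standard
+ #     resizeInUseVolumes: true
+ #   walStorage:
+ #     size: 2Gi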
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; it can be omitted
+ if ReplicationTLSSecret is provided.
+ type: string
+ expirations:
+ additionalProperties:
+ type: string
+ description: Expiration dates for all certificates.
+ type: object
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server SSL certificates; it can be omitted
+ if ServerTLSSecret is provided.
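+ # Example (illustrative only): user-provided certificates on the spec side,
+ # which the status fields above then report back; the secret names are
+ # assumptions.
+ #
+ # spec:
+ #   certificates:
+ #     serverCASecret: my-server-ca
+ #     serverTLSSecret: my-server-tls
+ #     clientCASecret: my-client-ca
+ #     replicationTLSSecret: my-replication-tls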
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
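+ # Example (illustrative only): the shape these system-populated status fields
+ # may take on a healthy three-instance cluster; instance names follow the
+ # usual <cluster>-<n> pattern and are assumptions.
+ #
+ # status:
+ #   instances: 3
+ #   readyInstances: 3
+ #   currentPrimary: cluster-example-1
+ #   targetPrimary: cluster-example-1
+ #   instanceNames:
+ #     - cluster-example-1
+ #     - cluster-example-2
+ #     - cluster-example-3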
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler 
+ items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. 
+ type: boolean + type: object + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance, this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extract. It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. 
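+ # Example (illustrative only): a minimal Database manifest exercising the
+ # fields described above; the names ("app", "cluster-example") are
+ # assumptions, and icuLocale is paired with localeProvider: icu as the
+ # validation rules below require.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: Database
+ # metadata:
+ #   name: app-database
+ # spec:
+ #   cluster:
+ #     name: cluster-example
+ #   name: app
+ #   owner: app
+ #   encoding: UTF8
+ #   localeProvider: icu
+ #   icuLocale: en-US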
+ type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
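+ # Example (illustrative only): an ImageCatalog listing one image per major
+ # version, each major unique as required by the validation rule below; the
+ # image tags are assumptions and should point at real PostgreSQL images.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: ImageCatalog
+ # metadata:
+ #   name: postgresql
+ # spec:
+ #   images:
+ #     - major: 16
+ #       image: ghcr.io/cloudnative-pg/postgresql:16.4
+ #     - major: 17
+ #       image: ghcr.io/cloudnative-pg/postgresql:17.0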
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
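+ # Example (illustrative only): dropping a hypothetical high-cardinality
+ # metric from the Pooler's PodMonitor before ingestion, using the relabeling
+ # fields described above; the metric name pattern is an assumption.
+ #
+ #   monitoring:
+ #     enablePodMonitor: true
+ #     podMonitorMetricRelabelings:
+ #       - action: drop
+ #         sourceLabels: ["__name__"]
+ #         regex: pgbouncer_verbose_.*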
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
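+ # Example (illustrative only): a minimal Pooler wired to a cluster, including
+ # the paused switch documented above; the names and parameter values are
+ # assumptions.
+ #
+ # apiVersion: postgresql.cnpg.io/v1
+ # kind: Pooler
+ # metadata:
+ #   name: pooler-example-rw
+ # spec:
+ #   cluster:
+ #     name: cluster-example
+ #   instances: 2
+ #   type: rw
+ #   pgbouncer:
+ #     poolMode: transaction
+ #     paused: false
+ #     parameters:
+ #       max_client_conn: "1000"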
+                type: boolean
+              pg_hba:
+                description: |-
+                  PostgreSQL Host Based Authentication rules (lines to be appended
+                  to the pg_hba.conf file)
+                items:
+                  type: string
+                type: array
+              poolMode:
+                default: session
+                description: 'The pool mode. Default: `session`.'
+                enum:
+                - session
+                - transaction
+                type: string
+            type: object
+          serviceTemplate:
+            description: Template for the Service to be created
+            properties:
+              metadata:
+                description: |-
+                  Standard object's metadata.
+                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Annotations is an unstructured key value map stored with a resource that may be
+                      set by external tools to store and retrieve arbitrary metadata. They are not
+                      queryable and should be preserved when modifying objects.
+                      More info: http://kubernetes.io/docs/user-guide/annotations
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Map of string keys and values that can be used to organize and categorize
+                      (scope and select) objects. May match selectors of replication controllers
+                      and services.
+                      More info: http://kubernetes.io/docs/user-guide/labels
+                    type: object
+                  name:
+                    description: The name of the resource. Only supported for
+                      certain types
+                    type: string
+                type: object
+              spec:
+                description: |-
+                  Specification of the desired behavior of the service.
+                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                properties:
+                  allocateLoadBalancerNodePorts:
+                    description: |-
+                      allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                      allocated for services with type LoadBalancer. Default is "true". It
+                      may be set to "false" if the cluster load-balancer does not rely on
+                      NodePorts. If the caller requests specific NodePorts (by specifying a
+                      value), those requests will be respected, regardless of this field.
+                      This field may only be set for services with type LoadBalancer and will
+                      be cleared if the type is changed to any other type.
+                    type: boolean
+                  clusterIP:
+                    description: |-
+                      clusterIP is the IP address of the service and is usually assigned
+                      randomly. If an address is specified manually, is in-range (as per
+                      system configuration), and is not in use, it will be allocated to the
+                      service; otherwise creation of the service will fail. This field may not
+                      be changed through updates unless the type field is also being changed
+                      to ExternalName (which requires this field to be blank) or the type
+                      field is being changed from ExternalName (in which case this field may
+                      optionally be specified, as described above). Valid values are "None",
+                      empty string (""), or a valid IP address. Setting this to "None" makes a
+                      "headless service" (no virtual IP), which is useful when direct endpoint
+                      connections are preferred and proxying is not required. Only applies to
+                      types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                      when creating a Service of type ExternalName, creation will fail. This
+                      field will be wiped when updating a Service to type ExternalName.
+                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                    type: string
+                  clusterIPs:
+                    description: |-
+                      ClusterIPs is a list of IP addresses assigned to this service, and are
+                      usually assigned randomly. If an address is specified manually, is
+                      in-range (as per system configuration), and is not in use, it will be
+                      allocated to the service; otherwise creation of the service will fail.
+                      This field may not be changed through updates unless the type field is
+                      also being changed to ExternalName (which requires this field to be
+                      empty) or the type field is being changed from ExternalName (in which
+                      case this field may optionally be specified, as described above). Valid
+                      values are "None", empty string (""), or a valid IP address. Setting
+                      this to "None" makes a "headless service" (no virtual IP), which is
+                      useful when direct endpoint connections are preferred and proxying is
+                      not required. Only applies to types ClusterIP, NodePort, and
+                      LoadBalancer. If this field is specified when creating a Service of type
+                      ExternalName, creation will fail. This field will be wiped when updating
+                      a Service to type ExternalName. If this field is not specified, it will
+                      be initialized from the clusterIP field. If this field is specified,
+                      clients must ensure that clusterIPs[0] and clusterIP have the same
+                      value.
+
+                      This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                      These IPs must correspond to the values of the ipFamilies field. Both
+                      clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  externalIPs:
+                    description: |-
+                      externalIPs is a list of IP addresses for which nodes in the cluster
+                      will also accept traffic for this service. These IPs are not managed by
+                      Kubernetes. The user is responsible for ensuring that traffic arrives
+                      at a node with this IP. A common example is external load-balancers
+                      that are not part of the Kubernetes system.
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  externalName:
+                    description: |-
+                      externalName is the external reference that discovery mechanisms will
+                      return as an alias for this service (e.g. a DNS CNAME record). No
+                      proxying will be involved. Must be a lowercase RFC-1123 hostname
+                      (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+                    type: string
+                  externalTrafficPolicy:
+                    description: |-
+                      externalTrafficPolicy describes how nodes distribute service traffic they
+                      receive on one of the Service's "externally-facing" addresses (NodePorts,
+                      ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+                      the service in a way that assumes that external load balancers will take care
+                      of balancing the service traffic between nodes, and so each node will deliver
+                      traffic only to the node-local endpoints of the service, without masquerading
+                      the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+                      be dropped.) The default value, "Cluster", uses the standard behavior of
+                      routing to all endpoints evenly (possibly modified by topology and other
+                      features). Note that traffic sent to an External IP or LoadBalancer IP from
+                      within the cluster will always get "Cluster" semantics, but clients sending to
+                      a NodePort from within the cluster may need to take traffic policy into account
+                      when picking a node.
+                    type: string
+                  healthCheckNodePort:
+                    description: |-
+                      healthCheckNodePort specifies the healthcheck nodePort for the service.
+                      This only applies when type is set to LoadBalancer and
+                      externalTrafficPolicy is set to Local. If a value is specified, is
+                      in-range, and is not in use, it will be used. If not specified, a value
+                      will be automatically allocated. External systems (e.g. load-balancers)
+                      can use this port to determine if a given node holds endpoints for this
+                      service or not. If this field is specified when creating a Service
+                      which does not need it, creation will fail. This field will be wiped
+                      when updating a Service to no longer need it (e.g. changing type).
+                      This field cannot be updated once set.
+                    format: int32
+                    type: integer
+                  internalTrafficPolicy:
+                    description: |-
+                      InternalTrafficPolicy describes how nodes distribute service traffic they
+                      receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+                      only want to talk to endpoints of the service on the same node as the pod,
+                      dropping the traffic if there are no local endpoints. The default value,
+                      "Cluster", uses the standard behavior of routing to all endpoints evenly
+                      (possibly modified by topology and other features).
+                    type: string
+                  ipFamilies:
+                    description: |-
+                      IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+                      service. This field is usually assigned automatically based on cluster
+                      configuration and the ipFamilyPolicy field. If this field is specified
+                      manually, the requested family is available in the cluster,
+                      and ipFamilyPolicy allows it, it will be used; otherwise creation of
+                      the service will fail. This field is conditionally mutable: it allows
+                      for adding or removing a secondary IP family, but it does not allow
+                      changing the primary IP family of the Service. Valid values are "IPv4"
+                      and "IPv6". This field only applies to Services of types ClusterIP,
+                      NodePort, and LoadBalancer, and does apply to "headless" services.
+                      This field will be wiped when updating a Service to type ExternalName.
+
+                      This field may hold a maximum of two entries (dual-stack families, in
+                      either order). These families must correspond to the values of the
+                      clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                      governed by the ipFamilyPolicy field.
+                    items:
+                      description: |-
+                        IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                        to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  ipFamilyPolicy:
+                    description: |-
+                      IPFamilyPolicy represents the dual-stack-ness requested or required by
+                      this Service. If there is no value provided, then this field will be set
+                      to SingleStack. Services can be "SingleStack" (a single IP family),
+                      "PreferDualStack" (two IP families on dual-stack configured clusters or
+                      a single IP family on single-stack clusters), or "RequireDualStack"
+                      (two IP families on dual-stack configured clusters, otherwise fail). The
+                      ipFamilies and clusterIPs fields depend on the value of this field. This
+                      field will be wiped when updating a service to type ExternalName.
+                    type: string
+                  loadBalancerClass:
+                    description: |-
+                      loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                      If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                      e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                      This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                      balancer implementation is used, today this is typically done through the cloud provider integration,
+                      but should apply for any default implementation. If set, it is assumed that a load balancer
+                      implementation is watching for Services with a matching class. Any default load balancer
+                      implementation (e.g. cloud providers) should ignore Services that set this field.
+                      This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                      Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                    type: string
+                  loadBalancerIP:
+                    description: |-
+                      Only applies to Service Type: LoadBalancer.
+                      This feature depends on whether the underlying cloud-provider supports specifying
+                      the loadBalancerIP when a load balancer is created.
+                      This field will be ignored if the cloud-provider does not support the feature.
+                      Deprecated: This field was under-specified and its meaning varies across implementations.
+                      Using it is non-portable and it may not support dual-stack.
+                      Users are encouraged to use implementation-specific annotations when available.
+                    type: string
+                  loadBalancerSourceRanges:
+                    description: |-
+                      If specified and supported by the platform, traffic through the
+                      cloud-provider load-balancer will be restricted to the specified client
+                      IPs. This field will be ignored if the cloud-provider does not support
+                      the feature.
+                      More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  ports:
+                    description: |-
+                      The list of ports that are exposed by this service.
+                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                    items:
+                      description: ServicePort contains information on service's
+                        port.
+                      properties:
+                        appProtocol:
+                          description: |-
+                            The application protocol for this port.
+                            This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                            This field follows standard Kubernetes label syntax.
+                            Valid values are either:
+
+                            * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                            RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                            * Kubernetes-defined prefixed names:
+                              * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                              * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                              * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                            * Other protocols should use implementation-defined prefixed names such as
+                            mycompany.com/my-custom-protocol.
+                          type: string
+                        name:
+                          description: |-
+                            The name of this port within the service. This must be a DNS_LABEL.
+                            All ports within a ServiceSpec must have unique names. When considering
+                            the endpoints for a Service, this must match the 'name' field in the
+                            EndpointPort.
+                            Optional if only one ServicePort is defined on this service.
+                          type: string
+                        nodePort:
+                          description: |-
+                            The port on each node on which this service is exposed when type is
+                            NodePort or LoadBalancer. Usually assigned by the system. If a value is
+                            specified, in-range, and not in use it will be used, otherwise the
+                            operation will fail. If not specified, a port will be allocated if this
+                            Service requires one. If this field is specified when creating a
+                            Service which does not need it, creation will fail. This field will be
+                            wiped when updating a Service to no longer need it (e.g. changing type
+                            from NodePort to ClusterIP).
+                            More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+                          format: int32
+                          type: integer
+                        port:
+                          description: The port that will be exposed by this service.
+                          format: int32
+                          type: integer
+                        protocol:
+                          default: TCP
+                          description: |-
+                            The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+                            Default is TCP.
+                          type: string
+                        targetPort:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          description: |-
+                            Number or name of the port to access on the pods targeted by the service.
+                            Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+                            If this is a string, it will be looked up as a named port in the
+                            target Pod's container ports. If this is not specified, the value
+                            of the 'port' field is used (an identity map).
+                            This field is ignored for services with clusterIP=None, and should be
+                            omitted or set equal to the 'port' field.
+                            More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+                          x-kubernetes-int-or-string: true
+                      required:
+                      - port
+                      type: object
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - port
+                    - protocol
+                    x-kubernetes-list-type: map
+                  publishNotReadyAddresses:
+                    description: |-
+                      publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+                      Service should disregard any indications of ready/not-ready.
+                      The primary use case for setting this field is for a StatefulSet's Headless Service to
+                      propagate SRV DNS records for its Pods for the purpose of peer discovery.
+                      The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+                      Services interpret this to mean that all endpoints are considered "ready" even if the
+                      Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+                      through the Endpoints or EndpointSlice resources can safely assume this behavior.
+                    type: boolean
+                  selector:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Route service traffic to pods with label keys and values matching this
+                      selector. If empty or not present, the service is assumed to have an
+                      external process managing its endpoints, which Kubernetes will not
+                      modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+                      Ignored if type is ExternalName.
+                      More info: https://kubernetes.io/docs/concepts/services-networking/service/
+                    type: object
+                    x-kubernetes-map-type: atomic
+                  sessionAffinity:
+                    description: |-
+                      Supports "ClientIP" and "None". Used to maintain session affinity.
+                      Enable client IP based session affinity.
+                      Must be ClientIP or None.
+                      Defaults to None.
+                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                    type: string
+                  sessionAffinityConfig:
+                    description: sessionAffinityConfig contains the configurations
+                      of session affinity.
+                    properties:
+                      clientIP:
+                        description: clientIP contains the configurations of Client
+                          IP based session affinity.
+                        properties:
+                          timeoutSeconds:
+                            description: |-
+                              timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+                              The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
+                              Default value is 10800 (for 3 hours).
+                            format: int32
+                            type: integer
+                        type: object
+                    type: object
+                  trafficDistribution:
+                    description: |-
+                      TrafficDistribution offers a way to express preferences for how traffic is
+                      distributed to Service endpoints. Implementations can use this field as a
+                      hint, but are not required to guarantee strict adherence. If the field is
+                      not set, the implementation will apply its default routing strategy. If set
+                      to "PreferClose", implementations should prioritize endpoints that are
+                      topologically close (e.g., same zone).
+                      This is a beta field and requires enabling ServiceTrafficDistribution feature.
+                    type: string
+                  type:
+                    description: |-
+                      type determines how the Service is exposed. Defaults to ClusterIP. Valid
+                      options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+                      "ClusterIP" allocates a cluster-internal IP address for load-balancing
+                      to endpoints. Endpoints are determined by the selector or if that is not
+                      specified, by manual construction of an Endpoints object or
+                      EndpointSlice objects. If clusterIP is "None", no virtual IP is
+                      allocated and the endpoints are published as a set of endpoints rather
+                      than a virtual IP.
+                      "NodePort" builds on ClusterIP and allocates a port on every node which
+                      routes to the same endpoints as the clusterIP.
+                      "LoadBalancer" builds on NodePort and creates an external load-balancer
+                      (if supported in the current cloud) which routes to the same endpoints
+                      as the clusterIP.
+                      "ExternalName" aliases this service to the specified externalName.
+                      Several other fields do not apply to ExternalName services.
+                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                    type: string
+                type: object
+            type: object
+          template:
+            description: The template of the Pod to be created
+            properties:
+              metadata:
+                description: |-
+                  Standard object's metadata.
+                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Annotations is an unstructured key value map stored with a resource that may be
+                      set by external tools to store and retrieve arbitrary metadata. They are not
+                      queryable and should be preserved when modifying objects.
+                      More info: http://kubernetes.io/docs/user-guide/annotations
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Map of string keys and values that can be used to organize and categorize
+                      (scope and select) objects. May match selectors of replication controllers
+                      and services.
+                      More info: http://kubernetes.io/docs/user-guide/labels
+                    type: object
+                  name:
+                    description: The name of the resource. Only supported for
+                      certain types
+                    type: string
+                type: object
+              spec:
+                description: |-
+                  Specification of the desired behavior of the pod.
+                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                properties:
+                  activeDeadlineSeconds:
+                    description: |-
+                      Optional duration in seconds the pod may be active on the node relative to
+                      StartTime before the system will actively try to mark it failed and kill associated containers.
+                      Value must be a positive integer.
+                    format: int64
+                    type: integer
+                  affinity:
+                    description: If specified, the pod's scheduling constraints
+                    properties:
+                      nodeAffinity:
+                        description: Describes node affinity scheduling rules
+                          for the pod.
+                        properties:
+                          preferredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              The scheduler will prefer to schedule pods to nodes that satisfy
+                              the affinity expressions specified by this field, but it may choose
+                              a node that violates one or more of the expressions. The node that is
+                              most preferred is the one with the greatest sum of weights, i.e.
+                              for each node that meets all of the scheduling requirements (resource
+                              request, requiredDuringScheduling affinity expressions, etc.),
+                              compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node matches the corresponding matchExpressions; the
+                              node(s) with the highest sum are the most preferred.
+                            items:
+                              description: |-
+                                An empty preferred scheduling term matches all objects with implicit weight 0
+                                (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                              properties:
+                                preference:
+                                  description: A node selector term, associated
+                                    with the corresponding weight.
+                                  properties:
+                                    matchExpressions:
+                                      description: A list of node selector requirements
+                                        by node's labels.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the
+                                              selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchFields:
+                                      description: A list of node selector requirements
+                                        by node's fields.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the
+                                              selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                weight:
+                                  description: Weight associated with matching
+                                    the corresponding nodeSelectorTerm, in the
+                                    range 1-100.
+                                  format: int32
+                                  type: integer
+                              required:
+                              - preference
+                              - weight
+                              type: object
+                            type: array
+                            x-kubernetes-list-type: atomic
+                          requiredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              If the affinity requirements specified by this field are not met at
+                              scheduling time, the pod will not be scheduled onto the node.
+                              If the affinity requirements specified by this field cease to be met
+                              at some point during pod execution (e.g. due to an update), the system
+                              may or may not try to eventually evict the pod from its node.
+                            properties:
+                              nodeSelectorTerms:
+                                description: Required. A list of node selector
+                                  terms. The terms are ORed.
+                                items:
+                                  description: |-
+                                    A null or empty node selector term matches no objects. The requirements of
+                                    them are ANDed.
+                                    The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                                  properties:
+                                    matchExpressions:
+                                      description: A list of node selector requirements
+                                        by node's labels.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the
+                                              selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchFields:
+                                      description: A list of node selector requirements
+                                        by node's fields.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the
+                                              selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            required:
+                            - nodeSelectorTerms
+                            type: object
+                            x-kubernetes-map-type: atomic
+                        type: object
+                      podAffinity:
+                        description: Describes pod affinity scheduling rules (e.g.
+                          co-locate this pod in the same node, zone, etc. as some
+                          other pod(s)).
+                        properties:
+                          preferredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              The scheduler will prefer to schedule pods to nodes that satisfy
+                              the affinity expressions specified by this field, but it may choose
+                              a node that violates one or more of the expressions. The node that is
+                              most preferred is the one with the greatest sum of weights, i.e.
+                              for each node that meets all of the scheduling requirements (resource
+                              request, requiredDuringScheduling affinity expressions, etc.),
+                              compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+                              node(s) with the highest sum are the most preferred.
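+                            # Illustrative sketch (values are hypothetical, not part of the
+                            # generated schema): a soft preference to co-locate with pods of
+                            # one application on the same node:
+                            #   preferredDuringSchedulingIgnoredDuringExecution:
+                            #   - weight: 100
+                            #     podAffinityTerm:
+                            #       topologyKey: kubernetes.io/hostname
+                            #       labelSelector:
+                            #         matchLabels:
+                            #           app: pooler-example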
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
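+                                      # Hypothetical illustration: listing "pod-template-hash"
+                                      # here makes the term distinguish pods of different
+                                      # ReplicaSet revisions during a rolling update.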
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
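+                            # Sketch only (illustrative values): a hard co-scheduling
+                            # constraint keeping these pods in the same zone as the
+                            # selected pods:
+                            #   requiredDuringSchedulingIgnoredDuringExecution:
+                            #   - topologyKey: topology.kubernetes.io/zone
+                            #     labelSelector:
+                            #       matchLabels:
+                            #         app: pooler-example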
+                            items:
+                              description: |-
+                                Defines a set of pods (namely those matching the labelSelector
+                                relative to the given namespace(s)) that this pod should be
+                                co-located (affinity) or not co-located (anti-affinity) with,
+                                where co-located is defined as running on a node whose value of
+                                the label with key <topologyKey> matches that of any node on which
+                                a pod of the set of pods is running
+                              properties:
+                                labelSelector:
+                                  description: |-
+                                    A label query over a set of resources, in this case pods.
+                                    If it's null, this PodAffinityTerm matches with no Pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list
+                                        of label selector requirements. The requirements
+                                        are ANDed.
+                                      items:
+                                        description: |-
+                                          A label selector requirement is a selector that contains values, a key, and an operator that
+                                          relates the key and values.
+                                        properties:
+                                          key:
+                                            description: key is the label key
+                                              that the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              operator represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists and DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              values is an array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: |-
+                                        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                        operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                matchLabelKeys:
+                                  description: |-
+                                    MatchLabelKeys is a set of pod label keys to select which pods will
+                                    be taken into consideration. The keys are used to lookup values from the
+                                    incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                    to select the group of existing pods which pods will be taken into consideration
+                                    for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                    pod labels will be ignored. The default value is empty.
+                                    The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                    Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                    This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                mismatchLabelKeys:
+                                  description: |-
+                                    MismatchLabelKeys is a set of pod label keys to select which pods will
+                                    be taken into consideration. The keys are used to lookup values from the
+                                    incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                    to select the group of existing pods which pods will be taken into consideration
+                                    for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                    pod labels will be ignored. The default value is empty.
+                                    The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+                                    Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+                                    This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+                              for each node that meets all of the scheduling requirements (resource
+                              request, requiredDuringScheduling anti-affinity expressions, etc.),
+                              compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+                              node(s) with the highest sum are the most preferred.
+                            items:
+                              description: The weights of all of the matched WeightedPodAffinityTerm
+                                fields are added per-node to find the most preferred
+                                node(s)
+                              properties:
+                                podAffinityTerm:
+                                  description: Required. A pod affinity term,
+                                    associated with the corresponding weight.
+                                  properties:
+                                    labelSelector:
+                                      description: |-
+                                        A label query over a set of resources, in this case pods.
+                                        If it's null, this PodAffinityTerm matches with no Pods.
+                                      properties:
+                                        matchExpressions:
+                                          description: matchExpressions is a list
+                                            of label selector requirements. The
+                                            requirements are ANDed.
+                                          items:
+                                            description: |-
+                                              A label selector requirement is a selector that contains values, a key, and an operator that
+                                              relates the key and values.
+                                            properties:
+                                              key:
+                                                description: key is the label
+                                                  key that the selector applies
+                                                  to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  operator represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  values is an array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. This array is replaced during a strategic
+                                                  merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                        matchLabels:
+                                          additionalProperties:
+                                            type: string
+                                          description: |-
+                                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                            map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                            operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                          type: object
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    matchLabelKeys:
+                                      description: |-
+                                        MatchLabelKeys is a set of pod label keys to select which pods will
+                                        be taken into consideration. The keys are used to lookup values from the
+                                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                        to select the group of existing pods which pods will be taken into consideration
+                                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                        pod labels will be ignored. The default value is empty.
+                                        The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                        Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                        This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    mismatchLabelKeys:
+                                      description: |-
+                                        MismatchLabelKeys is a set of pod label keys to select which pods will
+                                        be taken into consideration. The keys are used to lookup values from the
+                                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                        to select the group of existing pods which pods will be taken into consideration
+                                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                        pod labels will be ignored. The default value is empty.
+                                        The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+                                        Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+                                        This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    namespaceSelector:
+                                      description: |-
+                                        A label query over the set of namespaces that the term applies to.
+                                        The term is applied to the union of the namespaces selected by this field
+                                        and the ones listed in the namespaces field.
+                                        null selector and null or empty namespaces list means "this pod's namespace".
+                                        An empty selector ({}) matches all namespaces.
+                                      properties:
+                                        matchExpressions:
+                                          description: matchExpressions is a list
+                                            of label selector requirements. The
+                                            requirements are ANDed.
+                                          items:
+                                            description: |-
+                                              A label selector requirement is a selector that contains values, a key, and an operator that
+                                              relates the key and values.
+                                            properties:
+                                              key:
+                                                description: key is the label
+                                                  key that the selector applies
+                                                  to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  operator represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  values is an array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. This array is replaced during a strategic
+                                                  merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                        matchLabels:
+                                          additionalProperties:
+                                            type: string
+                                          description: |-
+                                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                            map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                            operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                          type: object
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    namespaces:
+                                      description: |-
+                                        namespaces specifies a static list of namespace names that the term applies to.
+                                        The term is applied to the union of the namespaces listed in this field
+                                        and the ones selected by namespaceSelector.
+                                        null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    topologyKey:
+                                      description: |-
+                                        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+                                        the labelSelector in the specified namespaces, where co-located is defined as running on a node
+                                        whose value of the label with key topologyKey matches that of any node on which any of the
+                                        selected pods is running.
+                                        Empty topologyKey is not allowed.
+                                      type: string
+                                  required:
+                                  - topologyKey
+                                  type: object
+                                weight:
+                                  description: |-
+                                    weight associated with matching the corresponding podAffinityTerm,
+                                    in the range 1-100.
+                                  format: int32
+                                  type: integer
+                              required:
+                              - podAffinityTerm
+                              - weight
+                              type: object
+                            type: array
+                            x-kubernetes-list-type: atomic
+                          requiredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              If the anti-affinity requirements specified by this field are not met at
+                              scheduling time, the pod will not be scheduled onto the node.
+                              If the anti-affinity requirements specified by this field cease to be met
+                              at some point during pod execution (e.g. due to a pod label update), the
+                              system may or may not try to eventually evict the pod from its node.
+                              When there are multiple elements, the lists of nodes corresponding to each
+                              podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                            items:
+                              description: |-
+                                Defines a set of pods (namely those matching the labelSelector
+                                relative to the given namespace(s)) that this pod should be
+                                co-located (affinity) or not co-located (anti-affinity) with,
+                                where co-located is defined as running on a node whose value of
+                                the label with key <topologyKey> matches that of any node on which
+                                a pod of the set of pods is running
+                              properties:
+                                labelSelector:
+                                  description: |-
+                                    A label query over a set of resources, in this case pods.
+                                    If it's null, this PodAffinityTerm matches with no Pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list
+                                        of label selector requirements. The requirements
+                                        are ANDed.
+                                      items:
+                                        description: |-
+                                          A label selector requirement is a selector that contains values, a key, and an operator that
+                                          relates the key and values.
+                                        properties:
+                                          key:
+                                            description: key is the label key
+                                              that the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              operator represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists and DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              values is an array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: |-
+                                        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                        operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                matchLabelKeys:
+                                  description: |-
+                                    MatchLabelKeys is a set of pod label keys to select which pods will
+                                    be taken into consideration. The keys are used to lookup values from the
+                                    incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                    to select the group of existing pods which pods will be taken into consideration
+                                    for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                    pod labels will be ignored. The default value is empty.
+                                    The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                    Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                    This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                mismatchLabelKeys:
+                                  description: |-
+                                    MismatchLabelKeys is a set of pod label keys to select which pods will
+                                    be taken into consideration. The keys are used to lookup values from the
+                                    incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                    to select the group of existing pods which pods will be taken into consideration
+                                    for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                    pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". 
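+                                          # Downward API sketch (illustrative, not part of the
+                                          # generated schema): expose the pod's own name to the
+                                          # container, e.g.
+                                          #   env:
+                                          #   - name: POD_NAME
+                                          #     valueFrom:
+                                          #       fieldRef:
+                                          #         fieldPath: metadata.name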
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
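+          # --- Editor's illustrative sketch (not part of the generated schema) ---
+          # A minimal postStart hook of the shape documented above; the command is a
+          # hypothetical placeholder:
+          #
+          #   lifecycle:
+          #     postStart:
+          #       exec:
+          #         command: ["/bin/sh", "-c", "echo started > /tmp/started"]
+          #
+          # As the postStart description notes, if the handler fails the container
+          # is terminated and restarted according to its restart policy.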
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
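+          # --- Editor's illustrative sketch (not part of the generated schema) ---
+          # A preStop hook that delays shutdown so in-flight work can drain, using
+          # the `sleep` handler documented above:
+          #
+          #   lifecycle:
+          #     preStop:
+          #       sleep:
+          #         seconds: 10
+          #
+          # The termination grace period keeps counting down while the hook runs,
+          # so the sleep must fit within terminationGracePeriodSeconds.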
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
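+          # --- Editor's illustrative sketch (not part of the generated schema) ---
+          # An httpGet liveness probe of the shape documented above; the path and
+          # port name are hypothetical:
+          #
+          #   livenessProbe:
+          #     httpGet:
+          #       path: /healthz
+          #       port: http        # a named port; a number such as 8080 also works
+          #       scheme: HTTP
+          #     failureThreshold: 3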
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
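+          # --- Editor's illustrative note (not part of the generated schema) ---
+          # With the timing fields documented above, a broken container is restarted
+          # roughly initialDelaySeconds + periodSeconds * failureThreshold seconds
+          # after it starts, e.g. 5 + 10 * 3 = 35 seconds for:
+          #
+          #   livenessProbe:
+          #     initialDelaySeconds: 5
+          #     periodSeconds: 10
+          #     failureThreshold: 3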
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
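+          # --- Editor's illustrative sketch (not part of the generated schema) ---
+          # A resizePolicy of the shape described above. NotRequired is the default
+          # named in the schema; RestartContainer is the alternative accepted by
+          # current Kubernetes releases (an assumption, not spelled out above):
+          #
+          #   resizePolicy:
+          #   - resourceName: cpu
+          #     restartPolicy: NotRequired
+          #   - resourceName: memory
+          #     restartPolicy: RestartContainer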
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
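+          # --- Editor's illustrative sketch (not part of the generated schema) ---
+          # Requests/limits in the quantity syntax matched by the pattern above,
+          # combined with the init-container "sidecar" form that the restartPolicy
+          # description documents; the container name is hypothetical:
+          #
+          #   initContainers:
+          #   - name: log-shipper      # keeps running alongside regular containers
+          #     restartPolicy: Always
+          #     resources:
+          #       requests:
+          #         cpu: 100m
+          #         memory: 128Mi
+          #       limits:
+          #         memory: 256Mi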
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
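+          # --- Editor's illustrative note (not part of the generated schema) ---
+          # A startupProbe gives a slow-starting process a budget of up to
+          # failureThreshold * periodSeconds before the kubelet restarts the
+          # container, just as for a failed liveness probe; e.g. 30 * 10 = 300
+          # seconds for (endpoint and port are hypothetical):
+          #
+          #   startupProbe:
+          #     httpGet:
+          #       path: /healthz
+          #       port: 8080
+          #     failureThreshold: 30
+          #     periodSeconds: 10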
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
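+          # --- Editor's illustrative sketch (not part of the generated schema) ---
+          # Surfacing crash detail through the termination-message fields described
+          # above: with FallbackToLogsOnError, the last chunk of the container log
+          # is used when the message file is empty and the container exits with an
+          # error:
+          #
+          #   terminationMessagePath: /dev/termination-log
+          #   terminationMessagePolicy: FallbackToLogsOnError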
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
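+          # --- Editor's illustrative sketch (not part of the generated schema) ---
+          # A read-only volumeMount of the shape documented above; the volume name
+          # and paths are hypothetical placeholders:
+          #
+          #   volumeMounts:
+          #   - name: config           # must match a volume declared in the pod
+          #     mountPath: /etc/app    # must not contain ':'
+          #     readOnly: true
+          #     subPath: app.conf      # mounts a single file from the volume's root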
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
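+          # --- Editor's illustrative note (not part of the generated schema) ---
+          # As the ephemeralContainers description above says, these containers are
+          # not created through the pod spec but via the pod's ephemeralcontainers
+          # subresource; the usual entry point is kubectl debug, e.g.:
+          #
+          #   kubectl debug -it pod/my-pod --image=busybox --target=app
+          #
+          # (pod and container names are hypothetical). Lifecycle hooks, probes and
+          # ports are rejected for ephemeral containers, as their field descriptions
+          # note.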
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                      first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                      at which time stdin is closed and remains closed until the container is restarted. If this
+                      flag is false, a container process that reads from stdin will never receive an EOF.
+                      Default is false.
+                    type: boolean
+                  targetContainerName:
+                    description: |-
+                      If set, the name of the container from PodSpec that this ephemeral container targets.
+                      The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+                      If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+                      The container runtime must implement support for this feature. If the runtime does not
+                      support namespace targeting then the result of setting this field is undefined.
+                    type: string
+                  terminationMessagePath:
+                    description: |-
+                      Optional: Path at which the file to which the container's termination message
+                      will be written is mounted into the container's filesystem.
+                      Message written is intended to be brief final status, such as an assertion failure message.
+                      Will be truncated by the node if greater than 4096 bytes. The total message length across
+                      all containers will be limited to 12kb.
+                      Defaults to /dev/termination-log.
+                      Cannot be updated.
+                    type: string
+                  terminationMessagePolicy:
+                    description: |-
+                      Indicate how the termination message should be populated. File will use the contents of
+                      terminationMessagePath to populate the container status message on both success and failure.
+                      FallbackToLogsOnError will use the last chunk of container log output if the termination
+                      message file is empty and the container exited with an error.
+                      The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                      Defaults to File.
+                      Cannot be updated.
+                    type: string
+                  tty:
+                    description: |-
+                      Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                      Default is false.
+                    type: boolean
+                  volumeDevices:
+                    description: volumeDevices is the list of block devices
+                      to be used by the container.
+                    items:
+                      description: volumeDevice describes a mapping of a
+                        raw block device within a container.
+                      properties:
+                        devicePath:
+                          description: devicePath is the path inside of
+                            the container that the device will be mapped
+                            to.
+                          type: string
+                        name:
+                          description: name must match the name of a persistentVolumeClaim
+                            in the pod
+                          type: string
+                      required:
+                      - devicePath
+                      - name
+                      type: object
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - devicePath
+                    x-kubernetes-list-type: map
+                  volumeMounts:
+                    description: |-
+                      Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+                      Cannot be updated.
+                    items:
+                      description: VolumeMount describes a mounting of a
+                        Volume within a container.
+                      properties:
+                        mountPath:
+                          description: |-
+                            Path within the container at which the volume should be mounted. Must
+                            not contain ':'.
+                          type: string
+                        mountPropagation:
+                          description: |-
+                            mountPropagation determines how mounts are propagated from the host
+                            to container and the other way around.
+                            When not set, MountPropagationNone is used.
+                            This field is beta in 1.10.
+                            When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+                            (which defaults to None).
+                          type: string
+                        name:
+                          description: This must match the Name of a Volume.
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+                mitigating container breakout vulnerabilities while still allowing users to run their
+                containers as root without actually having root privileges on the host.
+                This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+              type: boolean
+            hostname:
+              description: |-
+                Specifies the hostname of the Pod.
+                If not specified, the pod's hostname will be set to a system-defined value.
+              type: string
+            imagePullSecrets:
+              description: |-
+                ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+                If specified, these secrets will be passed to individual puller implementations for them to use.
+                More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+              items:
+                description: |-
+                  LocalObjectReference contains enough information to let you locate the
+                  referenced object inside the same namespace.
+                properties:
+                  name:
+                    default: ""
+                    description: |-
+                      Name of the referent.
+                      This field is effectively required, but due to backwards compatibility is
+                      allowed to be empty. Instances of this type with an empty value here are
+                      almost certainly wrong.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+              type: array
+              x-kubernetes-list-map-keys:
+              - name
+              x-kubernetes-list-type: map
+            initContainers:
+              description: |-
+                List of initialization containers belonging to the pod.
+                Init containers are executed in order prior to containers being started. If any
+                init container fails, the pod is considered to have failed and is handled according
+                to its restartPolicy. The name for an init container or normal container must be
+                unique among all containers.
+                Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+                The resourceRequirements of an init container are taken into account during scheduling
+                by finding the highest request/limit for each resource type, and then using the max
+                of that value or the sum of the normal containers. Limits are applied to init containers
+                in a similar fashion.
+                Init containers cannot currently be added or removed.
+                Cannot be updated.
+                More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+              items:
+                description: A single application container that you want
+                  to run within a pod.
+                properties:
+                  args:
+                    description: |-
+                      Arguments to the entrypoint.
+                      The container image's CMD is used if this is not provided.
+                      Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                      cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                      produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                      of whether the variable exists or not. Cannot be updated.
+                      More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  command:
+                    description: |-
+                      Entrypoint array. Not executed within a shell.
+                      The container image's ENTRYPOINT is used if this is not provided.
+                      Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
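+                    # Editor's sketch (illustrative): a TCP readinessProbe using the fields
+                    # documented above; port 5432 assumes a PostgreSQL-style container and is
+                    # not a schema default:
+                    #
+                    #   readinessProbe:
+                    #     tcpSocket:
+                    #       port: 5432
+                    #     periodSeconds: 10
+                    #     timeoutSeconds: 1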
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
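+                    # Editor's note (illustrative): the startupProbe described above effectively
+                    # grants a slow-starting process failureThreshold * periodSeconds to come up,
+                    # e.g. 30 * 10s = 300s; the pg_isready command is an assumed example:
+                    #
+                    #   startupProbe:
+                    #     exec:
+                    #       command: ["pg_isready"]
+                    #     failureThreshold: 30
+                    #     periodSeconds: 10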
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. 
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
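+                    # Editor's sketch (illustrative): a pod-level securityContext using the
+                    # fields above; the numeric IDs are arbitrary examples, not recommendations:
+                    #
+                    #   securityContext:
+                    #     runAsNonRoot: true
+                    #     runAsUser: 1001
+                    #     fsGroup: 1001
+                    #     fsGroupChangePolicy: OnRootMismatch
+                    #     seccompProfile:
+                    #       type: RuntimeDefault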
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
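+                    # Editor's example (hypothetical value): namespaced sysctls such as the one
+                    # below are applied per pod; as noted above, pods requesting sysctls that the
+                    # container runtime does not support may fail to launch:
+                    #
+                    #   securityContext:
+                    #     sysctls:
+                    #     - name: net.ipv4.tcp_keepalive_time
+                    #       value: "600"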
+                    type: string
+                  serviceAccountName:
+                    description: |-
+                      ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+                      More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+                    type: string
+                  setHostnameAsFQDN:
+                    description: |-
+                      If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+                      In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+                      In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+                      If a pod does not have FQDN, this has no effect.
+                      Default to false.
+                    type: boolean
+                  shareProcessNamespace:
+                    description: |-
+                      Share a single process namespace between all of the containers in a pod.
+                      When this is set containers will be able to view and signal processes from other containers
+                      in the same pod, and the first process in each container will not be assigned PID 1.
+                      HostPID and ShareProcessNamespace cannot both be set.
+                      Optional: Default to false.
+                    type: boolean
+                  subdomain:
+                    description: |-
+                      If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+                      If not specified, the pod will not have a domainname at all.
+                    type: string
+                  terminationGracePeriodSeconds:
+                    description: |-
+                      Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+                      Value must be non-negative integer. The value zero indicates stop immediately via
+                      the kill signal (no opportunity to shut down).
+                      If this value is nil, the default grace period will be used instead.
+                      The grace period is the duration in seconds after the processes running in the pod are sent
+                      a termination signal and the time when the processes are forcibly halted with a kill signal.
+                      Set this value longer than the expected cleanup time for your process.
+                      Defaults to 30 seconds.
+                    format: int64
+                    type: integer
+                  tolerations:
+                    description: If specified, the pod's tolerations.
+                    items:
+                      description: |-
+                        The pod this Toleration is attached to tolerates any taint that matches
+                        the triple <key,value,effect> using the matching operator <operator>.
+                      properties:
+                        effect:
+                          description: |-
+                            Effect indicates the taint effect to match. Empty means match all taint effects.
+                            When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                          type: string
+                        key:
+                          description: |-
+                            Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                            If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                          type: string
+                        operator:
+                          description: |-
+                            Operator represents a key's relationship to the value.
+                            Valid operators are Exists and Equal. Defaults to Equal.
+                            Exists is equivalent to wildcard for value, so that a pod can
+                            tolerate all taints of a particular category.
+                          type: string
+                        tolerationSeconds:
+                          description: |-
+                            TolerationSeconds represents the period of time the toleration (which must be
+                            of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                            it is not set, which means tolerate the taint forever (do not evict). Zero and
+                            negative values will be treated as 0 (evict immediately) by the system.
+                          format: int64
+                          type: integer
+                        value:
+                          description: |-
+                            Value is the taint value the toleration matches to.
+                            If the operator is Exists, the value should be empty, otherwise just a regular string.
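+                    # Editor's sketch (illustrative; the taint key is an assumption): a toleration
+                    # matching a NoSchedule taint with the Exists operator described above:
+                    #
+                    #   tolerations:
+                    #   - key: node-role.kubernetes.io/database
+                    #     operator: Exists
+                    #     effect: NoSchedule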
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+                            | zone1 | zone2 | zone3 |
+                            | P P | P P | P |
+                            - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+                            scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+                            violate MaxSkew(1).
+                            - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+                            When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+                            to topologies that satisfy it.
+                            It's a required field. Default value is 1 and 0 is not allowed.
+                          format: int32
+                          type: integer
+                        minDomains:
+                          description: |-
+                            MinDomains indicates a minimum number of eligible domains.
+                            When the number of eligible domains with matching topology keys is less than minDomains,
+                            Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+                            And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+                            this value has no effect on scheduling.
+                            As a result, when the number of eligible domains is less than minDomains,
+                            scheduler won't schedule more than maxSkew Pods to those domains.
+                            If value is nil, the constraint behaves as if MinDomains is equal to 1.
+                            Valid values are integers greater than 0.
+                            When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+                            For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+                            labelSelector spread as 2/2/2:
+                            | zone1 | zone2 | zone3 |
+                            | P P | P P | P P |
+                            The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+                            In this situation, new pod with the same labelSelector cannot be scheduled,
+                            because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+                            it will violate MaxSkew.
+                          format: int32
+                          type: integer
+                        nodeAffinityPolicy:
+                          description: |-
+                            NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+                            when calculating pod topology spread skew. Options are:
+                            - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+                            - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+                            If this value is nil, the behavior is equivalent to the Honor policy.
+                            This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+                          type: string
+                        nodeTaintsPolicy:
+                          description: |-
+                            NodeTaintsPolicy indicates how we will treat node taints when calculating
+                            pod topology spread skew. Options are:
+                            - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+                            has a toleration, are included.
+                            - Ignore: node taints are ignored. All nodes are included.
+
+                            If this value is nil, the behavior is equivalent to the Ignore policy.
+                            This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+                          type: string
+                        topologyKey:
+                          description: |-
+                            TopologyKey is the key of node labels. Nodes that have a label with this key
+                            and identical values are considered to be in the same topology.
+                            We consider each <key, value> as a "bucket", and try to put balanced number
+                            of pods into each bucket.
+                            We define a domain as a particular instance of a topology.
+                            Also, we define an eligible domain as a domain whose nodes meet the requirements of
+                            nodeAffinityPolicy and nodeTaintsPolicy.
+                            e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+                            And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+                            It's a required field.
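+                    # Editor's sketch (illustrative; the app label is an assumption): a zone
+                    # spread constraint combining maxSkew and topologyKey from above with
+                    # whenUnsatisfiable, which is defined next:
+                    #
+                    #   topologySpreadConstraints:
+                    #   - maxSkew: 1
+                    #     topologyKey: topology.kubernetes.io/zone
+                    #     whenUnsatisfiable: DoNotSchedule
+                    #     labelSelector:
+                    #       matchLabels:
+                    #         app: my-app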
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+ properties:
+ datasetName:
+ description: |-
+ datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker;
+ it should be considered as deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset.
+ This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects.
Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether to
+ support iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether to
+ support iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal.
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
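+ # Illustrative note (comment only; the schema here is generated by
+ # controller-gen): publishing every table in a single schema would be
+ # expressed on the Publication spec as:
+ #   target:
+ #     objects:
+ #       - tablesInSchema: app
+ # where "app" is an assumed schema name.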
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
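+ # Illustrative note (assumption, not part of the generated schema): setting
+ # `backupOwnerReference: self` makes each created Backup owned by its
+ # ScheduledBackup, so standard Kubernetes garbage collection removes those
+ # backups when the schedule itself is deleted.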
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to be immediately start after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup. 
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule + format: date-time + type: string + lastScheduleTime: + description: Information when was the last time that backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: 
ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: 
/validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From e947e3204c7029489ecc346ed2a73bbf59e52b65 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 24 Dec 2024 10:36:47 +0100 Subject: [PATCH 267/836] chore: update issue templates and backport & CI/CD workflows (#6445) Closes #6444 Signed-off-by: Gabriele Bartolini Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Leonardo Cecchi Co-authored-by: Jonathan Gonzalez V. Co-authored-by: Leonardo Cecchi --- .github/ISSUE_TEMPLATE/bug.yml | 9 ++++----- .github/renovate.json5 | 2 +- .github/workflows/backport.yml | 6 +++--- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- contribute/release_procedure.md | 6 ++++-- 6 files changed, 14 insertions(+), 13 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index d02633e40a..354cf7cb17 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -48,10 +48,10 @@ body: label: Version description: What is the version of CloudNativePG you are running? options: - - "1.24.0" - - "1.23.4" + - "1.25 (latest patch)" + - "1.24 (latest patch)" - "trunk (main)" - - "older in 1.23.x" + - "older in 1.24.x" - "older minor (unsupported)" validations: required: true @@ -60,11 +60,10 @@ body: attributes: label: What version of Kubernetes are you using? 
options: + - "1.32" - "1.31" - "1.30" - "1.29" - - "1.28" - - "1.27 (unsupported)" - "other (unsupported)" validations: required: true diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 00735afaea..af007aa499 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -8,8 +8,8 @@ baseBranches: [ 'main', 'release-1.22', - 'release-1.23', 'release-1.24', + 'release-1.25' ], ignorePaths: [ 'docs/**', diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 7cdf956314..1a1f7a13c4 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -32,8 +32,8 @@ jobs: labels: | backport-requested :arrow_backward: release-1.22 - release-1.23 release-1.24 + release-1.25 - name: Create comment uses: peter-evans/create-or-update-comment@v4 @@ -56,8 +56,8 @@ jobs: labels: | backport-requested :arrow_backward: release-1.22 - release-1.23 release-1.24 + release-1.25 ## backport pull request in condition when pr contains 'backport-requested' label and contains target branches labels back-porting-pr: @@ -73,7 +73,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.23, release-1.24] + branch: [release-1.22, release-1.24] env: PR: ${{ github.event.pull_request.number }} outputs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 7ce21a1d15..639cb9b9c4 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -69,7 +69,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.23, release-1.24] + branch: [release-1.22, release-1.24, release-1.25] steps: - name: Invoke workflow with inputs uses: benc-uk/workflow-dispatch@v1 diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 5b15566bba..b57299a5ef 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -52,7 +52,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.23, release-1.24] + branch: [release-1.22, release-1.24, release-1.25] steps: - name: Invoke workflow with inputs diff --git a/contribute/release_procedure.md b/contribute/release_procedure.md index 15aa72df43..18dd0270bf 100644 --- a/contribute/release_procedure.md +++ b/contribute/release_procedure.md @@ -144,8 +144,10 @@ This procedure must happen immediately before starting the release. **IMPORTANT:** Now we add support for the automatic backporting of merged pull requests from main to the new release branch. Once the new release branch is created, go back to `main` and submit a pull request to update the -[backport](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/backport.yml) -and [continuous delivery](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-delivery.yml) +[backport](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/backport.yml), +[continuous delivery](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-delivery.yml), +[continuous integration](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/workflows/continuous-integration.yml) +and [Renovate](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/renovate.json5) workflows to support the new release branch. 
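+For example, the per-branch matrix in those workflows gains the new branch.
+A minimal excerpt, mirroring the `continuous-delivery.yml` change in this
+patch (branch names are illustrative):
+
+```yaml
+    strategy:
+      fail-fast: false
+      matrix:
+        branch: [release-1.22, release-1.24, release-1.25]
+```
+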
And also remember to update the [github issue template](https://github.com/cloudnative-pg/cloudnative-pg/blob/main/.github/ISSUE_TEMPLATE/bug.yml). From 482dd0f2f0d31704985570c82a9b83daeeb09e5f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 26 Dec 2024 11:44:06 +0100 Subject: [PATCH 268/836] chore(deps): update helm/kind-action action to v1.12.0 (main) (#6428) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index b57299a5ef..4e52d12902 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -829,7 +829,7 @@ jobs: uses: actions/checkout@v4 - name: Setting up KinD cluster - uses: helm/kind-action@v1.11.0 + uses: helm/kind-action@v1.12.0 with: wait: "600s" version: ${{ env.KIND_VERSION }} From cbb5977a2edea8d61b3c6ba30e2f1c07e39bbff6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Dec 2024 11:40:44 +0100 Subject: [PATCH 269/836] fix(deps): update module github.com/onsi/gomega to v1.36.2 (main) (#6461) --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 78f66c0b64..c01aed9916 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.22.1 - github.com/onsi/gomega v1.36.1 + github.com/onsi/gomega v1.36.2 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 @@ -101,7 +101,7 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect - golang.org/x/net v0.32.0 // indirect + golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect @@ -110,7 +110,7 @@ require ( golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/protobuf v1.36.0 // indirect + google.golang.org/protobuf v1.36.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/go.sum b/go.sum index 8c96643b6a..10ec5d29be 100644 --- a/go.sum +++ b/go.sum @@ -144,8 +144,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= -github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= -github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv 
v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -225,8 +225,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= -golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -266,8 +266,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ= -google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From fda31256fbc92d706d9210e88cbce26fd02ad119 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 31 Dec 2024 08:47:39 +0100 Subject: [PATCH 270/836] test(e2e): raise `AssertClusterStandbysAreStreaming` timeout (#6455) Closes #6398 Signed-off-by: Armando Ruocco --- tests/e2e/asserts_test.go | 6 +++--- tests/e2e/cluster_microservice_test.go | 2 +- tests/e2e/drain_node_test.go | 6 +++--- tests/e2e/fencing_test.go | 2 +- tests/e2e/pg_data_corruption_test.go | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index ccf73d21a9..1621205e04 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -1575,7 +1575,7 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN }) // Restored standby should be attached to restored primary - AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 120) + AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 140) // Gather Credentials appUser, appUserPass, err := testsUtils.GetCredentials(restoredClusterName, namespace, @@ -1638,7 +1638,7 @@ func AssertClusterRestore(namespace, restoreClusterFile, tableName string) { Expect(strings.Trim(out, "\n"), err).To(Equal("00000002")) // Restored standby should be attached to restored primary - 
AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 120) + AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 140) }) } @@ -1655,7 +1655,7 @@ func AssertClusterImport(namespace, clusterWithExternalClusterName, clusterName, AssertClusterIsReady(namespace, clusterWithExternalClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) // Restored standby should be attached to restored primary - AssertClusterStandbysAreStreaming(namespace, clusterWithExternalClusterName, 120) + AssertClusterStandbysAreStreaming(namespace, clusterWithExternalClusterName, 140) }) return cluster } diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index d957e1976f..476ea0e4aa 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -316,7 +316,7 @@ func assertImportRenamesSelectedDatabase( Expect(err).ToNot(HaveOccurred()) // We give more time than the usual 600s, since the recovery is slower AssertClusterIsReady(namespace, importedClusterName, 1000, env) - AssertClusterStandbysAreStreaming(namespace, importedClusterName, 120) + AssertClusterStandbysAreStreaming(namespace, importedClusterName, 140) }) tableLocator := TableLocator{ diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go index b018065c1b..d3ac7f8907 100644 --- a/tests/e2e/drain_node_test.go +++ b/tests/e2e/drain_node_test.go @@ -185,7 +185,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) AssertDataExpectedCount(env, tableLocator, 2) - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) }) // Scenario: all the pods of a cluster are on a single node and another schedulable node exists. 
@@ -304,7 +304,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) AssertDataExpectedCount(env, tableLocator, 2) - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) }) }) }) @@ -409,7 +409,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) AssertDataExpectedCount(env, tableLocator, 2) - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) err = nodes.UncordonAllNodes(env) Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/e2e/fencing_test.go b/tests/e2e/fencing_test.go index 4644a4b3ab..a43e7a4191 100644 --- a/tests/e2e/fencing_test.go +++ b/tests/e2e/fencing_test.go @@ -158,7 +158,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { Expect(beforeFencingPodName).Should(BeEquivalentTo(currentPrimaryPodInfo.GetName())) }) By("all followers should be streaming again from the primary instance", func() { - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) }) checkFencingAnnotationSet(fencingMethod, nil) }) diff --git a/tests/e2e/pg_data_corruption_test.go b/tests/e2e/pg_data_corruption_test.go index c8c6fbe321..c0672f4479 100644 --- a/tests/e2e/pg_data_corruption_test.go +++ b/tests/e2e/pg_data_corruption_test.go @@ -194,7 +194,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( }) AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) AssertDataExpectedCount(env, tableLocator, 2) - AssertClusterStandbysAreStreaming(namespace, clusterName, 120) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) } Context("plain cluster", func() { From 6d0f57e72389c78110d8f53eba806679d8dece40 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 1 Jan 2025 18:56:25 +0100 Subject: [PATCH 271/836] fix(deps): update github.com/cloudnative-pg/cnpg-i digest to 7e2cfa5 (main) (#6451) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c01aed9916..7b94c93e14 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a - github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee + github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 diff --git a/go.sum b/go.sum index 10ec5d29be..eaa9073eca 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrEa9P/HfA6csNOh0DRlUyeUoKuByV57tLnf2rTIqfU= github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee h1:PJc4BpPu0b684BrwWzy0B5W/CSqrnUV+jv3PTrSUx8g= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241218212131-cbc4287931ee/go.mod 
h1:ahVFn+JzYkFfv7Iwpswu4lsuC9yK7zZupM1ssaIKPFI= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A= +github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY= github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d h1:v9IgiRYa7r+KCUxl5lCyUXdhsefZ90engPSMNLBqYmc= github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= From 8620d2d86bd114262b80b62860e45d77ed2f6859 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 1 Jan 2025 20:04:06 +0100 Subject: [PATCH 272/836] fix(deps): update github.com/cloudnative-pg/machinery digest to 66cd032 (main) (#6449) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7b94c93e14..5efa611133 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc - github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d + github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index eaa9073eca..79dc98c475 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrE github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A= github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY= -github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d h1:v9IgiRYa7r+KCUxl5lCyUXdhsefZ90engPSMNLBqYmc= -github.com/cloudnative-pg/machinery v0.0.0-20241219102532-2807bc88310d/go.mod h1:uBHGRIk5rt07mO4zjIC1uvGBWTH6PqIiD1PfpvPGZKU= +github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607 h1:Jymgt/H6iNoUZCqF6YtOqE2GgQIM1e1tWjT42B6vPJs= +github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607/go.mod h1:n6br6GuNXcwYI5SgRArt9rM2hgZ1ElZr4vkJCWfiC/U= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= From 6c8afc4275ee439cefb44217ae0d71b5bdcb715a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 2 Jan 2025 09:22:48 +0100 Subject: [PATCH 273/836] fix(deps): update module github.com/onsi/ginkgo/v2 to v2.22.2 (main) (#6481) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5efa611133..8ddc9591f2 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.22.1 + github.com/onsi/ginkgo/v2 v2.22.2 
 github.com/onsi/gomega v1.36.2
 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2
 github.com/prometheus/client_golang v1.20.5
diff --git a/go.sum b/go.sum
index 79dc98c475..22f177486f 100644
--- a/go.sum
+++ b/go.sum
@@ -142,8 +142,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM=
-github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM=
+github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
+github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
 github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
 github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=

From 759760a7064ff239de7d59c1e7339bf028631216 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 2 Jan 2025 10:56:41 +0100
Subject: [PATCH 274/836] fix: linter errors for golangci-lint 1.63.1 (#6489)

The new golangci-lint version, 1.63.1, detected an issue that we should
fix.

Closes #6488

Signed-off-by: Jonathan Gonzalez V.
Signed-off-by: Leonardo Cecchi
Co-authored-by: Leonardo Cecchi
---
 internal/cmd/plugin/psql/psql_test.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/internal/cmd/plugin/psql/psql_test.go b/internal/cmd/plugin/psql/psql_test.go
index 682705e76b..22058e5d8b 100644
--- a/internal/cmd/plugin/psql/psql_test.go
+++ b/internal/cmd/plugin/psql/psql_test.go
@@ -69,8 +69,9 @@ var _ = Describe("psql launcher", func() {
 			}
 
 			_, err := cmd.getPodName()
-			Expect(err).To(HaveOccurred())
-			Expect(err.(*ErrMissingPod)).ToNot(BeNil())
+			Expect(err).To(MatchError((&ErrMissingPod{
+				role: "primary",
+			}).Error()))
 		})
 
 		It("correctly composes a kubectl exec command line", func() {

From b16ceef20b058204bb14f9c5db5ab0cde725c2ba Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 2 Jan 2025 11:22:44 +0100
Subject: [PATCH 275/836] fix: add missing release-1.25 branch to backport
 workflow (#6492)

Signed-off-by: Jonathan Gonzalez V.
---
 .github/workflows/backport.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 1a1f7a13c4..37b2e1ec97 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -73,7 +73,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        branch: [release-1.22, release-1.24]
+        branch: [release-1.22, release-1.24, release-1.25]
     env:
       PR: ${{ github.event.pull_request.number }}
     outputs:

From ebf36a59e54b5634b56ee6058d1b7551eafde083 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 2 Jan 2025 13:58:05 +0100
Subject: [PATCH 276/836] test(e2e): split e2e utils into packages (#5907)

This patch classifies the E2E utility functions into categories, creates a
package per category, and moves each function into the package of the
corresponding category.

While doing that, it removes spurious dependencies on the `env` package.
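
As a minimal sketch of the resulting call style (the "after" lines are
taken verbatim from hunks in this patch; `env` is the suite-wide testing
environment, and the helper packages live under `tests/utils/`):

    // Before: utilities were methods on the shared environment object,
    // which hid their real dependencies:
    //     cluster, err := env.GetCluster(namespace, clusterName)
    // After: package-level helpers receive the context and client
    // explicitly, so each call site declares what it actually uses:
    cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
    namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)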
The following is the list of packages created: * `backups` * `cloudvendors` * `deployments` * `environment` * `envsubst` * `fencing` * `importdb` * `logs` * `nodes` * `openshift` * `operator` * `postgres` * `proxy` * `run` * `secrets` * `services` * `storage` * `timeouts` * `yaml` Closes #6453 Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Marco Nenciarini Signed-off-by: Francesco Canovai Signed-off-by: Armando Ruocco Co-authored-by: Marco Nenciarini Co-authored-by: Francesco Canovai Co-authored-by: Armando Ruocco Co-authored-by: Gabriele Quaresima --- Makefile | 2 +- tests/e2e/affinity_test.go | 6 +- tests/e2e/apparmor_test.go | 5 +- tests/e2e/architecture_test.go | 14 +- tests/e2e/asserts_test.go | 708 +++++++++++------- tests/e2e/backup_restore_azure_test.go | 262 ++++--- tests/e2e/backup_restore_azurite_test.go | 99 ++- tests/e2e/backup_restore_minio_test.go | 207 +++-- tests/e2e/certificates_test.go | 297 +++++--- tests/e2e/cluster_microservice_test.go | 84 ++- tests/e2e/cluster_monolithic_test.go | 45 +- tests/e2e/cluster_setup_test.go | 32 +- tests/e2e/commons_test.go | 18 +- tests/e2e/config_support_test.go | 36 +- tests/e2e/configuration_update_test.go | 167 +++-- tests/e2e/connection_test.go | 12 +- .../declarative_database_management_test.go | 36 +- tests/e2e/declarative_hibernation_test.go | 23 +- tests/e2e/disk_space_test.go | 54 +- tests/e2e/drain_node_test.go | 122 +-- tests/e2e/eviction_test.go | 47 +- tests/e2e/failover_test.go | 87 ++- tests/e2e/fastfailover_test.go | 6 +- tests/e2e/fastswitchover_test.go | 28 +- tests/e2e/fencing_test.go | 87 ++- tests/e2e/hibernation_test.go | 53 +- tests/e2e/initdb_test.go | 30 +- tests/e2e/logs_test.go | 59 +- tests/e2e/managed_roles_test.go | 144 ++-- tests/e2e/managed_services_test.go | 42 +- tests/e2e/metrics_test.go | 74 +- tests/e2e/monitoring_test.go | 29 +- tests/e2e/nodeselector_test.go | 16 +- tests/e2e/openshift_upgrade_test.go | 60 +- tests/e2e/operator_deployment_test.go | 5 +- tests/e2e/operator_ha_test.go | 44 +- tests/e2e/operator_unavailable_test.go | 28 +- tests/e2e/pg_basebackup_test.go | 48 +- tests/e2e/pg_data_corruption_test.go | 30 +- tests/e2e/pg_wal_volume_test.go | 19 +- tests/e2e/pgbouncer_metrics_test.go | 11 +- tests/e2e/pgbouncer_test.go | 24 +- tests/e2e/pgbouncer_types_test.go | 19 +- tests/e2e/probes_test.go | 8 +- tests/e2e/publication_subscription_test.go | 80 +- tests/e2e/pvc_deletion_test.go | 14 +- tests/e2e/replica_mode_cluster_test.go | 191 +++-- tests/e2e/replication_slot_test.go | 53 +- tests/e2e/rolling_update_test.go | 108 +-- tests/e2e/scaling_test.go | 14 +- tests/e2e/storage_expansion_test.go | 19 +- tests/e2e/suite_test.go | 45 +- tests/e2e/switchover_test.go | 9 +- tests/e2e/syncreplicas_test.go | 78 +- tests/e2e/tablespaces_test.go | 422 +++++++---- tests/e2e/tolerations_test.go | 11 +- tests/e2e/update_user_test.go | 54 +- tests/e2e/upgrade_test.go | 145 ++-- tests/e2e/volume_snapshot_test.go | 224 +++--- tests/e2e/wal_restore_parallel_test.go | 59 +- tests/e2e/webhook_test.go | 25 +- tests/levels.go | 6 +- tests/utils/{ => backups}/azurite.go | 90 ++- tests/utils/{ => backups}/backup.go | 311 ++++---- tests/utils/backups/doc.go | 18 + tests/utils/certificates.go | 51 -- .../utils/{ => cloudvendors}/cloud_vendor.go | 3 +- tests/utils/cluster.go | 409 ---------- tests/utils/clusterutils/cluster.go | 227 ++++++ tests/utils/commons.go | 133 ---- tests/utils/{ => deployments}/deployment.go | 21 +- tests/utils/doc.go | 18 + tests/utils/environment/doc.go | 18 + tests/utils/{ => 
environment}/environment.go | 145 +--- .../environment_test.go} | 2 +- .../{job.go => environment/suite_test.go} | 26 +- tests/utils/envsubst/doc.go | 18 + tests/utils/{ => envsubst}/envsubst.go | 2 +- tests/utils/{ => envsubst}/envsubst_test.go | 2 +- .../{lease.go => envsubst/suite_test.go} | 18 +- tests/utils/exec/exec.go | 156 ++++ tests/utils/{fence.go => fencing/fencing.go} | 44 +- tests/utils/hibernate.go | 103 --- tests/utils/{ => importdb}/import_db.go | 30 +- tests/utils/logs/doc.go | 18 + tests/utils/{ => logs}/logs.go | 15 +- tests/utils/{ => logs}/logs_test.go | 2 +- tests/utils/{ => logs}/suite_test.go | 4 +- tests/utils/minio/minio.go | 31 +- tests/utils/namespace.go | 217 ------ tests/utils/namespaces/namespace.go | 377 ++++++++++ tests/utils/nodes/{drain.go => nodes.go} | 65 +- tests/utils/objects/objects.go | 117 +++ tests/utils/{ => openshift}/openshift.go | 88 ++- tests/utils/operator/doc.go | 18 + tests/utils/{ => operator}/operator.go | 156 ++-- tests/utils/{ => operator}/release.go | 3 +- tests/utils/{ => operator}/release_test.go | 8 +- .../{monitoring.go => operator/suite_test.go} | 24 +- tests/utils/{ => operator}/upgrade.go | 38 +- tests/utils/{ => operator}/webhooks.go | 74 +- tests/utils/pod.go | 273 ------- tests/utils/pods/pod.go | 194 +++++ tests/utils/postgres.go | 57 -- tests/utils/postgres/doc.go | 18 + tests/utils/postgres/postgres.go | 133 ++++ .../postgres_test.go} | 2 +- tests/utils/{ => postgres}/psql_connection.go | 64 +- tests/utils/postgres/suite_test.go | 29 + tests/utils/{ => proxy}/proxy.go | 33 +- .../replication_slots.go | 95 ++- tests/utils/{ => run}/run.go | 31 +- tests/utils/{ => secrets}/secrets.go | 40 +- tests/utils/{ => services}/service.go | 37 +- tests/utils/{ => storage}/storage.go | 47 +- tests/utils/time.go | 43 -- tests/utils/{ => timeouts}/timeouts.go | 3 +- tests/utils/utils.go | 170 +++++ tests/utils/version.go | 51 -- tests/utils/webapp.go | 88 --- tests/utils/{ => yaml}/yaml.go | 39 +- 121 files changed, 5313 insertions(+), 4018 deletions(-) rename tests/utils/{ => backups}/azurite.go (88%) rename tests/utils/{ => backups}/backup.go (67%) create mode 100644 tests/utils/backups/doc.go delete mode 100644 tests/utils/certificates.go rename tests/utils/{ => cloudvendors}/cloud_vendor.go (96%) delete mode 100644 tests/utils/cluster.go create mode 100644 tests/utils/clusterutils/cluster.go delete mode 100644 tests/utils/commons.go rename tests/utils/{ => deployments}/deployment.go (72%) create mode 100644 tests/utils/doc.go create mode 100644 tests/utils/environment/doc.go rename tests/utils/{ => environment}/environment.go (50%) rename tests/utils/{namespace_test.go => environment/environment_test.go} (98%) rename tests/utils/{job.go => environment/suite_test.go} (52%) create mode 100644 tests/utils/envsubst/doc.go rename tests/utils/{ => envsubst}/envsubst.go (99%) rename tests/utils/{ => envsubst}/envsubst_test.go (99%) rename tests/utils/{lease.go => envsubst/suite_test.go} (51%) create mode 100644 tests/utils/exec/exec.go rename tests/utils/{fence.go => fencing/fencing.go} (61%) delete mode 100644 tests/utils/hibernate.go rename tests/utils/{ => importdb}/import_db.go (84%) create mode 100644 tests/utils/logs/doc.go rename tests/utils/{ => logs}/logs.go (93%) rename tests/utils/{ => logs}/logs_test.go (99%) rename tests/utils/{ => logs}/suite_test.go (93%) delete mode 100644 tests/utils/namespace.go create mode 100644 tests/utils/namespaces/namespace.go rename tests/utils/nodes/{drain.go => nodes.go} (54%) create mode 100644 
tests/utils/objects/objects.go rename tests/utils/{ => openshift}/openshift.go (70%) create mode 100644 tests/utils/operator/doc.go rename tests/utils/{ => operator}/operator.go (59%) rename tests/utils/{ => operator}/release.go (97%) rename tests/utils/{ => operator}/release_test.go (95%) rename tests/utils/{monitoring.go => operator/suite_test.go} (51%) rename tests/utils/{ => operator}/upgrade.go (70%) rename tests/utils/{ => operator}/webhooks.go (65%) delete mode 100644 tests/utils/pod.go create mode 100644 tests/utils/pods/pod.go delete mode 100644 tests/utils/postgres.go create mode 100644 tests/utils/postgres/doc.go create mode 100644 tests/utils/postgres/postgres.go rename tests/utils/{version_test.go => postgres/postgres_test.go} (98%) rename tests/utils/{ => postgres}/psql_connection.go (79%) create mode 100644 tests/utils/postgres/suite_test.go rename tests/utils/{ => proxy}/proxy.go (65%) rename tests/utils/{ => replicationslot}/replication_slots.go (69%) rename tests/utils/{ => run}/run.go (71%) rename tests/utils/{ => secrets}/secrets.go (77%) rename tests/utils/{ => services}/service.go (72%) rename tests/utils/{ => storage}/storage.go (73%) delete mode 100644 tests/utils/time.go rename tests/utils/{ => timeouts}/timeouts.go (97%) create mode 100644 tests/utils/utils.go delete mode 100644 tests/utils/version.go delete mode 100644 tests/utils/webapp.go rename tests/utils/{ => yaml}/yaml.go (59%) diff --git a/Makefile b/Makefile index a80d95dfb7..0ebe229185 100644 --- a/Makefile +++ b/Makefile @@ -101,7 +101,7 @@ test: generate fmt vet manifests envtest ## Run tests. source <(${ENVTEST} use -p env --bin-dir ${ENVTEST_ASSETS_DIR} ${ENVTEST_K8S_VERSION}) ;\ export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT=60s ;\ export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=60s ;\ - go test -coverpkg=./... -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils + go test -coverpkg=./... -coverprofile=cover.out ./api/... ./cmd/... ./internal/... ./pkg/... ./tests/utils/... test-race: generate fmt vet manifests envtest ## Run tests enabling race detection. mkdir -p ${ENVTEST_ASSETS_DIR} ;\ diff --git a/tests/e2e/affinity_test.go b/tests/e2e/affinity_test.go index 3a74391ac2..69d2ce92a8 100644 --- a/tests/e2e/affinity_test.go +++ b/tests/e2e/affinity_test.go @@ -20,7 +20,7 @@ import ( "fmt" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -44,13 +44,13 @@ var _ = Describe("E2E Affinity", Serial, Label(tests.LabelPodScheduling), func() }) It("can create a cluster and a pooler with required affinity", func() { - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterFile, env) createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerFile, 3) - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) Expect(err).ToNot(HaveOccurred()) AssertClusterIsReady(namespace, clusterName, 300, env) }) diff --git a/tests/e2e/apparmor_test.go b/tests/e2e/apparmor_test.go index 70ecabeeff..c38a37401c 100644 --- a/tests/e2e/apparmor_test.go +++ b/tests/e2e/apparmor_test.go @@ -22,6 +22,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -48,14 +49,14 @@ var _ = Describe("AppArmor support", Serial, Label(tests.LabelNoOpenshift, tests }) It("sets up a cluster enabling AppArmor annotation feature", func() { - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterAppArmorFile, env) By("verifying AppArmor annotations on cluster and pods", func() { // Gathers the pod list using annotations - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) for _, pod := range podList.Items { annotation := pod.ObjectMeta.Annotations[pkgutils.AppArmorAnnotationPrefix+"/"+specs.PostgresContainerName] Expect(annotation).ShouldNot(BeEmpty(), diff --git a/tests/e2e/architecture_test.go b/tests/e2e/architecture_test.go index 34222b58cb..4aeb3992b3 100644 --- a/tests/e2e/architecture_test.go +++ b/tests/e2e/architecture_test.go @@ -19,7 +19,9 @@ package e2e import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -74,21 +76,21 @@ var _ = Describe("Available Architectures", Label(tests.LabelBasic), func() { var err error It("manages each available architecture", func() { - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(clusterManifest) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) // Fetch the operator's available architectures - operatorPod, err := env.GetOperatorPod() + operatorPod, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) - imageArchitectures, err := utils.GetOperatorArchitectures(&operatorPod) + imageArchitectures, err := operator.Architectures(&operatorPod) Expect(err).ToNot(HaveOccurred()) // Fetch the Cluster status - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) archStatus := cluster.Status.AvailableArchitectures diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 1621205e04..97c5758b96 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "database/sql" "errors" "fmt" @@ -34,6 +35,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" "k8s.io/utils/strings/slices" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -43,17 +45,37 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/envsubst" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" + objectsutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/replicationslot" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) -func AssertSwitchover(namespace string, clusterName string, env *testsUtils.TestingEnvironment) { +func AssertSwitchover(namespace string, clusterName string, env *environment.TestingEnvironment) { AssertSwitchoverWithHistory(namespace, clusterName, false, env) } -func AssertSwitchoverOnReplica(namespace string, clusterName string, env *testsUtils.TestingEnvironment) { +func AssertSwitchoverOnReplica(namespace string, clusterName string, env *environment.TestingEnvironment) { AssertSwitchoverWithHistory(namespace, clusterName, true, env) } @@ -65,7 +87,7 @@ func AssertSwitchoverWithHistory( namespace string, clusterName string, isReplica bool, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { var pods []string var oldPrimary, targetPrimary string @@ -77,7 +99,7 @@ func AssertSwitchoverWithHistory( Eventually(func(g Gomega) { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) g.Expect(cluster.Status.CurrentPrimary, err).To( BeEquivalentTo(cluster.Status.TargetPrimary), @@ -87,7 +109,7 @@ func AssertSwitchoverWithHistory( oldPrimary = cluster.Status.CurrentPrimary // Gather pod names - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPodListLength = len(podList.Items) for _, p := range podList.Items { @@ -102,7 +124,7 @@ func AssertSwitchoverWithHistory( By(fmt.Sprintf("setting the TargetPrimary node to trigger a switchover to %s", targetPrimary), func() { err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Status.TargetPrimary = targetPrimary return env.Client.Status().Update(env.Ctx, cluster) @@ -112,10 +134,10 @@ func AssertSwitchoverWithHistory( By("waiting that the TargetPrimary become also CurrentPrimary", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return cluster.Status.CurrentPrimary, err - }, testTimeouts[testsUtils.NewPrimaryAfterSwitchover]).Should(BeEquivalentTo(targetPrimary)) + }, testTimeouts[timeouts.NewPrimaryAfterSwitchover]).Should(BeEquivalentTo(targetPrimary)) }) By("waiting that the old primary become ready", func() { @@ -147,7 +169,7 @@ func AssertSwitchoverWithHistory( // After we finish the switchover, we should wait for the cluster to be ready // otherwise, anyone executing this may not wait and also, the following part of the function // may fail because the switchover hasn't properly finish yet. 
- AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) if !isReplica { By("confirming that the all postgres containers have *.history file after switchover", func() { @@ -155,7 +177,7 @@ func AssertSwitchoverWithHistory( timeout := 120 // Gather pod names - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items), err).To(BeEquivalentTo(oldPodListLength)) for _, p := range podList.Items { pods = append(pods, p.Name) @@ -164,8 +186,9 @@ func AssertSwitchoverWithHistory( Eventually(func() error { count := 0 for _, pod := range pods { - out, _, err := env.ExecCommandInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: pod, }, nil, "sh", "-c", "ls $PGDATA/pg_wal/*.history") @@ -197,7 +220,7 @@ func AssertCreateCluster( namespace string, clusterName string, sampleFile string, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { By(fmt.Sprintf("having a %v namespace", namespace), func() { // Creating a namespace should be quick @@ -209,19 +232,19 @@ func AssertCreateCluster( namespaceResource := &corev1.Namespace{} err := env.Client.Get(env.Ctx, namespacedName, namespaceResource) return namespaceResource.GetName(), err - }, testTimeouts[testsUtils.NamespaceCreation]).Should(BeEquivalentTo(namespace)) + }, testTimeouts[timeouts.NamespaceCreation]).Should(BeEquivalentTo(namespace)) }) By(fmt.Sprintf("creating a Cluster in the %v namespace", namespace), func() { CreateResourceFromFile(namespace, sampleFile) }) // Setting up a cluster with three pods is slow, usually 200-600s - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) } // AssertClusterIsReady checks the cluster has as many pods as in spec, that // none of them are going to be deleted, and that the status is Healthy -func AssertClusterIsReady(namespace string, clusterName string, timeout int, env *testsUtils.TestingEnvironment) { +func AssertClusterIsReady(namespace string, clusterName string, timeout int, env *environment.TestingEnvironment) { By(fmt.Sprintf("having a Cluster %s with each instance in status ready", clusterName), func() { // Eventually the number of ready instances should be equal to the // amount of instances defined in the cluster and @@ -230,13 +253,13 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env Eventually(func(g Gomega) { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) start := time.Now() Eventually(func() (string, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } @@ -246,7 +269,7 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env return fmt.Sprintf("Pod '%s' is waiting for deletion", pod.Name), nil } } - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, 
namespace, clusterName) return cluster.Status.Phase, err } return fmt.Sprintf("Ready pod is not as expected. Spec Instances: %d, ready pods: %d \n", @@ -254,19 +277,19 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env utils.CountReadyPods(podList.Items)), nil }, timeout, 2).Should(BeEquivalentTo(apiv1.PhaseHealthy), func() string { - cluster := testsUtils.PrintClusterResources(namespace, clusterName, env) - nodes, _ := env.DescribeKubernetesNodes() + cluster := testsUtils.PrintClusterResources(env.Ctx, env.Client, namespace, clusterName) + kubeNodes, _ := nodes.DescribeKubernetesNodes(env.Ctx, env.Client) return fmt.Sprintf("CLUSTER STATE\n%s\n\nK8S NODES\n%s", - cluster, nodes) + cluster, kubeNodes) }, ) if cluster.Spec.Instances != 1 { Eventually(func(g Gomega) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred(), "cannot get cluster pod list") - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred(), "cannot find cluster primary pod") replicaNamesList := make([]string, 0, len(podList.Items)-1) @@ -276,8 +299,9 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env } } replicaNamesString := strings.Join(replicaNamesList, ",") - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, }, @@ -297,7 +321,7 @@ func AssertClusterDefault( namespace string, clusterName string, isExpectedToDefault bool, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { By("having a Cluster object populated with default values", func() { // Eventually the number of ready instances should be equal to the @@ -306,7 +330,7 @@ func AssertClusterDefault( var cluster *apiv1.Cluster Eventually(func(g Gomega) { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) @@ -320,20 +344,20 @@ func AssertClusterDefault( }) } -func AssertWebhookEnabled(env *testsUtils.TestingEnvironment, mutating, validating string) { +func AssertWebhookEnabled(env *environment.TestingEnvironment, mutating, validating string) { By("re-setting namespace selector for all admission controllers", func() { // Setting the namespace selector in MutatingWebhook and ValidatingWebhook // to nil will go back to the default behaviour - mWhc, position, err := testsUtils.GetCNPGsMutatingWebhookByName(env, mutating) + mWhc, position, err := operator.GetMutatingWebhookByName(env.Ctx, env.Client, mutating) Expect(err).ToNot(HaveOccurred()) mWhc.Webhooks[position].NamespaceSelector = nil - err = testsUtils.UpdateCNPGsMutatingWebhookConf(env, mWhc) + err = operator.UpdateMutatingWebhookConf(env.Ctx, env.Interface, mWhc) Expect(err).ToNot(HaveOccurred()) - vWhc, position, err := testsUtils.GetCNPGsValidatingWebhookByName(env, validating) + vWhc, position, err := operator.GetValidatingWebhookByName(env.Ctx, env.Client, validating) Expect(err).ToNot(HaveOccurred()) vWhc.Webhooks[position].NamespaceSelector = nil - err = 
testsUtils.UpdateCNPGsValidatingWebhookConf(env, vWhc) + err = operator.UpdateValidatingWebhookConf(env.Ctx, env.Interface, vWhc) Expect(err).ToNot(HaveOccurred()) }) } @@ -346,7 +370,7 @@ func AssertUpdateSecret( namespace string, clusterName string, timeout int, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { var secret corev1.Secret @@ -367,7 +391,7 @@ func AssertUpdateSecret( // Wait for the cluster to pick up the updated secrets version first Eventually(func() string { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { GinkgoWriter.Printf("Error reports while retrieving cluster %v\n", err.Error()) return "" @@ -405,19 +429,14 @@ func AssertConnection( dbname string, user string, password string, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { - By(fmt.Sprintf("checking that %v service exists", service), func() { - Eventually(func(g Gomega) { - _, err := testsUtils.GetService(namespace, service, env) - g.Expect(err).ToNot(HaveOccurred()) - }, RetryTimeout).Should(Succeed()) - }) - By(fmt.Sprintf("connecting to the %v service as %v", service, user), func() { Eventually(func(g Gomega) { - forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service, - dbname, user, password) + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, service, dbname, user, password, + ) defer func() { _ = conn.Close() forwardConn.Close() @@ -434,16 +453,20 @@ func AssertConnection( } // AssertOperatorIsReady verifies that the operator is ready -func AssertOperatorIsReady() { +func AssertOperatorIsReady( + ctx context.Context, + crudClient ctrlclient.Client, + kubeInterface kubernetes.Interface, +) { Eventually(func() (bool, error) { - ready, err := env.IsOperatorReady() + ready, err := operator.IsReady(ctx, crudClient, kubeInterface) if ready && err == nil { return true, nil } // Waiting a bit to avoid overloading the API server time.Sleep(1 * time.Second) return ready, err - }, testTimeouts[testsUtils.OperatorIsReady]).Should(BeTrue(), "Operator pod is not ready") + }, testTimeouts[timeouts.OperatorIsReady]).Should(BeTrue(), "Operator pod is not ready") } type TableLocator struct { @@ -455,18 +478,21 @@ type TableLocator struct { } // AssertCreateTestData create test data on a given TableLocator -func AssertCreateTestData(env *testsUtils.TestingEnvironment, tl TableLocator) { +func AssertCreateTestData(env *environment.TestingEnvironment, tl TableLocator) { if tl.DatabaseName == "" { - tl.DatabaseName = testsUtils.AppDBName + tl.DatabaseName = postgres.AppDBName } if tl.Tablespace == "" { - tl.Tablespace = testsUtils.TablespaceDefaultName + tl.Tablespace = postgres.TablespaceDefaultName } By(fmt.Sprintf("creating test data in table %v (cluster %v, database %v, tablespace %v)", tl.TableName, tl.ClusterName, tl.DatabaseName, tl.Tablespace), func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, tl.Namespace, tl.ClusterName, tl.DatabaseName, @@ -492,7 +518,9 @@ func AssertCreateTestDataLargeObject(namespace, clusterName string, oid int, dat query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS image (name text,raster oid); "+ "INSERT INTO image (name, raster) VALUES ('beautiful image', lo_from_bytea(%d, '%s'));", 
oid, data) - _, err := testsUtils.RunExecOverForward(env, namespace, clusterName, testsUtils.AppDBName, + _, err := postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) @@ -506,14 +534,15 @@ func insertRecordIntoTable(tableName string, value int, conn *sql.DB) { func QueryMatchExpectationPredicate( pod *corev1.Pod, - dbname testsUtils.DatabaseName, + dbname exec.DatabaseName, query string, expectedOutput string, ) func(g Gomega) { return func(g Gomega) { // executor - stdout, stderr, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{Namespace: pod.Namespace, PodName: pod.Name}, + stdout, stderr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: pod.Namespace, PodName: pod.Name}, dbname, query, ) @@ -536,14 +565,17 @@ func databaseExistsQuery(dbName string) string { // AssertDataExpectedCount verifies that an expected amount of rows exists on the table func AssertDataExpectedCount( - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, tl TableLocator, expectedValue int, ) { By(fmt.Sprintf("verifying test data in table %v (cluster %v, database %v, tablespace %v)", tl.TableName, tl.ClusterName, tl.DatabaseName, tl.Tablespace), func() { - row, err := testsUtils.RunQueryRowOverForward( - env, + row, err := postgres.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, tl.Namespace, tl.ClusterName, tl.DatabaseName, @@ -565,22 +597,23 @@ func AssertLargeObjectValue(namespace, clusterName string, oid int, data string) query := fmt.Sprintf("SELECT encode(lo_get(%v), 'escape');", oid) Eventually(func() (string, error) { // We keep getting the pod, since there could be a new pod with the same name - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.AppDBName, + postgres.AppDBName, query) if err != nil { return "", err } return strings.Trim(stdout, "\n"), nil - }, testTimeouts[testsUtils.LargeObject]).Should(BeEquivalentTo(data)) + }, testTimeouts[timeouts.LargeObject]).Should(BeEquivalentTo(data)) }) } @@ -588,18 +621,19 @@ func AssertLargeObjectValue(namespace, clusterName string, oid int, data string) func AssertClusterStandbysAreStreaming(namespace string, clusterName string, timeout int32) { query := "SELECT count(*) FROM pg_stat_wal_receiver" Eventually(func() error { - standbyPods, err := env.GetClusterReplicas(namespace, clusterName) + standbyPods, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName) if err != nil { return err } for _, pod := range standbyPods.Items { - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) if err != nil { return err @@ -643,12 +677,13 @@ func AssertStandbysFollowPromotion(namespace string, 
clusterName string, timeout if err := env.Client.Get(env.Ctx, podNamespacedName, pod); err != nil { return "", err } - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.AppDBName, + postgres.AppDBName, query) return strings.TrimSpace(out), err }, timeout).Should(BeEquivalentTo("t"), @@ -657,7 +692,7 @@ func AssertStandbysFollowPromotion(namespace string, clusterName string, timeout }) By("having all the instances ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By(fmt.Sprintf("restoring full cluster functionality within %v seconds", timeout), func() { @@ -697,11 +732,12 @@ func AssertWritesResumedBeforeTimeout(namespace string, clusterName string, time pod := &corev1.Pod{} err := env.Client.Get(env.Ctx, namespacedName, pod) Expect(err).ToNot(HaveOccurred()) - out, _, err := env.EventuallyExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, - }, testsUtils.AppDBName, + }, postgres.AppDBName, query, RetryTimeout, PollingTime, @@ -727,7 +763,7 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { var cluster *apiv1.Cluster Eventually(func() (string, error) { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return cluster.Status.TargetPrimary, err }, timeout).ShouldNot(Or(BeEquivalentTo(oldPrimary), BeEquivalentTo(apiv1.PendingFailoverMarker))) @@ -755,11 +791,12 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { Expect(err).ToNot(HaveOccurred()) // Expect write operation to succeed query := "CREATE TABLE IF NOT EXISTS assert_new_primary(var1 text);" - _, _, err = env.EventuallyExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, - }, testsUtils.AppDBName, + }, postgres.AppDBName, query, RetryTimeout, PollingTime, @@ -772,7 +809,7 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { func CheckPointAndSwitchWalOnPrimary(namespace, clusterName string) string { var latestWAL string By("trigger checkpoint and switch wal on primary", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL = switchWalAndGetLatestArchive(namespace, primary) @@ -785,7 +822,7 @@ func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) { var latestWALPath string // Create a WAL on the primary and check if it arrives at minio, within a short time By("archiving WALs and verifying they exist", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL := 
switchWalAndGetLatestArchive(namespace, primary) @@ -796,13 +833,13 @@ func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) { Eventually(func() (int, error) { // WALs are compressed with gzip in the fixture return minio.CountFiles(minioEnv, latestWALPath) - }, testTimeouts[testsUtils.WalsInMinio]).Should(BeEquivalentTo(1)) + }, testTimeouts[timeouts.WalsInMinio]).Should(BeEquivalentTo(1)) }) } func AssertScheduledBackupsAreScheduled(namespace string, backupYAMLPath string, timeout int) { CreateResourceFromFile(namespace, backupYAMLPath) - scheduledBackupName, err := env.GetResourceNameFromYAML(backupYAMLPath) + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupYAMLPath) Expect(err).NotTo(HaveOccurred()) // We expect the scheduled backup to be scheduled before a @@ -872,15 +909,16 @@ func getScheduledBackupCompleteBackupsCount(namespace string, scheduledBackupNam func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) { By(fmt.Sprintf("verifying that postgres recovery mode is %v", expectedValue), func() { Eventually(func() (string, error) { - stdOut, stdErr, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdOut, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, "select pg_is_in_recovery();") if err != nil { - GinkgoWriter.Printf("stdout: %v\ntderr: %v\n", stdOut, stdErr) + GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", stdOut, stdErr) } return strings.Trim(stdOut, "\n"), err }, 300, 10).Should(BeEquivalentTo(boolPGOutput(expectedValue))) @@ -917,12 +955,13 @@ func AssertReplicaModeCluster( AssertCreateTestData(env, tableLocator) By("creating replica cluster", func() { - replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample) + replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSample) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, replicaClusterName, replicaClusterSample, env) // Get primary from replica cluster Eventually(func() error { - primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName) + primaryReplicaCluster, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + replicaClusterName) return err }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(primaryReplicaCluster, true) @@ -930,20 +969,24 @@ func AssertReplicaModeCluster( By("checking data have been copied correctly in replica cluster", func() { Eventually(func() (string, error) { - stdOut, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdOut, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryReplicaCluster.Namespace, PodName: primaryReplicaCluster.Name, }, - testsUtils.DatabaseName(srcClusterDBName), + exec.DatabaseName(srcClusterDBName), checkQuery) return strings.Trim(stdOut, "\n"), err }, 180, 10).Should(BeEquivalentTo("2")) }) By("writing some new data to the source cluster", func() { - forwardSource, connSource, err := testsUtils.ForwardPSQLConnection( - env, + forwardSource, connSource, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, srcClusterName, srcClusterDBName, @@ -959,12 +1002,13 @@ func AssertReplicaModeCluster( By("checking new data have been copied correctly in replica cluster", func() { 
Eventually(func() (string, error) { - stdOut, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdOut, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryReplicaCluster.Namespace, PodName: primaryReplicaCluster.Name, }, - testsUtils.DatabaseName(srcClusterDBName), + exec.DatabaseName(srcClusterDBName), checkQuery) return strings.Trim(stdOut, "\n"), err }, 180, 15).Should(BeEquivalentTo("3")) @@ -974,9 +1018,9 @@ func AssertReplicaModeCluster( // verify the replica database created followed the source database, rather than // default to the "app" db and user By("checking that in replica cluster there is no database app and user app", func() { - Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName, databaseExistsQuery("app"), "f"), 30).Should(Succeed()) - Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName, roleExistsQuery("app"), "f"), 30).Should(Succeed()) }) } @@ -1001,7 +1045,9 @@ func AssertDetachReplicaModeCluster( var referenceTime time.Time By("taking the reference time before the detaching", func() { Eventually(func(g Gomega) { - referenceCondition, err := testsUtils.GetConditionsInClusterStatus(namespace, replicaClusterName, env, + referenceCondition, err := backups.GetConditionsInClusterStatus( + env.Ctx, env.Client, + namespace, replicaClusterName, apiv1.ConditionClusterReady) g.Expect(err).ToNot(HaveOccurred()) g.Expect(referenceCondition.Status).To(BeEquivalentTo(corev1.ConditionTrue)) @@ -1012,7 +1058,7 @@ func AssertDetachReplicaModeCluster( By("disabling the replica mode", func() { Eventually(func(g Gomega) { - _, _, err := testsUtils.RunUnchecked(fmt.Sprintf( + _, _, err := run.Unchecked(fmt.Sprintf( "kubectl patch cluster %v -n %v -p '{\"spec\":{\"replica\":{\"enabled\":false}}}'"+ " --type='merge'", replicaClusterName, namespace)) @@ -1022,16 +1068,18 @@ func AssertDetachReplicaModeCluster( By("ensuring the replica cluster got promoted and restarted", func() { Eventually(func(g Gomega) { - cluster, err := env.GetCluster(namespace, replicaClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, replicaClusterName) g.Expect(err).ToNot(HaveOccurred()) - condition, err := testsUtils.GetConditionsInClusterStatus(namespace, cluster.Name, env, + condition, err := backups.GetConditionsInClusterStatus( + env.Ctx, env.Client, + namespace, cluster.Name, apiv1.ConditionClusterReady) g.Expect(err).ToNot(HaveOccurred()) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(BeEquivalentTo(corev1.ConditionTrue)) g.Expect(condition.LastTransitionTime.Time).To(BeTemporally(">", referenceTime)) }).WithTimeout(60 * time.Second).Should(Succeed()) - AssertClusterIsReady(namespace, replicaClusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, replicaClusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("verifying write operation on the replica cluster primary pod", func() { @@ -1041,13 +1089,15 @@ func AssertDetachReplicaModeCluster( var err error // Get primary from replica cluster - primaryReplicaCluster, err = env.GetClusterPrimary(namespace, replicaClusterName) + primaryReplicaCluster, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + 
replicaClusterName) g.Expect(err).ToNot(HaveOccurred()) - _, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryReplicaCluster.Namespace, PodName: primaryReplicaCluster.Name, - }, testsUtils.DatabaseName(srcDatabaseName), + }, exec.DatabaseName(srcDatabaseName), query, ) g.Expect(err).ToNot(HaveOccurred()) @@ -1057,9 +1107,9 @@ func AssertDetachReplicaModeCluster( By("verifying the replica database doesn't exist in the replica cluster", func() { // Application database configuration is skipped for replica clusters, // so we expect these to not be present - Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName, databaseExistsQuery(replicaDatabaseName), "f"), 30).Should(Succeed()) - Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryReplicaCluster, postgres.PostgresDBName, roleExistsQuery(replicaUserName), "f"), 30).Should(Succeed()) }) @@ -1074,11 +1124,12 @@ func AssertDetachReplicaModeCluster( }) By("verifying that replica cluster was not modified", func() { - outTables, stdErr, err := env.EventuallyExecQueryInInstancePod( - testsUtils.PodLocator{ + outTables, stdErr, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryReplicaCluster.Namespace, PodName: primaryReplicaCluster.Name, - }, testsUtils.DatabaseName(srcDatabaseName), + }, exec.DatabaseName(srcDatabaseName), "\\dt", RetryTimeout, PollingTime, @@ -1091,10 +1142,14 @@ func AssertDetachReplicaModeCluster( }) } -func AssertWritesToReplicaFails(namespace, service, appDBName, appDBUser, appDBPass string) { +func AssertWritesToReplicaFails( + namespace, service, appDBName, appDBUser, appDBPass string, +) { By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), func() { - forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service, - appDBName, appDBUser, appDBPass) + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, service, appDBName, appDBUser, appDBPass, + ) defer func() { _ = conn.Close() forwardConn.Close() @@ -1118,8 +1173,10 @@ func AssertWritesToReplicaFails(namespace, service, appDBName, appDBUser, appDBP func AssertWritesToPrimarySucceeds(namespace, service, appDBName, appDBUser, appDBPass string) { By(fmt.Sprintf("Verifying %v service correctly manages writes", service), func() { - forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, service, - appDBName, appDBUser, appDBPass) + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, service, appDBName, appDBUser, appDBPass, + ) defer func() { _ = conn.Close() forwardConn.Close() @@ -1170,7 +1227,7 @@ func AssertFastFailOver( }) By("having a Cluster with three instances ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) // Node 1 should be the primary, so the -rw service should @@ -1208,7 +1265,9 @@ func AssertFastFailOver( ", PRIMARY KEY (id)" + ")" - _, err 
= testsUtils.RunExecOverForward(env, namespace, clusterName, testsUtils.AppDBName, + _, err = postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) @@ -1219,11 +1278,11 @@ func AssertFastFailOver( // on the postgres primary. We make sure that the first // records appear on the database before moving to the next // step. - _, _, err = testsUtils.Run("kubectl create -n " + namespace + + _, _, err = run.Run("kubectl create -n " + namespace + " -f " + webTestFile) Expect(err).ToNot(HaveOccurred()) - _, _, err = testsUtils.Run("kubectl create -n " + namespace + + _, _, err = run.Run("kubectl create -n " + namespace + " -f " + webTestJob) Expect(err).ToNot(HaveOccurred()) @@ -1239,12 +1298,13 @@ func AssertFastFailOver( if err = env.Client.Get(env.Ctx, primaryPodNamespacedName, primaryPod); err != nil { return "", err } - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.AppDBName, + postgres.AppDBName, query) return strings.TrimSpace(out), err }, RetryTimeout).Should(BeEquivalentTo("t")) @@ -1256,7 +1316,7 @@ func AssertFastFailOver( GracePeriodSeconds: &quickDeletionPeriod, } lm := clusterName + "-1" - err = env.DeletePod(namespace, lm, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, lm, quickDelete) Expect(err).ToNot(HaveOccurred()) }) @@ -1268,7 +1328,7 @@ func AssertFastFailOver( func AssertCustomMetricsResourcesExist(namespace, sampleFile string, configMapsCount, secretsCount int) { By("verifying the custom metrics ConfigMaps and Secrets exist", func() { // Create the ConfigMaps and a Secret - _, _, err := testsUtils.Run("kubectl apply -n " + namespace + " -f " + sampleFile) + _, _, err := run.Run("kubectl apply -n " + namespace + " -f " + sampleFile) Expect(err).ToNot(HaveOccurred()) // Check configmaps exist @@ -1295,7 +1355,7 @@ func AssertCustomMetricsResourcesExist(namespace, sampleFile string, configMapsC } func AssertCreationOfTestDataForTargetDB( - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, namespace, clusterName, targetDBName, @@ -1303,26 +1363,33 @@ func AssertCreationOfTestDataForTargetDB( ) { By(fmt.Sprintf("creating target database '%v' and table '%v'", targetDBName, tableName), func() { // We need to gather the cluster primary to create the database via superuser - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - appUser, _, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + appUser, _, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.ApplicationUserSecretSuffix, + ) Expect(err).ToNot(HaveOccurred()) // Create database createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER %v", targetDBName, appUser) - _, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: currentPrimary.Namespace, PodName: currentPrimary.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, createDBQuery) 
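// The refactor above follows one mechanical rule: every former method of
// TestingEnvironment becomes a free function that takes its dependencies
// explicitly, always in the order context, controller-runtime client, client-go
// interface, REST config. The before/after shape on a sketched call (the helper
// names are the ones introduced by this patch; the query is hypothetical and
// not part of the fixture):
//
//	// before: _, _, err = env.ExecQueryInInstancePod(locator, dbName, query)
//	// after:
//	_, _, err = exec.QueryInInstancePod(
//		env.Ctx, env.Client, env.Interface, env.RestClientConfig,
//		exec.PodLocator{
//			Namespace: currentPrimary.Namespace,
//			PodName:   currentPrimary.Name,
//		},
//		postgres.PostgresDBName,
//		"SELECT current_database();")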
Expect(err).ToNot(HaveOccurred()) // Open a connection to the newly created database - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, targetDBName, @@ -1370,7 +1437,7 @@ func AssertApplicationDatabaseConnection( Expect(err).ToNot(HaveOccurred()) appPassword = string(appSecret.Data["password"]) } - rwService := testsUtils.GetReadWriteServiceName(clusterName) + rwService := services.GetReadWriteServiceName(clusterName) AssertConnection(namespace, rwService, appDB, appUser, appPassword, env) }) @@ -1378,11 +1445,11 @@ func AssertApplicationDatabaseConnection( func AssertMetricsData(namespace, targetOne, targetTwo, targetSecret string, cluster *apiv1.Cluster) { By("collect and verify metric being exposed with target databases", func() { - podList, err := env.GetClusterPodList(namespace, cluster.Name) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, cluster.Name) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podName := pod.GetName() - out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred()) Expect(strings.Contains(out, fmt.Sprintf(`cnpg_some_query_rows{datname="%v"} 0`, targetOne))).Should(BeTrue(), @@ -1407,7 +1474,10 @@ func AssertMetricsData(namespace, targetOne, targetTwo, targetSecret string, clu func CreateAndAssertServerCertificatesSecrets( namespace, clusterName, caSecName, tlsSecName string, includeCAPrivateKey bool, ) { - cluster, caPair, err := testsUtils.CreateSecretCA(namespace, clusterName, caSecName, includeCAPrivateKey, env) + cluster, caPair, err := secrets.CreateSecretCA( + env.Ctx, env.Client, + namespace, clusterName, caSecName, includeCAPrivateKey, + ) Expect(err).ToNot(HaveOccurred()) serverPair, err := caPair.CreateAndSignPair(cluster.GetServiceReadWriteName(), certs.CertTypeServer, @@ -1422,7 +1492,9 @@ func CreateAndAssertServerCertificatesSecrets( func CreateAndAssertClientCertificatesSecrets( namespace, clusterName, caSecName, tlsSecName, userSecName string, includeCAPrivateKey bool, ) { - _, caPair, err := testsUtils.CreateSecretCA(namespace, clusterName, caSecName, includeCAPrivateKey, env) + _, caPair, err := secrets.CreateSecretCA( + env.Ctx, env.Client, + namespace, clusterName, caSecName, includeCAPrivateKey) Expect(err).ToNot(HaveOccurred()) // Sign tls certificates for streaming_replica user @@ -1452,7 +1524,9 @@ func AssertSSLVerifyFullDBConnectionFromAppPod(namespace string, clusterName str "sslrootcert=/etc/secrets/ca/ca.crt "+ "dbname=app user=app sslmode=verify-full", clusterName, namespace) timeout := time.Second * 10 - stdout, stderr, err := env.ExecCommand(env.Ctx, appPod, appPod.Spec.Containers[0].Name, &timeout, + stdout, stderr, err := exec.Command( + env.Ctx, env.Interface, env.RestClientConfig, + appPod, appPod.Spec.Containers[0].Name, &timeout, "psql", dsn, "-tAc", "SELECT 1") return stdout, stderr, err }, 360).Should(BeEquivalentTo("1\n")) @@ -1461,34 +1535,38 @@ func AssertSSLVerifyFullDBConnectionFromAppPod(namespace string, clusterName str func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, tableName string) { By("Async Replication into external cluster", func() { - restoredClusterName, err := 
env.GetResourceNameFromYAML(restoreClusterFile) + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile) Expect(err).ToNot(HaveOccurred()) // Add additional data to the source cluster - sourceClusterName, err := env.GetResourceNameFromYAML(sourceClusterFile) + sourceClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterFile) Expect(err).ToNot(HaveOccurred()) CreateResourceFromFile(namespace, restoreClusterFile) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Test data should be present on restored primary - restoredPrimary, err := env.GetClusterPrimary(namespace, restoredClusterName) + restoredPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) // We need the credentials from the source cluster because the replica cluster // doesn't create the credentials on its own namespace - appUser, appUserPass, err := testsUtils.GetCredentials( + appUser, appUserPass, err := secrets.GetCredentials( + env.Ctx, + env.Client, sourceClusterName, namespace, apiv1.ApplicationUserSecretSuffix, - env, ) Expect(err).ToNot(HaveOccurred()) - forwardRestored, connRestored, err := testsUtils.ForwardPSQLConnectionWithCreds( - env, + forwardRestored, connRestored, err := postgres.ForwardPSQLConnectionWithCreds( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, restoredClusterName, - testsUtils.AppDBName, + postgres.AppDBName, appUser, appUserPass, ) @@ -1504,11 +1582,14 @@ func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, Expect(err).ToNot(HaveOccurred()) Expect(countString).To(BeEquivalentTo("2")) - forwardSource, connSource, err := testsUtils.ForwardPSQLConnection( - env, + forwardSource, connSource, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, sourceClusterName, - testsUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -1523,46 +1604,52 @@ func AssertClusterAsyncReplica(namespace, sourceClusterFile, restoreClusterFile, tableLocator := TableLocator{ Namespace: namespace, ClusterName: sourceClusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 3) - cluster, err := env.GetCluster(namespace, restoredClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) expectedReplicas := cluster.Spec.Instances - 1 // Cascading replicas should be attached to primary replica - connectedReplicas, err := testsUtils.CountReplicas(env, restoredPrimary) + connectedReplicas, err := postgres.CountReplicas( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + restoredPrimary, RetryTimeout, + ) Expect(connectedReplicas, err).To(BeEquivalentTo(expectedReplicas)) }) } func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableName string) { - restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile) Expect(err).ToNot(HaveOccurred()) By("Restoring a backup in a new cluster", func() { 
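// CountReplicas above verifies that the cascading standbys re-attached to the
// restored primary. Functionally this amounts to counting the walsenders in
// pg_stat_replication on that pod; a sketch of an equivalent direct check
// (standard catalog query; restoredPrimary and expectedReplicas are the
// variables from the function above):
//
//	out, _, err := exec.QueryInInstancePod(
//		env.Ctx, env.Client, env.Interface, env.RestClientConfig,
//		exec.PodLocator{
//			Namespace: restoredPrimary.Namespace,
//			PodName:   restoredPrimary.Name,
//		},
//		postgres.PostgresDBName,
//		"SELECT count(*) FROM pg_stat_replication;")
//	Expect(err).ToNot(HaveOccurred())
//	Expect(strings.TrimSpace(out)).To(BeEquivalentTo(fmt.Sprintf("%d", expectedReplicas)))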
CreateResourceFromFile(namespace, restoreClusterFile) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Test data should be present on restored primary tableLocator := TableLocator{ Namespace: namespace, ClusterName: restoredClusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) }) By("Ensuring the restored cluster is on timeline 2", func() { - row, err := testsUtils.RunQueryRowOverForward( - env, + row, err := postgres.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, restoredClusterName, - testsUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, "SELECT substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", ) @@ -1578,8 +1665,10 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN AssertClusterStandbysAreStreaming(namespace, restoredClusterName, 140) // Gather Credentials - appUser, appUserPass, err := testsUtils.GetCredentials(restoredClusterName, namespace, - apiv1.ApplicationUserSecretSuffix, env) + appUser, appUserPass, err := secrets.GetCredentials( + env.Ctx, env.Client, + restoredClusterName, namespace, + apiv1.ApplicationUserSecretSuffix) Expect(err).ToNot(HaveOccurred()) secretName := restoredClusterName + apiv1.ApplicationUserSecretSuffix @@ -1588,9 +1677,10 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN namespace, restoredClusterName, appUser, - testsUtils.AppDBName, + postgres.AppDBName, appUserPass, - secretName) + secretName, + ) }) By("update user application password for restored cluster and verify connectivity", func() { @@ -1601,39 +1691,41 @@ func AssertClusterRestoreWithApplicationDB(namespace, restoreClusterFile, tableN namespace, restoredClusterName, appUser, - testsUtils.AppDBName, + postgres.AppDBName, newPassword, - secretName) + secretName, + ) }) } func AssertClusterRestore(namespace, restoreClusterFile, tableName string) { - restoredClusterName, err := env.GetResourceNameFromYAML(restoreClusterFile) + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, restoreClusterFile) Expect(err).ToNot(HaveOccurred()) By("Restoring a backup in a new cluster", func() { CreateResourceFromFile(namespace, restoreClusterFile) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Test data should be present on restored primary primary := restoredClusterName + "-1" tableLocator := TableLocator{ Namespace: namespace, ClusterName: restoredClusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) // Restored primary should be on timeline 2 - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary, }, - testsUtils.AppDBName, + postgres.AppDBName, "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)") 
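// The substring trick above decodes the WAL segment file name: it is 24
// hexadecimal digits laid out as TTTTTTTTXXXXXXXXYYYYYYYY (timeline, log id,
// segment number), so the first 8 digits are the timeline ID, and "00000002"
// means the restore promoted the cluster onto timeline 2. The same check in
// plain SQL:
//
//	SELECT substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8);
//	-- e.g. 00000002, i.e. timeline 2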
Expect(strings.Trim(out, "\n"), err).To(Equal("00000002")) @@ -1648,12 +1740,12 @@ func AssertClusterImport(namespace, clusterWithExternalClusterName, clusterName, var cluster *apiv1.Cluster By("Importing Database in a new cluster", func() { var err error - cluster, err = testsUtils.ImportDatabaseMicroservice(namespace, clusterName, - clusterWithExternalClusterName, "", databaseName, env) + cluster, err = importdb.ImportDatabaseMicroservice(env.Ctx, env.Client, namespace, clusterName, + clusterWithExternalClusterName, "", databaseName) Expect(err).ToNot(HaveOccurred()) // We give more time than the usual 600s, since the recovery is slower AssertClusterIsReady(namespace, clusterWithExternalClusterName, - testTimeouts[testsUtils.ClusterIsReadySlow], env) + testTimeouts[timeouts.ClusterIsReadySlow], env) // Restored standby should be attached to restored primary AssertClusterStandbysAreStreaming(namespace, clusterWithExternalClusterName, 140) }) @@ -1698,7 +1790,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) { Eventually(func() error { cmd := fmt.Sprintf("kubectl patch ScheduledBackup %v -n %v -p '{\"spec\":{\"suspend\":true}}' "+ "--type='merge'", scheduledBackupName, namespace) - _, _, err = testsUtils.RunUnchecked(cmd) + _, _, err = run.Unchecked(cmd) if err != nil { return err } @@ -1746,7 +1838,7 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) { Eventually(func() error { cmd := fmt.Sprintf("kubectl patch ScheduledBackup %v -n %v -p '{\"spec\":{\"suspend\":false}}' "+ "--type='merge'", scheduledBackupName, namespace) - _, _, err = testsUtils.RunUnchecked(cmd) + _, _, err = run.Unchecked(cmd) if err != nil { return err } @@ -1775,20 +1867,23 @@ func AssertSuspendScheduleBackups(namespace, scheduledBackupName string) { func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, tableName, lsn string) { // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Gather the recovered cluster primary - primaryInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) secretName := clusterName + apiv1.ApplicationUserSecretSuffix By("Ensuring the restored cluster is on timeline 3", func() { // Restored primary should be on timeline 3 - row, err := testsUtils.RunQueryRowOverForward( - env, + row, err := postgres.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testsUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", ) @@ -1800,7 +1895,9 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta Expect(currentWalLsn).To(Equal(lsn)) // Restored standby should be attached to restored primary - Expect(testsUtils.CountReplicas(env, primaryInfo)).To(BeEquivalentTo(2)) + Expect(postgres.CountReplicas( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + primaryInfo, RetryTimeout)).To(BeEquivalentTo(2)) }) By(fmt.Sprintf("after the restore, the 3rd entry should not exist in table '%v'", tableName), func() { @@ -1808,15 +1905,16 @@ func AssertClusterWasRestoredWithPITRAndApplicationDB(namespace, clusterName, ta tableLocator
:= TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) }) // Gather credentials - appUser, appUserPass, err := testsUtils.GetCredentials(clusterName, namespace, apiv1.ApplicationUserSecretSuffix, - env) + appUser, appUserPass, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.ApplicationUserSecretSuffix) Expect(err).ToNot(HaveOccurred()) By("checking the restored cluster with auto generated app password connectable", func() { AssertApplicationDatabaseConnection( namespace, clusterName, appUser, - testsUtils.AppDBName, + postgres.AppDBName, appUserPass, - secretName) + secretName, + ) }) By("update user application password for restored cluster and verify connectivity", func() { const newPassword = "eeh2Zahohx" //nolint:gosec AssertUpdateSecret("password", newPassword, secretName, namespace, clusterName, 30, env) AssertApplicationDatabaseConnection( namespace, clusterName, appUser, - testsUtils.AppDBName, + postgres.AppDBName, newPassword, - secretName) + secretName, + ) }) } func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn string) { By("restoring a backup cluster with PITR in a new cluster", func() { // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadySlow], env) - primaryInfo, err := env.GetClusterPrimary(namespace, clusterName) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) + primaryInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Restored primary should be on timeline 3 - row, err := testsUtils.RunQueryRowOverForward( - env, + row, err := postgres.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testsUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)", ) @@ -1866,7 +1969,9 @@ func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn str Expect(currentWalLsn).To(Equal(lsn)) // Restored standby should be attached to restored primary - Expect(testsUtils.CountReplicas(env, primaryInfo)).To(BeEquivalentTo(2)) + Expect(postgres.CountReplicas( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + primaryInfo, RetryTimeout)).To(BeEquivalentTo(2)) }) By(fmt.Sprintf("after the restore, the 3rd entry should not exist in table '%v'", tableName), func() { @@ -1874,7 +1979,7 @@ func AssertClusterWasRestoredWithPITR(namespace, clusterName, tableName, lsn str tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) }) } func AssertArchiveConditionMet(namespace, clusterName, timeout string) { By("Waiting for the condition", func() { - out, _, err := testsUtils.Run(fmt.Sprintf( + out, _, err := run.Run(fmt.Sprintf( "kubectl -n %s wait --for=condition=ContinuousArchiving=true cluster/%s --timeout=%s", namespace, clusterName, timeout)) Expect(err).ToNot(HaveOccurred()) @@ -1894,21 +1999,23 @@ func AssertArchiveConditionMet(namespace,
clusterName, timeout string) { // switchWalAndGetLatestArchive triggers a new WAL segment and gets the name of the latest archived WAL file func switchWalAndGetLatestArchive(namespace, podName string) string { - _, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: podName, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, "CHECKPOINT;") Expect(err).ToNot(HaveOccurred()) - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: podName, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, "SELECT pg_walfile_name(pg_switch_wal());") Expect(err).ToNot(HaveOccurred()) @@ -1918,7 +2025,7 @@ func switchWalAndGetLatestArchive(namespace, podName string) string { func createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerYamlFilePath string, expectedInstanceCount int) { CreateResourceFromFile(namespace, poolerYamlFilePath) Eventually(func() (int32, error) { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) // Wait for the deployment to be ready deployment := &appsv1.Deployment{} @@ -1937,7 +2044,7 @@ func assertPgBouncerPoolerDeploymentStrategy( ) { By("verify pooler deployment has expected rolling update configuration", func() { Eventually(func() bool { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) // Wait for the deployment to be ready deployment := &appsv1.Deployment{} @@ -1957,7 +2064,7 @@ func assertPgBouncerPoolerDeploymentStrategy( // assertPGBouncerPodsAreReady verifies if PGBouncer pooler pods are ready func assertPGBouncerPodsAreReady(namespace, poolerYamlFilePath string, expectedPodCount int) { Eventually(func() (bool, error) { - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -1995,12 +2102,14 @@ func assertReadWriteConnectionUsingPgBouncerService( poolerYamlFilePath string, isPoolerRW bool, ) { - poolerService, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerService, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) - appUser, generatedAppUserPassword, err := testsUtils.GetCredentials( - clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + + appUser, generatedAppUserPassword, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.ApplicationUserSecretSuffix) Expect(err).ToNot(HaveOccurred()) - AssertConnection(namespace, poolerService, testsUtils.AppDBName, appUser, generatedAppUserPassword, env) + AssertConnection(namespace, poolerService, postgres.AppDBName, appUser, generatedAppUserPassword, env) // verify that, if the pooler is set up as read-write, it will allow both read and // write operations, while if it is set up as read-only, it will allow only read operations @@ -2015,7 +2124,7 @@ func assertReadWriteConnectionUsingPgBouncerService( func
assertPodIsRecreated(namespace, poolerSampleFile string) { var podNameBeforeDelete string - poolerName, err := env.GetResourceNameFromYAML(poolerSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerSampleFile) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("deleting pooler '%s' pod", poolerName), func() { @@ -2029,7 +2138,7 @@ func assertPodIsRecreated(namespace, poolerSampleFile string) { // deleting pgbouncer pod cmd := fmt.Sprintf("kubectl delete pod %s -n %s", podNameBeforeDelete, namespace) - _, _, err = testsUtils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("verifying pooler '%s' pod has been recreated", poolerName), func() { @@ -2056,7 +2165,7 @@ func assertPodIsRecreated(namespace, poolerSampleFile string) { func assertDeploymentIsRecreated(namespace, poolerSampleFile string) { var deploymentUID types.UID - poolerName, err := env.GetResourceNameFromYAML(poolerSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerSampleFile) Expect(err).ToNot(HaveOccurred()) deploymentNamespacedName := types.NamespacedName{ @@ -2068,7 +2177,7 @@ func assertDeploymentIsRecreated(namespace, poolerSampleFile string) { err := env.Client.Get(env.Ctx, deploymentNamespacedName, deployment) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) - err = testsUtils.DeploymentWaitForReady(env, deployment, 60) + err = deployments.WaitForReady(env.Ctx, env.Client, deployment, 60) Expect(err).ToNot(HaveOccurred()) deploymentName := deployment.GetName() @@ -2097,7 +2206,7 @@ func assertDeploymentIsRecreated(namespace, poolerSampleFile string) { }, 300).ShouldNot(BeEquivalentTo(deploymentUID)) }) By(fmt.Sprintf("new '%s' deployment has new pods ready", deploymentName), func() { - err := testsUtils.DeploymentWaitForReady(env, deployment, 120) + err := deployments.WaitForReady(env.Ctx, env.Client, deployment, 120) Expect(err).ToNot(HaveOccurred()) }) By("verifying UIDs of pods have changed", func() { @@ -2124,7 +2233,7 @@ func assertPGBouncerEndpointsContainsPodsIP( ) { var pgBouncerPods []*corev1.Pod endpoint := &corev1.Endpoints{} - endpointName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + endpointName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) Eventually(func(g Gomega) { @@ -2132,7 +2241,7 @@ func assertPGBouncerEndpointsContainsPodsIP( g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) - poolerName, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -2157,7 +2266,7 @@ func assertPGBouncerHasServiceNameInsideHostParameter(namespace, serviceName str for _, pod := range podList.Items { command := fmt.Sprintf("kubectl exec -n %s %s -- /bin/bash -c 'grep "+ " \"host=%s\" controller/configs/pgbouncer.ini'", namespace, pod.Name, serviceName) - out, _, err := testsUtils.Run(command) + out, _, err := run.Run(command) Expect(err).ToNot(HaveOccurred()) expectedContainedHost := fmt.Sprintf("host=%s", serviceName) Expect(out).To(ContainSubstring(expectedContainedHost)) @@ -2166,7 +2275,10 @@ func assertPGBouncerHasServiceNameInsideHostParameter(namespace, serviceName str // OnlineResizePVC is for verifying if storage can be automatically expanded, or not func OnlineResizePVC(namespace, clusterName string) { - 
walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env) + walStorageEnabled, err := storage.IsWalStorageEnabled( + env.Ctx, env.Client, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) pvc := &corev1.PersistentVolumeClaimList{} @@ -2192,7 +2304,7 @@ func OnlineResizePVC(namespace, clusterName string) { namespace, s) Eventually(func() error { - _, _, err := testsUtils.RunUnchecked(cmd) + _, _, err := run.Unchecked(cmd) return err }, 60, 5).Should(Succeed()) } @@ -2222,7 +2334,10 @@ func OnlineResizePVC(namespace, clusterName string) { } func OfflineResizePVC(namespace, clusterName string, timeout int) { - walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env) + walStorageEnabled, err := storage.IsWalStorageEnabled( + env.Ctx, env.Client, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) By("verify PVC size before expansion", func() { @@ -2248,64 +2363,65 @@ func OfflineResizePVC(namespace, clusterName string, timeout int) { namespace, s) Eventually(func() error { - _, _, err := testsUtils.RunUnchecked(cmd) + _, _, err := run.Unchecked(cmd) return err }, 60, 5).Should(Succeed()) } }) By("deleting Pod and PVCs, first replicas then the primary", func() { // Gathering cluster primary - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) currentPrimaryWalStorageName := currentPrimary.Name + "-wal" quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) Expect(len(podList.Items), err).To(BeEquivalentTo(3)) // Iterating through PVC list for deleting pod and PVC for storage expansion - for _, pod := range podList.Items { + for _, p := range podList.Items { // Comparing cluster pods to not be primary to ensure cluster is healthy. 
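// The resize loops above drive expansion through kubectl: growing a PVC only
// requires patching spec.resources.requests.storage, provided the storage class
// has allowVolumeExpansion enabled. The exact patch body is built by the
// fmt.Sprintf calls earlier in this function and is not shown here; a typical
// volume-expansion patch, with a placeholder claim name and an illustrative
// target size, looks like:
//
//	cmd := fmt.Sprintf(
//		`kubectl patch pvc %v -n %v --type=merge `+
//			`-p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'`,
//		pvcName, namespace) // pvcName is a placeholder for the claim to grow
//	_, _, err := run.Unchecked(cmd)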
// Primary will be eventually deleted - if !specs.IsPodPrimary(pod) { + if !specs.IsPodPrimary(p) { // Deleting PVC - _, _, err = testsUtils.Run( - "kubectl delete pvc " + pod.Name + " -n " + namespace + " --wait=false") + _, _, err = run.Run( + "kubectl delete pvc " + p.Name + " -n " + namespace + " --wait=false") Expect(err).ToNot(HaveOccurred()) // Deleting WalStorage PVC if needed if walStorageEnabled { - _, _, err = testsUtils.Run( - "kubectl delete pvc " + pod.Name + "-wal" + " -n " + namespace + " --wait=false") + _, _, err = run.Run( + "kubectl delete pvc " + p.Name + "-wal" + " -n " + namespace + " --wait=false") Expect(err).ToNot(HaveOccurred()) } // Deleting standby and replica pods - err = env.DeletePod(namespace, pod.Name, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, p.Name, quickDelete) Expect(err).ToNot(HaveOccurred()) } } AssertClusterIsReady(namespace, clusterName, timeout, env) // Deleting primary pvc - _, _, err = testsUtils.Run( + _, _, err = run.Run( "kubectl delete pvc " + currentPrimary.Name + " -n " + namespace + " --wait=false") Expect(err).ToNot(HaveOccurred()) // Deleting Primary WalStorage PVC if needed if walStorageEnabled { - _, _, err = testsUtils.Run( + _, _, err = run.Run( "kubectl delete pvc " + currentPrimaryWalStorageName + " -n " + namespace + " --wait=false") Expect(err).ToNot(HaveOccurred()) } // Deleting primary pod - err = env.DeletePod(namespace, currentPrimary.Name, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary.Name, quickDelete) Expect(err).ToNot(HaveOccurred()) }) AssertClusterIsReady(namespace, clusterName, timeout, env) By("verifying Cluster storage is expanded", func() { // Gathering PVC list for comparison - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) // Gathering PVC size and comparing with expanded value expectedCount := 3 @@ -2332,19 +2448,22 @@ func DeleteTableUsingPgBouncerService( namespace, clusterName, poolerYamlFilePath string, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, pod *corev1.Pod, ) { - poolerService, err := env.GetResourceNameFromYAML(poolerYamlFilePath) + poolerService, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) - appUser, generatedAppUserPassword, err := testsUtils.GetCredentials( - clusterName, namespace, apiv1.ApplicationUserSecretSuffix, env) + + appUser, generatedAppUserPassword, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.ApplicationUserSecretSuffix, + ) Expect(err).ToNot(HaveOccurred()) - AssertConnection(namespace, poolerService, testsUtils.AppDBName, appUser, generatedAppUserPassword, env) + AssertConnection(namespace, poolerService, postgres.AppDBName, appUser, generatedAppUserPassword, env) connectionTimeout := time.Second * 10 - dsn := testsUtils.CreateDSN(poolerService, appUser, testsUtils.AppDBName, generatedAppUserPassword, - testsUtils.Require, 5432) + dsn := services.CreateDSN(poolerService, appUser, postgres.AppDBName, generatedAppUserPassword, + services.Require, 5432) _, _, err = env.EventuallyExecCommand(env.Ctx, *pod, specs.PostgresContainerName, &connectionTimeout, "psql", dsn, "-tAc", "DROP TABLE table1") Expect(err).ToNot(HaveOccurred()) @@ -2372,11 +2491,11 @@ func collectAndAssertDefaultMetricsPresentOnEachPod( ) } - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := 
clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podName := pod.GetName() - out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, tlsEnabled) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, tlsEnabled) Expect(err).ToNot(HaveOccurred()) // the error metric should be zero on each pod @@ -2428,11 +2547,11 @@ func collectAndAssertCollectorMetricsPresentOnEachPod(cluster *apiv1.Cluster) { ) } By("collecting and verifying the set of collector metrics on each pod", func() { - podList, err := env.GetClusterPodList(cluster.Namespace, cluster.Name) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, cluster.Namespace, cluster.Name) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podName := pod.GetName() - out, err := testsUtils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred()) // the error metric should be zero on each pod @@ -2452,17 +2571,17 @@ func collectAndAssertCollectorMetricsPresentOnEachPod(cluster *apiv1.Cluster) { // YAML sample file and returns any errors func CreateResourcesFromFileWithError(namespace, sampleFilePath string) error { wrapErr := func(err error) error { return fmt.Errorf("on CreateResourcesFromFileWithError: %w", err) } - yaml, err := GetYAMLContent(sampleFilePath) + yamlContent, err := GetYAMLContent(sampleFilePath) if err != nil { return wrapErr(err) } - objects, err := testsUtils.ParseObjectsFromYAML(yaml, namespace) + objects, err := yaml.ParseObjectsFromYAML(yamlContent, namespace) if err != nil { return wrapErr(err) } for _, obj := range objects { - _, err := testsUtils.CreateObject(env, obj) + _, err := objectsutils.Create(env.Ctx, env.Client, obj) if err != nil { return wrapErr(err) } @@ -2488,7 +2607,7 @@ func GetYAMLContent(sampleFilePath string) ([]byte, error) { if err != nil { return nil, wrapErr(err) } - yaml := data + yamlContent := data if filepath.Ext(cleanPath) == ".template" { preRollingUpdateImg := os.Getenv("E2E_PRE_ROLLING_UPDATE_IMG") @@ -2508,12 +2627,12 @@ func GetYAMLContent(sampleFilePath string) ([]byte, error) { envVars["SERVER_NAME"] = serverName } - yaml, err = testsUtils.Envsubst(envVars, data) + yamlContent, err = envsubst.Envsubst(envVars, data) if err != nil { return nil, wrapErr(err) } } - return yaml, nil + return yamlContent, nil } func buildTemplateEnvs(additionalEnvs map[string]string) map[string]string { @@ -2537,17 +2656,17 @@ func buildTemplateEnvs(additionalEnvs map[string]string) map[string]string { // DeleteResourcesFromFile deletes the Kubernetes objects described in the file func DeleteResourcesFromFile(namespace, sampleFilePath string) error { wrapErr := func(err error) error { return fmt.Errorf("in DeleteResourcesFromFile: %w", err) } - yamlContent, err := GetYAMLContent(sampleFilePath) + yamlContent, err := GetYAMLContent(sampleFilePath) if err != nil { return wrapErr(err) } - objects, err := testsUtils.ParseObjectsFromYAML(yaml, namespace) + objects, err := yaml.ParseObjectsFromYAML(yamlContent, namespace) if err != nil { return wrapErr(err) } for _, obj := range objects { - err := testsUtils.DeleteObject(env, obj) + err := objectsutils.Delete(env.Ctx, env.Client, obj) if err != nil { return wrapErr(err) } @@ -2558,19 +2677,20 @@ func DeleteResourcesFromFile(namespace, sampleFilePath string) error { // Assert that, in the given cluster,
all the PostgreSQL instances have no pending restart func AssertPostgresNoPendingRestart(namespace, clusterName string, timeout int) { By("waiting for all pods to have no pending restart", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) query := "SELECT EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)" // Check that the new parameter has been modified in every pod Eventually(func() (bool, error) { noPendingRestart := true for _, pod := range podList.Items { - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) if err != nil { return false, nil } @@ -2596,8 +2716,9 @@ func AssertBackupConditionTimestampChangedInClusterStatus( ) { By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { Eventually(func() (bool, error) { - getBackupCondition, err := testsUtils.GetConditionsInClusterStatus( - namespace, clusterName, env, clusterConditionType) + getBackupCondition, err := backups.GetConditionsInClusterStatus( + env.Ctx, env.Client, + namespace, clusterName, clusterConditionType) if err != nil { return false, err } @@ -2611,12 +2732,13 @@ func AssertClusterReadinessStatusIsReached( clusterName string, conditionStatus apiv1.ConditionStatus, timeout int, - env *testsUtils.TestingEnvironment, + env *environment.TestingEnvironment, ) { By(fmt.Sprintf("waiting for cluster condition status in cluster '%v'", clusterName), func() { Eventually(func() (string, error) { - clusterCondition, err := testsUtils.GetConditionsInClusterStatus( - namespace, clusterName, env, apiv1.ConditionClusterReady) + clusterCondition, err := backups.GetConditionsInClusterStatus( + env.Ctx, env.Client, + namespace, clusterName, apiv1.ConditionClusterReady) if err != nil { return "", err } @@ -2635,7 +2757,7 @@ func AssertPvcHasLabels( By("checking PVCs have the correct role labels", func() { Eventually(func(g Gomega) { // Gather the list of PVCs in the current namespace - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) g.Expect(err).ToNot(HaveOccurred()) // Iterating through PVC list @@ -2663,7 +2785,7 @@ func AssertPvcHasLabels( utils.PvcRoleLabelName: ExpectedPvcRole, utils.ClusterInstanceRoleLabelName: ExpectedRole, } - g.Expect(testsUtils.PvcHasLabels(pvc, expectedLabels)).To(BeTrue(), + g.Expect(storage.PvcHasLabels(pvc, expectedLabels)).To(BeTrue(), fmt.Sprintf("expectedLabels: %v and found actualLabels on pvc: %v", expectedLabels, pod.GetLabels())) } @@ -2684,11 +2806,15 @@ func AssertReplicationSlotsOnPod( ) { GinkgoWriter.Println("checking contain slots:", expectedSlots, "for pod:", pod.Name) Eventually(func() ([]string, error) { - currentSlots, err := testsUtils.GetReplicationSlotsOnPod(namespace, pod.GetName(), env) + currentSlots, err := replicationslot.GetReplicationSlotsOnPod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, pod.GetName(), postgres.AppDBName) return currentSlots, err }, 300).Should(ContainElements(expectedSlots), func() string { - return testsUtils.PrintReplicationSlots(namespace, clusterName, env) + return replicationslot.PrintReplicationSlots( + env.Ctx, env.Client, env.Interface,
env.RestClientConfig, + namespace, clusterName, postgres.AppDBName) }) GinkgoWriter.Println("executing replication slot assertion query on pod", pod.Name) @@ -2705,17 +2831,20 @@ func AssertReplicationSlotsOnPod( "AND temporary = 'f' AND slot_type = 'physical')", slot, isActiveOnPrimary) } Eventually(func() (string, error) { - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) return strings.TrimSpace(stdout), err }, 300).Should(BeEquivalentTo("t"), func() string { - return testsUtils.PrintReplicationSlots(namespace, clusterName, env) + return replicationslot.PrintReplicationSlots( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName) }) } } @@ -2726,19 +2855,23 @@ func AssertClusterReplicationSlotsAligned( namespace, clusterName string, ) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Eventually(func() bool { var lsnList []string for _, pod := range podList.Items { - out, err := testsUtils.GetReplicationSlotLsnsOnPod(namespace, clusterName, pod, env) + out, err := replicationslot.GetReplicationSlotLsnsOnPod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName, pod) Expect(err).ToNot(HaveOccurred()) lsnList = append(lsnList, out...) } - return testsUtils.AreSameLsn(lsnList) + return replicationslot.AreSameLsn(lsnList) }, 300).Should(BeEquivalentTo(true), func() string { - return testsUtils.PrintReplicationSlots(namespace, clusterName, env) + return replicationslot.PrintReplicationSlots( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName) }) } @@ -2746,11 +2879,12 @@ func AssertClusterReplicationSlotsAligned( // of the cluster exist and are aligned. 
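// The slot assertions above combine two properties: every peer's physical,
// non-temporary replication slot exists with the expected active flag (the
// EXISTS query quoted above), and all slots have converged to the same
// position. The LSNs that AreSameLsn compares presumably come from the
// standard catalog, i.e. something equivalent to:
//
//	SELECT slot_name, restart_lsn
//	  FROM pg_replication_slots
//	 WHERE temporary = 'f' AND slot_type = 'physical';
//
// with the returned restart_lsn strings then required to be pairwise equal.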
func AssertClusterHAReplicationSlots(namespace, clusterName string) { By("verifying all cluster's replication slots exist and are aligned", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { - expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), - env) + expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod( + env.Ctx, env.Client, + namespace, clusterName, pod.GetName()) Expect(err).ToNot(HaveOccurred()) AssertReplicationSlotsOnPod(namespace, clusterName, pod, expectedSlots, true, false) } @@ -2761,7 +2895,7 @@ func AssertClusterHAReplicationSlots(namespace, clusterName string) { // AssertClusterRollingRestart restarts a given cluster func AssertClusterRollingRestart(namespace, clusterName string) { By(fmt.Sprintf("restarting cluster %v", clusterName), func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) clusterRestarted := cluster.DeepCopy() if clusterRestarted.Annotations == nil { @@ -2774,14 +2908,14 @@ func AssertClusterRollingRestart(namespace, clusterName string) { }) AssertClusterEventuallyReachesPhase(namespace, clusterName, []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) } // AssertPVCCount matches count and pvc List. func AssertPVCCount(namespace, clusterName string, pvcCount, timeout int) { By(fmt.Sprintf("verify cluster %v healthy pvc list", clusterName), func() { Eventually(func(g Gomega) { - cluster, _ := env.GetCluster(namespace, clusterName) + cluster, _ := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(cluster.Status.PVCCount).To(BeEquivalentTo(pvcCount)) pvcList := &corev1.PersistentVolumeClaimList{} @@ -2816,7 +2950,7 @@ func AssertClusterEventuallyReachesPhase(namespace, clusterName string, phase [] // assertPredicateClusterHasPhase returns true if the Cluster's phase is contained in a given slice of phases func assertPredicateClusterHasPhase(namespace, clusterName string, phase []string) func(g Gomega) { return func(g Gomega) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) g.Expect(slices.Contains(phase, cluster.Status.Phase)).To(BeTrue()) } diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go index 65c3f8a2ed..6b335da4a3 100644 --- a/tests/e2e/backup_restore_azure_test.go +++ b/tests/e2e/backup_restore_azure_test.go @@ -21,7 +21,13 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + testUtils 
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -31,6 +37,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), const ( tableName = "to_restore" ) + AzureConfiguration := backups.NewAzureConfigurationFromEnv() BeforeEach(func() { if testLevelEnv.Depth < int(tests.High) { @@ -55,23 +62,24 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), BeforeAll(func() { const namespacePrefix = "cluster-backup-azure-blob" var err error - clusterName, err = env.GetResourceNameFromYAML(azureBlobSampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azureBlobSampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // The Azure Blob Storage should have been created ad-hoc for the tests. // The credentials are retrieved from the environment variables, as we can't create // a fixture for them By("creating the Azure Blob Storage credentials", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey, - env, + AzureConfiguration.StorageAccount, + AzureConfiguration.StorageKey, ) Expect(err).ToNot(HaveOccurred()) }) @@ -87,22 +95,26 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) - assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration) By("uploading a backup", func() { // We create a backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupFile, false, + testTimeouts[testUtils.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) // Verifying file called data.tar should be available on Azure blob storage Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar") }, 30).Should(BeNumerically(">=", 1)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) }) @@ -118,14 +130,14 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), // Create a scheduled backup with the 'immediate' option enabled. 
We expect the backup to be available It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) Expect(err).ToNot(HaveOccurred()) AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) // Only one data.tar files should be present Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar") }, 30).Should(BeNumerically("==", 2)) }) @@ -138,19 +150,21 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), namespace, clusterName, backupFile, - env.AzureConfiguration, + AzureConfiguration, 2, currentTimestamp, ) - assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration) - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( + cluster, err := backups.CreateClusterFromBackupUsingPITR( + env.Ctx, + env.Client, + env.Scheme, namespace, restoredClusterName, backupFile, *currentTimestamp, - env, ) Expect(err).ToNot(HaveOccurred()) AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) @@ -158,7 +172,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), // Restore backup in a new cluster, also cover if no application database is configured AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed()) }) }) @@ -169,7 +183,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), It("verifies that scheduled backups can be suspended", func() { const scheduledBackupSampleFile = fixturesDir + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-azure-blob.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) Expect(err).ToNot(HaveOccurred()) By("scheduling backups", func() { @@ -178,7 +192,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), // AssertScheduledBackupsImmediate creates at least two backups, we should find // their base backups Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar") }, 60).Should(BeNumerically(">=", 2)) }) @@ -203,6 +217,7 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes ) currentTimestamp := new(string) + AzureConfiguration := backups.NewAzureConfigurationFromEnv() BeforeEach(func() { if testLevelEnv.Depth < int(level) { @@ -222,23 +237,25 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes BeforeAll(func() { const namespacePrefix = "recovery-barman-object-azure" var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzure) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileAzure) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll 
delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // The Azure Blob Storage should have been created ad-hoc for the tests. // The credentials are retrieved from the environment variables, as we can't create // a fixture for them By("creating the Azure Blob Storage credentials", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey, - env) + AzureConfiguration.StorageAccount, + AzureConfiguration.StorageKey, + ) Expect(err).ToNot(HaveOccurred()) }) @@ -246,32 +263,37 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes AssertCreateCluster(namespace, clusterName, clusterSourceFileAzure, env) }) - It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - By("backing up a cluster and verifying it exists on azure blob storage", func() { - // Create the backup - testUtils.ExecuteBackup(namespace, sourceBackupFileAzure, false, testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 30).Should(BeNumerically(">=", 1)) + It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", + func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration) + + By("backing up a cluster and verifying it exists on azure blob storage", func() { + // Create the backup + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, sourceBackupFileAzure, false, + testTimeouts[testUtils.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + }) + + // Restoring cluster using a recovery barman object store, which is defined + // in the externalClusters section + AssertClusterRestore(namespace, externalClusterFileAzure, tableName) }) - // Restoring cluster using a recovery barman object store, which is defined - // in the externalClusters section - AssertClusterRestore(namespace, externalClusterFileAzure, tableName) - }) - It("restores a cluster with 'PITR' from barman object using "+ "'barmanObjectStore' option in 'externalClusters' section", func() { externalClusterName := 
"external-cluster-azure-pitr" @@ -280,20 +302,22 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes namespace, clusterName, sourceBackupFileAzurePITR, - env.AzureConfiguration, + AzureConfiguration, 1, currentTimestamp, ) - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( + restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzure( + env.Ctx, + env.Client, namespace, externalClusterName, clusterName, *currentTimestamp, "backup-storage-creds", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.BlobContainer, - env) + AzureConfiguration.StorageAccount, + AzureConfiguration.BlobContainer, + ) Expect(err).ToNot(HaveOccurred()) // Restoring cluster using a recovery barman object store, which is defined @@ -306,7 +330,7 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes ) By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed()) }) }) }) @@ -319,22 +343,23 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes } const namespacePrefix = "cluster-backup-azure-blob-sas" var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileAzureSAS) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileAzureSAS) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // The Azure Blob Storage should have been created ad-hoc for the tests, // we get the credentials from the environment variables as we can't create // a fixture for them By("creating the Azure Blob Container SAS Token credentials", func() { - err = testUtils.CreateSASTokenCredentials( + err = backups.CreateSASTokenCredentials( + env.Ctx, + env.Client, namespace, - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.StorageKey, - env, + AzureConfiguration.StorageAccount, + AzureConfiguration.StorageKey, ) Expect(err).ToNot(HaveOccurred()) }) @@ -343,34 +368,39 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes AssertCreateCluster(namespace, clusterName, clusterSourceFileAzureSAS, env) }) - It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - - // Create a WAL on the primary and check if it arrives in the - // Azure Blob Storage within a short time - assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) - - By("backing up a cluster and verifying it exists on azure blob storage", func() { - // We create a Backup - testUtils.ExecuteBackup(namespace, sourceBackupFileAzureSAS, false, testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) - // Verifying file called data.tar should be available on Azure blob storage - Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(env.AzureConfiguration, clusterName, "data.tar") - }, 
30).Should(BeNumerically(">=", 1)) + It("restores cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", + func() { + // Write a table and some data on the "app" database + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: tableName, + } + AssertCreateTestData(env, tableLocator) + + // Create a WAL on the primary and check if it arrives in the + // Azure Blob Storage within a short time + assertArchiveWalOnAzureBlob(namespace, clusterName, AzureConfiguration) + + By("backing up a cluster and verifying it exists on azure blob storage", func() { + // We create a Backup + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, sourceBackupFileAzureSAS, false, + testTimeouts[testUtils.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) + // Verifying file called data.tar should be available on Azure blob storage + Eventually(func() (int, error) { + return backups.CountFilesOnAzureBlobStorage(AzureConfiguration, clusterName, "data.tar") + }, 30).Should(BeNumerically(">=", 1)) + }) + + // Restore backup in a new cluster + AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName) }) - // Restore backup in a new cluster - AssertClusterRestoreWithApplicationDB(namespace, clusterRestoreFileAzureSAS, tableName) - }) - It("restores a cluster with 'PITR' from barman object using "+ "'barmanObjectStore' option in 'externalClusters' section", func() { externalClusterName := "external-cluster-azure-pitr" @@ -379,20 +409,22 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes namespace, clusterName, sourceBackupFileAzurePITRSAS, - env.AzureConfiguration, + AzureConfiguration, 1, currentTimestamp, ) - restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzure( + restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzure( + env.Ctx, + env.Client, namespace, externalClusterName, clusterName, *currentTimestamp, "backup-storage-creds-sas", - env.AzureConfiguration.StorageAccount, - env.AzureConfiguration.BlobContainer, - env) + AzureConfiguration.StorageAccount, + AzureConfiguration.BlobContainer, + ) Expect(err).ToNot(HaveOccurred()) // Restoring cluster using a recovery barman object store, which is defined @@ -405,17 +437,17 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes ) By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed()) }) }) }) }) }) -func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration testUtils.AzureConfiguration) { +func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration backups.AzureConfiguration) { // Create a WAL on the primary and check if it arrives at the Azure Blob Storage, within a short time By("archiving WALs and verifying they exist", func() { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) latestWAL := switchWalAndGetLatestArchive(primary.Namespace, primary.Name) // Define what file we are looking for in Azure. 
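The hunks in this file all follow the same refactor: helper methods on the shared env object (env.GetCluster, env.GetClusterPrimary, env.GetResourceNameFromYAML) become package-level functions that take the context and the controller-runtime client explicitly. As a rough, hedged sketch of the style these call sites assume (the real implementations live under tests/utils/clusterutils and may differ in details such as error wrapping), a minimal clusterutils.Get could look like:

    package clusterutils

    import (
        "context"

        ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

        apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
    )

    // Get fetches a Cluster by namespace and name using the provided client.
    // This is an illustrative sketch only; the actual helper may add retries
    // or richer error handling.
    func Get(
        ctx context.Context,
        crudClient ctrlclient.Client,
        namespace, name string,
    ) (*apiv1.Cluster, error) {
        cluster := &apiv1.Cluster{}
        err := crudClient.Get(
            ctx,
            ctrlclient.ObjectKey{Namespace: namespace, Name: name},
            cluster,
        )
        return cluster, err
    }

Passing env.Ctx and env.Client at each call site makes the test environment an explicit dependency, which is why every migrated call in these hunks gains the same two leading arguments.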
@@ -423,7 +455,7 @@ func assertArchiveWalOnAzureBlob(namespace, clusterName string, configuration te path := fmt.Sprintf("wals\\/0000000100000000\\/%v.gz", latestWAL) // Verifying on blob storage using az Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(configuration, clusterName, path) + return backups.CountFilesOnAzureBlobStorage(configuration, clusterName, path) }, 60).Should(BeEquivalentTo(1)) }) } @@ -432,19 +464,23 @@ func prepareClusterForPITROnAzureBlob( namespace string, clusterName string, backupSampleFile string, - azureConfig testUtils.AzureConfiguration, + azureConfig backups.AzureConfiguration, expectedVal int, currentTimestamp *string, ) { const tableNamePitr = "for_restore" By("backing up a cluster and verifying it exists on Azure Blob", func() { - testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupSampleFile, false, + testTimeouts[testUtils.BackupIsReady], + ) Eventually(func() (int, error) { - return testUtils.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") + return backups.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") }, 30).Should(BeEquivalentTo(expectedVal)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) @@ -454,23 +490,29 @@ func prepareClusterForPITROnAzureBlob( tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableNamePitr, } AssertCreateTestData(env, tableLocator) By("getting currentTimestamp", func() { - ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + ts, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) *currentTimestamp = ts Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -480,7 +522,7 @@ func prepareClusterForPITROnAzureBlob( Expect(err).ToNot(HaveOccurred()) insertRecordIntoTable(tableNamePitr, 3, conn) }) - assertArchiveWalOnAzureBlob(namespace, clusterName, env.AzureConfiguration) + assertArchiveWalOnAzureBlob(namespace, clusterName, azureConfig) AssertArchiveConditionMet(namespace, clusterName, "5m") - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) } diff --git a/tests/e2e/backup_restore_azurite_test.go b/tests/e2e/backup_restore_azurite_test.go index cb3254c5a3..8b4e8f47ae 100644 --- a/tests/e2e/backup_restore_azurite_test.go +++ b/tests/e2e/backup_restore_azurite_test.go @@ -21,7 +21,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + 
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -61,17 +66,20 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore) BeforeAll(func() { const namespacePrefix = "cluster-backup-azurite" var err error - clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azuriteBlobSampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create and assert ca and tls certificate secrets on Azurite By("creating ca and tls certificate secrets", func() { - err := testUtils.CreateCertificateSecretsOnAzurite(namespace, clusterName, - azuriteCaSecName, azuriteTLSSecName, env) + err := backups.CreateCertificateSecretsOnAzurite( + env.Ctx, env.Client, + namespace, clusterName, + azuriteCaSecName, azuriteTLSSecName, + ) Expect(err).ToNot(HaveOccurred()) }) // Setup Azurite and az cli along with Postgresql cluster @@ -86,7 +94,7 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore) // Create a scheduled backup with the 'immediate' option enabled. // We expect the backup to be available It("immediately starts a backup using ScheduledBackups immediate option", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupImmediateSampleFile) + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupImmediateSampleFile) Expect(err).ToNot(HaveOccurred()) AssertScheduledBackupsImmediate(namespace, scheduledBackupImmediateSampleFile, scheduledBackupName) @@ -94,7 +102,7 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore) // AssertScheduledBackupsImmediate creates at least two backups, we should find // their base backups Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") }, 30).Should(BeNumerically("==", 2)) }) @@ -107,12 +115,14 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore) prepareClusterForPITROnAzurite(namespace, clusterName, backupFilePITR, currentTimestamp) - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( + cluster, err := backups.CreateClusterFromBackupUsingPITR( + env.Ctx, + env.Client, + env.Scheme, namespace, restoredClusterName, backupFilePITR, *currentTimestamp, - env, ) Expect(err).NotTo(HaveOccurred()) AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) @@ -121,7 +131,7 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore) AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000002") By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed()) 
}) }) @@ -130,13 +140,13 @@ var _ = Describe("Azurite - Backup and restore", Label(tests.LabelBackupRestore) // We then patch it again back to its initial state and verify that // the amount of backups keeps increasing again It("verifies that scheduled backups can be suspended", func() { - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) Expect(err).ToNot(HaveOccurred()) By("scheduling backups", func() { AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300) Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") }, 60).Should(BeNumerically(">=", 3)) }) @@ -164,21 +174,23 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label } const namespacePrefix = "recovery-barman-object-azurite" var err error - clusterName, err = env.GetResourceNameFromYAML(azuriteBlobSampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, azuriteBlobSampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create and assert ca and tls certificate secrets on Azurite By("creating ca and tls certificate secrets", func() { - err := testUtils.CreateCertificateSecretsOnAzurite( + err := backups.CreateCertificateSecretsOnAzurite( + env.Ctx, + env.Client, namespace, clusterName, azuriteCaSecName, azuriteTLSSecName, - env) + ) Expect(err).ToNot(HaveOccurred()) }) // Setup Azurite and az cli along with PostgreSQL cluster @@ -206,8 +218,9 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label prepareClusterForPITROnAzurite(namespace, clusterName, backupFileAzuritePITR, currentTimestamp) // Create a cluster from a particular time using external backup. 
- restoredCluster, err := testUtils.CreateClusterFromExternalClusterBackupWithPITROnAzurite( - namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) + restoredCluster, err := backups.CreateClusterFromExternalClusterBackupWithPITROnAzurite( + env.Ctx, env.Client, + namespace, externalClusterRestoreName, clusterName, *currentTimestamp) Expect(err).NotTo(HaveOccurred()) AssertClusterWasRestoredWithPITRAndApplicationDB( @@ -218,7 +231,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label ) By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed()) }) }) }) @@ -226,20 +239,20 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label func prepareClusterOnAzurite(namespace, clusterName, clusterSampleFile string) { By("creating the Azurite storage credentials", func() { - err := testUtils.CreateStorageCredentialsOnAzurite(namespace, env) + err := backups.CreateStorageCredentialsOnAzurite(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) }) By("setting up Azurite to hold the backups", func() { // Deploying azurite for blob storage - err := testUtils.InstallAzurite(namespace, env) + err := backups.InstallAzurite(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) }) By("setting up az-cli", func() { // This is required as we have a service of Azurite running locally. // In order to connect, we need az cli inside the namespace - err := testUtils.InstallAzCli(namespace, env) + err := backups.InstallAzCli(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) }) @@ -262,7 +275,7 @@ func prepareClusterBackupOnAzurite( tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -270,18 +283,22 @@ func prepareClusterBackupOnAzurite( By("backing up a cluster and verifying it exists on azurite", func() { // We create a Backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupFile, false, + testTimeouts[testUtils.BackupIsReady], + ) // Verifying file called data.tar should be available on Azurite blob storage Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") }, 30).Should(BeNumerically(">=", 1)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) }) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) } func prepareClusterForPITROnAzurite( @@ -292,13 +309,17 @@ func prepareClusterForPITROnAzurite( ) { By("backing up a cluster and verifying it exists on azurite", func() { // We create a Backup - testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupSampleFile, false, + 
testTimeouts[testUtils.BackupIsReady], + ) // Verifying file called data.tar should be available on Azurite blob storage Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") }, 30).Should(BeNumerically(">=", 1)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) @@ -308,23 +329,29 @@ func prepareClusterForPITROnAzurite( tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: "for_restore", } AssertCreateTestData(env, tableLocator) By("getting currentTimestamp", func() { - ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + ts, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) *currentTimestamp = ts Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("writing 3rd entry into test table '%v'", "for_restore"), func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -347,7 +374,7 @@ func assertArchiveWalOnAzurite(namespace, clusterName string) { path := fmt.Sprintf("%v\\/wals\\/0000000100000000\\/%v.gz", clusterName, latestWAL) // verifying on blob storage using az Eventually(func() (int, error) { - return testUtils.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path) + return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, path) }, 60).Should(BeEquivalentTo(1)) }) } diff --git a/tests/e2e/backup_restore_minio_test.go b/tests/e2e/backup_restore_minio_test.go index 41ada349f0..4f4a611d1d 100644 --- a/tests/e2e/backup_restore_minio_test.go +++ b/tests/e2e/backup_restore_minio_test.go @@ -24,8 +24,16 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -60,10 +68,10 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), } const namespacePrefix = "cluster-backup-minio" var err error - clusterName, err = env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("create the certificates for MinIO", func() { @@ -72,12 +80,13 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), }) By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -89,7 +98,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( @@ -113,9 +122,9 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), clusterRestoreSampleFile = fixturesDir + "/backup/cluster-from-restore.yaml.template" ) var backup *apiv1.Backup - restoredClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile) Expect(err).ToNot(HaveOccurred()) - backupName, err := env.GetResourceNameFromYAML(backupFile) + backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupFile) Expect(err).ToNot(HaveOccurred()) // Create required test data AssertCreationOfTestDataForTargetDB(env, namespace, clusterName, targetDBOne, testTableName) @@ -126,7 +135,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -137,28 +146,28 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), // There should be a backup resource and By(fmt.Sprintf("backing up a cluster and verifying it exists on minio, backup path is %v", latestTar), func() { - backup = testUtils.ExecuteBackup(namespace, backupFile, false, - testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backup = backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, backupFile, false, + testTimeouts[timeouts.BackupIsReady]) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) Eventually(func() (int, error) { return minio.CountFiles(minioEnv, latestTar) }, 60).Should(BeEquivalentTo(1)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if 
err != nil { return "", err } return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } return cluster.Status.LastSuccessfulBackup, err }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } @@ -169,14 +178,17 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), By("verifying the backup is using the expected barman-cloud-backup options", func() { Expect(backup).ToNot(BeNil()) Expect(backup.Status.InstanceID).ToNot(BeNil()) - logEntries, err := testUtils.ParseJSONLogs(namespace, backup.Status.InstanceID.PodName, env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, namespace, + backup.Status.InstanceID.PodName, + ) Expect(err).ToNot(HaveOccurred()) expectedBaseBackupOptions := []string{ "--immediate-checkpoint", "--min-chunk-size=5MB", "--read-timeout=59", } - result, err := testUtils.CheckOptionsForBarmanCommand( + result, err := logs.CheckOptionsForBarmanCommand( logEntries, barmanCloudBackupLogEntry, backup.Name, @@ -201,7 +213,11 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), err = env.Client.Delete(env.Ctx, backup) Expect(err).ToNot(HaveOccurred()) // create a second backup - testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupFile, false, + testTimeouts[timeouts.BackupIsReady], + ) latestTar = minio.GetFilePath(clusterName, "data.tar") Eventually(func() (int, error) { return minio.CountFiles(minioEnv, latestTar) @@ -214,7 +230,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), ctrlclient.ObjectKey{Namespace: namespace, Name: backupName}, backup) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // We know that our current images always contain the latest barman version if cluster.ShouldForceLegacyBackup() { @@ -227,7 +243,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), // Restore backup in a new cluster, also cover if no application database is configured AssertClusterRestore(namespace, clusterRestoreSampleFile, tableName) - cluster, err := env.GetCluster(namespace, restoredClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) @@ -265,7 +281,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), backupStandbyFile = fixturesDir + "/backup/minio/backup-minio-standby.yaml" ) - targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioStandbySampleFile) + targetClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioStandbySampleFile) Expect(err).ToNot(HaveOccurred()) // Create the cluster with custom serverName in the backup spec @@ -280,7 +296,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), tableLocator := 
TableLocator{ Namespace: namespace, ClusterName: targetClusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -291,13 +307,17 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), // There should be a backup resource and By(fmt.Sprintf("backing up a cluster from standby and verifying it exists on minio, backup path is %v", latestTar), func() { - testUtils.ExecuteBackup(namespace, backupStandbyFile, true, testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, targetClusterName) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupStandbyFile, true, + testTimeouts[timeouts.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, targetClusterName) Eventually(func() (int, error) { return minio.CountFiles(minioEnv, latestTar) }, 60).Should(BeEquivalentTo(1)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, targetClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, targetClusterName) return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) }) @@ -315,7 +335,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), backupWithTargetFile = fixturesDir + "/backup/minio/backup-minio-override-target.yaml" ) - targetClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + targetClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile) Expect(err).ToNot(HaveOccurred()) // Create the cluster with custom serverName in the backup spec @@ -330,7 +350,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), tableLocator := TableLocator{ Namespace: namespace, ClusterName: targetClusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -341,14 +361,17 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), // There should be a backup resource and By(fmt.Sprintf("backing up a cluster from standby (defined in backup file) and verifying it exists on minio,"+ " backup path is %v", latestTar), func() { - testUtils.ExecuteBackup(namespace, backupWithTargetFile, true, testTimeouts[testUtils.BackupIsReady], - env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, targetClusterName) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupWithTargetFile, true, + testTimeouts[timeouts.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, targetClusterName) Eventually(func() (int, error) { return minio.CountFiles(minioEnv, latestTar) }, 60).Should(BeEquivalentTo(1)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, targetClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, targetClusterName) return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) }) @@ -375,7 +398,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), clusterServerName = "pg-backup-minio-Custom-Name" ) - customClusterName, err := env.GetResourceNameFromYAML(clusterWithMinioCustomSampleFile) + customClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioCustomSampleFile) Expect(err).ToNot(HaveOccurred()) // 
Create the cluster with custom serverName in the backup spec @@ -390,7 +413,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), tableLocator := TableLocator{ Namespace: namespace, ClusterName: customClusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -399,8 +422,12 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), // There should be a backup resource and By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, backupFileCustom, false, testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, customClusterName) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupFileCustom, false, + testTimeouts[timeouts.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, customClusterName) latestBaseTar := minio.GetFilePath(clusterServerName, "data.tar") Eventually(func() (int, error) { return minio.CountFiles(minioEnv, latestBaseTar) @@ -408,7 +435,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), fmt.Sprintf("verify the number of backup %v is equals to 1", latestBaseTar)) // this is the second backup we take on the bucket Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, customClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, customClusterName) return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) }) @@ -431,7 +458,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), It("immediately starts a backup using ScheduledBackups 'immediate' option", func() { const scheduledBackupSampleFile = fixturesDir + "/backup/scheduled_backup_immediate/scheduled-backup-immediate-minio.yaml" - scheduledBackupName, err := env.GetResourceNameFromYAML(scheduledBackupSampleFile) + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) Expect(err).ToNot(HaveOccurred()) AssertScheduledBackupsImmediate(namespace, scheduledBackupSampleFile, scheduledBackupName) @@ -458,21 +485,23 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), currentTimestamp, ) - cluster, err := testUtils.CreateClusterFromBackupUsingPITR( + cluster, err := backups.CreateClusterFromBackupUsingPITR( + env.Ctx, + env.Client, + env.Scheme, namespace, restoredClusterName, backupFilePITR, *currentTimestamp, - env, ) Expect(err).NotTo(HaveOccurred()) - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReady], env) // Restore backup in a new cluster, also cover if no application database is configured AssertClusterWasRestoredWithPITR(namespace, restoredClusterName, tableName, "00000003") By("deleting the restored cluster", func() { - Expect(testUtils.DeleteObject(env, cluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed()) }) }) @@ -483,7 +512,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), It("verifies that scheduled backups can be suspended", func() { const scheduledBackupSampleFile = fixturesDir + "/backup/scheduled_backup_suspend/scheduled-backup-suspend-minio.yaml" - scheduledBackupName, err := 
env.GetResourceNameFromYAML(scheduledBackupSampleFile) + scheduledBackupName, err := yaml.GetResourceNameFromYAML(env.Scheme, scheduledBackupSampleFile) Expect(err).ToNot(HaveOccurred()) By("scheduling backups", func() { @@ -504,14 +533,14 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), Expect(err).ToNot(HaveOccurred()) Expect(tags.Tags).ToNot(BeEmpty()) - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPrimary := currentPrimary.GetName() // Force-delete the primary quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) + err = pods.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete) Expect(err).ToNot(HaveOccurred()) AssertNewPrimary(namespace, clusterName, oldPrimary) @@ -551,19 +580,20 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes } const namespacePrefix = "recovery-barman-object-minio" var err error - clusterName, err = env.GetResourceNameFromYAML(clusterSourceFileMinio) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSourceFileMinio) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -577,7 +607,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes AssertCreateCluster(namespace, clusterName, clusterSourceFileMinio, env) By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( @@ -592,14 +622,14 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes It("restores a cluster from barman object using 'barmanObjectStore' option in 'externalClusters' section", func() { - externalClusterName, err := env.GetResourceNameFromYAML(externalClusterFileMinio) + externalClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, externalClusterFileMinio) Expect(err).ToNot(HaveOccurred()) // Write a table and some data on the "app" database tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -608,12 +638,13 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes // There should be a backup resource and By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeFirstBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, 
clusterName) + backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeFirstBackupFileMinio, + false, + testTimeouts[timeouts.BackupIsReady]) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) // TODO: this is to force a CHECKPOINT when we run the backup on standby. - // This should be better handled inside ExecuteBackup + // This should be better handled inside Execute AssertArchiveWalOnMinio(namespace, clusterName, clusterName) latestTar := minio.GetFilePath(clusterName, "data.tar") @@ -622,7 +653,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes }, 60).Should(BeEquivalentTo(1), fmt.Sprintf("verify the number of backup %v is equals to 1", latestTar)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } @@ -638,7 +669,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes tableLocator = TableLocator{ Namespace: namespace, ClusterName: externalClusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) @@ -657,16 +688,22 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes // We have already written 2 rows in test table 'to_restore' in above test now we will take current // timestamp. It will use to restore cluster from source using PITR By("getting currentTimestamp", func() { - ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + ts, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) *currentTimestamp = ts Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("writing 2 more entries in table '%v'", tableName), func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -679,9 +716,10 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes insertRecordIntoTable(tableName, 4, conn) }) By("creating second backup and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeSecondBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeSecondBackupFileMinio, + false, + testTimeouts[timeouts.BackupIsReady]) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) latestTar := minio.GetFilePath(clusterName, "data.tar") Eventually(func() (int, error) { return minio.CountFiles(minioEnv, latestTar) @@ -691,8 +729,9 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes var restoredCluster *apiv1.Cluster By("create a cluster from backup with PITR", func() { var err error - restoredCluster, err = testUtils.CreateClusterFromExternalClusterBackupWithPITROnMinio( - namespace, externalClusterRestoreName, clusterName, *currentTimestamp, env) + restoredCluster, err = backups.CreateClusterFromExternalClusterBackupWithPITROnMinio( + env.Ctx, env.Client, + namespace, 
externalClusterRestoreName, clusterName, *currentTimestamp) Expect(err).NotTo(HaveOccurred()) }) AssertClusterWasRestoredWithPITRAndApplicationDB( @@ -702,7 +741,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes "00000002", ) By("delete restored cluster", func() { - Expect(testUtils.DeleteObject(env, restoredCluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, restoredCluster)).To(Succeed()) }) }) @@ -711,7 +750,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: "for_restore_repl", } AssertCreateTestData(env, tableLocator) @@ -719,9 +758,9 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes AssertArchiveWalOnMinio(namespace, clusterName, clusterName) By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, sourceTakeThirdBackupFileMinio, false, - testTimeouts[testUtils.BackupIsReady], env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.Execute(env.Ctx, env.Client, env.Scheme, namespace, sourceTakeThirdBackupFileMinio, false, + testTimeouts[timeouts.BackupIsReady]) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) latestTar := minio.GetFilePath(clusterName, "data.tar") Eventually(func() (int, error) { return minio.CountFiles(minioEnv, latestTar) @@ -750,7 +789,11 @@ func prepareClusterForPITROnMinio( const tableNamePitr = "for_restore" By("backing up a cluster and verifying it exists on minio", func() { - testUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testUtils.BackupIsReady], env) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, backupSampleFile, false, + testTimeouts[timeouts.BackupIsReady], + ) latestTar := minio.GetFilePath(clusterName, "data.tar") Eventually(func() (int, error) { return minio.CountFiles(minioEnv, latestTar) @@ -758,7 +801,7 @@ func prepareClusterForPITROnMinio( fmt.Sprintf("verify the number of backups %v is greater than or equal to %v", latestTar, expectedVal)) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) @@ -768,23 +811,29 @@ func prepareClusterForPITROnMinio( tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableNamePitr, } AssertCreateTestData(env, tableLocator) By("getting currentTimestamp", func() { - ts, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + ts, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) *currentTimestamp = ts Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("writing 3rd entry into test table '%v'", tableNamePitr), func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -796,5 +845,5 @@ func 
prepareClusterForPITROnMinio( }) AssertArchiveWalOnMinio(namespace, clusterName, clusterName) AssertArchiveConditionMet(namespace, clusterName, "5m") - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) } diff --git a/tests/e2e/certificates_test.go b/tests/e2e/certificates_test.go index 5cd0f173d7..a12c885059 100644 --- a/tests/e2e/certificates_test.go +++ b/tests/e2e/certificates_test.go @@ -17,12 +17,21 @@ limitations under the License. package e2e import ( + "context" "fmt" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" + "k8s.io/utils/ptr" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -41,6 +50,95 @@ import ( // from an application, by using certificates that have been created by 'kubectl-cnpg' // Then we verify that the server certificate and the operator are able to handle the provided server certificates var _ = Describe("Certificates", func() { + createClientCertificatesViaKubectlPluginFunc := func( + ctx context.Context, + crudClient ctrlclient.Client, + cluster apiv1.Cluster, + certName string, + userName string, + ) error { + // clientCertName := "cluster-cert" + // user := "app" + // Create the certificate + _, _, err := run.Run(fmt.Sprintf( + "kubectl cnpg certificate %v --cnpg-cluster %v --cnpg-user %v -n %v", + certName, + cluster.Name, + userName, + cluster.Namespace)) + if err != nil { + return err + } + // Verifying client certificate secret existence + secret := &corev1.Secret{} + err = crudClient.Get(ctx, ctrlclient.ObjectKey{Namespace: cluster.Namespace, Name: certName}, secret) + return err + } + + defaultPodFunc := func(namespace string, name string, rootCASecretName string, tlsSecretName string) corev1.Pod { + var secretMode int32 = 0o600 + seccompProfile := &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + } + + return corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "secret-volume-root-ca", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: rootCASecretName, + DefaultMode: &secretMode, + }, + }, + }, + { + Name: "secret-volume-tls", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: tlsSecretName, + DefaultMode: &secretMode, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: name, + Image: "ghcr.io/cloudnative-pg/webtest:1.6.0", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 8080, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "secret-volume-root-ca", + MountPath: "/etc/secrets/ca", + }, + { + Name: "secret-volume-tls", + MountPath: "/etc/secrets/tls", + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + SeccompProfile: seccompProfile, + }, + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: 
seccompProfile, + }, + }, + } + } + const ( serverCASecretName = "my-postgresql-server-ca" // #nosec serverCertSecretName = "my-postgresql-server" // #nosec @@ -67,7 +165,7 @@ var _ = Describe("Certificates", func() { cleanClusterCertification := func() { err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.Certificates.ServerTLSSecret = "" cluster.Spec.Certificates.ServerCASecret = "" @@ -82,20 +180,21 @@ var _ = Describe("Certificates", func() { var err error // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "postgresql-cert" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) // Create the client certificate - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - err = utils.CreateClientCertificatesViaKubectlPlugin( + err = createClientCertificatesViaKubectlPluginFunc( + env.Ctx, + env.Client, *cluster, kubectlCNPGClientCertSecretName, "app", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -106,96 +205,99 @@ var _ = Describe("Certificates", func() { It("can authenticate using a Certificate that is generated from the 'kubectl-cnpg' plugin", Label(tests.LabelPlugin), func() { - pod := utils.DefaultWebapp(namespace, "app-pod-cert-1", + pod := defaultPodFunc(namespace, "app-pod-cert-1", defaultCASecretName, kubectlCNPGClientCertSecretName) - err := utils.PodCreateAndWaitForReady(env, &pod, 240) + err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) - It("can authenticate after switching to user-supplied server certs", Label(tests.LabelServiceConnectivity), func() { - CreateAndAssertServerCertificatesSecrets( - namespace, - clusterName, - serverCASecretName, - serverCertSecretName, - false, - ) + It("can authenticate after switching to user-supplied server certs", Label(tests.LabelServiceConnectivity), + func() { + CreateAndAssertServerCertificatesSecrets( + namespace, + clusterName, + serverCASecretName, + serverCertSecretName, + false, + ) - var err error - // Updating defaults certificates entries with user provided certificates, - // i.e server CA and TLS secrets inside the cluster - Eventually(func() error { - _, _, err = utils.RunUnchecked(fmt.Sprintf( - "kubectl patch cluster %v -n %v -p "+ - "'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+ - "\"serverTLSSecret\":\"%v\"}}}'"+ - " --type='merge'", clusterName, namespace, serverCASecretName, serverCertSecretName)) - if err != nil { - return err - } - return nil - }, 60, 5).Should(Succeed()) - - Eventually(func() (bool, error) { - certUpdateStatus := false - cluster, err := env.GetCluster(namespace, clusterName) - if cluster.Status.Certificates.ServerCASecret == serverCASecretName { - if cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName { - certUpdateStatus = true + var err error + 
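// defaultPodFunc above mounts the CA bundle under /etc/secrets/ca and the
// client keypair under /etc/secrets/tls. A minimal sketch of the
// sslmode=verify-full DSN such a pod can use; the key names (ca.crt,
// tls.crt, tls.key) follow the usual kubernetes.io/tls convention and,
// like the helper name itself, are an assumption here, not part of this
// patch. Assumes "fmt" is imported.
func buildVerifyFullDSN(host, user, dbname string) string {
	return fmt.Sprintf(
		"host=%s user=%s dbname=%s sslmode=verify-full "+
			"sslrootcert=/etc/secrets/ca/ca.crt "+
			"sslcert=/etc/secrets/tls/tls.crt "+
			"sslkey=/etc/secrets/tls/tls.key",
		host, user, dbname)
}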
// Updating defaults certificates entries with user provided certificates, + // i.e server CA and TLS secrets inside the cluster + Eventually(func() error { + _, _, err = run.Unchecked(fmt.Sprintf( + "kubectl patch cluster %v -n %v -p "+ + "'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+ + "\"serverTLSSecret\":\"%v\"}}}'"+ + " --type='merge'", clusterName, namespace, serverCASecretName, serverCertSecretName)) + if err != nil { + return err } - } - return certUpdateStatus, err - }, 120).Should(BeTrue(), fmt.Sprintf("Error: %v", err)) + return nil + }, 60, 5).Should(Succeed()) - pod := utils.DefaultWebapp( - namespace, - "app-pod-cert-2", - serverCASecretName, - kubectlCNPGClientCertSecretName, - ) - err = utils.PodCreateAndWaitForReady(env, &pod, 240) - Expect(err).ToNot(HaveOccurred()) - AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) - }) + Eventually(func() (bool, error) { + certUpdateStatus := false + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + if cluster.Status.Certificates.ServerCASecret == serverCASecretName { + if cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName { + certUpdateStatus = true + } + } + return certUpdateStatus, err + }, 120).Should(BeTrue(), fmt.Sprintf("Error: %v", err)) - It("can connect after switching to user-supplied client certificates", Label(tests.LabelServiceConnectivity), func() { - // Create certificates secret for client - CreateAndAssertClientCertificatesSecrets(namespace, clusterName, clientCASecretName, replicaCertSecretName, - clientCertSecretName, false) - - // Updating defaults certificates entries with user provided certificates, - // i.e client CA and TLS secrets inside the cluster - Eventually(func() error { - _, _, err := utils.RunUnchecked(fmt.Sprintf( - "kubectl patch cluster %v -n %v -p "+ - "'{\"spec\":{\"certificates\":{\"clientCASecret\":\"%v\","+ - "\"replicationTLSSecret\":\"%v\"}}}'"+ - " --type='merge'", clusterName, namespace, clientCASecretName, replicaCertSecretName)) - if err != nil { - return err - } - return nil - }, 60, 5).Should(Succeed()) - - Eventually(func() (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) - return cluster.Spec.Certificates.ClientCASecret == clientCASecretName && - cluster.Status.Certificates.ReplicationTLSSecret == replicaCertSecretName, err - }, 120, 5).Should(BeTrue()) - - pod := utils.DefaultWebapp(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName) - err := utils.PodCreateAndWaitForReady(env, &pod, 240) - Expect(err).ToNot(HaveOccurred()) - AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) - }) + pod := defaultPodFunc( + namespace, + "app-pod-cert-2", + serverCASecretName, + kubectlCNPGClientCertSecretName, + ) + err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) + Expect(err).ToNot(HaveOccurred()) + AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) + }) + + It("can connect after switching to user-supplied client certificates", Label(tests.LabelServiceConnectivity), + func() { + // Create certificates secret for client + CreateAndAssertClientCertificatesSecrets(namespace, clusterName, clientCASecretName, + replicaCertSecretName, + clientCertSecretName, false) + + // Updating defaults certificates entries with user provided certificates, + // i.e client CA and TLS secrets inside the cluster + Eventually(func() error { + _, _, err := run.Unchecked(fmt.Sprintf( + "kubectl patch cluster %v -n %v -p "+ + 
"'{\"spec\":{\"certificates\":{\"clientCASecret\":\"%v\","+ + "\"replicationTLSSecret\":\"%v\"}}}'"+ + " --type='merge'", clusterName, namespace, clientCASecretName, replicaCertSecretName)) + if err != nil { + return err + } + return nil + }, 60, 5).Should(Succeed()) + + Eventually(func() (bool, error) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + return cluster.Spec.Certificates.ClientCASecret == clientCASecretName && + cluster.Status.Certificates.ReplicationTLSSecret == replicaCertSecretName, err + }, 120, 5).Should(BeTrue()) + + pod := defaultPodFunc(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName) + err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) + Expect(err).ToNot(HaveOccurred()) + AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) + }) It("can connect after switching both server and client certificates to user-supplied mode", Label(tests.LabelServiceConnectivity), func() { // Updating defaults certificates entries with user provided certificates, // i.e server and client CA and TLS secrets inside the cluster Eventually(func() error { - _, _, err := utils.RunUnchecked(fmt.Sprintf( + _, _, err := run.Unchecked(fmt.Sprintf( "kubectl patch cluster %v -n %v -p "+ "'{\"spec\":{\"certificates\":{\"serverCASecret\":\"%v\","+ "\"serverTLSSecret\":\"%v\",\"clientCASecret\":\"%v\","+ @@ -215,15 +317,15 @@ var _ = Describe("Certificates", func() { }, 60, 5).Should(Succeed()) Eventually(func() (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.Certificates.ServerCASecret == serverCASecretName && cluster.Status.Certificates.ClientCASecret == clientCASecretName && cluster.Status.Certificates.ServerTLSSecret == serverCertSecretName && cluster.Status.Certificates.ReplicationTLSSecret == replicaCertSecretName, err }, 120, 5).Should(BeTrue()) - pod := utils.DefaultWebapp(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName) - err := utils.PodCreateAndWaitForReady(env, &pod, 240) + pod := defaultPodFunc(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName) + err := podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) @@ -242,7 +344,7 @@ var _ = Describe("Certificates", func() { var err error // Create a cluster in a namespace that will be deleted after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) CreateAndAssertServerCertificatesSecrets( namespace, @@ -252,23 +354,24 @@ var _ = Describe("Certificates", func() { false, ) AssertCreateCluster(namespace, clusterName, sampleFile, env) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - err = utils.CreateClientCertificatesViaKubectlPlugin( + err = createClientCertificatesViaKubectlPluginFunc( + env.Ctx, + env.Client, *cluster, kubectlCNPGClientCertSecretName, "app", - env, ) Expect(err).ToNot(HaveOccurred()) - pod := utils.DefaultWebapp( + pod := defaultPodFunc( namespace, "app-pod-cert-2", serverCASecretName, kubectlCNPGClientCertSecretName, ) - err = utils.PodCreateAndWaitForReady(env, &pod, 240) + err = 
podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) @@ -287,7 +390,7 @@ var _ = Describe("Certificates", func() { var err error // Create a cluster in a namespace that will be deleted after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create certificates secret for client @@ -300,8 +403,8 @@ var _ = Describe("Certificates", func() { false, ) AssertCreateCluster(namespace, clusterName, sampleFile, env) - pod := utils.DefaultWebapp(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName) - err = utils.PodCreateAndWaitForReady(env, &pod, 240) + pod := defaultPodFunc(namespace, "app-pod-cert-3", defaultCASecretName, clientCertSecretName) + err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) @@ -320,7 +423,7 @@ var _ = Describe("Certificates", func() { // Create a cluster in a namespace that will be deleted after the test var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create certificates secret for server @@ -341,8 +444,8 @@ var _ = Describe("Certificates", func() { false, ) AssertCreateCluster(namespace, clusterName, sampleFile, env) - pod := utils.DefaultWebapp(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName) - err = utils.PodCreateAndWaitForReady(env, &pod, 240) + pod := defaultPodFunc(namespace, "app-pod-cert-4", serverCASecretName, clientCertSecretName) + err = podutils.CreateAndWaitForReady(env.Ctx, env.Client, &pod, 240) Expect(err).ToNot(HaveOccurred()) AssertSSLVerifyFullDBConnectionFromAppPod(namespace, clusterName, pod) }) diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index 476ea0e4aa..b48712f541 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -29,7 +29,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
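// The pattern running through all of these hunks: helpers leave the
// monolithic tests/utils package and take the context and clients
// explicitly instead of the whole env object. A small illustrative
// helper in that style; the function name and the NotFound tolerance
// are assumptions (and assume clusterutils.Get surfaces the client's
// NotFound error unchanged, with apierrors being
// k8s.io/apimachinery/pkg/api/errors), while clusterutils.Get and
// objects.Delete are used exactly as they appear in this diff.
func deleteClusterIfExists(env *environment.TestingEnvironment, namespace, name string) error {
	cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, name)
	if apierrors.IsNotFound(err) {
		return nil
	}
	if err != nil {
		return err
	}
	return objects.Delete(env.Ctx, env.Client, cluster)
}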
"github.com/onsi/gomega" @@ -60,19 +65,19 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin It("can import a database with large objects", func() { var err error const namespacePrefix = "microservice-large-object" - sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile) Expect(err).ToNot(HaveOccurred()) oid := 16393 data := "large object test" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env) tableLocator := TableLocator{ Namespace: namespace, ClusterName: sourceClusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -83,23 +88,23 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin tableLocator = TableLocator{ Namespace: namespace, ClusterName: importedClusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) AssertLargeObjectValue(namespace, importedClusterName, oid, data) By("deleting the imported database", func() { - Expect(testsUtils.DeleteObject(env, cluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, cluster)).To(Succeed()) }) }) It("can import a database", func() { var err error const namespacePrefix = "microservice" - sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env) assertCreateTableWithDataOnSourceCluster(namespace, tableName, sourceClusterName) @@ -109,7 +114,7 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin tableLocator := TableLocator{ Namespace: namespace, ClusterName: importedClusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) @@ -121,7 +126,7 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin const namespacePrefix = "microservice-different-db" importedClusterName = "cluster-pgdump-different-db" // create namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) assertImportRenamesSelectedDatabase(namespace, sourceSampleFile, importedClusterName, tableName, "") @@ -132,9 +137,9 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin // nonexistent database in cluster definition while importing var err error const namespacePrefix = "cnpg-microservice-error" - sourceClusterName, err = env.GetResourceNameFromYAML(sourceSampleFile) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = 
env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, sourceClusterName, sourceSampleFile, env) @@ -171,14 +176,14 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin } // Gather the target image - targetImage, err := testsUtils.BumpPostgresImageMajorVersion(postgresImage) + targetImage, err := postgres.BumpPostgresImageMajorVersion(postgresImage) Expect(err).ToNot(HaveOccurred()) Expect(targetImage).ShouldNot(BeEmpty(), "targetImage could not be empty") By(fmt.Sprintf("import cluster with different major, target version is %s", targetImage), func() { var err error // create namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) assertImportRenamesSelectedDatabase(namespace, sourceSampleFile, importedClusterName, tableName, targetImage) @@ -211,7 +216,7 @@ func assertCreateTableWithDataOnSourceCluster( ) { By("create user, insert record in new table, assign new user as owner "+ "and grant read only to app user", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) query := fmt.Sprintf( @@ -222,12 +227,13 @@ func assertCreateTableWithDataOnSourceCluster( "GRANT SELECT ON %[1]v TO app;", tableName) - _, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.AppDBName, + postgres.AppDBName, query) Expect(err).ToNot(HaveOccurred()) }) @@ -240,28 +246,29 @@ func assertTableAndDataOnImportedCluster( importedClusterName string, ) { By("verifying presence of table and data from source in imported cluster", func() { - pod, err := env.GetClusterPrimary(namespace, importedClusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, importedClusterName) Expect(err).ToNot(HaveOccurred()) By("Verifying imported table has owner app user", func() { queryImported := fmt.Sprintf( "select * from pg_tables where tablename = '%v' and tableowner = '%v'", tableName, - testsUtils.AppUser, + postgres.AppUser, ) - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.AppDBName, + postgres.AppDBName, queryImported) Expect(err).ToNot(HaveOccurred()) Expect(strings.Contains(out, tableName), err).Should(BeTrue()) }) By("verifying the user named 'micro' on source is not in imported database", func() { - Eventually(QueryMatchExpectationPredicate(pod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(pod, postgres.PostgresDBName, roleExistsQuery("micro"), "f"), 30).Should(Succeed()) }) }) @@ -279,23 +286,24 @@ func assertImportRenamesSelectedDatabase( ) { dbList := []string{"db1", "db2", "db3"} dbToImport := dbList[1] - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, 
err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("creating multiple dbs on source and set ownership to app", func() { for _, db := range dbList { // Create database createDBQuery := fmt.Sprintf("CREATE DATABASE %v OWNER app", db) - _, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, createDBQuery) Expect(err).ToNot(HaveOccurred()) } @@ -304,15 +312,17 @@ func assertImportRenamesSelectedDatabase( By(fmt.Sprintf("creating table '%s' and insert records on selected db %v", tableName, dbToImport), func() { // create a table with two records query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", tableName) - _, err = testsUtils.RunExecOverForward(env, namespace, clusterName, dbToImport, + _, err = postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, dbToImport, apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) var importedCluster *apiv1.Cluster By("importing Database with microservice approach in a new cluster", func() { - importedCluster, err = testsUtils.ImportDatabaseMicroservice(namespace, clusterName, - importedClusterName, imageName, dbToImport, env) + importedCluster, err = importdb.ImportDatabaseMicroservice(env.Ctx, env.Client, namespace, clusterName, + importedClusterName, imageName, dbToImport) Expect(err).ToNot(HaveOccurred()) // We give more time than the usual 600s, since the recovery is slower AssertClusterIsReady(namespace, importedClusterName, 1000, env) @@ -322,18 +332,18 @@ func assertImportRenamesSelectedDatabase( tableLocator := TableLocator{ Namespace: namespace, ClusterName: importedClusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) By("verifying that only 'app' DB exists in the imported cluster", func() { - importedPrimaryPod, err := env.GetClusterPrimary(namespace, importedClusterName) + importedPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, importedClusterName) Expect(err).ToNot(HaveOccurred()) - Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, postgres.PostgresDBName, roleExistsQuery("db2"), "f"), 30).Should(Succeed()) - Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(importedPrimaryPod, postgres.PostgresDBName, roleExistsQuery("app"), "t"), 30).Should(Succeed()) }) @@ -341,6 +351,6 @@ func assertImportRenamesSelectedDatabase( err = DeleteResourcesFromFile(namespace, sampleFile) Expect(err).ToNot(HaveOccurred()) - Expect(testsUtils.DeleteObject(env, importedCluster)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, importedCluster)).To(Succeed()) }) } diff --git a/tests/e2e/cluster_monolithic_test.go b/tests/e2e/cluster_monolithic_test.go index 34d1f3de9e..05099d1ebf 100644 --- a/tests/e2e/cluster_monolithic_test.go +++ b/tests/e2e/cluster_monolithic_test.go @@ -25,7 +25,10 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils 
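// Two import flavours are exercised here: the microservice approach in
// the file above brings a single source database into the target's
// application database, while the monolithic approach in the file that
// follows carries several databases plus their roles. The calls below
// are copied from these diffs; only the surrounding variable names are
// illustrative.
//
//	cluster, err := importdb.ImportDatabaseMicroservice(env.Ctx, env.Client,
//		namespace, sourceClusterName, importedClusterName, imageName, dbToImport)
//
//	_, err = importdb.ImportDatabasesMonolith(env.Ctx, env.Client,
//		namespace, sourceClusterName, targetClusterName, imageName,
//		sourceDatabases, sourceRoles)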
"github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/importdb" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -52,7 +55,7 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD ) var namespace, sourceClusterName string - var forwardTarget *testsUtils.PSQLForwardConnection + var forwardTarget *postgres.PSQLForwardConnection var connTarget *sql.DB BeforeEach(func() { @@ -68,19 +71,22 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD By("creating the source cluster", func() { const namespacePrefix = "cluster-monolith" - sourceClusterName, err = env.GetResourceNameFromYAML(sourceClusterFile) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, sourceClusterName, sourceClusterFile, env) }) By("creating several roles, one of them a superuser and source databases", func() { - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, sourceClusterName, - testsUtils.PostgresDBName, + postgres.PostgresDBName, apiv1.SuperUserSecretSuffix, ) defer func() { @@ -116,13 +122,13 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD for _, database := range sourceDatabases { query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s AS VALUES (1),(2);", tableName) conn, err := forward.GetPooler().Connection(database) + Expect(err).ToNot(HaveOccurred()) // We need to set the max idle connection back to a higher number // otherwise the conn.Exec() will close the connection // and that will produce a RST packet from PostgreSQL that will kill the // port-forward tunnel // More about the RST packet here https://www.postgresql.org/message-id/165ba87e-fa48-4eae-b1f3-f9a831b4890b%40Spark conn.SetMaxIdleConns(3) - Expect(err).ToNot(HaveOccurred()) _, err = conn.Exec(query) Expect(err).ToNot(HaveOccurred()) } @@ -131,26 +137,33 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD By("creating target cluster", func() { postgresImage := os.Getenv("POSTGRES_IMG") Expect(postgresImage).ShouldNot(BeEmpty(), "POSTGRES_IMG env should not be empty") - expectedImageName, err := testsUtils.BumpPostgresImageMajorVersion(postgresImage) + expectedImageName, err := postgres.BumpPostgresImageMajorVersion(postgresImage) Expect(err).ToNot(HaveOccurred()) Expect(expectedImageName).ShouldNot(BeEmpty(), "imageName could not be empty") - _, err = testsUtils.ImportDatabasesMonolith(namespace, + + _, err = importdb.ImportDatabasesMonolith( + env.Ctx, + env.Client, + namespace, sourceClusterName, targetClusterName, expectedImageName, sourceDatabases, sourceRoles, - env) + ) Expect(err).ToNot(HaveOccurred()) - AssertClusterIsReady(namespace, targetClusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, targetClusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("connect to the imported cluster", func() { 
- forwardTarget, connTarget, err = testsUtils.ForwardPSQLConnection( - env, + forwardTarget, connTarget, err = postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, targetClusterName, - testsUtils.PostgresDBName, + postgres.PostgresDBName, apiv1.SuperUserSecretSuffix, ) Expect(err).ToNot(HaveOccurred()) @@ -182,13 +195,13 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD for _, database := range sourceDatabases { selectQuery := fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName) connTemp, err := forwardTarget.GetPooler().Connection(database) + Expect(err).ToNot(HaveOccurred()) // We need to set the max idle connection back to a higher number // otherwise the conn.Exec() will close the connection // and that will produce a RST packet from PostgreSQL that will kill the // port-forward tunnel // More about the RST packet here https://www.postgresql.org/message-id/165ba87e-fa48-4eae-b1f3-f9a831b4890b%40Spark connTemp.SetMaxIdleConns(3) - Expect(err).ToNot(HaveOccurred()) row := connTemp.QueryRow(selectQuery) var count int err = row.Scan(&count) diff --git a/tests/e2e/cluster_setup_test.go b/tests/e2e/cluster_setup_test.go index 9f2d124712..1e20854751 100644 --- a/tests/e2e/cluster_setup_test.go +++ b/tests/e2e/cluster_setup_test.go @@ -27,7 +27,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . 
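// Why both hunks above call SetMaxIdleConns(3) right after obtaining a
// pooled connection: database/sql keeps at most two idle connections by
// default, so a connection handed back after Exec may be closed instead
// of parked. As the comment in this diff explains, PostgreSQL answers
// that close with a TCP RST, which tears down the kubectl port-forward
// tunnel the pool runs through. The safe ordering, as used above:
//
//	conn, err := forward.GetPooler().Connection(database)
//	Expect(err).ToNot(HaveOccurred())
//	conn.SetMaxIdleConns(3) // keep the tunnelled connection parked, not closed
//	_, err = conn.Exec(query)
//	Expect(err).ToNot(HaveOccurred())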
"github.com/onsi/gomega" @@ -53,13 +55,13 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) By("having three PostgreSQL pods with status ready", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(utils.CountReadyPods(podList.Items), err).Should(BeEquivalentTo(3)) }) @@ -75,11 +77,14 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun err := env.Client.Get(env.Ctx, namespacedName, pod) Expect(err).ToNot(HaveOccurred()) - forward, conn, err := testsUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testsUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) Expect(err).NotTo(HaveOccurred()) @@ -121,13 +126,16 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun return int32(-1), nil }, timeout).Should(BeEquivalentTo(restart + 1)) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) - forward, conn, err = testsUtils.ForwardPSQLConnection( - env, + forward, conn, err = postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testsUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -145,7 +153,7 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun const namespacePrefix = "cluster-conditions" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("having a %v namespace", namespace), func() { @@ -172,7 +180,7 @@ var _ = Describe("Cluster setup", Label(tests.LabelSmoke, tests.LabelBasic), fun // scale up the cluster to verify if the cluster remains in Ready By("scaling up the cluster size", func() { - err := env.ScaleClusterSize(namespace, clusterName, 5) + err := clusterutils.ScaleSize(env.Ctx, env.Client, namespace, clusterName, 5) Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/e2e/commons_test.go b/tests/e2e/commons_test.go index 38dc4007f8..50b12cb1f6 100644 --- a/tests/e2e/commons_test.go +++ b/tests/e2e/commons_test.go @@ -16,33 +16,35 @@ limitations under the License. 
package e2e -import "github.com/cloudnative-pg/cloudnative-pg/tests/utils" +import ( + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/cloudvendors" +) -func MustGetEnvProfile() utils.EnvProfile { - return utils.GetEnvProfile(*testCloudVendorEnv) +func MustGetEnvProfile() cloudvendors.EnvProfile { + return cloudvendors.GetEnvProfile(*testCloudVendorEnv) } // IsAKS checks if the running cluster is on AKS func IsAKS() bool { - return *testCloudVendorEnv == utils.AKS + return *testCloudVendorEnv == cloudvendors.AKS } // IsEKS checks if the running cluster is on EKS func IsEKS() bool { - return *testCloudVendorEnv == utils.EKS + return *testCloudVendorEnv == cloudvendors.EKS } // IsGKE checks if the running cluster is on GKE func IsGKE() bool { - return *testCloudVendorEnv == utils.GKE + return *testCloudVendorEnv == cloudvendors.GKE } // IsLocal checks if the running cluster is on local func IsLocal() bool { - return *testCloudVendorEnv == utils.LOCAL + return *testCloudVendorEnv == cloudvendors.LOCAL } // IsOpenshift checks if the running cluster is on OpenShift func IsOpenshift() bool { - return *testCloudVendorEnv == utils.OCP + return *testCloudVendorEnv == cloudvendors.OCP } diff --git a/tests/e2e/config_support_test.go b/tests/e2e/config_support_test.go index bf29de31e1..8477e3a0c2 100644 --- a/tests/e2e/config_support_test.go +++ b/tests/e2e/config_support_test.go @@ -24,7 +24,9 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" . 
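// Typical use of the predicates above: gate a spec on the platform it
// can actually run on. The helper below and its skip reason are
// illustrative assumptions; Skip is the Ginkgo call already used
// elsewhere in this suite.
func skipUnlessSelfManaged() {
	if IsAKS() || IsEKS() || IsGKE() || IsOpenshift() {
		Skip("this test needs a self-managed cluster")
	}
}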
"github.com/onsi/gomega" @@ -49,7 +51,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, Skip("Test depth is lower than the amount requested for this test") } - operatorDeployment, err := env.GetOperatorDeployment() + operatorDeployment, err := operator.GetDeployment(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) operatorNamespace = operatorDeployment.GetNamespace() @@ -74,14 +76,14 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, err = env.Client.Delete(env.Ctx, secret) Expect(err).NotTo(HaveOccurred()) - err = utils.ReloadOperatorDeployment(env, 120) + err = operator.ReloadDeployment(env.Ctx, env.Client, env.Interface, 120) Expect(err).ToNot(HaveOccurred()) }) It("creates the configuration map and secret", func() { // create a config map where operator is deployed cmd := fmt.Sprintf("kubectl apply -n %v -f %v", operatorNamespace, configMapFile) - _, _, err := utils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) // Check if configmap is created Eventually(func() ([]corev1.ConfigMap, error) { @@ -95,7 +97,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, // create a secret where operator is deployed cmd = fmt.Sprintf("kubectl apply -n %v -f %v", operatorNamespace, secretFile) - _, _, err = utils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) // Check if configmap is created Eventually(func() ([]corev1.Secret, error) { @@ -108,30 +110,31 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, }, 10).Should(HaveLen(1)) // Reload the operator with the new config - err = utils.ReloadOperatorDeployment(env, 120) + err = operator.ReloadDeployment(env.Ctx, env.Client, env.Interface, 120) Expect(err).ToNot(HaveOccurred()) }) It("creates a cluster", func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterWithInheritedLabelsFile, env) }) It("verify label's and annotation's inheritance when global config-map changed", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) By("checking the cluster has the requested labels", func() { expectedLabels := map[string]string{"environment": "qaEnv"} - Expect(utils.ClusterHasLabels(cluster, expectedLabels)).To(BeTrue()) + Expect(clusterutils.HasLabels(cluster, expectedLabels)).To(BeTrue()) }) By("checking the pods inherit labels matching the ones in the configuration secret", func() { expectedLabels := map[string]string{"environment": "qaEnv"} Eventually(func() (bool, error) { - return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedLabels) + return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName, + expectedLabels) }, 180).Should(BeTrue()) }) By("checking the pods inherit labels matching wildcard ones in the configuration secret", func() { @@ -140,17 +143,19 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, "example.com/prod": "prod", } Eventually(func() (bool, error) { - return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedLabels) + return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName, + expectedLabels) }, 
180).Should(BeTrue()) }) By("checking the cluster has the requested annotation", func() { expectedAnnotations := map[string]string{"categories": "DatabaseApplication"} - Expect(utils.ClusterHasAnnotations(cluster, expectedAnnotations)).To(BeTrue()) + Expect(clusterutils.HasAnnotations(cluster, expectedAnnotations)).To(BeTrue()) }) By("checking the pods inherit annotations matching the ones in the configuration configMap", func() { expectedAnnotations := map[string]string{"categories": "DatabaseApplication"} Eventually(func() (bool, error) { - return utils.AllClusterPodsHaveAnnotations(env, namespace, clusterName, expectedAnnotations) + return clusterutils.AllPodsHaveAnnotations(env.Ctx, env.Client, namespace, clusterName, + expectedAnnotations) }, 180).Should(BeTrue()) }) By("checking the pods inherit annotations matching wildcard ones in the configuration configMap", func() { @@ -159,7 +164,8 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, "example.com/prod": "prod", } Eventually(func() (bool, error) { - return utils.AllClusterPodsHaveLabels(env, namespace, clusterName, expectedAnnotations) + return clusterutils.AllPodsHaveLabels(env.Ctx, env.Client, namespace, clusterName, + expectedAnnotations) }, 180).Should(BeTrue()) }) }) @@ -167,7 +173,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, // Setting MONITORING_QUERIES_CONFIGMAP: "" should disable monitoring // queries on new cluster. We expect those metrics to be missing. It("verify metrics details when updated default monitoring configMap queries parameter is set to be empty", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) collectAndAssertDefaultMetricsPresentOnEachPod(namespace, clusterName, cluster.IsMetricsTLSEnabled(), false) diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go index 74800a23ab..4918d6d755 100644 --- a/tests/e2e/configuration_update_test.go +++ b/tests/e2e/configuration_update_test.go @@ -32,7 +32,10 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
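// A note on the wildcard-annotation block above: its Eventually body
// calls clusterutils.AllPodsHaveLabels with the expectedAnnotations
// map, faithfully carrying over the utils.AllClusterPodsHaveLabels
// call it renames. Written from scratch, the check would presumably
// use the annotations helper this same diff introduces a few lines
// earlier:
//
//	Eventually(func() (bool, error) {
//		return clusterutils.AllPodsHaveAnnotations(env.Ctx, env.Client,
//			namespace, clusterName, expectedAnnotations)
//	}, 180).Should(BeTrue())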
"github.com/onsi/gomega" @@ -65,7 +68,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada cluster := &apiv1.Cluster{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Parameters = paramsMap return env.Client.Update(env.Ctx, cluster) @@ -77,7 +80,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada cluster := &apiv1.Cluster{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.PgHBA = []string{"host all all all trust"} return env.Client.Update(env.Ctx, cluster) @@ -89,7 +92,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada cluster := &apiv1.Cluster{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.PgIdent = []string{"email /^(.*)@example\\.com \\1"} return env.Client.Update(env.Ctx, cluster) @@ -102,26 +105,27 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada cluster := &apiv1.Cluster{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) cluster.Spec.PostgresConfiguration.Parameters = params return env.Client.Update(env.Ctx, cluster) }) Expect(apierrors.IsInvalid(err)).To(BeTrue()) - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Expect other config parameters applied together with a blockedParameter to not have changed for idx := range podList.Items { pod := podList.Items[idx] Eventually(func(g Gomega) int { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show autovacuum_max_workers") g.Expect(err).ToNot(HaveOccurred()) @@ -140,7 +144,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("create cluster with default configuration", func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) @@ -148,7 +152,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada It("01. 
reloading Pg when a parameter requiring reload is modified", func() { // max_connection increase to 110 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("apply configuration update", func() { @@ -161,12 +165,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada // Check that the parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr @@ -179,11 +184,11 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada endpointName := clusterName + "-rw" // Connection should fail now because we are not supplying a password - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("verify that connections fail by default", func() { - _, _, err := env.ExecCommand(env.Ctx, podList.Items[0], + _, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, podList.Items[0], specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "-h", endpointName, "-tAc", "select 1", ) @@ -201,19 +206,20 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada query := "select count(*) from pg_hba_file_rules where type = 'host' and auth_method = 'trust'" for _, pod := range podList.Items { Eventually(func() (string, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, query) return strings.Trim(stdout, "\n"), err }, timeout).Should(BeEquivalentTo("1")) } // The connection should work now Eventually(func() (int, error, error) { - stdout, _, err := env.ExecCommand(env.Ctx, podList.Items[0], + stdout, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, podList.Items[0], specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "-h", endpointName, "-tAc", "select 1") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) @@ -225,10 +231,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada It("03. 
restarting and switching Pg when a parameter requiring restart is modified", func() { timeout := 300 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary @@ -243,12 +249,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show shared_buffers") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr @@ -259,7 +266,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that a switchover happened", func() { // Check that a switchover happened Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, timeout).ShouldNot(BeEquivalentTo(oldPrimary)) }) @@ -267,10 +274,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada It("04. restarting and switching Pg when mixed parameters are modified", func() { timeout := 300 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary @@ -286,24 +293,26 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada // Check that both parameters have been modified in each pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show max_replication_slots") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr }, timeout).Should(BeEquivalentTo(16)) Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show maintenance_work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr @@ -314,7 +323,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada 
By("verify that a switchover happened", func() { // Check that a switchover happened Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, timeout).ShouldNot(BeEquivalentTo(oldPrimary)) }) @@ -337,10 +346,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada func() { // max_connection decrease to 105 timeout := 300 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary @@ -356,12 +365,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr @@ -372,7 +382,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that a switchover not happened", func() { // Check that a switchover did not happen Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, timeout).Should(BeEquivalentTo(oldPrimary)) }) @@ -384,10 +394,10 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada func() { timeout := 300 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary @@ -402,12 +412,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada // Check that the new parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr @@ -418,7 +429,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that a switchover not happened", func() { // Check that a switchover did not happen Eventually(func() (string, error) { - cluster, err := 
env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, timeout).Should(BeEquivalentTo(oldPrimary)) }) @@ -427,18 +438,19 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada // pg_ident_file_mappings is available from v15 only It("09. reloading Pg when pg_ident rules are modified", func() { if env.PostgresVersion > 14 { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) query := "select count(1) from pg_ident_file_mappings;" By("check that there is only one entry in pg_ident_file_mappings", func() { Eventually(func() (string, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, query) return strings.Trim(stdout, "\n"), err }, timeout).Should(BeEquivalentTo("1")) @@ -452,12 +464,13 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that there are now two entries in pg_ident_file_mappings", func() { Eventually(func() (string, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, query) return strings.Trim(stdout, "\n"), err }, timeout).Should(BeEquivalentTo("2")) @@ -484,10 +497,10 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La const namespacePrefix = "config-change-primary-update-restart" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(clusterFileWithPrimaryUpdateRestart) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterFileWithPrimaryUpdateRestart) Expect(err).ToNot(HaveOccurred()) By("setting up cluster with primaryUpdateMethod value set to restart", func() { @@ -504,16 +517,19 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La var primaryStartTime time.Time By("getting old primary info", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPrimaryPodName = primaryPodInfo.GetName() - forward, conn, err := utils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - utils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) Expect(err).ToNot(HaveOccurred()) @@ -544,7 +560,7 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La }) By(fmt.Sprintf("updating max_connection value to %v", newMaxConnectionsValue), func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := 
clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -555,17 +571,18 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La }) By("verifying the new value for max_connections is updated for all instances", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show max_connections") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "\n")) return value, err, atoiErr @@ -576,7 +593,7 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La By("verifying the old primary is still the primary", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err }, 60).Should(BeEquivalentTo(oldPrimaryPodName)) }) @@ -591,11 +608,12 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La // take pg postmaster start time query := "select to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS');" - stdout, _, cmdErr := env.EventuallyExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, cmdErr := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, - }, utils.PostgresDBName, + }, postgres.PostgresDBName, query, RetryTimeout, PollingTime, @@ -614,7 +632,7 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La const expectedNewValueForWorkMem = "10MB" By("updating work mem ", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -624,18 +642,19 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La }) By("verify that work_mem result as expected", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Check that the parameter has been modified in every pod for _, pod := range podList.Items { Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - utils.PostgresDBName, + postgres.PostgresDBName, "show work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr diff --git a/tests/e2e/connection_test.go b/tests/e2e/connection_test.go index fd962159f0..27b770922b 100644 --- a/tests/e2e/connection_test.go +++ b/tests/e2e/connection_test.go @@ -23,7 +23,8 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/cloudnative-pg/cloudnative-pg/tests" - 
"github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -51,7 +52,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity appDBUser string, appPassword string, superuserPassword string, - env *utils.TestingEnvironment, + env *environment.TestingEnvironment, ) { // We test -rw, -ro and -r services with the app user and the superuser rwService := fmt.Sprintf("%v-rw", clusterName) @@ -59,8 +60,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity roService := fmt.Sprintf("%v-ro", clusterName) services := []string{rwService, roService, rService} for _, service := range services { - AssertConnection(namespace, service, appDBName, utils.PostgresDBName, superuserPassword, env) - AssertConnection(namespace, service, appDBName, appDBUser, appPassword, env) + AssertConnection(namespace, service, appDBName, postgres.PostgresDBName, superuserPassword, env) } AssertWritesToReplicaFails(namespace, roService, appDBName, appDBUser, appPassword) @@ -78,7 +78,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity It("can connect with auto-generated passwords", func() { // Create a cluster in a namespace we'll delete after the test var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -123,7 +123,7 @@ var _ = Describe("Connection via services", Label(tests.LabelServiceConnectivity // Create a cluster in a namespace we'll delete after the test var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) AssertServices(namespace, clusterName, appDBName, appDBUser, diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index 594bba356c..5cf4c6b010 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -24,7 +24,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -58,10 +63,10 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(clusterManifest) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) By("setting up cluster and declarative database CRD", func() { @@ -74,8 +79,9 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test "and encoding = pg_char_to_encoding('%s') and datctype = '%s' and datcollate = '%s'", db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate) Eventually(func(g Gomega) { - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod, }, @@ -96,7 +102,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test ) By("applying Database CRD manifest", func() { CreateResourceFromFile(namespace, databaseManifest) - databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest) + databaseObjectName, err = yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest) Expect(err).NotTo(HaveOccurred()) }) By("ensuring the Database CRD succeeded reconciliation", func() { @@ -116,24 +122,24 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test }) By("verifying new database has been created with the expected fields", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName, databaseExistsQuery(dbname), "t"), 30).Should(Succeed()) assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, database) }) By("removing the Database object", func() { - Expect(testsUtils.DeleteObject(env, &database)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, &database)).To(Succeed()) }) By("verifying the retention policy in the postgres database", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName, databaseExistsQuery(dbname), boolPGOutput(retainOnDeletion)), 30).Should(Succeed()) }) } @@ -166,10 +172,10 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test ) It("will not prevent the deletion of the namespace with lagging finalizers", func() { By("setting up the new namespace and cluster", func() { - err = env.CreateNamespace(namespace) + err = namespaces.CreateNamespace(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(clusterManifest) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, 
clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) @@ -177,7 +183,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test By("creating the database", func() { databaseManifest := fixturesDir + "/declarative_databases/database-with-delete-reclaim-policy.yaml.template" - databaseObjectName, err = env.GetResourceNameFromYAML(databaseManifest) + databaseObjectName, err = yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest) Expect(err).NotTo(HaveOccurred()) CreateResourceFromFile(namespace, databaseManifest) }) @@ -195,7 +201,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) By("deleting the namespace and making sure it succeeds before timeout", func() { - err := env.DeleteNamespaceAndWait(namespace, 120) + err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, namespace, 120) Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/tests/e2e/declarative_hibernation_test.go b/tests/e2e/declarative_hibernation_test.go index 7f30bf1070..c08333f120 100644 --- a/tests/e2e/declarative_hibernation_test.go +++ b/tests/e2e/declarative_hibernation_test.go @@ -24,7 +24,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -47,10 +49,10 @@ var _ = Describe("Cluster declarative hibernation", func() { It("hibernates an existing cluster", func(ctx SpecContext) { const namespacePrefix = "declarative-hibernation" - clusterName, err := env.GetResourceNameFromYAML(sampleFileCluster) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileCluster) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating a new cluster", func() { @@ -66,7 +68,7 @@ var _ = Describe("Cluster declarative hibernation", func() { }) By("hibernating the new cluster", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) if cluster.Annotations == nil { cluster.Annotations = make(map[string]string) @@ -79,19 +81,20 @@ var _ = Describe("Cluster declarative hibernation", func() { By("waiting for the cluster to be hibernated correctly", func() { Eventually(func(g Gomega) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(meta.IsStatusConditionTrue(cluster.Status.Conditions, hibernation.HibernationConditionType)).To(BeTrue()) + g.Expect(meta.IsStatusConditionTrue(cluster.Status.Conditions, + hibernation.HibernationConditionType)).To(BeTrue()) }, 300).Should(Succeed()) }) By("verifying that the Pods have been deleted for the cluster", func() { - podList, _ := 
env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items)).Should(BeEquivalentTo(0)) }) By("rehydrating the cluster", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) if cluster.Annotations == nil { cluster.Annotations = make(map[string]string) @@ -105,7 +108,7 @@ var _ = Describe("Cluster declarative hibernation", func() { By("waiting for the condition to be removed", func() { Eventually(func(g Gomega) { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) condition := meta.FindStatusCondition(cluster.Status.Conditions, hibernation.HibernationConditionType) @@ -115,7 +118,7 @@ var _ = Describe("Cluster declarative hibernation", func() { By("waiting for the Pods to be recreated", func() { Eventually(func(g Gomega) { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(len(podList.Items)).Should(BeEquivalentTo(cluster.Spec.Instances)) }, 300).Should(Succeed()) }) diff --git a/tests/e2e/disk_space_test.go b/tests/e2e/disk_space_test.go index d838034d9d..c616dd8950 100644 --- a/tests/e2e/disk_space_test.go +++ b/tests/e2e/disk_space_test.go @@ -28,7 +28,11 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -48,19 +52,20 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { var primaryPod *corev1.Pod By("finding cluster resources", func() { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster).ToNot(BeNil()) - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) + primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(primaryPod).ToNot(BeNil()) }) By("filling the WAL volume", func() { timeout := time.Minute * 5 - _, _, err := env.ExecCommandInInstancePod( - testsUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, }, @@ -73,35 +78,37 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { By("writing something when no space is available", func() { // Create the table used by the scenario query := "CREATE TABLE diskspace AS SELECT generate_series(1, 1000000);" - _, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.AppDBName, + postgres.AppDBName, query) Expect(err).To(HaveOccurred()) query = "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT" - _, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) Expect(err).To(HaveOccurred()) }) By("waiting for the primary to become not ready", func() { Eventually(func(g Gomega) bool { - primaryPod, err := env.GetPod(namespace, primaryPod.Name) + primaryPod, err := pods.Get(env.Ctx, env.Client, namespace, primaryPod.Name) g.Expect(err).ToNot(HaveOccurred()) - return testsUtils.PodHasCondition(primaryPod, corev1.PodReady, corev1.ConditionFalse) + return pods.HasCondition(primaryPod, corev1.PodReady, corev1.ConditionFalse) }).WithTimeout(time.Minute).Should(BeTrue()) }) By("checking if the operator detects the issue", func() { Eventually(func(g Gomega) string { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) return cluster.Status.Phase }).WithTimeout(time.Minute).Should(Equal("Not enough disk space")) @@ -114,11 +121,11 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { primaryWALPVC := &corev1.PersistentVolumeClaim{} By("finding cluster resources", func() { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster).ToNot(BeNil()) - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) + primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(primaryPod).ToNot(BeNil()) @@ -159,19 +166,20 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { // We can't delete the Pod, 
as this will trigger // a failover. Eventually(func(g Gomega) bool { - primaryPod, err := env.GetPod(namespace, primaryPod.Name) + primaryPod, err := pods.Get(env.Ctx, env.Client, namespace, primaryPod.Name) g.Expect(err).ToNot(HaveOccurred()) - return testsUtils.PodHasCondition(primaryPod, corev1.PodReady, corev1.ConditionTrue) + return pods.HasCondition(primaryPod, corev1.PodReady, corev1.ConditionTrue) }).WithTimeout(10 * time.Minute).Should(BeTrue()) }) By("writing some WAL", func() { query := "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT" - _, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) Expect(err).NotTo(HaveOccurred()) }) @@ -191,10 +199,10 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { func(sampleFile string) { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go index d3ac7f8907..5b2055cdd1 100644 --- a/tests/e2e/drain_node_test.go +++ b/tests/e2e/drain_node_test.go @@ -26,8 +26,12 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -43,7 +47,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La if testLevelEnv.Depth < int(level) { Skip("Test depth is lower than the amount requested for this test") } - nodes, _ := env.GetNodeList() + nodes, _ := nodes.List(env.Ctx, env.Client) // We label three nodes where we could run the workloads, and ignore // the others. The pods of the clusters created in this test run only // where the drain label exists. 
@@ -51,7 +55,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La if (node.Spec.Unschedulable != true) && (len(node.Spec.Taints) == 0) { nodesWithLabels = append(nodesWithLabels, node.Name) cmd := fmt.Sprintf("kubectl label node %v drain=drain --overwrite", node.Name) - _, stderr, err := testsUtils.Run(cmd) + _, stderr, err := run.Run(cmd) Expect(stderr).To(BeEmpty()) Expect(err).ToNot(HaveOccurred()) } @@ -66,11 +70,11 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La AfterEach(func() { // Uncordon the cordoned nodes and remove the labels we added in the // BeforeEach section - err := nodes.UncordonAllNodes(env) + err := nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) for _, node := range nodesWithLabels { cmd := fmt.Sprintf("kubectl label node %v drain- ", node) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } nodesWithLabels = nil @@ -97,13 +101,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // mark a node unschedulable so the pods will be distributed only on two nodes for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-2] { cmd := fmt.Sprintf("kubectl cordon %v", cordonNode) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } }) var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -111,7 +115,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Wait for jobs to be removed timeout := 180 Eventually(func() (int, error) { - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } return len(podList.Items), err }, timeout).Should(BeEquivalentTo(3)) }) @@ -121,7 +128,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: "test", } AssertCreateTestData(env, tableLocator) @@ -130,7 +137,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // their volumes. We do not expect the UIDs to change. // We take advantage of the fact that related PVCs and Pods have // the same name. 
- podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) pvcUIDMap := make(map[string]types.UID) for _, pod := range podList.Items { @@ -145,20 +152,26 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La } // Drain the node containing the primary pod and store the list of running pods - podsOnPrimaryNode := nodes.DrainPrimaryNode(namespace, clusterName, - testTimeouts[testsUtils.DrainNode], env) + podsOnPrimaryNode := nodes.DrainPrimary( + env.Ctx, env.Client, + namespace, clusterName, + testTimeouts[testsUtils.DrainNode], + ) By("verifying failover after drain", func() { timeout := 180 // Expect a failover to have happened Eventually(func() (string, error) { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } return pod.Name, err }, timeout).ShouldNot(BeEquivalentTo(oldPrimary)) }) By("uncordon nodes and check new pods use old pvcs", func() { - err := nodes.UncordonAllNodes(env) + err := nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) // Ensure evicted pods have restarted and are running. // one of them could have become the new primary. @@ -212,13 +225,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-1] { cordonNodes = append(cordonNodes, cordonNode) cmd := fmt.Sprintf("kubectl cordon %v", cordonNode) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } }) var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -226,7 +239,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Wait for jobs to be removed timeout := 180 Eventually(func() (int, error) { - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } return len(podList.Items), err }, timeout).Should(BeEquivalentTo(3)) }) @@ -236,7 +252,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: "test", } AssertCreateTestData(env, tableLocator) @@ -245,7 +261,9 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // their volumes. We do not expect the UIDs to change. // We take advantage of the fact that related PVCs and Pods have // the same name. - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + pvcUIDMap := make(map[string]types.UID) for _, pod := range podList.Items { pvcNamespacedName := types.NamespacedName{ @@ -262,19 +280,25 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // to move to. 
By(fmt.Sprintf("uncordon one more node '%v'", cordonNodes[0]), func() { cmd := fmt.Sprintf("kubectl uncordon %v", cordonNodes[0]) - _, _, err = testsUtils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) // Drain the node containing the primary pod and store the list of running pods - podsOnPrimaryNode := nodes.DrainPrimaryNode(namespace, clusterName, - testTimeouts[testsUtils.DrainNode], env) + podsOnPrimaryNode := nodes.DrainPrimary( + env.Ctx, env.Client, + namespace, clusterName, + testTimeouts[testsUtils.DrainNode], + ) By("verifying failover after drain", func() { timeout := 180 // Expect a failover to have happened Eventually(func() (string, error) { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } return pod.Name, err }, timeout).ShouldNot(BeEquivalentTo(oldPrimary)) }) @@ -334,13 +358,13 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La By("leaving a single uncordoned", func() { for _, cordonNode := range nodesWithLabels[:len(nodesWithLabels)-1] { cmd := fmt.Sprintf("kubectl cordon %v", cordonNode) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } }) var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -349,7 +373,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Wait for jobs to be removed timeout := 180 Eventually(func() (int, error) { - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } return len(podList.Items), err }, timeout).Should(BeEquivalentTo(3)) }) @@ -358,7 +385,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // not exist anymore after the drain var podsBeforeDrain []string By("retrieving the current pods' names", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podsBeforeDrain = append(podsBeforeDrain, pod.Name) @@ -369,7 +396,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: "test", } AssertCreateTestData(env, tableLocator) @@ -377,13 +404,16 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // We uncordon a cordoned node. New pods can go there. By("uncordon node for pod failover", func() { cmd := fmt.Sprintf("kubectl uncordon %v", nodesWithLabels[0]) - _, _, err := testsUtils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) // Drain the node containing the primary pod. 
Pods should be moved // to the node we've just uncordoned - nodes.DrainPrimaryNode(namespace, clusterName, testTimeouts[testsUtils.DrainNode], env) + nodes.DrainPrimary( + env.Ctx, env.Client, + namespace, clusterName, testTimeouts[testsUtils.DrainNode], + ) // Expect pods to be recreated and to be ready AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) @@ -393,7 +423,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La timeout := 600 Eventually(func(g Gomega) { matchingNames := 0 - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { // compare the old pod list with the current pod names @@ -410,7 +440,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La AssertDataExpectedCount(env, tableLocator, 2) AssertClusterStandbysAreStreaming(namespace, clusterName, 140) - err = nodes.UncordonAllNodes(env) + err = nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) }) }) @@ -424,7 +454,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La BeforeAll(func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) }) @@ -436,7 +466,10 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La // Wait for jobs to be removed timeout := 180 Eventually(func() (int, error) { - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } return len(podList.Items), err }, timeout).Should(BeEquivalentTo(1)) }) @@ -445,20 +478,23 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: "test", } AssertCreateTestData(env, tableLocator) // Drain the node containing the primary pod and store the list of running pods - _ = nodes.DrainPrimaryNode(namespace, clusterName, - testTimeouts[testsUtils.DrainNode], env) + _ = nodes.DrainPrimary( + env.Ctx, env.Client, + namespace, clusterName, + testTimeouts[testsUtils.DrainNode], + ) By("verifying the primary is now pending", func() { timeout := 180 // Expect a failover to have happened Eventually(func() (string, error) { - pod, err := env.GetPod(namespace, clusterName+"-1") + pod, err := pods.Get(env.Ctx, env.Client, namespace, clusterName+"-1") if err != nil { return "", err } @@ -467,7 +503,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La }) By("uncordoning all nodes", func() { - err := nodes.UncordonAllNodes(env) + err := nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) }) @@ -479,7 +515,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La When("the PDB is enabled", func() { It("prevents the primary node from being drained", func() { By("enabling PDB", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ 
-491,7 +527,7 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La By("having the draining of the primary node rejected", func() { var primaryNode string Eventually(func(g Gomega) { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) primaryNode = pod.Spec.NodeName }, 60).Should(Succeed()) @@ -501,14 +537,14 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La cmd := fmt.Sprintf( "kubectl drain %v --ignore-daemonsets --delete-emptydir-data --force --timeout=%ds", primaryNode, 60) - _, stderr, err := testsUtils.RunUnchecked(cmd) + _, stderr, err := run.Unchecked(cmd) g.Expect(err).To(HaveOccurred()) g.Expect(stderr).To(ContainSubstring("Cannot evict pod as it would violate the pod's disruption budget")) }, 60).Should(Succeed()) }) By("uncordoning all nodes", func() { - err := nodes.UncordonAllNodes(env) + err := nodes.UncordonAll(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/tests/e2e/eviction_test.go b/tests/e2e/eviction_test.go index 077d3a1f56..121f4f14fd 100644 --- a/tests/e2e/eviction_test.go +++ b/tests/e2e/eviction_test.go @@ -28,7 +28,11 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -59,7 +63,7 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { multiInstanceSampleFile = fixturesDir + "/eviction/multi-instance-cluster.yaml.template" ) - evictPod := func(podName string, namespace string, env *testsUtils.TestingEnvironment, timeoutSeconds uint) error { + evictPod := func(podName string, namespace string, env *environment.TestingEnvironment, timeoutSeconds uint) error { var pod corev1.Pod err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, @@ -119,18 +123,18 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { } const namespacePrefix = "single-instance-pod-eviction" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating a cluster", func() { // Create a cluster in a namespace we'll delete after the test - clusterName, err := env.GetResourceNameFromYAML(singleInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, singleInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, singleInstanceSampleFile, env) }) }) It("evicts the primary pod in single instance cluster", func() { - clusterName, err := env.GetResourceNameFromYAML(singleInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, singleInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) podName := clusterName + "-1" err = evictPod(podName, namespace, env, 60) @@ -152,7 +156,7 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { }) By("checking the cluster is healthy", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) }) }) }) @@ -176,20 +180,20 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { BeforeAll(func() { const namespacePrefix = "multi-instance-pod-eviction" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("Creating a cluster with multiple instances", func() { // Create a cluster in a namespace and shared in containers, we'll delete after the test - clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, multiInstanceSampleFile, env) }) By("retrieving the nodeName for primary pod", func() { var primaryPod *corev1.Pod - clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) + primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) taintNodeName = primaryPod.Spec.NodeName }) @@ -197,8 +201,9 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { AfterAll(func() { if needRemoveTaint { By("cleaning the taint on node", func() { - cmd := fmt.Sprintf("kubectl taint nodes %v 
node.kubernetes.io/memory-pressure:NoExecute-", taintNodeName) - _, _, err := testsUtils.Run(cmd) + cmd := fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute-", + taintNodeName) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) } @@ -207,12 +212,12 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { It("evicts the replica pod in multiple instance cluster", func() { var podName string - clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) // Find the standby pod By("getting standby pod to evict", func() { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items)).To(BeEquivalentTo(3)) for _, pod := range podList.Items { // Avoid parting non ready nodes, non active nodes, or primary nodes @@ -243,16 +248,16 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { }) By("checking the cluster is healthy", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) }) }) It("evicts the primary pod in multiple instance cluster", func() { var primaryPod *corev1.Pod - clusterName, err := env.GetResourceNameFromYAML(multiInstanceSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, multiInstanceSampleFile) Expect(err).ToNot(HaveOccurred()) - primaryPod, err = env.GetClusterPrimary(namespace, clusterName) + primaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // We can not use patch to simulate the eviction of a primary pod; @@ -260,21 +265,21 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { By("taint the node to simulate pod been evicted", func() { cmd := fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute", taintNodeName) - _, _, err = testsUtils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) needRemoveTaint = true time.Sleep(3 * time.Second) cmd = fmt.Sprintf("kubectl taint nodes %v node.kubernetes.io/memory-pressure:NoExecute-", taintNodeName) - _, _, err = testsUtils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) needRemoveTaint = false }) By("checking switchover happens", func() { Eventually(func() bool { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, p := range podList.Items { if specs.IsPodPrimary(p) && primaryPod.GetName() != p.GetName() { @@ -287,7 +292,7 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { // Pod need rejoin, need more time By("checking the cluster is healthy", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) }) }) }) diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go index a59695bad6..6940c4b9fe 100644 --- a/tests/e2e/failover_test.go +++ b/tests/e2e/failover_test.go @@ -27,7 +27,12 @@ import ( apiv1 
"github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -52,13 +57,14 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // We check that the currentPrimary is the -1 instance as expected, // and we define the targetPrimary (-3) and pausedReplica (-2). By("checking that CurrentPrimary and TargetPrimary are equal", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To( BeEquivalentTo(cluster.Status.TargetPrimary)) currentPrimary = cluster.Status.CurrentPrimary // Gather pod names - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).NotTo(HaveOccurred()) Expect(len(podList.Items), err).To(BeEquivalentTo(3)) for _, p := range podList.Items { pods = append(pods, p.Name) @@ -72,18 +78,19 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // In this way we know that this standby will lag behind when // we do some work on the primary. By("pausing the walreceiver on the 2nd node of the Cluster", func() { - primaryPod, err := env.GetPod(namespace, currentPrimary) + primaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) Expect(err).ToNot(HaveOccurred()) - pausedPod, err := env.GetPod(namespace, pausedReplica) + pausedPod, err := podutils.Get(env.Ctx, env.Client, namespace, pausedReplica) Expect(err).ToNot(HaveOccurred()) // Get the walreceiver pid query := "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walreceiver'" - out, _, err := env.EventuallyExecQueryInInstancePod( - utils.PodLocator{ + out, _, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pausedPod.Namespace, PodName: pausedPod.Name, - }, utils.PostgresDBName, + }, postgres.PostgresDBName, query, RetryTimeout, PollingTime, @@ -100,11 +107,12 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // We don't want to wait for the replication timeout. 
query = fmt.Sprintf("SELECT pg_terminate_backend(pid) FROM pg_stat_replication "+ "WHERE application_name = '%v'", pausedReplica) - _, _, err = env.EventuallyExecQueryInInstancePod( - utils.PodLocator{ + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, - }, utils.PostgresDBName, + }, postgres.PostgresDBName, query, RetryTimeout, PollingTime, @@ -113,24 +121,27 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // Expect the primary to have lost connection with the stopped standby Eventually(func() (int, error) { - primaryPod, err = env.GetPod(namespace, currentPrimary) + primaryPod, err = podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) Expect(err).ToNot(HaveOccurred()) - return utils.CountReplicas(env, primaryPod) + return postgres.CountReplicas( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + primaryPod, RetryTimeout) }, RetryTimeout).Should(BeEquivalentTo(1)) }) // Perform a CHECKPOINT on the primary and wait for the working standby // to replicate at it By("generating some WAL traffic in the Cluster", func() { - primaryPod, err := env.GetPod(namespace, currentPrimary) + primaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) Expect(err).ToNot(HaveOccurred()) // Gather the current WAL LSN - initialLSN, _, err := env.EventuallyExecQueryInInstancePod( - utils.PodLocator{ + initialLSN, _, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, - }, utils.PostgresDBName, + }, postgres.PostgresDBName, "SELECT pg_current_wal_lsn()", RetryTimeout, PollingTime, @@ -138,11 +149,12 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { Expect(err).ToNot(HaveOccurred()) // Execute a checkpoint - _, _, err = env.EventuallyExecQueryInInstancePod( - utils.PodLocator{ + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, - }, utils.PostgresDBName, + }, postgres.PostgresDBName, "CHECKPOINT", RetryTimeout, PollingTime, @@ -155,13 +167,14 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // The replay_lsn of the targetPrimary should be ahead // of the one before the checkpoint Eventually(func() (string, error) { - primaryPod, err = env.GetPod(namespace, currentPrimary) + primaryPod, err = podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) Expect(err).ToNot(HaveOccurred()) - out, _, err := env.EventuallyExecQueryInInstancePod( - utils.PodLocator{ + out, _, err := exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, - }, utils.PostgresDBName, + }, postgres.PostgresDBName, query, RetryTimeout, PollingTime, @@ -177,18 +190,18 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err := env.DeletePod(namespace, currentPrimary, quickDelete) + err := podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary, quickDelete) Expect(err).ToNot(HaveOccurred()) // We wait until the operator knows that the primary is dead. 
// At this point the promotion is waiting for all the walreceivers // to be disconnected. We can send the SIGCONT now. Eventually(func() (int, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.ReadyInstances, err }, RetryTimeout).Should(BeEquivalentTo(2)) - pausedPod, err := env.GetPod(namespace, pausedReplica) + pausedPod, err := podutils.Get(env.Ctx, env.Client, namespace, pausedReplica) Expect(err).ToNot(HaveOccurred()) // Send the SIGCONT to the walreceiver PID to resume execution @@ -200,7 +213,7 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { By("making sure that the operator is enforcing the switchover delay") timeout := 120 Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimaryFailingSinceTimestamp, err }, timeout).Should(Not(Equal(""))) } @@ -209,13 +222,13 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // The operator should eventually set the cluster target primary to // the instance we expect to take that role (-3). Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.TargetPrimary, err - }, testTimeouts[utils.NewTargetOnFailover]). + }, testTimeouts[timeouts.NewTargetOnFailover]). ShouldNot( Or(BeEquivalentTo(currentPrimary), BeEquivalentTo(apiv1.PendingFailoverMarker))) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(cluster.Status.TargetPrimary, err).To( BeEquivalentTo(targetPrimary)) }) @@ -224,9 +237,9 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // operator to the target primary By("waiting for the TargetPrimary to become CurrentPrimary", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) return cluster.Status.CurrentPrimary, err - }, testTimeouts[utils.NewPrimaryAfterFailover]).Should(BeEquivalentTo(targetPrimary)) + }, testTimeouts[timeouts.NewPrimaryAfterFailover]).Should(BeEquivalentTo(targetPrimary)) }) } @@ -244,9 +257,9 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { ) var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -260,10 +273,10 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { namespacePrefix = "failover-e2e-delay" ) var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) 
Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) diff --git a/tests/e2e/fastfailover_test.go b/tests/e2e/fastfailover_test.go index debccca369..f49921ceb2 100644 --- a/tests/e2e/fastfailover_test.go +++ b/tests/e2e/fastfailover_test.go @@ -68,7 +68,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La clusterName = "cluster-fast-failover" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertFastFailOver(namespace, sampleFileWithoutReplicationSlots, clusterName, webTestFile, webTestJob, maxReattachTime, maxFailoverTime) @@ -86,7 +86,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La clusterName = "cluster-fast-failover" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertFastFailOver(namespace, sampleFileWithReplicationSlots, clusterName, webTestFile, webTestJob, maxReattachTime, maxFailoverTime) @@ -100,7 +100,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La clusterName = "cluster-syncreplicas-fast-failover" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertFastFailOver( namespace, sampleFileSyncReplicas, clusterName, webTestSyncReplicas, webTestJob, maxReattachTime, maxFailoverTime) diff --git a/tests/e2e/fastswitchover_test.go b/tests/e2e/fastswitchover_test.go index d7a45efd47..99c8017bb7 100644 --- a/tests/e2e/fastswitchover_test.go +++ b/tests/e2e/fastswitchover_test.go @@ -27,6 +27,11 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -60,7 +65,7 @@ var _ = Describe("Fast switchover", Serial, Label(tests.LabelPerformance, tests. // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "primary-switchover-time" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) assertFastSwitchover(namespace, sampleFileWithoutReplicationSlots, clusterName, webTestFile, webTestJob) }) @@ -70,7 +75,7 @@ var _ = Describe("Fast switchover", Serial, Label(tests.LabelPerformance, tests. 
// Create a cluster in a namespace we'll delete after the test const namespacePrefix = "primary-switchover-time-with-slots" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) assertFastSwitchover(namespace, sampleFileWithReplicationSlots, clusterName, webTestFile, webTestJob) AssertClusterHAReplicationSlots(namespace, clusterName) @@ -99,7 +104,7 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe CreateResourceFromFile(namespace, sampleFile) }) By("having a Cluster with three instances ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) // Node 1 should be the primary, so the -rw service should // point there. We verify this. @@ -135,7 +140,9 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe ", PRIMARY KEY (id)" + ")" - _, err := utils.RunExecOverForward(env, namespace, clusterName, utils.AppDBName, + _, err := postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, query) Expect(err).ToNot(HaveOccurred()) }) @@ -146,10 +153,10 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe // on the postgres primary. We make sure that the first // records appear on the database before moving to the next // step. - _, _, err := utils.Run("kubectl create -n " + namespace + + _, _, err := run.Run("kubectl create -n " + namespace + " -f " + webTestFile) Expect(err).ToNot(HaveOccurred()) - _, _, err = utils.Run("kubectl create -n " + namespace + + _, _, err = run.Run("kubectl create -n " + namespace + " -f " + webTestJob) Expect(err).ToNot(HaveOccurred()) @@ -164,12 +171,13 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe if err != nil { return "", err } - out, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - utils.AppDBName, + postgres.AppDBName, query) return strings.TrimSpace(out), err }, RetryTimeout).Should(BeEquivalentTo("t")) @@ -178,7 +186,7 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe By("setting the TargetPrimary to node2 to trigger a switchover", func() { targetPrimary = clusterName + "-2" err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) cluster.Status.TargetPrimary = targetPrimary return env.Client.Status().Update(env.Ctx, cluster) diff --git a/tests/e2e/fencing_test.go b/tests/e2e/fencing_test.go index a43e7a4191..acf8291104 100644 --- a/tests/e2e/fencing_test.go +++ b/tests/e2e/fencing_test.go @@ -29,7 +29,13 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + 
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -78,12 +84,13 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { if err != nil { return 0, err } - out, _, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testUtils.PostgresDBName, + postgres.PostgresDBName, query) if err != nil { return 0, err @@ -94,23 +101,23 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { } checkPostgresConnection := func(podName, namespace string) { - err := testUtils.GetObject(env, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, &pod) + err := objects.Get(env.Ctx, env.Client, ctrlclient.ObjectKey{Namespace: namespace, Name: podName}, &pod) Expect(err).ToNot(HaveOccurred()) timeout := time.Second * 10 dsn := fmt.Sprintf("host=%v user=%v dbname=%v password=%v sslmode=require", - testUtils.PGLocalSocketDir, "postgres", "postgres", "") + postgres.PGLocalSocketDir, "postgres", "postgres", "") stdOut, stdErr, err := utils.ExecCommand(env.Ctx, env.Interface, env.RestClientConfig, pod, specs.PostgresContainerName, &timeout, "psql", dsn, "-tAc", "SELECT 1") Expect(err).To(HaveOccurred(), stdErr, stdOut) } - checkFencingAnnotationSet := func(fencingMethod testUtils.FencingMethod, content []string) { - if fencingMethod != testUtils.UsingAnnotation { + checkFencingAnnotationSet := func(fencingMethod fencing.Method, content []string) { + if fencingMethod != fencing.UsingAnnotation { return } By("checking the cluster has the expected annotation set", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) if len(content) == 0 { Expect(cluster.Annotations).To(Or(Not(HaveKey(utils.FencedInstanceAnnotation)), @@ -124,19 +131,20 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { }) } - assertFencingPrimaryWorks := func(fencingMethod testUtils.FencingMethod) { + assertFencingPrimaryWorks := func(fencingMethod fencing.Method) { It("can fence a primary instance", func() { var beforeFencingPodName string By("fencing the primary instance", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) beforeFencingPodName = primaryPod.GetName() - Expect(testUtils.FencingOn(env, beforeFencingPodName, + Expect(fencing.On(env.Ctx, env.Client, beforeFencingPodName, namespace, clusterName, fencingMethod)).Should(Succeed()) }) By("check the instance is not ready, but kept as primary instance", func() { checkInstanceStatusReadyOrNot(beforeFencingPodName, namespace, false) - currentPrimaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterName) Expect(err).ToNot(HaveOccurred()) Expect(beforeFencingPodName).To(Equal(currentPrimaryPodInfo.GetName())) }) @@ -146,14 
+154,15 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { checkPostgresConnection(beforeFencingPodName, namespace) }) By("lift the fencing", func() { - Expect(testUtils.FencingOff(env, beforeFencingPodName, + Expect(fencing.Off(env.Ctx, env.Client, beforeFencingPodName, namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) }) By("the old primary becomes ready", func() { checkInstanceStatusReadyOrNot(beforeFencingPodName, namespace, true) }) By("the old primary should still be the primary instance", func() { - currentPrimaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterName) Expect(err).ToNot(HaveOccurred()) Expect(beforeFencingPodName).Should(BeEquivalentTo(currentPrimaryPodInfo.GetName())) }) @@ -163,12 +172,12 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { checkFencingAnnotationSet(fencingMethod, nil) }) } - assertFencingFollowerWorks := func(fencingMethod testUtils.FencingMethod) { + assertFencingFollowerWorks := func(fencingMethod fencing.Method) { It("can fence a follower instance", func() { var beforeFencingPodName string - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) By("fence a follower instance", func() { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items)).To(BeEquivalentTo(3)) for _, pod := range podList.Items { if specs.IsPodStandby(pod) { @@ -177,7 +186,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { } } Expect(beforeFencingPodName).ToNot(BeEmpty()) - Expect(testUtils.FencingOn(env, beforeFencingPodName, + Expect(fencing.On(env.Ctx, env.Client, beforeFencingPodName, namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) }) checkFencingAnnotationSet(fencingMethod, []string{beforeFencingPodName}) @@ -189,7 +198,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { checkPostgresConnection(beforeFencingPodName, namespace) }) By("lift the fencing", func() { - Expect(testUtils.FencingOff(env, beforeFencingPodName, + Expect(fencing.Off(env.Ctx, env.Client, beforeFencingPodName, namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) }) By("the instance becomes ready", func() { @@ -201,41 +210,43 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { checkFencingAnnotationSet(fencingMethod, nil) }) } - assertFencingClusterWorks := func(fencingMethod testUtils.FencingMethod) { + assertFencingClusterWorks := func(fencingMethod fencing.Method) { It("can fence all the instances in a cluster", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primaryPodName := primaryPod.GetName() By("fence the whole cluster using \"(*)\"", func() { - Expect(testUtils.FencingOn(env, "*", namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) + Expect(fencing.On(env.Ctx, env.Client, "*", namespace, clusterName, + fencingMethod)).ToNot(HaveOccurred()) }) checkFencingAnnotationSet(fencingMethod, []string{"*"}) By("check all instances are not ready", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, 
namespace, clusterName) Expect(err).NotTo(HaveOccurred()) for _, pod := range podList.Items { checkInstanceStatusReadyOrNot(pod.GetName(), namespace, false) } }) By("check postgres connection on all instances", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) for _, pod := range podList.Items { checkPostgresConnection(pod.GetName(), namespace) } }) By("lift the fencing", func() { - Expect(testUtils.FencingOff(env, "*", namespace, clusterName, fencingMethod)).ToNot(HaveOccurred()) + Expect(fencing.Off(env.Ctx, env.Client, "*", namespace, clusterName, + fencingMethod)).ToNot(HaveOccurred()) }) By("all instances become ready", func() { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) for _, pod := range podList.Items { checkInstanceStatusReadyOrNot(pod.GetName(), namespace, true) } }) By("the old primary is still the primary instance", func() { - podName, err := env.GetClusterPrimary(namespace, clusterName) + podName, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(primaryPodName).Should(BeEquivalentTo(podName.GetName())) }) @@ -250,33 +261,33 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { var err error BeforeAll(func() { const namespacePrefix = "fencing-using-plugin" - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) - assertFencingPrimaryWorks(testUtils.UsingPlugin) - assertFencingFollowerWorks(testUtils.UsingPlugin) - assertFencingClusterWorks(testUtils.UsingPlugin) + assertFencingPrimaryWorks(fencing.UsingPlugin) + assertFencingFollowerWorks(fencing.UsingPlugin) + assertFencingClusterWorks(fencing.UsingPlugin) }) Context("using annotation", Ordered, func() { var err error BeforeAll(func() { const namespacePrefix = "fencing-using-annotation" - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) - assertFencingPrimaryWorks(testUtils.UsingAnnotation) - assertFencingFollowerWorks(testUtils.UsingAnnotation) - assertFencingClusterWorks(testUtils.UsingAnnotation) + assertFencingPrimaryWorks(fencing.UsingAnnotation) + assertFencingFollowerWorks(fencing.UsingAnnotation) + assertFencingClusterWorks(fencing.UsingAnnotation) }) }) diff --git a/tests/e2e/hibernation_test.go b/tests/e2e/hibernation_test.go index ce4f38ea64..0b4df3876a 100644 --- a/tests/e2e/hibernation_test.go +++ b/tests/e2e/hibernation_test.go @@ -30,7 +30,13 @@ import ( 
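In the fencing suite above, testUtils.FencingOn and testUtils.FencingOff become fencing.On and fencing.Off, and the method enum moves to fencing.Method with UsingPlugin and UsingAnnotation variants. A hedged sketch of the annotation path, inferred from checkFencingAnnotationSet reading utils.FencedInstanceAnnotation; the plugin path would shell out to the kubectl-cnpg plugin instead, and this sketch overwrites rather than merges the fenced list:

package fencing

import (
	"context"
	"encoding/json"

	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)

// Method selects how the tests apply fencing.
type Method string

const (
	UsingPlugin     Method = "plugin"
	UsingAnnotation Method = "annotation"
)

// On fences serverName ("*" for the whole cluster) by writing the fenced
// instance list into the cluster's fencing annotation.
func On(ctx context.Context, cli client.Client, serverName, namespace, clusterName string, _ Method) error {
	var cluster apiv1.Cluster
	key := client.ObjectKey{Namespace: namespace, Name: clusterName}
	if err := cli.Get(ctx, key, &cluster); err != nil {
		return err
	}
	fenced, err := json.Marshal([]string{serverName})
	if err != nil {
		return err
	}
	if cluster.Annotations == nil {
		cluster.Annotations = map[string]string{}
	}
	cluster.Annotations[utils.FencedInstanceAnnotation] = string(fenced)
	return cli.Update(ctx, &cluster)
}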
"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -67,7 +73,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu var clusterManifest []byte var beforeHibernationCurrentPrimary string By("collecting current primary details", func() { - beforeHibernationClusterInfo, err = env.GetCluster(namespace, clusterName) + beforeHibernationClusterInfo, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) beforeHibernationCurrentPrimary = beforeHibernationClusterInfo.Status.CurrentPrimary // collect expected cluster manifesto info @@ -79,19 +85,19 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu getPvc := func(role persistentvolumeclaim.Meta, instanceName string) corev1.PersistentVolumeClaim { pvcName := role.GetName(instanceName) pvcInfo := corev1.PersistentVolumeClaim{} - err = testsUtils.GetObject(env, ctrlclient.ObjectKey{Namespace: namespace, Name: pvcName}, &pvcInfo) + err = objects.Get(env.Ctx, env.Client, ctrlclient.ObjectKey{Namespace: namespace, Name: pvcName}, &pvcInfo) Expect(err).ToNot(HaveOccurred()) return pvcInfo } performHibernation := func(mode mode, namespace, clusterName string) { By(fmt.Sprintf("performing hibernation %v", mode), func() { - _, _, err := testsUtils.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v", + _, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v", mode, clusterName, namespace)) Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("verifying cluster %v pods are removed", clusterName), func() { Eventually(func(g Gomega) { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(len(podList.Items)).Should(BeEquivalentTo(0)) }, 300).Should(Succeed()) }) @@ -100,7 +106,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu getHibernationStatusInJSON := func(namespace, clusterName string) map[string]interface{} { var data map[string]interface{} By("getting hibernation status", func() { - stdOut, _, err := testsUtils.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v -ojson", + stdOut, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v -ojson", HibernateStatus, clusterName, namespace)) Expect(err).ToNot(HaveOccurred(), stdOut) err = json.Unmarshal([]byte(stdOut), &data) @@ -115,14 +121,16 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu Expect(strings.Contains(string(message), actualStatus)).Should(BeEquivalentTo(true), actualStatus+"\\not-contained-in\\"+string(message)) } - verifyClusterResources := func(namespace, clusterName string, objs []persistentvolumeclaim.ExpectedObjectCalculator) { + verifyClusterResources := func( + namespace, clusterName string, 
objs []persistentvolumeclaim.ExpectedObjectCalculator, + ) { By(fmt.Sprintf("verifying cluster resources are removed "+ "post hibernation where roles %v", objs), func() { timeout := 120 By(fmt.Sprintf("verifying cluster %v is removed", clusterName), func() { Eventually(func() (bool, apiv1.Cluster) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return true, apiv1.Cluster{} } @@ -132,7 +140,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu By(fmt.Sprintf("verifying cluster %v PVCs are removed", clusterName), func() { Eventually(func() (int, error) { - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) if err != nil { return -1, err } @@ -200,7 +208,8 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu }) }) } - verifyPvc := func(expectedObject persistentvolumeclaim.ExpectedObjectCalculator, pvcUid types.UID, + verifyPvc := func( + expectedObject persistentvolumeclaim.ExpectedObjectCalculator, pvcUid types.UID, clusterManifest []byte, instanceName string, ) { pvcInfo := getPvc(expectedObject, instanceName) @@ -212,12 +221,12 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu utils.PgControldataAnnotationName, utils.ClusterManifestAnnotationName, } - testsUtils.ObjectHasAnnotations(&pvcInfo, expectedAnnotationKeyPresent) + storage.ObjectHasAnnotations(&pvcInfo, expectedAnnotationKeyPresent) expectedAnnotation := map[string]string{ utils.HibernateClusterManifestAnnotationName: string(clusterManifest), utils.ClusterManifestAnnotationName: string(clusterManifest), } - testsUtils.ObjectMatchesAnnotations(&pvcInfo, expectedAnnotation) + storage.ObjectMatchesAnnotations(&pvcInfo, expectedAnnotation) } assertHibernation := func(namespace, clusterName, tableName string) { @@ -228,7 +237,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -293,7 +302,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) }) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) // Test data should be present after hibernation off AssertDataExpectedCount(env, tableLocator, 2) } @@ -301,10 +310,10 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu When("cluster setup with PG-WAL volume", func() { It("hibernation process should work", func() { const namespacePrefix = "hibernation-on-with-pg-wal" - clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithPGWalVolume) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithPGWalVolume) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileClusterWithPGWalVolume, env) 
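Across these suites the timeout keys likewise move from utils to a dedicated timeouts package, so lookups like testTimeouts[timeouts.ClusterIsReady] keep working with a typed key. A sketch of the assumed shape; the key names follow the diff, while the numeric defaults below are placeholders rather than the project's real values:

package timeouts

// Timeout is the typed key indexing the per-operation timeout table.
type Timeout string

const (
	ClusterIsReady          Timeout = "clusterIsReady"
	ClusterIsReadyQuick     Timeout = "clusterIsReadyQuick"
	NewTargetOnFailover     Timeout = "newTargetOnFailover"
	NewPrimaryAfterFailover Timeout = "newPrimaryAfterFailover"
	ManagedServices         Timeout = "managedServices"
)

// DefaultTestTimeouts returns a timeout table in seconds (placeholder values).
func DefaultTestTimeouts() map[Timeout]int {
	return map[Timeout]int{
		ClusterIsReady:          600,
		ClusterIsReadyQuick:     300,
		NewTargetOnFailover:     120,
		NewPrimaryAfterFailover: 120,
		ManagedServices:         120,
	}
}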
assertHibernation(namespace, clusterName, tableName) @@ -315,17 +324,17 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu var beforeHibernationPgDataPvcUID types.UID const namespacePrefix = "hibernation-without-pg-wal" - clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithOutPGWalVolume) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithOutPGWalVolume) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileClusterWithOutPGWalVolume, env) // Write a table and some data on the "app" database tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -373,7 +382,7 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) }) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) // Test data should be present after hibernation off AssertDataExpectedCount(env, tableLocator, 2) }) @@ -381,10 +390,10 @@ var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), fu When("cluster hibernation after switchover", func() { It("hibernation process should work", func() { const namespacePrefix = "hibernation-with-switchover" - clusterName, err := env.GetResourceNameFromYAML(sampleFileClusterWithPGWalVolume) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithPGWalVolume) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileClusterWithPGWalVolume, env) AssertSwitchover(namespace, clusterName, env) diff --git a/tests/e2e/initdb_test.go b/tests/e2e/initdb_test.go index d399e033af..a7922a946b 100644 --- a/tests/e2e/initdb_test.go +++ b/tests/e2e/initdb_test.go @@ -22,7 +22,8 @@ import ( "strings" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -47,19 +48,20 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f namespace, clusterName, tableName string, - dbName utils.DatabaseName, + dbName exec.DatabaseName, expectedCount int, ) { query := fmt.Sprintf("SELECT count(*) FROM %s", tableName) - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf( "querying the %s table in the %s database defined by postInit SQL", tableName, dbName), func() { - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, }, dbName, @@ -88,7 +90,7 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "initdb-postqueries" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) CreateResourceFromFile(namespace, postInitSQLSecretRef) @@ -127,11 +129,12 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f "app", 10000) By("checking inside the database the default locale", func() { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, }, "postgres", @@ -154,16 +157,17 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "initdb-locale" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, postInitSQLCluster, env) By("checking inside the database", func() { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, }, "postgres", diff --git a/tests/e2e/logs_test.go b/tests/e2e/logs_test.go index 7bd3c44259..a8da797b3e 100644 --- a/tests/e2e/logs_test.go +++ b/tests/e2e/logs_test.go @@ -28,7 +28,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -50,23 +52,26 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { const sampleFile = fixturesDir + "/json_logs/cluster-json-logs.yaml.template" var namespaceErr error // Create a cluster in a namespace we'll delete after the test - namespace, namespaceErr = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, namespaceErr = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(namespaceErr).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) By("verifying the presence of possible logger values", func() { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) for _, pod := range podList.Items { // Gather pod logs in the form of a Json Array - logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, pod.GetName(), + ) Expect(err).NotTo(HaveOccurred(), "unable to parse json logs") Expect(logEntries).ToNot(BeEmpty(), "no logs found") // Logger field Assertions - isPgControlDataLoggerFound := testsUtils.HasLogger(logEntries, "pg_controldata") + isPgControlDataLoggerFound := logs.HasLogger(logEntries, "pg_controldata") Expect(isPgControlDataLoggerFound).To(BeTrue(), fmt.Sprintf("pg_controldata logger not found in pod %v logs", pod.GetName())) - isPostgresLoggerFound := testsUtils.HasLogger(logEntries, "postgres") + isPostgresLoggerFound := logs.HasLogger(logEntries, "postgres") Expect(isPostgresLoggerFound).To(BeTrue(), fmt.Sprintf("postgres logger not found in pod %v logs", pod.GetName())) } @@ -74,7 +79,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { By("verifying the format of error queries being logged", func() { errorTestQuery := "selecct 1\nwith newlines\n" - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) timeout := 300 for _, pod := range podList.Items { @@ -91,11 +96,14 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { // Eventually the error log line will be logged Eventually(func(g Gomega) bool { // Gather pod logs in the form of a Json Array - logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, pod.GetName(), + ) g.Expect(err).ToNot(HaveOccurred()) // Gather the record containing the wrong query result - return testsUtils.AssertQueryRecord( + return logs.AssertQueryRecord( logEntries, errorTestQuery, queryError.Error(), @@ -107,7 +115,7 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { By("verifying only the primary instance logs write queries", func() { errorTestQuery := "ccreate table test(var text)" - primaryPod, _ := env.GetClusterPrimary(namespace, clusterName) + primaryPod, _ := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) timeout := 300 var queryError error @@ -123,14 +131,17 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { // Expect the query to be eventually logged on the primary Eventually(func() (bool, error) { // Gather pod logs in the form of a Json Array - logEntries, err := testsUtils.ParseJSONLogs(namespace, primaryPod.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, 
primaryPod.GetName(), + ) if err != nil { GinkgoWriter.Printf("Error reported while gathering primary pod log %s\n", err.Error()) return false, err } // Gather the record containing the wrong query result - return testsUtils.AssertQueryRecord(logEntries, errorTestQuery, queryError.Error(), + return logs.AssertQueryRecord(logEntries, errorTestQuery, queryError.Error(), logpipe.LoggingCollectorRecordName), nil }, timeout).Should(BeTrue()) @@ -146,12 +157,15 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { // Expect the query not to be logged on replicas for _, pod := range podList.Items { // Gather pod logs in the form of a Json Array - logEntries, err := testsUtils.ParseJSONLogs(namespace, pod.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, pod.GetName(), + ) Expect(err).NotTo(HaveOccurred()) Expect(logEntries).ToNot(BeEmpty()) // No record should be returned in this case - isQueryRecordContained := testsUtils.AssertQueryRecord( + isQueryRecordContained := logs.AssertQueryRecord( logEntries, queryError.Error(), errorTestQuery, @@ -164,18 +178,18 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { By("verifying pg_rewind logs after deleting the old primary pod", func() { // Force-delete the primary - currentPrimary, _ := env.GetClusterPrimary(namespace, clusterName) + currentPrimary, _ := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) quickDelete := &client.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - deletePodError := env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) + deletePodError := pods.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete) Expect(deletePodError).ToNot(HaveOccurred()) // Expect a new primary to be elected timeout := 180 Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { GinkgoWriter.Printf("Error reported while getting current primary %s\n", err.Error()) return "", err @@ -189,14 +203,17 @@ var _ = Describe("JSON log output", Label(tests.LabelObservability), func() { Eventually(func() (bool, error) { // Gather pod logs in the form of a JSON slice - logEntries, err := testsUtils.ParseJSONLogs(namespace, currentPrimary.GetName(), env) + logEntries, err := logs.ParseJSONLogs( + env.Ctx, env.Interface, + namespace, currentPrimary.GetName(), + ) if err != nil { GinkgoWriter.Printf("Error reported while getting the 'pg_rewind' logger in old primary %s, %s\n", currentPrimary, err.Error()) return false, err } // Expect pg_rewind logger to eventually be present on the old primary logs - return testsUtils.HasLogger(logEntries, "pg_rewind"), nil + return logs.HasLogger(logEntries, "pg_rewind"), nil }, timeout).Should(BeTrue()) }) }) @@ -221,10 +238,10 @@ var _ = Describe("JSON log output unit tests", Label(tests.LabelObservability), Expect(err).ToNot(HaveOccurred()) It("Can check valid logging_collector record for query", func() { Expect(parsedRecord).NotTo(BeNil()) - Expect(testsUtils.CheckRecordForQuery(parsedRecord, errorTestQuery, user, database, message)).To(BeTrue()) + Expect(logs.CheckRecordForQuery(parsedRecord, errorTestQuery, user, database, message)).To(BeTrue()) }) It("Can check valid logging_collector ", func() { Expect(parsedRecord).NotTo(BeNil()) - Expect(testsUtils.IsWellFormedLogForLogger(parsedRecord, "postgres")).To(BeTrue()) + 
Expect(logs.IsWellFormedLogForLogger(parsedRecord, "postgres")).To(BeTrue()) }) }) diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index 64fa7bd753..9f431f330d 100644 --- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -30,7 +30,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -69,10 +74,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic BeforeAll(func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(clusterManifest) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) By("setting up cluster with managed roles", func() { @@ -91,12 +96,13 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic FROM pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname =` + pq.QuoteLiteral(roleName) - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) if err != nil { return []string{ERROR} @@ -108,15 +114,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic } assertRoleStatus := func(namespace, clusterName, query, expectedResult string) { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Eventually(func() string { - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) if err != nil { return "" @@ -136,31 +143,34 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic rolConnLimitInSpec := 4 By("ensuring the roles created in the managed stanza are in the database with correct attributes", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(username), "t"), 30).Should(Succeed()) - Eventually(QueryMatchExpectationPredicate(primaryPod, 
testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(userWithPerpetualPass), "t"), 30).Should(Succeed()) - Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(userWithHashedPassword), "t"), 30).Should(Succeed()) - Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(unrealizableUser), "f"), 30).Should(Succeed()) query := fmt.Sprintf("SELECT true FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ - "and rolbypassrls=%v and rolconnlimit=%v", username, rolCanLoginInSpec, rolSuperInSpec, rolCreateDBInSpec, + "and rolbypassrls=%v and rolconnlimit=%v", username, rolCanLoginInSpec, rolSuperInSpec, + rolCreateDBInSpec, rolCreateRoleInSpec, rolInheritInSpec, rolReplicationInSpec, rolByPassRLSInSpec, rolConnLimitInSpec) - query2 := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_roles WHERE rolname='%s'", userWithPerpetualPass) + query2 := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_roles WHERE rolname='%s'", + userWithPerpetualPass) for _, q := range []string{query, query2} { - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, q) Expect(err).ToNot(HaveOccurred()) Expect(stdout).To(Equal("t\n")) @@ -168,19 +178,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verifying connectivity of new managed role", func() { - rwService := testsUtils.GetReadWriteServiceName(clusterName) + rwService := services.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(namespace, rwService, testsUtils.PostgresDBName, - username, password, env) - AssertConnection(namespace, rwService, testsUtils.PostgresDBName, - userWithHashedPassword, userWithHashedPassword, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, username, password, env) }) By("ensuring the app role has been granted createdb in the managed stanza", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName, roleExistsQuery(appUsername), "t"), 30).Should(Succeed()) query := fmt.Sprintf("SELECT rolcreatedb and rolvaliduntil='infinity' "+ @@ -189,25 +196,25 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("verifying connectivity of app user", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) appUserSecret := corev1.Secret{} - err = testsUtils.GetObject( - env, + err = objects.Get( + env.Ctx, env.Client, types.NamespacedName{Name: 
cluster.GetApplicationSecretName(), Namespace: namespace}, &appUserSecret, ) Expect(err).NotTo(HaveOccurred()) pass := string(appUserSecret.Data["password"]) - rwService := testsUtils.GetReadWriteServiceName(clusterName) + rwService := services.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(namespace, rwService, testsUtils.PostgresDBName, appUsername, pass, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, appUsername, pass, env) }) By("Verify show unrealizable role configurations in the status", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Eventually(func() int { @@ -227,10 +234,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic expectedCreateDB := false expectedCreateRole := true expectedConnLmt := int64(10) - rwService := testsUtils.GetReadWriteServiceName(clusterName) + rwService := services.GetReadWriteServiceName(clusterName) By("updating role attribute in spec", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -250,8 +257,10 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("the connection should fail since we disabled the login", func() { - forwardConn, conn, err := testsUtils.ForwardPSQLServiceConnection(env, namespace, rwService, - testsUtils.PostgresDBName, username, password) + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, rwService, postgres.PostgresDBName, username, password, + ) defer func() { _ = conn.Close() forwardConn.Close() @@ -264,7 +273,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("enable Login again", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() updated.Spec.Managed.Roles[0].Login = true @@ -281,9 +290,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("the connectivity should be success again", func() { - rwService := testsUtils.GetReadWriteServiceName(clusterName) + rwService := services.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, password, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, username, password, env) }) }) @@ -299,7 +308,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic defaultRolConnLimit = int64(-1) ) By("Add role new_role with all attribute omit", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -325,7 +334,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic It("Can update role comment and verify changes in db ", func() { By("Update comment for role new_role", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, 
env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -357,11 +366,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) It("Can update role membership and verify changes in db ", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("Remove invalid parent role from unrealizableUser and verify user in database", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -373,16 +382,16 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) Eventually(func() int { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(0)) - Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed()) }) By("Add role in InRole for role new_role and verify in database", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -397,7 +406,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) Eventually(func() int { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(0)) @@ -405,7 +414,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Remove parent role from InRole for role new_role and verify in database", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -419,7 +428,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) Eventually(func() int { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(0)) @@ -427,7 +436,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Mock the error for unrealizable User and verify user in database", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -439,20 +448,20 @@ var _ = Describe("Managed roles 
tests", Label(tests.LabelSmoke, tests.LabelBasic err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) // user not changed - Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed()) Eventually(func() int { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile) }, 30).Should(Equal(1)) Eventually(func() int { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return len(cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser]) }, 30).Should(Equal(1)) Eventually(func() string { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) return cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser][0] }, 30).Should(ContainSubstring(fmt.Sprintf("role \"%s\" is a member of role \"%s\"", @@ -464,7 +473,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic var err error newPassword := "ThisIsNew" - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("update password from secrets", func() { @@ -473,35 +482,36 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify connectivity using changed password in secret", func() { - rwService := testsUtils.GetReadWriteServiceName(clusterName) + rwService := services.GetReadWriteServiceName(clusterName) // assert connectable use username and password defined in secrets - AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, newPassword, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, username, newPassword, env) }) By("Update password in database", func() { query := fmt.Sprintf("ALTER ROLE %s WITH PASSWORD %s", username, pq.QuoteLiteral(newPassword)) - _, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) Expect(err).ToNot(HaveOccurred()) }) By("Verify password in secrets is still valid", func() { - rwService := testsUtils.GetReadWriteServiceName(clusterName) - AssertConnection(namespace, rwService, testsUtils.PostgresDBName, username, newPassword, env) + rwService := services.GetReadWriteServiceName(clusterName) + AssertConnection(namespace, rwService, postgres.PostgresDBName, username, newPassword, env) }) }) It("Can update role password validUntil and verify in the database", func() { newValidUntilString := "2023-04-04T00:00:00.000000Z" By("Update comment for role new_role", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() for i, r := range updated.Spec.Managed.Roles { @@ 
-537,7 +547,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic It("Can drop role with ensure absent option", func() { By("Delete role new_role with EnsureOption ", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -551,9 +561,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify new_role not existed in db", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - Eventually(QueryMatchExpectationPredicate(primaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(newUserName), "f"), 30).Should(Succeed()) }) }) diff --git a/tests/e2e/managed_services_test.go b/tests/e2e/managed_services_test.go index ee139aa744..348d051472 100644 --- a/tests/e2e/managed_services_test.go +++ b/tests/e2e/managed_services_test.go @@ -27,7 +27,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -53,14 +55,14 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa It("should create and delete a rw managed service", func(ctx SpecContext) { const clusterManifest = fixturesDir + "/managed_services/cluster-managed-services-rw.yaml.template" const serviceName = "test-rw" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(clusterManifest) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("ensuring the service is created", func() { @@ -78,32 +80,32 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa By("ensuring the service is deleted when removed from the additional field", func() { Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{} return env.Client.Update(ctx, cluster) }, RetryTimeout, PollingTime).Should(Succeed()) - AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ManagedServices], env) Eventually(func(g Gomega) { var serviceRW corev1.Service err = env.Client.Get(ctx, types.NamespacedName{Name: serviceName, Namespace: namespace}, &serviceRW) 
g.Expect(apierrs.IsNotFound(err)).To(BeTrue()) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) }) }) It("should properly handle disabledDefaultServices field", func(ctx SpecContext) { const clusterManifest = fixturesDir + "/managed_services/cluster-managed-services-no-default.yaml.template" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(clusterManifest) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) ro := specs.CreateClusterReadOnlyService(*cluster) @@ -124,45 +126,45 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa By("creating them when they are re-enabled", func() { Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{} return env.Client.Update(ctx, cluster) }, RetryTimeout, PollingTime).Should(Succeed()) - AssertClusterIsReady(namespace, clusterName, testTimeouts[utils.ManagedServices], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ManagedServices], env) Eventually(func(g Gomega) { var service corev1.Service err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: rw.Name}, &service) g.Expect(err).ToNot(HaveOccurred()) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) Eventually(func(g Gomega) { var service corev1.Service err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: ro.Name}, &service) g.Expect(err).ToNot(HaveOccurred()) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) Eventually(func(g Gomega) { var service corev1.Service err = env.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: r.Name}, &service) g.Expect(err).ToNot(HaveOccurred()) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) }) }) It("should properly handle replace update strategy", func(ctx SpecContext) { const clusterManifest = fixturesDir + "/managed_services/cluster-managed-services-replace-strategy.yaml.template" const serviceName = "test-rw" - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(clusterManifest) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterManifest, env) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) var creationTimestamp metav1.Time @@ -185,7 +187,7 @@ 
var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa By("updating the service definition", func() { Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.Managed.Services.Additional[0].ServiceTemplate.ObjectMeta.Labels["new-label"] = "new" return env.Client.Update(ctx, cluster) @@ -200,7 +202,7 @@ var _ = Describe("Managed services tests", Label(tests.LabelSmoke, tests.LabelBa g.Expect(service.Labels["new-label"]).To(Equal("new")) g.Expect(service.UID).ToNot(Equal(uid)) g.Expect(service.CreationTimestamp).ToNot(Equal(creationTimestamp)) - }, testTimeouts[utils.ManagedServices]).Should(Succeed()) + }, testTimeouts[timeouts.ManagedServices]).Should(Succeed()) }) }) }) diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go index e057220f35..430c89fa17 100644 --- a/tests/e2e/metrics_test.go +++ b/tests/e2e/metrics_test.go @@ -28,7 +28,10 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -92,29 +95,30 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { AssertGatherMetrics := func(namespacePrefix, clusterFile string) { // Create the cluster namespace - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCustomMetricsResourcesExist(namespace, customQueriesSampleFile, 2, 1) - metricsClusterName, err := env.GetResourceNameFromYAML(clusterFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterFile) Expect(err).ToNot(HaveOccurred()) // Create the cluster AssertCreateCluster(namespace, metricsClusterName, clusterFile, env) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).NotTo(HaveOccurred()) // Check metrics on each pod By("ensuring metrics are correct on each pod", func() { - podList, err := env.GetClusterPodList(namespace, metricsClusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) // Gather metrics in each pod for _, pod := range podList.Items { By(fmt.Sprintf("checking metrics for pod: %s", pod.Name), func() { - out, err := utils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, + cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred(), "while getting pod metrics") expectedMetrics := buildExpectedMetrics(cluster, !specs.IsPodPrimary(pod)) assertIncludesMetrics(out, expectedMetrics) @@ -140,10 +144,10 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { It("can gather metrics with multiple target databases", func() { const namespacePrefix = "metrics-target-databases-e2e" - metricsClusterName, err := 
env.GetResourceNameFromYAML(clusterMetricsDBFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterMetricsDBFile) Expect(err).ToNot(HaveOccurred()) // Create the cluster namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCustomMetricsResourcesExist(namespace, customQueriesTargetDBSampleFile, 1, 1) @@ -153,7 +157,7 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBTwo, testTableName) AssertCreationOfTestDataForTargetDB(env, namespace, metricsClusterName, targetDBSecret, testTableName) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) AssertMetricsData(namespace, targetDBOne, targetDBTwo, targetDBSecret, cluster) @@ -162,10 +166,10 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { It("can gather default metrics details", func() { const clusterWithDefaultMetricsFile = fixturesDir + "/base/cluster-storage-class.yaml.template" const namespacePrefix = "default-metrics-details" - metricsClusterName, err := env.GetResourceNameFromYAML(clusterWithDefaultMetricsFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithDefaultMetricsFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, metricsClusterName, clusterWithDefaultMetricsFile, env) @@ -181,30 +185,32 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { return err }, 10).ShouldNot(HaveOccurred()) }) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) - collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), true) + collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), + true) }) It("can gather metrics depending on the predicate query", func() { // Create the cluster namespace const namespacePrefix = "predicate-query-metrics-e2e" - metricsClusterName, err := env.GetResourceNameFromYAML(clusterMetricsPredicateQueryFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterMetricsPredicateQueryFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - AssertCustomMetricsResourcesExist(namespace, fixturesDir+"/metrics/custom-queries-with-predicate-query.yaml", 1, 0) + AssertCustomMetricsResourcesExist(namespace, fixturesDir+"/metrics/custom-queries-with-predicate-query.yaml", 1, + 0) // Create the cluster AssertCreateCluster(namespace, metricsClusterName, clusterMetricsPredicateQueryFile, env) By("ensuring only metrics with a positive predicate are collected", func() { - podList, err := env.GetClusterPodList(namespace, metricsClusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, metricsClusterName) 
Expect(err).ToNot(HaveOccurred()) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) // We expect only the metrics that have a predicate_query valid. @@ -223,7 +229,8 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { // Gather metrics in each pod for _, pod := range podList.Items { By(fmt.Sprintf("checking metrics for pod: %s", pod.Name), func() { - out, err := utils.RetrieveMetricsFromInstance(env, pod, cluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, + cluster.IsMetricsTLSEnabled()) Expect(err).ToNot(HaveOccurred(), "while getting pod metrics") assertIncludesMetrics(out, expectedMetrics) assertExcludesMetrics(out, nonCollectableMetrics) @@ -237,18 +244,19 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { const defaultMonitoringQueriesDisableSampleFile = fixturesDir + "/metrics/cluster-disable-default-metrics.yaml.template" const namespacePrefix = "disable-default-metrics" - metricsClusterName, err := env.GetResourceNameFromYAML(defaultMonitoringQueriesDisableSampleFile) + metricsClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, defaultMonitoringQueriesDisableSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create the cluster AssertCreateCluster(namespace, metricsClusterName, defaultMonitoringQueriesDisableSampleFile, env) - cluster, err := env.GetCluster(namespace, metricsClusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, metricsClusterName) Expect(err).ToNot(HaveOccurred()) - collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), false) + collectAndAssertDefaultMetricsPresentOnEachPod(namespace, metricsClusterName, cluster.IsMetricsTLSEnabled(), + false) }) It("execute custom queries against the application database on replica clusters", func() { @@ -263,15 +271,15 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { ) // Fetching the source cluster name - srcClusterName, err := env.GetResourceNameFromYAML(srcClusterSampleFile) + srcClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, srcClusterSampleFile) Expect(err).ToNot(HaveOccurred()) // Fetching replica cluster name - replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSampleFile) + replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleFile) Expect(err).ToNot(HaveOccurred()) // create namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating and verifying custom queries configmap @@ -290,8 +298,11 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { ) By(fmt.Sprintf("grant select permission for %v table to pg_monitor", testTableName), func() { - forward, conn, err := utils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, srcClusterName, srcClusterDatabaseName, @@ -307,17 +318,18 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { _, err = 
conn.Exec(cmd) Expect(err).ToNot(HaveOccurred()) }) - replicaCluster, err := env.GetCluster(namespace, replicaClusterName) + replicaCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, replicaClusterName) Expect(err).ToNot(HaveOccurred()) By("collecting metrics on each pod and checking that the table has been found", func() { - podList, err := env.GetClusterPodList(namespace, replicaClusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, replicaClusterName) Expect(err).ToNot(HaveOccurred()) // Gather metrics in each pod expectedMetric := fmt.Sprintf("cnpg_%v_row_count 3", testTableName) for _, pod := range podList.Items { - out, err := utils.RetrieveMetricsFromInstance(env, pod, replicaCluster.IsMetricsTLSEnabled()) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, + replicaCluster.IsMetricsTLSEnabled()) Expect(err).Should(Not(HaveOccurred())) Expect(strings.Split(out, "\n")).Should(ContainElement(expectedMetric)) } diff --git a/tests/e2e/monitoring_test.go b/tests/e2e/monitoring_test.go index 6fabfaf38d..c1ef082a10 100644 --- a/tests/e2e/monitoring_test.go +++ b/tests/e2e/monitoring_test.go @@ -17,8 +17,15 @@ limitations under the License. package e2e import ( + "context" + + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/apimachinery/pkg/types" + k8client "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -26,6 +33,24 @@ import ( // Set of tests that set up a cluster with monitoring support enabled var _ = Describe("PodMonitor support", Serial, Label(tests.LabelObservability), func() { + getPodMonitorFunc := func( + ctx context.Context, + crudClient k8client.Client, + namespace, name string, + ) (*monitoringv1.PodMonitor, error) { + podMonitor := &monitoringv1.PodMonitor{} + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + + err := objects.Get(ctx, crudClient, namespacedName, podMonitor) + if err != nil { + return nil, err + } + return podMonitor, nil + } + const ( namespacePrefix = "cluster-monitoring-e2e" level = tests.Medium @@ -53,13 +78,13 @@ var _ = Describe("PodMonitor support", Serial, Label(tests.LabelObservability), }) It("sets up a cluster enabling PodMonitor feature", func() { - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterDefaultName, clusterDefaultMonitoringFile, env) By("verifying PodMonitor existence", func() { - podMonitor, err := env.GetPodMonitor(namespace, clusterDefaultName) + podMonitor, err := getPodMonitorFunc(env.Ctx, env.Client, namespace, clusterDefaultName) Expect(err).ToNot(HaveOccurred()) endpoints := podMonitor.Spec.PodMetricsEndpoints diff --git a/tests/e2e/nodeselector_test.go b/tests/e2e/nodeselector_test.go index 761b3ecfe3..a93945838c 100644 --- a/tests/e2e/nodeselector_test.go +++ b/tests/e2e/nodeselector_test.go @@ -24,7 +24,9 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + 
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -48,7 +50,7 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { // We create a namespace and verify it exists By(fmt.Sprintf("having a %v namespace", namespace), func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating a namespace should be quick @@ -76,7 +78,7 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { timeout := 120 Eventually(func() bool { isPending := false - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) if len(podList.Items) > 0 { if len(podList.Items[0].Status.Conditions) > 0 { @@ -107,13 +109,13 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { var nodeName string var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // We label one node with the label we have defined in the cluster // YAML definition By("labelling a node", func() { - nodeList, err := env.GetNodeList() + nodeList, err := nodes.List(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) // We want to label a node that is uncordoned and untainted, @@ -126,14 +128,14 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { } } cmd := fmt.Sprintf("kubectl label node %v nodeselectortest=exists --overwrite", nodeName) - _, _, err = utils.Run(cmd) + _, _, err = run.Run(cmd) Expect(err).ToNot(HaveOccurred()) }) // All the pods should be running on the labeled node By("confirm pods run on the labelled node", func() { AssertCreateCluster(namespace, clusterName, sampleFile, env) - podList, err := env.GetPodList(namespace) + podList, err := pods.List(env.Ctx, env.Client, namespace) Expect(err).ToNot(HaveOccurred()) for _, podDetails := range podList.Items { if podDetails.Status.Phase == "Running" { diff --git a/tests/e2e/openshift_upgrade_test.go b/tests/e2e/openshift_upgrade_test.go index 3305128b8f..0ba60354ab 100644 --- a/tests/e2e/openshift_upgrade_test.go +++ b/tests/e2e/openshift_upgrade_test.go @@ -16,10 +16,17 @@ limitations under the License. package e2e import ( + "fmt" + "regexp" + "strings" + "github.com/blang/semver" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/openshift" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -53,21 +60,21 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere ocp412, err = semver.Make("4.12.0") Expect(err).ToNot(HaveOccurred()) // Get current OpenShift Versions - ocpVersion, err = testsUtils.GetOpenshiftVersion(env) + ocpVersion, err = openshift.GetOpenshiftVersion(env.Ctx, env.RestClientConfig) Expect(err).ToNot(HaveOccurred()) }) cleanupOperator := func() error { // Cleanup the Operator - err = testsUtils.DeleteOperatorCRDs(env) + err = openshift.DeleteOperatorCRDs(env.Ctx, env.Client) if err != nil { return err } - err = testsUtils.DeleteSubscription(env) + err = openshift.DeleteSubscription(env.Ctx, env.Client) if err != nil { return err } - err = testsUtils.DeleteCSV(env) + err = openshift.DeleteCSV(env.Ctx, env.Client) if err != nil { return err } @@ -78,7 +85,7 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere err := cleanupOperator() Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - _, err = env.GetOperatorPod() + _, err = operator.GetPod(env.Ctx, env.Client) return err }, 120).Should(HaveOccurred()) } @@ -86,21 +93,21 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere assertClusterIsAligned := func(namespace, clusterName string) { By("Verifying the cluster pods have been upgraded", func() { Eventually(func() bool { - return testsUtils.HasOperatorBeenUpgraded(env) + return operator.HasBeenUpgraded(env.Ctx, env.Client) }).Should(BeTrue()) - operatorPodName, err := testsUtils.GetOperatorPodName(env) + operatorPodName, err := operator.GetPodName(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) - expectedVersion, err := testsUtils.GetOperatorVersion("openshift-operators", operatorPodName) + expectedVersion, err := operator.Version("openshift-operators", operatorPodName) Expect(err).ToNot(HaveOccurred()) - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { Eventually(func() (string, error) { - return testsUtils.GetManagerVersion(namespace, pod.Name) + return GetManagerVersion(namespace, pod.Name) }, 300).Should(BeEquivalentTo(expectedVersion)) } }) @@ -110,13 +117,13 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere // Apply a subscription in the openshift-operators namespace. 
// This should create the operator
 		By("Applying the initial subscription", func() {
-			err := testsUtils.CreateSubscription(env, initialSubscription)
+			err := openshift.CreateSubscription(env.Ctx, env.Client, initialSubscription)
 			Expect(err).ToNot(HaveOccurred())
-			AssertOperatorIsReady()
+			AssertOperatorIsReady(env.Ctx, env.Client, env.Interface)
 		})
 
 		// Gather the version and semantic Versions of the operator
-		currentVersion, err := testsUtils.GetSubscriptionVersion(env)
+		currentVersion, err := openshift.GetSubscriptionVersion(env.Ctx, env.Client)
 		Expect(err).ToNot(HaveOccurred())
 		currentSemVersion, err := semver.Make(currentVersion)
 		Expect(err).ToNot(HaveOccurred())
@@ -124,27 +131,27 @@
 		Expect(err).ToNot(HaveOccurred())
 
 		// Create a Cluster in a namespace we'll delete at the end
-		namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+		namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
 		Expect(err).ToNot(HaveOccurred())
 		AssertCreateCluster(namespace, clusterName, sampleFile, env)
 
 		By("Patching the status condition if required", func() {
 			// Patch the status conditions if we are running on a pre new-policy release
 			if currentSemVersion.LT(newPolicyRelease) {
-				err = testsUtils.PatchStatusCondition(namespace, clusterName, env)
+				err = openshift.PatchStatusCondition(env.Ctx, env.Client, namespace, clusterName)
 				Expect(err).ToNot(HaveOccurred())
 			}
 		})
 
 		By("Applying the upgrade subscription", func() {
 			// Apply the new subscription to upgrade to a new version of the operator
-			err = testsUtils.UpgradeSubscription(env, upgradeSubscription)
+			err = openshift.UpgradeSubscription(env.Ctx, env.Client, upgradeSubscription)
 			Expect(err).ToNot(HaveOccurred())
 			Eventually(func() (string, error) {
-				return testsUtils.GetSubscriptionVersion(env)
+				return openshift.GetSubscriptionVersion(env.Ctx, env.Client)
 			}, 300).
 				ShouldNot(BeEquivalentTo(currentVersion))
-			AssertOperatorIsReady()
+			AssertOperatorIsReady(env.Ctx, env.Client, env.Interface)
 		})
 
 		// Check if the upgrade was successful by making sure all the pods
@@ -160,3 +167,23 @@
 		applyUpgrade("stable-v1", "alpha")
 	})
 })
+
+// GetManagerVersion returns the current manager version of a given pod
+func GetManagerVersion(namespace, podName string) (string, error) {
+	out, _, err := run.Unchecked(fmt.Sprintf(
+		"kubectl -n %v exec %v -c postgres -- /controller/manager version",
+		namespace,
+		podName,
+	))
+	if err != nil {
+		return "", err
+	}
+	versionRegexp := regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`)
+	// FindStringSubmatch returns nil when the output does not match the
+	// expected "Build: {Version:... Commit:...}" format; indexing the result
+	// blindly would panic the test, so return a descriptive error instead.
+	matches := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))
+	if len(matches) < 2 {
+		return "", fmt.Errorf("unexpected manager version output: %q", out)
+	}
+	return matches[1], nil
+}
diff --git a/tests/e2e/operator_deployment_test.go b/tests/e2e/operator_deployment_test.go
index 9b121ccfbc..d451723b8c 100644
--- a/tests/e2e/operator_deployment_test.go
+++ b/tests/e2e/operator_deployment_test.go
@@ -18,6 +18,7 @@ package e2e
 
 import (
 	"github.com/cloudnative-pg/cloudnative-pg/tests"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
 
 	. "github.com/onsi/ginkgo/v2"
 	. 
"github.com/onsi/gomega" @@ -34,10 +35,10 @@ var _ = Describe("PostgreSQL operator deployment", Label(tests.LabelBasic, tests It("sets up the operator", func() { By("having a pod for the operator in state ready", func() { - AssertOperatorIsReady() + AssertOperatorIsReady(env.Ctx, env.Client, env.Interface) }) By("having a deployment for the operator in state ready", func() { - ready, err := env.IsOperatorDeploymentReady() + ready, err := operator.IsDeploymentReady(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) Expect(ready).To(BeTrue()) }) diff --git a/tests/e2e/operator_ha_test.go b/tests/e2e/operator_ha_test.go index 878f93df9d..94a65fe81c 100644 --- a/tests/e2e/operator_ha_test.go +++ b/tests/e2e/operator_ha_test.go @@ -21,7 +21,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -49,11 +51,11 @@ var _ = Describe("Operator High Availability", Serial, It("can work as HA mode", func() { // Make sure there's at least one pod of the operator - err := env.ScaleOperatorDeployment(1) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) // Get Operator Pod name - operatorPodName, err := env.GetOperatorPod() + operatorPodName, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) By("having an operator already running", func() { @@ -64,11 +66,11 @@ var _ = Describe("Operator High Availability", Serial, }) // Get operator namespace - operatorNamespace, err := env.GetOperatorNamespaceName() + operatorNamespace, err := operator.NamespaceName(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) // Create the cluster namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create Cluster @@ -76,18 +78,20 @@ var _ = Describe("Operator High Availability", Serial, By("verifying current leader", func() { // Check for the current Operator Pod leader from ConfigMap - Expect(testsUtils.GetLeaderInfoFromLease(operatorNamespace, env)).To(HavePrefix(operatorPodName.GetName())) + Expect(operator.GetLeaderInfoFromLease( + env.Ctx, env.Interface, + operatorNamespace)).To(HavePrefix(operatorPodName.GetName())) }) By("scale up operator replicas to 3", func() { // Set old leader pod name to operator pod name oldLeaderPodName = operatorPodName.GetName() - err := env.ScaleOperatorDeployment(3) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 3) Expect(err).ToNot(HaveOccurred()) // Gather pod names from operator deployment - podList, err := env.GetPodList(operatorNamespace) + podList, err := podutils.List(env.Ctx, env.Client, operatorNamespace) Expect(err).ToNot(HaveOccurred()) for _, podItem := range podList.Items { operatorPodNames = append(operatorPodNames, podItem.GetName()) @@ -97,7 +101,9 @@ var _ = Describe("Operator High Availability", Serial, By("verifying leader information after scale up", func() { // Check for Operator Pod leader from ConfigMap to be the former one Eventually(func() (string, error) { - return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env) + return 
operator.GetLeaderInfoFromLease( + env.Ctx, env.Interface, + operatorNamespace) }, 60).Should(HavePrefix(oldLeaderPodName)) }) @@ -106,12 +112,12 @@ var _ = Describe("Operator High Availability", Serial, quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(operatorNamespace, oldLeaderPodName, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, operatorNamespace, oldLeaderPodName, quickDelete) Expect(err).ToNot(HaveOccurred()) // Verify operator pod should have been deleted Eventually(func() []string { - podList, err := env.GetPodList(operatorNamespace) + podList, err := podutils.List(env.Ctx, env.Client, operatorNamespace) Expect(err).ToNot(HaveOccurred()) var podNames []string for _, podItem := range podList.Items { @@ -124,13 +130,15 @@ var _ = Describe("Operator High Availability", Serial, By("new leader should be configured", func() { // Verify that the leader name is different from the previous one Eventually(func() (string, error) { - return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env) + return operator.GetLeaderInfoFromLease( + env.Ctx, env.Interface, + operatorNamespace) }, 120).ShouldNot(HavePrefix(oldLeaderPodName)) }) By("verifying reconciliation", func() { // Get current CNPG cluster's Primary - currentPrimary, err := env.GetClusterPrimary(namespace, clusterName) + currentPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPrimary := currentPrimary.GetName() @@ -138,7 +146,7 @@ var _ = Describe("Operator High Availability", Serial, quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(namespace, currentPrimary.GetName(), quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary.GetName(), quickDelete) Expect(err).ToNot(HaveOccurred()) // Expect a new primary to be elected and promoted @@ -147,18 +155,20 @@ var _ = Describe("Operator High Availability", Serial, By("scale down operator replicas to 1", func() { // Scale down operator deployment to one replica - err := env.ScaleOperatorDeployment(1) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) }) By("verifying leader information after scale down", func() { // Get Operator Pod name - operatorPodName, err := env.GetOperatorPod() + operatorPodName, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) // Verify the Operator Pod is the leader Eventually(func() (string, error) { - return testsUtils.GetLeaderInfoFromLease(operatorNamespace, env) + return operator.GetLeaderInfoFromLease( + env.Ctx, env.Interface, + operatorNamespace) }, 120).Should(HavePrefix(operatorPodName.GetName())) }) }) diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index 5f23913135..4a38e29a46 100644 --- a/tests/e2e/operator_unavailable_test.go +++ b/tests/e2e/operator_unavailable_test.go @@ -26,14 +26,16 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) // Set of tests in which we test the concurrent disruption of both the primary -// and the operator pods, asserting that the latter is able to perform a pending +// and the operator podutils, asserting that the latter is able to perform a pending // failover once a new operator pod comes back available. var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, tests.LabelOperator), func() { const ( @@ -54,7 +56,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te It("can survive operator failures", func() { var err error // Create the cluster namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -63,13 +65,13 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: "test", } AssertCreateTestData(env, tableLocator) By("scaling down operator replicas to zero", func() { - err := env.ScaleOperatorDeployment(0) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 0) Expect(err).ToNot(HaveOccurred()) }) @@ -78,7 +80,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(namespace, currentPrimary, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, currentPrimary, quickDelete) Expect(err).ToNot(HaveOccurred()) // Expect only 2 instances to be up and running @@ -108,7 +110,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te By("scaling up the operator replicas to 1", func() { // Scale up operator deployment to one replica - err := env.ScaleOperatorDeployment(1) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) }) @@ -138,7 +140,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te var operatorPodName string var err error // Create the cluster namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -147,12 +149,12 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: "test", } AssertCreateTestData(env, tableLocator) - operatorNamespace, err := env.GetOperatorNamespaceName() + operatorNamespace, err := operator.NamespaceName(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) By("deleting primary and operator pod", func() { @@ -171,11 +173,11 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te wg.Add(1) wg.Add(1) go func() { - _ = env.DeletePod(operatorNamespace, operatorPodName, quickDelete) + _ = podutils.Delete(env.Ctx, env.Client, operatorNamespace, operatorPodName, quickDelete) wg.Done() }() go func() { - _ = env.DeletePod(namespace, currentPrimary, quickDelete) + _ = podutils.Delete(env.Ctx, env.Client, namespace, 
currentPrimary, quickDelete) wg.Done() }() wg.Wait() @@ -202,7 +204,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te g.Expect(podList.Items[0].Name).NotTo(BeEquivalentTo(operatorPodName)) }, timeout).Should(Succeed()) Eventually(func() (bool, error) { - return env.IsOperatorDeploymentReady() + return operator.IsDeploymentReady(env.Ctx, env.Client) }, timeout).Should(BeTrue()) }) diff --git a/tests/e2e/pg_basebackup_test.go b/tests/e2e/pg_basebackup_test.go index de4fdcd812..ea0b11806f 100644 --- a/tests/e2e/pg_basebackup_test.go +++ b/tests/e2e/pg_basebackup_test.go @@ -19,7 +19,9 @@ package e2e import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -46,16 +48,16 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun Context("can bootstrap via pg_basebackup", Ordered, func() { BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create the source Cluster - srcClusterName, err = env.GetResourceNameFromYAML(srcCluster) + srcClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, srcCluster) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, srcClusterName, srcCluster, env) tableLocator := TableLocator{ Namespace: namespace, ClusterName: srcClusterName, - DatabaseName: utils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -63,17 +65,17 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun It("using basic authentication", func() { // Create the destination Cluster - dstClusterName, err := env.GetResourceNameFromYAML(dstClusterBasic) + dstClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, dstClusterBasic) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, dstClusterName, dstClusterBasic, env) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, dstClusterName, testTimeouts[utils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, dstClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) secretName := dstClusterName + apiv1.ApplicationUserSecretSuffix By("checking the dst cluster with auto generated app password connectable", func() { AssertApplicationDatabaseConnection(namespace, dstClusterName, - appUser, utils.AppDBName, "", secretName) + appUser, postgres.AppDBName, "", secretName) }) By("update user application password for dst cluster and verify connectivity", func() { @@ -83,7 +85,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun namespace, dstClusterName, appUser, - utils.AppDBName, + postgres.AppDBName, newPassword, secretName) }) @@ -92,18 +94,21 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun tableLocator := TableLocator{ Namespace: namespace, ClusterName: dstClusterName, - DatabaseName: utils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } 
AssertDataExpectedCount(env, tableLocator, 2) }) By("writing some new data to the dst cluster", func() { - forward, conn, err := utils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, dstClusterName, - utils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -118,7 +123,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun tableLocator := TableLocator{ Namespace: namespace, ClusterName: srcClusterName, - DatabaseName: utils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) @@ -127,28 +132,31 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun It("using TLS authentication", func() { // Create the destination Cluster - dstClusterName, err := env.GetResourceNameFromYAML(dstClusterTLS) + dstClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, dstClusterTLS) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, dstClusterName, dstClusterTLS, env) // We give more time than the usual 600s, since the recovery is slower - AssertClusterIsReady(namespace, dstClusterName, testTimeouts[utils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, dstClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) By("checking data have been copied correctly", func() { tableLocator := TableLocator{ Namespace: namespace, ClusterName: dstClusterName, - DatabaseName: utils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) }) By("writing some new data to the dst cluster", func() { - forward, conn, err := utils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, dstClusterName, - utils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -163,7 +171,7 @@ var _ = Describe("Bootstrap with pg_basebackup", Label(tests.LabelRecovery), fun tableLocator := TableLocator{ Namespace: namespace, ClusterName: srcClusterName, - DatabaseName: utils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) diff --git a/tests/e2e/pg_data_corruption_test.go b/tests/e2e/pg_data_corruption_test.go index c0672f4479..44acfafed8 100644 --- a/tests/e2e/pg_data_corruption_test.go +++ b/tests/e2e/pg_data_corruption_test.go @@ -27,7 +27,13 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -44,7 +50,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( Skip("Test depth is lower than the amount requested for this test") } var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) }) @@ -55,19 +61,19 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( var oldPrimaryPodName, oldPrimaryPVCName string var err error tableName := "test_pg_data_corruption" - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testsUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) By("gathering current primary pod and pvc", func() { - oldPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) + oldPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) oldPrimaryPodName = oldPrimaryPod.GetName() // Get the PVC related to the pod @@ -84,8 +90,9 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( By("corrupting primary pod by removing PGDATA", func() { cmd := fmt.Sprintf("find %v/base/* -type f -delete", specs.PgDataPath) - _, _, err = env.ExecCommandInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: oldPrimaryPodName, }, nil, @@ -95,7 +102,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( By("verifying failover happened after the primary pod PGDATA got corrupted", func() { Eventually(func() (string, error) { - newPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) + newPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } @@ -126,7 +133,10 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( By("removing the old primary pod and its pvc", func() { // Check if walStorage is enabled - walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env) + walStorageEnabled, err := storage.IsWalStorageEnabled( + env.Ctx, env.Client, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) // Force delete setting @@ -160,7 +170,7 @@ var _ = Describe("PGDATA Corruption", Label(tests.LabelRecovery), Ordered, func( } // Deleting old primary pod - err = env.DeletePod(namespace, oldPrimaryPodName, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, oldPrimaryPodName, quickDelete) Expect(err).ToNot(HaveOccurred()) // checking that the old primary pod is eventually gone diff --git a/tests/e2e/pg_wal_volume_test.go b/tests/e2e/pg_wal_volume_test.go index 612f936b59..75c4d52a59 100644 --- a/tests/e2e/pg_wal_volume_test.go +++ b/tests/e2e/pg_wal_volume_test.go @@ -30,7 +30,9 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + 
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -46,7 +48,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { ) var namespace string verifyPgWal := func(namespace string) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(len(podList.Items), err).To(BeEquivalentTo(3)) By("checking that pg_wal PVC has been created", func() { for _, pod := range podList.Items { @@ -77,8 +79,9 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { ".*[0-9]$") timeout := 300 Eventually(func() (int, error, error) { - out, _, err := env.ExecCommandInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: pod.GetName(), }, nil, @@ -93,7 +96,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { // Inline function to patch walStorage in existing cluster updateWalStorage := func(namespace, clusterName string) { err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) WalStorageClass := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") cluster.Spec.WalStorage = &apiv1.StorageConfiguration{ @@ -118,7 +121,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { const namespacePrefix = "pg-wal-volume-e2e" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithPgWal, env) verifyPgWal(namespace) @@ -128,7 +131,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { const namespacePrefix = "add-pg-wal-volume-e2e" var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithoutPgWal, env) By(fmt.Sprintf("adding pg_wal volume in existing cluster: %v", clusterName), func() { @@ -137,7 +140,7 @@ var _ = Describe("Separate pg_wal volume", Label(tests.LabelStorage), func() { AssertPVCCount(namespace, clusterName, expectedPvcCount, 120) AssertClusterEventuallyReachesPhase(namespace, clusterName, []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 30) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) AssertClusterPhaseIsConsistent(namespace, clusterName, []string{apiv1.PhaseHealthy}, 30) verifyPgWal(namespace) }) diff --git a/tests/e2e/pgbouncer_metrics_test.go b/tests/e2e/pgbouncer_metrics_test.go index a33e6a7285..6fbf4d4fab 100644 --- a/tests/e2e/pgbouncer_metrics_test.go +++ 
b/tests/e2e/pgbouncer_metrics_test.go @@ -26,7 +26,8 @@ import ( pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -49,16 +50,16 @@ var _ = Describe("PGBouncer Metrics", Label(tests.LabelObservability), func() { It("should retrieve the metrics exposed by a freshly created pooler of type pgBouncer and validate its content", func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(cnpgCluster) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, cnpgCluster) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, cnpgCluster, env) createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerBasicAuthRWSampleFile, 1) - poolerName, err := env.GetResourceNameFromYAML(poolerBasicAuthRWSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerBasicAuthRWSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -96,7 +97,7 @@ var _ = Describe("PGBouncer Metrics", Label(tests.LabelObservability), func() { for _, pod := range podList.Items { podName := pod.GetName() - out, err := utils.RetrieveMetricsFromPgBouncer(env, pod) + out, err := proxy.RetrieveMetricsFromPgBouncer(env.Ctx, env.Interface, pod) Expect(err).ToNot(HaveOccurred()) matches := metricsRegexp.FindAllString(out, -1) Expect(matches).To( diff --git a/tests/e2e/pgbouncer_test.go b/tests/e2e/pgbouncer_test.go index d07734c106..edbd2560d9 100644 --- a/tests/e2e/pgbouncer_test.go +++ b/tests/e2e/pgbouncer_test.go @@ -22,6 +22,9 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -47,14 +50,14 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), Context("no user-defined certificates", Ordered, func() { BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace("pgbouncer-auth-no-user-certs") + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-auth-no-user-certs") Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) JustAfterEach(func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) DeleteTableUsingPgBouncerService(namespace, clusterName, poolerBasicAuthRWSampleFile, env, primaryPod) }) @@ -155,9 +158,9 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), caSecNameClient = "my-postgresql-client-ca" ) // Create a cluster in a namespace that will be deleted after the test - namespace, err = env.CreateUniqueTestNamespace("pgbouncer-separate-certificates") + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-separate-certificates") Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(sampleFileWithCertificate) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithCertificate) Expect(err).ToNot(HaveOccurred()) // Create certificates secret for server @@ -192,7 +195,7 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), }) func getPgbouncerPod(sampleFile string) (*corev1.Pod, error) { - poolerKey, err := env.GetResourceNamespacedNameFromYAML(sampleFile) + poolerKey, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) if err != nil { return nil, err } @@ -200,14 +203,17 @@ func getPgbouncerPod(sampleFile string) (*corev1.Pod, error) { Expect(err).ToNot(HaveOccurred()) var podList corev1.PodList - err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(poolerKey.Namespace), - ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerKey.Name}) + err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(""), + ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerKey}) Expect(err).ToNot(HaveOccurred()) Expect(len(podList.Items)).Should(BeEquivalentTo(1)) return &podList.Items[0], nil } func runShowHelpInPod(pod *corev1.Pod) error { - _, _, err := env.ExecCommand(env.Ctx, *pod, "pgbouncer", nil, "psql", "-c", "SHOW HELP") + _, _, err := exec.Command( + env.Ctx, env.Interface, env.RestClientConfig, *pod, + "pgbouncer", nil, "psql", "-c", "SHOW HELP", + ) return err } diff --git a/tests/e2e/pgbouncer_types_test.go b/tests/e2e/pgbouncer_types_test.go index dbdfd51325..5be71b0e8b 100644 --- a/tests/e2e/pgbouncer_types_test.go +++ b/tests/e2e/pgbouncer_types_test.go @@ -24,7 +24,8 @@ import ( pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -53,9 +54,9 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test // This cluster will be shared by the next tests - namespace, err = env.CreateUniqueTestNamespace("pgbouncer-types") + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "pgbouncer-types") Expect(err).ToNot(HaveOccurred()) - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) }) @@ -74,7 +75,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit }) By("verify that read-only pooler pgbouncer.ini contains the correct host service", func() { - poolerName, err := env.GetResourceNameFromYAML(poolerCertificateROSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateROSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -89,7 +90,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit }) By("verify that read-write pooler pgbouncer.ini contains the correct host service", func() { - poolerName, err := env.GetResourceNameFromYAML(poolerCertificateRWSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateRWSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -105,7 +106,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit By(fmt.Sprintf("scaling PGBouncer to %v instances", instances), func() { command := fmt.Sprintf("kubectl scale pooler %s -n %s --replicas=%v", poolerResourceNameRO, namespace, instances) - _, _, err := utils.Run(command) + _, _, err := run.Run(command) Expect(err).ToNot(HaveOccurred()) // verifying if PGBouncer pooler pods are ready after scale up @@ -114,7 +115,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit // // scale up command for 3 replicas for read write command = fmt.Sprintf("kubectl scale pooler %s -n %s --replicas=%v", poolerResourceNameRW, namespace, instances) - _, _, err = utils.Run(command) + _, _, err = run.Run(command) Expect(err).ToNot(HaveOccurred()) // verifying if PGBouncer pooler pods are ready after scale up @@ -126,7 +127,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit }) By("verifying that read-only pooler pgbouncer.ini contains the correct host service", func() { - poolerName, err := env.GetResourceNameFromYAML(poolerCertificateROSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateROSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), @@ -141,7 +142,7 @@ var _ = Describe("PGBouncer Types", Ordered, Label(tests.LabelServiceConnectivit }) By("verifying that read-write pooler pgbouncer.ini contains the correct host service", func() { - poolerName, err := env.GetResourceNameFromYAML(poolerCertificateRWSampleFile) + poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerCertificateRWSampleFile) Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, 
ctrlclient.InNamespace(namespace), diff --git a/tests/e2e/probes_test.go b/tests/e2e/probes_test.go index 9e7dae8567..c3858d9210 100644 --- a/tests/e2e/probes_test.go +++ b/tests/e2e/probes_test.go @@ -24,7 +24,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -81,7 +81,7 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() { // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "probes" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -119,7 +119,7 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() { By("waiting for the cluster to restart", func() { AssertClusterEventuallyReachesPhase(namespace, clusterName, []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) }) By("checking the applied settings", func() { @@ -161,7 +161,7 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() { By("waiting for the cluster to restart", func() { AssertClusterEventuallyReachesPhase(namespace, clusterName, []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 120) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) }) By("checking the applied settings", func() { diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go index e6dccd6e66..ae1910fa29 100644 --- a/tests/e2e/publication_subscription_test.go +++ b/tests/e2e/publication_subscription_test.go @@ -24,7 +24,11 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -65,13 +69,13 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - sourceClusterName, err = env.GetResourceNameFromYAML(sourceClusterManifest) + sourceClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sourceClusterManifest) Expect(err).ToNot(HaveOccurred()) - destinationClusterName, err = env.GetResourceNameFromYAML(destinationClusterManifest) + destinationClusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, destinationClusterManifest) Expect(err).ToNot(HaveOccurred()) By("setting up source cluster", func() { @@ -89,10 +93,14 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub // We need to make sure that publication/subscription have been removed before // attempting to drop the database, otherwise the DROP DATABASE will fail because // there's an active logical replication slot. - destPrimaryPod, err := env.GetClusterPrimary(namespace, destinationClusterName) + destPrimaryPod, err := clusterutils.GetPrimary( + env.Ctx, env.Client, + namespace, destinationClusterName, + ) Expect(err).ToNot(HaveOccurred()) - _, _, err = env.EventuallyExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: destPrimaryPod.Namespace, PodName: destPrimaryPod.Name, }, @@ -103,10 +111,14 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub ) Expect(err).ToNot(HaveOccurred()) - sourcePrimaryPod, err := env.GetClusterPrimary(namespace, sourceClusterName) + sourcePrimaryPod, err := clusterutils.GetPrimary( + env.Ctx, env.Client, + namespace, sourceClusterName, + ) Expect(err).ToNot(HaveOccurred()) - _, _, err = env.EventuallyExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: sourcePrimaryPod.Namespace, PodName: sourcePrimaryPod.Name, }, @@ -119,15 +131,15 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub Expect(DeleteResourcesFromFile(namespace, destinationDatabaseManifest)).To(Succeed()) Expect(DeleteResourcesFromFile(namespace, sourceDatabaseManifest)).To(Succeed()) - Eventually(QueryMatchExpectationPredicate(sourcePrimaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(sourcePrimaryPod, postgres.PostgresDBName, databaseExistsQuery(dbname), "f"), 30).Should(Succeed()) - Eventually(QueryMatchExpectationPredicate(destPrimaryPod, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(destPrimaryPod, postgres.PostgresDBName, databaseExistsQuery(dbname), "f"), 30).Should(Succeed()) }) assertCreateDatabase := func(namespace, clusterName, databaseManifest string) { databaseObject := &apiv1.Database{} - databaseObjectName, err := env.GetResourceNameFromYAML(databaseManifest) + databaseObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("applying the %s Database CRD manifest", databaseObjectName), func() { @@ -148,17 +160,17 @@ var _ = Describe("Publication and Subscription", 
Label(tests.LabelPublicationSub }) By(fmt.Sprintf("verifying the %s database has been created", databaseObject.Spec.Name), func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - Eventually(QueryMatchExpectationPredicate(primaryPodInfo, testsUtils.PostgresDBName, + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, postgres.PostgresDBName, databaseExistsQuery(databaseObject.Spec.Name), "t"), 30).Should(Succeed()) }) } // nolint:dupl assertCreatePublication := func(namespace, clusterName, publicationManifest string) { - pubObjectName, err := env.GetResourceNameFromYAML(publicationManifest) + pubObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, publicationManifest) Expect(err).NotTo(HaveOccurred()) By("applying Publication CRD manifest", func() { @@ -181,7 +193,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub }) By("verifying new publication has been created", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, @@ -191,7 +203,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub // nolint:dupl assertCreateSubscription := func(namespace, clusterName, subscriptionManifest string) { - subObjectName, err := env.GetResourceNameFromYAML(subscriptionManifest) + subObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, subscriptionManifest) Expect(err).NotTo(HaveOccurred()) By("applying Subscription CRD manifest", func() { @@ -214,7 +226,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub }) By("verifying new subscription has been created", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, clusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, @@ -237,8 +249,11 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub By("creating an empty table inside the destination database", func() { query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %v (column1 int) ;", tableName) - _, err = testsUtils.RunExecOverForward(env, namespace, destinationClusterName, dbname, - apiv1.ApplicationUserSecretSuffix, query) + _, err = postgres.RunExecOverForward( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, destinationClusterName, dbname, + apiv1.ApplicationUserSecretSuffix, query, + ) Expect(err).ToNot(HaveOccurred()) }) @@ -257,14 +272,14 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub subscriptionReclaimPolicy = apiv1.SubscriptionReclaimRetain } // Get the object names - pubObjectName, err := env.GetResourceNameFromYAML(pubManifest) + pubObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, pubManifest) Expect(err).NotTo(HaveOccurred()) - subObjectName, err := env.GetResourceNameFromYAML(subManifest) + subObjectName, err := yaml.GetResourceNameFromYAML(env.Scheme, subManifest) Expect(err).NotTo(HaveOccurred()) Eventually(func(g Gomega) { - err = testsUtils.GetObject( - env, + err = objects.Get( + env.Ctx, env.Client, types.NamespacedName{Namespace: namespace, Name: 
pubObjectName}, &publication, ) @@ -273,8 +288,8 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub err = env.Client.Update(env.Ctx, &publication) g.Expect(err).ToNot(HaveOccurred()) - err = testsUtils.GetObject( - env, + err = objects.Get( + env.Ctx, env.Client, types.NamespacedName{Namespace: namespace, Name: subObjectName}, &subscription, ) @@ -296,12 +311,12 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub }) By("removing the objects", func() { - Expect(testsUtils.DeleteObject(env, &publication)).To(Succeed()) - Expect(testsUtils.DeleteObject(env, &subscription)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, &publication)).To(Succeed()) + Expect(objects.Delete(env.Ctx, env.Client, &subscription)).To(Succeed()) }) By("verifying the publication reclaim policy outcome", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, sourceClusterName) + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, sourceClusterName) Expect(err).ToNot(HaveOccurred()) Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, @@ -309,7 +324,10 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub }) By("verifying the subscription reclaim policy outcome", func() { - primaryPodInfo, err := env.GetClusterPrimary(namespace, destinationClusterName) + primaryPodInfo, err := clusterutils.GetPrimary( + env.Ctx, env.Client, + namespace, destinationClusterName, + ) Expect(err).ToNot(HaveOccurred()) Eventually(QueryMatchExpectationPredicate(primaryPodInfo, dbname, diff --git a/tests/e2e/pvc_deletion_test.go b/tests/e2e/pvc_deletion_test.go index ac5d7032bc..05c5747a26 100644 --- a/tests/e2e/pvc_deletion_test.go +++ b/tests/e2e/pvc_deletion_test.go @@ -25,7 +25,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -48,7 +49,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() { It("correctly manages PVCs", func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -79,7 +80,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() { quickDelete := &ctrlclient.DeleteOptions{ GracePeriodSeconds: &quickDeletionPeriod, } - err = env.DeletePod(namespace, podName, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, podName, quickDelete) Expect(err).ToNot(HaveOccurred()) // The pod should be back @@ -122,7 +123,10 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() { originalPVCUID := pvc.GetUID() // Check if walStorage is enabled - walStorageEnabled, err := testsUtils.IsWalStorageEnabled(namespace, clusterName, env) + walStorageEnabled, err := storage.IsWalStorageEnabled( + env.Ctx, env.Client, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) // Force delete setting @@ -149,7 +153,7 @@ var _ = Describe("PVC Deletion", Label(tests.LabelSelfHealing), func() { } // Deleting primary pod - err = env.DeletePod(namespace, podName, quickDelete) + err = podutils.Delete(env.Ctx, env.Client, namespace, podName, quickDelete) Expect(err).ToNot(HaveOccurred()) // A new pod should be created diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index 97b94c9781..dd38fd09a0 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -35,8 +35,15 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -75,7 +82,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { testTableName = "replica_mode_tls_auth" ) - replicaNamespace, err := env.CreateUniqueTestNamespace(replicaNamespacePrefix) + replicaNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(replicaNamespace, srcClusterName, srcClusterSample, env) @@ -87,7 +94,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { testTableName, ) - replicaName, err := env.GetResourceNameFromYAML(replicaClusterSampleTLS) + replicaName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleTLS) Expect(err).ToNot(HaveOccurred()) assertReplicaClusterTopology(replicaNamespace, replicaName) @@ -108,9 +115,9 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { testTableName = "replica_mode_basic_auth" ) - replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSampleBasicAuth) + replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSampleBasicAuth) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(replicaNamespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, srcClusterName, srcClusterSample, env) @@ -153,7 +160,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { return nil } - namespace, err = env.CreateUniqueTestNamespace("replica-promotion-demotion") + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, "replica-promotion-demotion") Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterOneName, clusterOneFile, env) @@ -167,26 +174,27 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { // turn the src cluster into a replica By("setting replica mode on the src cluster", func() { - cluster, err := env.GetCluster(namespace, clusterOneName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterOneName) Expect(err).ToNot(HaveOccurred()) updateTime := time.Now().Truncate(time.Second) cluster.Spec.ReplicaCluster.Enabled = ptr.To(true) err = env.Client.Update(ctx, cluster) Expect(err).ToNot(HaveOccurred()) Eventually(func(g Gomega) { - cluster, err := env.GetCluster(namespace, clusterOneName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterOneName) g.Expect(err).ToNot(HaveOccurred()) condition := getReplicaClusterSwitchCondition(cluster.Status.Conditions) g.Expect(condition).ToNot(BeNil()) g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) g.Expect(condition.LastTransitionTime.Time).To(BeTemporally(">=", updateTime)) }).WithTimeout(30 * time.Second).Should(Succeed()) - AssertClusterIsReady(namespace, clusterOneName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterOneName, testTimeouts[timeouts.ClusterIsReady], env) }) By("checking that src cluster is now a replica cluster", func() { Eventually(func() error { - clusterOnePrimary, err = env.GetClusterPrimary(namespace, clusterOneName) + clusterOnePrimary, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterOneName) return err }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(clusterOnePrimary, true) @@ -194,17 +202,18 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { // turn the dst cluster into a primary 
By("disabling the replica mode on the dst cluster", func() { - cluster, err := env.GetCluster(namespace, clusterTwoName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterTwoName) Expect(err).ToNot(HaveOccurred()) cluster.Spec.ReplicaCluster.Enabled = ptr.To(false) err = env.Client.Update(ctx, cluster) Expect(err).ToNot(HaveOccurred()) - AssertClusterIsReady(namespace, clusterTwoName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterTwoName, testTimeouts[timeouts.ClusterIsReady], env) }) By("checking that dst cluster has been promoted", func() { Eventually(func() error { - clusterTwoPrimary, err = env.GetClusterPrimary(namespace, clusterTwoName) + clusterTwoPrimary, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterTwoName) return err }, 30, 3).Should(Succeed()) AssertPgRecoveryMode(clusterTwoPrimary, false) @@ -225,8 +234,10 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { // We need to copy the password changes over to the src Cluster, which is now a Replica // Cluster, in order to connect using the "-app" secret. By("updating the appUser secret of the src cluster", func() { - _, appSecretPassword, err := testUtils.GetCredentials(clusterTwoName, namespace, - apiv1.ApplicationUserSecretSuffix, env) + _, appSecretPassword, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterTwoName, namespace, + apiv1.ApplicationUserSecretSuffix) Expect(err).ToNot(HaveOccurred()) AssertUpdateSecret("password", appSecretPassword, clusterOneName+apiv1.ApplicationUserSecretSuffix, namespace, clusterOneName, 30, env) @@ -252,18 +263,19 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { testTableName = "replica_mode_archive" ) - replicaClusterName, err := env.GetResourceNameFromYAML(replicaClusterSample) + replicaClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, replicaClusterSample) Expect(err).ToNot(HaveOccurred()) - replicaNamespace, err := env.CreateUniqueTestNamespace(replicaNamespacePrefix) + replicaNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, replicaNamespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, replicaNamespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -284,14 +296,20 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { ) // Get primary from replica cluster - primaryReplicaCluster, err := env.GetClusterPrimary(replicaNamespace, replicaClusterName) + primaryReplicaCluster, err := clusterutils.GetPrimary( + env.Ctx, + env.Client, + replicaNamespace, + replicaClusterName, + ) Expect(err).ToNot(HaveOccurred()) By("verify archive mode is set to 'always on' designated primary", func() { query := "show archive_mode;" Eventually(func() (string, error) { - stdOut, _, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ + stdOut, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryReplicaCluster.Namespace, PodName: primaryReplicaCluster.Name, }, @@ -318,16 +336,17 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { BeforeAll(func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, 
namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -338,7 +357,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { }) // Create the cluster - clusterName, err = env.GetResourceNameFromYAML(clusterSample) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterSample) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, clusterSample, env) }) @@ -351,13 +370,15 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { By("creating a backup and waiting until it's completed", func() { backupName := fmt.Sprintf("%v-backup", clusterName) - backup, err := testUtils.CreateOnDemandBackup( + backup, err := backups.CreateOnDemand( + env.Ctx, + env.Client, namespace, clusterName, backupName, apiv1.BackupTargetStandby, apiv1.BackupMethodBarmanObjectStore, - env) + ) Expect(err).ToNot(HaveOccurred()) Eventually(func() (apiv1.BackupPhase, error) { @@ -366,7 +387,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Name: backupName, }, backup) return backup.Status.Phase, err - }, testTimeouts[testUtils.BackupIsReady]).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted)) + }, testTimeouts[timeouts.BackupIsReady]).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted)) }) By("creating a replica cluster from the backup", func() { @@ -404,13 +425,15 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { By("creating a snapshot and waiting until it's completed", func() { var err error snapshotName := fmt.Sprintf("%v-snapshot", clusterName) - backup, err = testUtils.CreateOnDemandBackup( + backup, err = backups.CreateOnDemand( + env.Ctx, + env.Client, namespace, clusterName, snapshotName, apiv1.BackupTargetStandby, apiv1.BackupMethodVolumeSnapshot, - env) + ) Expect(err).ToNot(HaveOccurred()) Eventually(func(g Gomega) { @@ -421,7 +444,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { g.Expect(err).ToNot(HaveOccurred()) g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted)) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("fetching the volume snapshots", func() { @@ -432,11 +455,11 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { Expect(err).ToNot(HaveOccurred()) Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) Expect(err).ToNot(HaveOccurred()) }) @@ -479,11 +502,12 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }) validateReplication := func(namespace, clusterAName, clusterBName string) { - primary, err := env.GetClusterPrimary(namespace, clusterBName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) - _, _, err = env.ExecQueryInInstancePod( - 
testUtils.PodLocator{Namespace: namespace, PodName: primary.Name}, + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", "CREATE TABLE test_replication AS SELECT 1;", ) @@ -491,14 +515,15 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f _ = switchWalAndGetLatestArchive(namespace, primary.Name) Eventually(func(g Gomega) { - podListA, err := env.GetClusterPodList(namespace, clusterAName) + podListA, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterAName) g.Expect(err).ToNot(HaveOccurred()) - podListB, err := env.GetClusterPodList(namespace, clusterBName) + podListB, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterBName) g.Expect(err).ToNot(HaveOccurred()) for _, podA := range podListA.Items { - _, _, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: podA.Name}, + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: podA.Name}, "postgres", "SELECT * FROM test_replication;", ) @@ -506,34 +531,36 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f } for _, podB := range podListB.Items { - _, _, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: podB.Name}, + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: podB.Name}, "postgres", "SELECT * FROM test_replication;", ) g.Expect(err).ToNot(HaveOccurred()) } - }, testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed()) + }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed()) } waitForTimelineIncrease := func(namespace, clusterName string, expectedTimeline int) bool { return Eventually(func(g Gomega) { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: primary.Name}, + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", "SELECT timeline_id FROM pg_control_checkpoint();", ) g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.TrimSpace(stdout)).To(Equal(fmt.Sprintf("%d", expectedTimeline))) - }, testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed()) + }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed()) } DescribeTable("should demote and promote the clusters correctly", func(clusterAFile string, clusterBFile string, expectedTimeline int) { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) DeferCleanup(func() error { // Since we use multiple times the same cluster names for the same minio instance, we need to clean it up @@ -553,12 +580,13 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f DeferCleanup(func() { close(stopLoad) }) By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, 
namespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -570,15 +598,16 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f By("creating the A cluster", func() { var err error - clusterAName, err = env.GetResourceNameFromYAML(clusterAFile) + clusterAName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterAFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterAName, clusterAFile, env) }) By("creating some load on the A cluster", func() { - primary, err := env.GetClusterPrimary(namespace, clusterAName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) - _, _, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: primary.Name}, + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", "CREATE TABLE switchover_load (i int);", ) @@ -586,8 +615,9 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f go func() { for { - _, _, _ = env.ExecQueryInInstancePod( - testUtils.PodLocator{Namespace: namespace, PodName: primary.Name}, + _, _, _ = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", "INSERT INTO switchover_load SELECT generate_series(1, 10000)", ) @@ -603,7 +633,8 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }) By("backing up the A cluster", func() { - backup, err := testUtils.CreateBackup( + backup, err := backups.Create( + env.Ctx, env.Client, apiv1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -615,12 +646,11 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f Cluster: apiv1.LocalObjectReference{Name: clusterAName}, }, }, - env, ) Expect(err).ToNot(HaveOccurred()) // Speed up backup finalization - primary, err := env.GetClusterPrimary(namespace, clusterAName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) _ = switchWalAndGetLatestArchive(namespace, primary.Name) Expect(err).ToNot(HaveOccurred()) @@ -633,25 +663,25 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }, backup) return backup.Status.Phase, err }, - testTimeouts[testUtils.BackupIsReady], + testTimeouts[timeouts.BackupIsReady], ).WithPolling(10 * time.Second). 
Should(BeEquivalentTo(apiv1.BackupPhaseCompleted)) }) By("creating the B cluster from the backup", func() { var err error - clusterBName, err = env.GetResourceNameFromYAML(clusterBFile) + clusterBName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterBFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterBName, clusterBFile, env) }) By("demoting A to a replica", func() { - cluster, err := env.GetCluster(namespace, clusterAName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) oldCluster := cluster.DeepCopy() cluster.Spec.ReplicaCluster.Primary = clusterBName Expect(env.Client.Patch(env.Ctx, cluster, k8client.MergeFrom(oldCluster))).To(Succeed()) - podList, err := env.GetClusterPodList(namespace, clusterAName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { AssertPgRecoveryMode(&pod, true) @@ -660,7 +690,7 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f var token, invalidToken string By("getting the demotion token", func() { - cluster, err := env.GetCluster(namespace, clusterAName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterAName) Expect(err).ToNot(HaveOccurred()) token = cluster.Status.DemotionToken }) @@ -675,7 +705,7 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }) By("promoting B with the invalid token", func() { - cluster, err := env.GetCluster(namespace, clusterBName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) oldCluster := cluster.DeepCopy() @@ -686,25 +716,26 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f By("failing to promote B with the invalid token", func() { Consistently(func(g Gomega) { - pod, err := env.GetClusterPrimary(namespace, clusterBName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName) g.Expect(err).ToNot(HaveOccurred()) - stdOut, _, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ + stdOut, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testUtils.PostgresDBName, + postgres.PostgresDBName, "select pg_is_in_recovery();") g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.Trim(stdOut, "\n")).To(Equal("t")) }, 60, 10).Should(Succeed()) - cluster, err := env.GetCluster(namespace, clusterBName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.Status.Phase).To(BeEquivalentTo(apiv1.PhaseUnrecoverable)) }) By("promoting B with the right token", func() { - cluster, err := env.GetCluster(namespace, clusterBName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) oldCluster := cluster.DeepCopy() cluster.Spec.ReplicaCluster.PromotionToken = token @@ -717,10 +748,10 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f }) By("verifying B contains the primary", func() { - primary, err := env.GetClusterPrimary(namespace, clusterBName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) AssertPgRecoveryMode(primary, false) - podList, err := env.GetClusterReplicas(namespace, 
clusterBName) + podList, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterBName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { AssertPgRecoveryMode(&pod, true) @@ -748,7 +779,7 @@ func assertReplicaClusterTopology(namespace, clusterName string) { standbys []string ) - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.Status.ReadyInstances).To(BeEquivalentTo(cluster.Spec.Instances)) @@ -760,8 +791,9 @@ func assertReplicaClusterTopology(namespace, clusterName string) { standbys = funk.FilterString(cluster.Status.InstanceNames, func(name string) bool { return name != primary }) getStreamingInfo := func(podName string) ([]string, error) { - stdout, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + stdout, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: podName, }, @@ -804,8 +836,9 @@ func assertReplicaClusterTopology(namespace, clusterName string) { By("verifying that the new primary is streaming from the source cluster", func() { Eventually(func(g Gomega) { - stdout, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + stdout, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary, }, diff --git a/tests/e2e/replication_slot_test.go b/tests/e2e/replication_slot_test.go index c57404f93f..e832a1fada 100644 --- a/tests/e2e/replication_slot_test.go +++ b/tests/e2e/replication_slot_test.go @@ -24,7 +24,10 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/replicationslot" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -47,17 +50,19 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { It("Can enable and disable replication slots", func() { var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) By("enabling replication slot on cluster", func() { - err := testsUtils.ToggleHAReplicationSlots(namespace, clusterName, true, env) + err := replicationslot.ToggleHAReplicationSlots( + env.Ctx, env.Client, + namespace, clusterName, true) Expect(err).ToNot(HaveOccurred()) // Replication slots should be Enabled Consistently(func() (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return false, err } @@ -73,13 +78,13 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { } By("checking Primary HA slots exist and are active", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod( + expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod( + env.Ctx, env.Client, namespace, clusterName, primaryPod.GetName(), - env, ) Expect(err).ToNot(HaveOccurred()) AssertReplicationSlotsOnPod(namespace, clusterName, *primaryPod, expectedSlots, true, false) @@ -90,12 +95,15 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { var err error before := time.Now() Eventually(func(g Gomega) { - replicaPods, err = env.GetClusterReplicas(namespace, clusterName) + replicaPods, err = clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName) g.Expect(len(replicaPods.Items), err).To(BeEquivalentTo(2)) }, 90, 2).Should(Succeed()) GinkgoWriter.Println("standby slot check succeeded in", time.Since(before)) for _, pod := range replicaPods.Items { - expectedSlots, err := testsUtils.GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env) + expectedSlots, err := replicationslot.GetExpectedHAReplicationSlotsOnPod( + env.Ctx, env.Client, + namespace, clusterName, pod.GetName(), + ) Expect(err).ToNot(HaveOccurred()) AssertReplicationSlotsOnPod(namespace, clusterName, pod, expectedSlots, true, false) } @@ -106,16 +114,17 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { }) By("creating a physical replication slots on the primary", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) query := fmt.Sprintf("SELECT pg_create_physical_replication_slot('%s');", userPhysicalSlot) - _, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) Expect(err).ToNot(HaveOccurred()) }) @@ -125,7 +134,7 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { var err error before := time.Now() Eventually(func(g Gomega) { - replicaPods, err = 
env.GetClusterReplicas(namespace, clusterName) + replicaPods, err = clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName) g.Expect(len(replicaPods.Items), err).To(BeEquivalentTo(2)) }, 90, 2).Should(Succeed()) GinkgoWriter.Println("standby slot check succeeded in", time.Since(before)) @@ -136,14 +145,18 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { }) By("disabling replication slot from running cluster", func() { - err := testsUtils.ToggleHAReplicationSlots(namespace, clusterName, false, env) + err := replicationslot.ToggleHAReplicationSlots( + env.Ctx, env.Client, + namespace, clusterName, false) Expect(err).ToNot(HaveOccurred()) - err = testsUtils.ToggleSynchronizeReplicationSlots(namespace, clusterName, false, env) + err = replicationslot.ToggleSynchronizeReplicationSlots( + env.Ctx, env.Client, + namespace, clusterName, false) Expect(err).ToNot(HaveOccurred()) // Replication slots should be Disabled Consistently(func() (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return false, err } @@ -159,11 +172,13 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { } By("verifying slots have been removed from the cluster's pods", func() { - pods, err := env.GetClusterPodList(namespace, clusterName) + pods, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range pods.Items { Eventually(func(g Gomega) error { - slotOnPod, err := testsUtils.GetReplicationSlotsOnPod(namespace, pod.GetName(), env) + slotOnPod, err := replicationslot.GetReplicationSlotsOnPod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, pod.GetName(), postgres.AppDBName) if err != nil { return err } diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go index 7b3bde3ae9..2440e0e299 100644 --- a/tests/e2e/rolling_update_test.go +++ b/tests/e2e/rolling_update_test.go @@ -33,6 +33,10 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + podutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -45,12 +49,12 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Skip("Test depth is lower than the amount requested for this test") } }) - // gatherClusterInfo returns the current lists of pods, pod UIDs and pvc UIDs in a given cluster + // gatherClusterInfo returns the current lists of podutils, pod UIDs and pvc UIDs in a given cluster gatherClusterInfo := func(namespace string, clusterName string) ([]string, []types.UID, []types.UID, error) { var podNames []string var podUIDs []types.UID var pvcUIDs []types.UID - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podNames = append(podNames, pod.GetName()) @@ -73,7 +77,10 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun namespace string, clusterName string, imageName string, expectedInstances int, timeout int, ) { Eventually(func() (int32, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return 0, err + } updatedPods := int32(0) for _, pod := range podList.Items { // We need to check if a pod is ready, otherwise we @@ -95,7 +102,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun }, timeout).Should(BeEquivalentTo(expectedInstances)) } - // Verify that after an update all the pods are ready and running + // Verify that after an update all the podutils are ready and running // an updated image AssertUpdateImage := func(namespace string, clusterName string) { // TODO: the nodes are downloading the image sequentially, @@ -113,7 +120,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun var cluster *apiv1.Cluster Eventually(func(g Gomega) error { var err error - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.ImageName = updatedImageName @@ -123,15 +130,16 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // All the postgres containers should have the updated image AssertPodsRunOnImage(namespace, clusterName, updatedImageName, cluster.Spec.Instances, timeout) - // Setting up a cluster with three pods is slow, usually 200-600s - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + // Setting up a cluster with three podutils is slow, usually 200-600s + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) } // Verify that the pod name changes amount to an expected number - AssertChangedNames := func(namespace string, clusterName string, + AssertChangedNames := func( + namespace string, clusterName string, originalPodNames []string, expectedUnchangedNames int, ) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) matchingNames := 0 for _, pod := range podList.Items { @@ -147,10 +155,11 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun } // Verify that the pod UIDs changes are the expected number - AssertNewPodsUID := func(namespace string, clusterName string, + AssertNewPodsUID := func( + 
namespace string, clusterName string, originalPodUID []types.UID, expectedUnchangedUIDs int, ) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) matchingUID := 0 for _, pod := range podList.Items { @@ -166,10 +175,11 @@ } // Verify that the PVC UIDs changes are the expected number - AssertChangedPvcUID := func(namespace string, clusterName string, + AssertChangedPvcUID := func( + namespace string, clusterName string, originalPVCUID []types.UID, expectedUnchangedPvcUIDs int, ) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) matchingPVC := 0 for _, pod := range podList.Items { @@ -192,14 +202,15 @@ } // Verify that the -rw endpoint points to the expected primary - AssertPrimary := func(namespace, clusterName string, + AssertPrimary := func( + namespace, clusterName string, oldPrimaryPod *corev1.Pod, expectNewPrimaryIdx bool, ) { var cluster *apiv1.Cluster var err error Eventually(func(g Gomega) { - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) if expectNewPrimaryIdx { g.Expect(cluster.Status.CurrentPrimary).ToNot(BeEquivalentTo(oldPrimaryPod.Name)) @@ -209,7 +220,7 @@ }, RetryTimeout).Should(Succeed()) // Get the new current primary Pod - currentPrimaryPod, err := env.GetPod(namespace, cluster.Status.CurrentPrimary) + currentPrimaryPod, err := podutils.Get(env.Ctx, env.Client, namespace, cluster.Status.CurrentPrimary) Expect(err).ToNot(HaveOccurred()) endpointName := clusterName + "-rw" @@ -226,8 +237,8 @@ }, timeout).Should(BeEquivalentTo(currentPrimaryPod.Status.PodIP)) } // Verify that the IPs of the pods match the ones in the -r endpoint and // that the amount of pods is the expected one AssertReadyEndpoint := func(namespace string, clusterName string, expectedEndpoints int) { endpointName := clusterName + "-r" endpoint := &corev1.Endpoints{} @@ -238,7 +249,7 @@ err := env.Client.Get(env.Ctx, endpointNamespacedName, endpoint) Expect(err).ToNot(HaveOccurred()) - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(expectedEndpoints, err).To(BeEquivalentTo(len(podList.Items))) matchingIP := 0 for _, pod := range podList.Items { @@ -252,7 +263,8 @@ Expect(matchingIP).To(BeEquivalentTo(expectedEndpoints)) } - AssertRollingUpdate := func(namespace string, clusterName string, + AssertRollingUpdate := func( + namespace string, clusterName string, sampleFile string, expectNewPrimaryIdx bool, ) { var originalPodNames []string @@ -262,12 +274,12 @@ var _ = Describe("Rolling updates",
Label(tests.LabelPostgresConfiguration), fun AssertCreateCluster(namespace, clusterName, sampleFile, env) // Gather the number of instances in this Cluster - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) clusterInstances := cluster.Spec.Instances // Gather the original primary Pod - originalPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) + originalPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("Gathering info on the current state", func() { @@ -277,18 +289,18 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun By("updating the cluster definition", func() { AssertUpdateImage(namespace, clusterName) }) // Since we're using a pvc, after the update the pods should // have been created with the same name using the same pvc. // Here we check that the names we've saved at the beginning // of the It are the same names of the current pods. By("checking that the names of the pods have not changed", func() { AssertChangedNames(namespace, clusterName, originalPodNames, clusterInstances) }) // Even if they have the same names, they should have different // UIDs, as the pods are new. Here we check that the UID // we've saved at the beginning of the It don't match the // current ones. By("checking that the pods are new ones", func() { AssertNewPodsUID(namespace, clusterName, originalPodUID, 0) }) // The PVC get reused, so they should have the same UID @@ -303,7 +315,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun By("having the current primary on the new TargetPrimary", func() { AssertPrimary(namespace, clusterName, originalPrimaryPod, expectNewPrimaryIdx) }) // Check that the new pods are included in the endpoint By("having each pod included in the -r service", func() { AssertReadyEndpoint(namespace, clusterName, clusterInstances) }) @@ -409,15 +421,15 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Expect(err).ToNot(HaveOccurred()) err = env.Client.Create(env.Ctx, cluster) Expect(err).ToNot(HaveOccurred()) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) // Gather the number of instances in this Cluster - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) clusterInstances := cluster.Spec.Instances // Gather the original primary Pod - originalPrimaryPod, err := env.GetClusterPrimary(namespace, clusterName) + originalPrimaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("Gathering info on the current state", func() { @@ -431,20 +443,20 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Expect(err).ToNot(HaveOccurred()) })
AssertPodsRunOnImage(namespace, clusterName, updatedImageName, cluster.Spec.Instances, 900) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) // Since we're using a pvc, after the update the pods should // have been created with the same name using the same pvc. // Here we check that the names we've saved at the beginning // of the It are the same names of the current pods. By("checking that the names of the pods have not changed", func() { AssertChangedNames(namespace, clusterName, originalPodNames, clusterInstances) }) // Even if they have the same names, they should have different // UIDs, as the pods are new. Here we check that the UID // we've saved at the beginning of the It don't match the // current ones. By("checking that the pods are new ones", func() { AssertNewPodsUID(namespace, clusterName, originalPodUID, 0) }) // The PVC get reused, so they should have the same UID @@ -459,7 +471,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun By("having the current primary on the new TargetPrimary", func() { AssertPrimary(namespace, clusterName, originalPrimaryPod, expectNewPrimaryIdx) }) // Check that the new pods are included in the endpoint By("having each pod included in the -r service", func() { AssertReadyEndpoint(namespace, clusterName, clusterInstances) }) @@ -477,9 +489,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. // Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertRollingUpdate(namespace, clusterName, sampleFile, true) }) @@ -496,9 +508,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor.
// Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertRollingUpdate(namespace, clusterName, sampleFile, false) }) @@ -510,9 +522,9 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun sampleFile = fixturesDir + "/rolling_updates/cluster-using-primary-update-method.yaml.template" ) It("can do rolling update", func() { - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertRollingUpdate(namespace, clusterName, sampleFile, false) }) @@ -555,7 +567,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. // Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Create a new image catalog and a new cluster @@ -575,7 +587,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. // Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) catalog := newImageCatalog(namespace, clusterName, pgVersion.Major(), preRollingImg) @@ -611,7 +623,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. // Create a cluster in a namespace we'll delete after the test - namespace, err := env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 3, storageClass) @@ -629,7 +641,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun // the image name has to be tagged as foo:MAJ.MIN. We'll update // it to foo:MAJ, representing the latest minor. 
// Create a cluster in a namespace we'll delete after the test
- namespace, err := env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
cluster := newImageCatalogCluster(namespace, clusterName, pgVersion.Major(), 1, storageClass)
diff --git a/tests/e2e/scaling_test.go b/tests/e2e/scaling_test.go
index 000030e850..d47f86ed30 100644
--- a/tests/e2e/scaling_test.go
+++ b/tests/e2e/scaling_test.go
@@ -20,7 +20,7 @@ import (
"fmt"
"github.com/cloudnative-pg/cloudnative-pg/tests"
- "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -47,7 +47,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati
const namespacePrefix = "cluster-scale-e2e-with-slots"
var err error
// Create a cluster in a namespace we'll delete after the test
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileWithReplicationSlots, env)
@@ -55,7 +55,7 @@
// Add a node to the cluster and verify the cluster has one more
// element
By("adding an instance to the cluster", func() {
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName))
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName))
Expect(err).ToNot(HaveOccurred())
timeout := 300
AssertClusterIsReady(namespace, clusterName, timeout, env)
@@ -66,7 +66,7 @@
// Remove a node from the cluster and verify the cluster has one
// element less
By("removing an instance from the cluster", func() {
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName))
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName))
Expect(err).ToNot(HaveOccurred())
timeout := 60
AssertClusterIsReady(namespace, clusterName, timeout, env)
@@ -84,14 +84,14 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati
// Create a cluster in a namespace we'll delete after the test
const namespacePrefix = "cluster-scale-e2e"
var err error
- namespace, err = env.CreateUniqueTestNamespace(namespacePrefix)
+ namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix)
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, sampleFileWithoutReplicationSlots, env)
// Add a node to the cluster and verify the cluster has one more
// element
By("adding an instance to the cluster", func() {
- _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName))
+ _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=4 -n %v cluster/%v", namespace, clusterName))
Expect(err).ToNot(HaveOccurred())
timeout := 300
AssertClusterIsReady(namespace, clusterName, timeout, env)
@@ -101,7 +101,7 @@ var _ = Describe("Cluster scale up and down", Serial, Label(tests.LabelReplicati
// Remove a node from the cluster and verify the cluster has one
// element less
By("removing an instance from the cluster", func() {
- _,
_, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=3 -n %v cluster/%v", namespace, clusterName)) Expect(err).ToNot(HaveOccurred()) timeout := 60 AssertClusterIsReady(namespace, clusterName, timeout, env) diff --git a/tests/e2e/storage_expansion_test.go b/tests/e2e/storage_expansion_test.go index 4713dde4c3..283a4383fd 100644 --- a/tests/e2e/storage_expansion_test.go +++ b/tests/e2e/storage_expansion_test.go @@ -21,7 +21,8 @@ import ( "os" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -53,7 +54,10 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { // Initializing namespace variable to be used in test case namespacePrefix = "storage-expansion-true" // Extracting bool value of AllowVolumeExpansion - allowExpansion, err := utils.GetStorageAllowExpansion(defaultStorageClass, env) + allowExpansion, err := storage.GetStorageAllowExpansion( + env.Ctx, env.Client, + defaultStorageClass, + ) Expect(err).ToNot(HaveOccurred()) if (allowExpansion == nil) || (*allowExpansion == false) { Skip(fmt.Sprintf("AllowedVolumeExpansion is false on %v", defaultStorageClass)) @@ -63,7 +67,7 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { It("expands PVCs via online resize", func() { var err error // Creating namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating a cluster with three nodes AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -76,7 +80,10 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { // Initializing namespace variable to be used in test case namespacePrefix = "storage-expansion-false" // Extracting bool value of AllowVolumeExpansion - allowExpansion, err := utils.GetStorageAllowExpansion(defaultStorageClass, env) + allowExpansion, err := storage.GetStorageAllowExpansion( + env.Ctx, env.Client, + defaultStorageClass, + ) Expect(err).ToNot(HaveOccurred()) if (allowExpansion != nil) && (*allowExpansion == true) { Skip(fmt.Sprintf("AllowedVolumeExpansion is true on %v", defaultStorageClass)) @@ -85,14 +92,14 @@ var _ = Describe("Verify storage", Label(tests.LabelStorage), func() { It("expands PVCs via offline resize", func() { var err error // Creating namespace - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) By("update cluster for resizeInUseVolumes as false", func() { // Updating cluster with 'resizeInUseVolumes' sets to 'false' in storage. 
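The utils.Run to run.Run moves in these files are one-for-one. Assuming the helper keeps the same contract, a minimal sketch of what such a run package provides, returning stdout, stderr, and the error of a shell command (the naive strings.Fields split is enough for the unquoted kubectl invocations used in these tests):

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// Run executes a command line and returns its stdout, stderr, and exit error.
// Note: strings.Fields does not honor shell quoting; it only suits simple
// commands like the kubectl scale calls above.
func Run(command string) (string, string, error) {
	fields := strings.Fields(command)
	cmd := exec.Command(fields[0], fields[1:]...)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err := cmd.Run()
	return stdout.String(), stderr.String(), err
}

func main() {
	out, errOut, err := Run("kubectl scale --replicas=4 -n default cluster/cluster-example")
	fmt.Println(out, errOut, err)
}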
// Check if operator does not return error Eventually(func() error { - _, _, err = utils.RunUnchecked("kubectl patch cluster " + clusterName + " -n " + namespace + + _, _, err = run.Unchecked("kubectl patch cluster " + clusterName + " -n " + namespace + " -p '{\"spec\":{\"storage\":{\"resizeInUseVolumes\":false}}}' --type=merge") if err != nil { return err diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go index fa637fffce..46dbcf1491 100644 --- a/tests/e2e/suite_test.go +++ b/tests/e2e/suite_test.go @@ -35,9 +35,14 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" cnpgUtils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/cloudvendors" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/sternmultitailer" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -45,19 +50,19 @@ import ( const ( fixturesDir = "./fixtures" - RetryTimeout = utils.RetryTimeout - PollingTime = utils.PollingTime + RetryTimeout = environment.RetryTimeout + PollingTime = objects.PollingTime ) var ( - env *utils.TestingEnvironment + env *environment.TestingEnvironment testLevelEnv *tests.TestEnvLevel - testCloudVendorEnv *utils.TestEnvVendor + testCloudVendorEnv *cloudvendors.TestEnvVendor expectedOperatorPodName string operatorPodWasRenamed bool operatorWasRestarted bool quickDeletionPeriod = int64(1) - testTimeouts map[utils.Timeout]int + testTimeouts map[timeouts.Timeout]int minioEnv = &minio.Env{ Namespace: "minio", ServiceName: "minio-service.minio", @@ -68,21 +73,21 @@ var ( var _ = SynchronizedBeforeSuite(func() []byte { var err error - env, err = utils.NewTestingEnvironment() + env, err = environment.NewTestingEnvironment() Expect(err).ShouldNot(HaveOccurred()) // Start stern to write the logs of every pod we are interested in. Since we don't have a way to have a selector // matching both the operator's and the clusters' pods, we need to start stern twice. 
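The suite now keys its timeout table on timeouts.Timeout instead of utils.Timeout, but the lookup pattern is unchanged. An illustrative stand-in, where the constant names mirror the ones used by these tests and the default values are invented:

package main

import "fmt"

// Timeout names a configurable test timeout.
type Timeout string

const (
	ClusterIsReady    Timeout = "clusterIsReady"
	MinioInstallation Timeout = "minioInstallation"
	Short             Timeout = "short"
)

// Timeouts returns the timeout table, in seconds, used by the suite.
func Timeouts() (map[Timeout]int, error) {
	return map[Timeout]int{
		ClusterIsReady:    600,
		MinioInstallation: 300,
		Short:             120,
	}, nil
}

func main() {
	t, err := Timeouts()
	if err != nil {
		panic(err)
	}
	fmt.Println(t[ClusterIsReady])
}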
sternClustersCtx, sternClusterCancel := context.WithCancel(env.Ctx) sternClusterDoneChan := sternmultitailer.StreamLogs(sternClustersCtx, env.Interface, clusterPodsLabelSelector(), - env.SternLogDir) + namespaces.SternLogDirectory) DeferCleanup(func() { sternClusterCancel() <-sternClusterDoneChan }) sternOperatorCtx, sternOperatorCancel := context.WithCancel(env.Ctx) sternOperatorDoneChan := sternmultitailer.StreamLogs(sternOperatorCtx, env.Interface, operatorPodsLabelSelector(), - env.SternLogDir) + namespaces.SternLogDirectory) DeferCleanup(func() { sternOperatorCancel() <-sternOperatorDoneChan @@ -92,13 +97,13 @@ var _ = SynchronizedBeforeSuite(func() []byte { _ = appsv1.AddToScheme(env.Scheme) // Set up a global MinIO service on his own namespace - err = env.CreateNamespace(minioEnv.Namespace) + err = namespaces.CreateNamespace(env.Ctx, env.Client, minioEnv.Namespace) Expect(err).ToNot(HaveOccurred()) DeferCleanup(func() { - err := env.DeleteNamespaceAndWait(minioEnv.Namespace, 300) + err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, minioEnv.Namespace, 300) Expect(err).ToNot(HaveOccurred()) }) - minioEnv.Timeout = uint(testTimeouts[utils.MinioInstallation]) + minioEnv.Timeout = uint(testTimeouts[timeouts.MinioInstallation]) minioClient, err := minio.Deploy(minioEnv, env) Expect(err).ToNot(HaveOccurred()) @@ -118,7 +123,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { var err error // We are creating new testing env object again because above testing env can not serialize and // accessible to all nodes (specs) - if env, err = utils.NewTestingEnvironment(); err != nil { + if env, err = environment.NewTestingEnvironment(); err != nil { panic(err) } @@ -129,11 +134,11 @@ var _ = SynchronizedBeforeSuite(func() []byte { panic(err) } - if testTimeouts, err = utils.Timeouts(); err != nil { + if testTimeouts, err = timeouts.Timeouts(); err != nil { panic(err) } - if testCloudVendorEnv, err = utils.TestCloudVendor(); err != nil { + if testCloudVendorEnv, err = cloudvendors.TestCloudVendor(); err != nil { panic(err) } @@ -149,7 +154,7 @@ var _ = ReportAfterSuite("Gathering failed reports", func(report Report) { // Keep the logs of the operator and the clusters in case of failure // If everything is skipped, env has not been initialized, and we'll have nothing to clean up if report.SuiteSucceeded && env != nil { - err := fileutils.RemoveDirectory(env.SternLogDir) + err := fileutils.RemoveDirectory(namespaces.SternLogDirectory) Expect(err).ToNot(HaveOccurred()) } }) @@ -163,7 +168,7 @@ var _ = BeforeEach(func() { return } - operatorPod, err := env.GetOperatorPod() + operatorPod, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) if operatorPodWasRenamed { @@ -196,14 +201,14 @@ var _ = AfterEach(func() { if len(breakingLabelsInCurrentTest.([]string)) != 0 { return } - operatorPod, err := env.GetOperatorPod() + operatorPod, err := operator.GetPod(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) - wasRenamed := utils.OperatorPodRenamed(operatorPod, expectedOperatorPodName) + wasRenamed := operator.PodRenamed(operatorPod, expectedOperatorPodName) if wasRenamed { operatorPodWasRenamed = true Fail("operator was renamed") } - wasRestarted := utils.OperatorPodRestarted(operatorPod) + wasRestarted := operator.PodRestarted(operatorPod) if wasRestarted { operatorWasRestarted = true Fail("operator was restarted") diff --git a/tests/e2e/switchover_test.go b/tests/e2e/switchover_test.go index 4801a8f5ff..dc773c16ed 100644 --- a/tests/e2e/switchover_test.go +++ 
b/tests/e2e/switchover_test.go @@ -18,6 +18,7 @@ package e2e import ( "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -41,9 +42,9 @@ var _ = Describe("Switchover", Serial, Label(tests.LabelSelfHealing), func() { // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "switchover-e2e-with-slots" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFileWithReplicationSlots) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithReplicationSlots) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithReplicationSlots, env) @@ -57,9 +58,9 @@ var _ = Describe("Switchover", Serial, Label(tests.LabelSelfHealing), func() { // Create a cluster in a namespace we'll delete after the test const namespacePrefix = "switchover-e2e" var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFileWithoutReplicationSlots) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileWithoutReplicationSlots) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFileWithoutReplicationSlots, env) diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go index fcd321874e..c6bc7dc3ef 100644 --- a/tests/e2e/syncreplicas_test.go +++ b/tests/e2e/syncreplicas_test.go @@ -27,7 +27,11 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -43,11 +47,12 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { getSyncReplicationCount := func(namespace, clusterName, syncState string, expectedCount int) { Eventually(func() (int, error, error) { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - out, stdErr, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + out, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.GetName(), }, @@ -63,11 +68,12 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { compareSynchronousStandbyNames := func(namespace, clusterName, element string) { Eventually(func() string { - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - out, stdErr, err := env.ExecQueryInInstancePod( - utils.PodLocator{ + out, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.GetName(), }, @@ -88,11 +94,11 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { namespacePrefix = "legacy-sync-replicas-e2e" sampleFile = fixturesDir + "/sync_replicas/cluster-sync-replica-legacy.yaml.template" ) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -104,7 +110,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { By("checking that synchronous_standby_names reflects cluster's changes", func() { // Set MaxSyncReplicas to 1 Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.MaxSyncReplicas = 1 @@ -112,13 +118,13 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }, RetryTimeout, 5).Should(Succeed()) // Scale the cluster down to 2 pods - _, _, err := utils.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace, + _, _, err := run.Run(fmt.Sprintf("kubectl scale --replicas=2 -n %v cluster/%v", namespace, clusterName)) Expect(err).ToNot(HaveOccurred()) timeout := 120 // Wait for pod 3 to be completely terminated Eventually(func() (int, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) return len(podList.Items), err }, timeout).Should(BeEquivalentTo(2)) @@ -127,14 +133,14 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") }) By("failing when SyncReplicas fields are invalid", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := 
clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Expect an error. MaxSyncReplicas must be lower than the number of instances cluster.Spec.MaxSyncReplicas = 2 err = env.Client.Update(env.Ctx, cluster) Expect(err).To(HaveOccurred()) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Expect an error. MinSyncReplicas must be lower than MaxSyncReplicas cluster.Spec.MinSyncReplicas = 2 @@ -148,7 +154,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { namespacePrefix = "sync-replicas-statstatements" sampleFile = fixturesDir + "/sync_replicas/cluster-pgstatstatements.yaml.template" ) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Are extensions a problem with synchronous replication? No, absolutely not, @@ -159,7 +165,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { // bootstrapping the cluster, the CREATE EXTENSION instruction will block // the primary since the desired number of synchronous replicas (even when 1) // is not met. - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -180,11 +186,11 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { namespacePrefix = "sync-replicas-e2e" sampleFile = fixturesDir + "/sync_replicas/cluster-sync-replica.yaml.template" ) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -195,7 +201,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { By("setting MaxStandbyNamesFromCluster to 1 and decreasing to 1 the sync replicas required", func() { Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = ptr.To(1) cluster.Spec.PostgresConfiguration.Synchronous.Number = 1 @@ -208,7 +214,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { By("switching to MethodFirst (priority-based)", func() { Eventually(func(g Gomega) error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.Method = apiv1.SynchronousReplicaConfigurationMethodFirst return env.Client.Update(env.Ctx, cluster) @@ -220,7 +226,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { By("by properly setting standbyNamesPre and standbyNamesPost", func() { Eventually(func(g Gomega) error { - cluster, err := 
env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = nil cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPre = []string{"preSyncReplica"} @@ -238,10 +244,10 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { namespacePrefix = "sync-replicas-preferred" sampleFile = fixturesDir + "/sync_replicas/preferred.yaml.template" ) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) @@ -251,31 +257,33 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) By("fencing a replica and verifying we have only 1 quorum-based replica", func() { - Expect(utils.FencingOn(env, fmt.Sprintf("%v-3", clusterName), - namespace, clusterName, utils.UsingAnnotation)).Should(Succeed()) + Expect(fencing.On(env.Ctx, env.Client, fmt.Sprintf("%v-3", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) getSyncReplicationCount(namespace, clusterName, "quorum", 1) compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") }) By("fencing the second replica and verifying we unset synchronous_standby_names", func() { - Expect(utils.FencingOn(env, fmt.Sprintf("%v-2", clusterName), - namespace, clusterName, utils.UsingAnnotation)).Should(Succeed()) + Expect(fencing.On(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) Eventually(func() string { commandTimeout := time.Second * 10 - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - stdout, _, err := env.ExecCommand(env.Ctx, *primary, specs.PostgresContainerName, - &commandTimeout, - "psql", "-U", "postgres", "-tAc", "show synchronous_standby_names") + stdout, _, err := exec.Command( + env.Ctx, env.Interface, env.RestClientConfig, + *primary, specs.PostgresContainerName, &commandTimeout, + "psql", "-U", "postgres", "-tAc", "show synchronous_standby_names", + ) Expect(err).ToNot(HaveOccurred()) return strings.Trim(stdout, "\n") }, 160).Should(BeEmpty()) }) By("unfenicing the replicas and verifying we have 2 quorum-based replicas", func() { - Expect(utils.FencingOff(env, fmt.Sprintf("%v-3", clusterName), - namespace, clusterName, utils.UsingAnnotation)).Should(Succeed()) - Expect(utils.FencingOff(env, fmt.Sprintf("%v-2", clusterName), - namespace, clusterName, utils.UsingAnnotation)).Should(Succeed()) + Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-3", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName), + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) getSyncReplicationCount(namespace, clusterName, "quorum", 2) compareSynchronousStandbyNames(namespace, clusterName, "ANY 2") }) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index afbaa42c13..53c1dc3e62 100644 --- 
a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "fmt" "os" "path" @@ -32,11 +33,21 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -68,13 +79,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, clusterSetup := func(namespace, clusterManifest string) { var err error - clusterName, err = env.GetResourceNameFromYAML(clusterManifest) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) By("creating a cluster and having it be ready", func() { AssertCreateCluster(namespace, clusterName, clusterManifest, env) }) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) } @@ -90,17 +101,18 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, ) BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // We create the MinIO credentials required to login into the system By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -114,28 +126,28 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) It("can verify tablespaces and PVC were created", func() { - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.Short]) - AssertRoleReconciled(namespace, clusterName, "dante", testTimeouts[testUtils.Short]) - AssertRoleReconciled(namespace, clusterName, "alpha", testTimeouts[testUtils.Short]) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.Short]) + 
AssertRoleReconciled(namespace, clusterName, "dante", testTimeouts[timeouts.Short]) + AssertRoleReconciled(namespace, clusterName, "alpha", testTimeouts[timeouts.Short]) AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "dante") }) It("can update the cluster by change the owner of tablesapce", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updateTablespaceOwner(cluster, "anothertablespace", "alpha") - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertTablespaceReconciled(namespace, clusterName, "anothertablespace", testTimeouts[testUtils.Short]) + AssertTablespaceReconciled(namespace, clusterName, "anothertablespace", testTimeouts[timeouts.Short]) AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") }) It("can update the cluster to set a tablespace as temporary", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("setting the first tablespace as temporary", func() { @@ -158,13 +170,16 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) It("can create the backup and verify content in the object store", func() { - backupName, err = env.GetResourceNameFromYAML(clusterBackupManifest) + backupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterBackupManifest) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("creating backup %s and verifying backup is ready", backupName), func() { - testUtils.ExecuteBackup(namespace, clusterBackupManifest, false, testTimeouts[testUtils.BackupIsReady], - env) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.Execute( + env.Ctx, env.Client, env.Scheme, + namespace, clusterBackupManifest, false, + testTimeouts[timeouts.BackupIsReady], + ) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) }) By("verifying the number of tars in minio", func() { @@ -173,32 +188,18 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, By("verifying backup status", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastSuccessfulBackup, err - }, 30).ShouldNot(BeEmpty()) - Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return "", err - } - return cluster.Status.LastFailedBackup, err - }, 30).Should(BeEmpty()) }) }) It("can update the cluster adding a new tablespace and backup again", func() { By("adding a new tablespace to the cluster", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) addTablespaces(cluster, []apiv1.TablespaceConfiguration{ @@ -214,26 +215,26 @@ var _ = Describe("Tablespaces tests", 
Label(tests.LabelTablespaces, }, }) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) }) By("verifying there are 3 tablespaces and PVCs were created", func() { - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.Spec.Tablespaces).To(HaveLen(3)) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 3, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 3, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") AssertTablespaceAndOwnerExist(cluster, "thirdtablespace", "dante") }) By("waiting for the cluster to be ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("verifying expected number of PVCs for tablespaces", func() { @@ -242,14 +243,15 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("creating a new backup and verifying backup is ready", func() { - backupCondition, err := testUtils.GetConditionsInClusterStatus( + backupCondition, err := backups.GetConditionsInClusterStatus( + env.Ctx, + env.Client, namespace, clusterName, - env, apiv1.ConditionBackup, ) Expect(err).ShouldNot(HaveOccurred()) - _, stderr, err := testUtils.Run( + _, stderr, err := run.Run( fmt.Sprintf("kubectl cnpg backup %s -n %s --backup-name %s", clusterName, namespace, fullBackupName)) Expect(stderr).To(BeEmpty()) @@ -262,10 +264,10 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, ) // TODO: this is to force a CHECKPOINT when we run the backup on standby. 
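The on-demand backup in this hunk goes through the cnpg kubectl plugin; the test only formats a command line and hands it to the run helper. The command construction in isolation, with placeholder names:

package main

import "fmt"

// cnpgBackupCommand builds the plugin invocation used by the test above.
func cnpgBackupCommand(clusterName, namespace, backupName string) string {
	return fmt.Sprintf("kubectl cnpg backup %s -n %s --backup-name %s",
		clusterName, namespace, backupName)
}

func main() {
	fmt.Println(cnpgBackupCommand("cluster-example", "tablespaces-e2e", "full-backup"))
}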
- // This should be better handled inside ExecuteBackup + // This should be better handled inside Execute AssertArchiveWalOnMinio(namespace, clusterName, clusterName) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) }) By("verifying the number of tars in the latest base backup", func() { @@ -278,21 +280,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, By("verifying backup status", func() { Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } return cluster.Status.FirstRecoverabilityPoint, err }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } return cluster.Status.LastSuccessfulBackup, err }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } @@ -309,23 +311,23 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, const clusterRestoreFromBarmanManifest string = fixturesDir + "/tablespaces/restore-cluster-from-barman.yaml.template" - restoredClusterName, err := env.GetResourceNameFromYAML(clusterRestoreFromBarmanManifest) + restoredClusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterRestoreFromBarmanManifest) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot", func() { CreateResourceFromFile(namespace, clusterRestoreFromBarmanManifest) // A delay of 5 min when restoring with tablespaces is normal, let's give extra time - AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[testUtils.ClusterIsReadySlow], + AssertClusterIsReady(namespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) }) By("verifying that tablespaces and PVC were created", func() { - restoredCluster, err := env.GetCluster(namespace, restoredClusterName) + restoredCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, restoredClusterName) Expect(err).ToNot(HaveOccurred()) AssertClusterHasMountPointsAndVolumesForTablespaces(restoredCluster, 3, - testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[testUtils.Short]) + testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[timeouts.Short]) AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") AssertTablespaceAndOwnerExist(cluster, "thirdtablespace", "dante") @@ -359,17 +361,18 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, BeforeAll(func() { // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // We create the required credentials for MinIO 
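Stripped of Ginkgo and Gomega, the Eventually blocks above reduce to polling the cluster status until a field such as FirstRecoverabilityPoint is populated. A self-contained sketch of that pattern, with clusterStatus standing in for the real API type:

package main

import (
	"errors"
	"fmt"
	"time"
)

type clusterStatus struct {
	FirstRecoverabilityPoint string
}

// pollUntilSet retries fetch once per second until the field is non-empty or
// the timeout expires, which is what Eventually(...).ShouldNot(BeEmpty())
// does under the hood.
func pollUntilSet(fetch func() (clusterStatus, error), timeout time.Duration) (string, error) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		status, err := fetch()
		if err == nil && status.FirstRecoverabilityPoint != "" {
			return status.FirstRecoverabilityPoint, nil
		}
		time.Sleep(time.Second)
	}
	return "", errors.New("timed out waiting for the first recoverability point")
}

func main() {
	calls := 0
	point, err := pollUntilSet(func() (clusterStatus, error) {
		calls++
		if calls < 3 {
			return clusterStatus{}, nil // status not reconciled yet
		}
		return clusterStatus{FirstRecoverabilityPoint: "2024-09-25T08:52:53Z"}, nil
	}, 30*time.Second)
	fmt.Println(point, err)
}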
By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -383,30 +386,32 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) It("can verify tablespaces and PVC were created", func() { - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.Short]) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.Short]) }) It("can create the volume snapshot backup declaratively and verify the backup", func() { - backupName, err = env.GetResourceNameFromYAML(clusterVolumesnapshoBackupManifest) + backupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterVolumesnapshoBackupManifest) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("creating backup %s and verifying backup is ready", backupName), func() { - backupObject = testUtils.ExecuteBackup( + backupObject = backups.Execute( + env.Ctx, + env.Client, + env.Scheme, namespace, clusterVolumesnapshoBackupManifest, false, - testTimeouts[testUtils.VolumeSnapshotIsReady], - env, + testTimeouts[timeouts.VolumeSnapshotIsReady], ) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterName) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) }) By("checking that volumeSnapshots are properly labeled", func() { Eventually(func(g Gomega) { for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements { - volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name) + volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace, snapshot.Name) g.Expect(err).ToNot(HaveOccurred()) g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName)) g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name)) @@ -423,7 +428,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, tl1 := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: table1, Tablespace: tablespace1, } @@ -431,20 +436,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, tl2 := TableLocator{ Namespace: namespace, ClusterName: clusterName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: table2, Tablespace: tablespace2, } AssertCreateTestData(env, tl2) - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Execute a checkpoint - _, _, err = env.EventuallyExecQueryInInstancePod( - testUtils.PodLocator{ + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, - }, testUtils.PostgresDBName, + }, postgres.PostgresDBName, "CHECKPOINT", RetryTimeout, PollingTime, @@ -454,7 +460,7 @@ var _ = Describe("Tablespaces 
tests", Label(tests.LabelTablespaces, backupName = clusterName + pgTime.GetCurrentTimestampWithFormat("20060102150405") By("creating a volumeSnapshot and waiting until it's completed", func() { - err := testUtils.CreateOnDemandBackupViaKubectlPlugin( + err := backups.CreateOnDemandBackupViaKubectlPlugin( namespace, clusterName, backupName, @@ -468,7 +474,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, AssertArchiveWalOnMinio(namespace, clusterName, clusterName) Eventually(func(g Gomega) { - backupList, err := env.GetBackupList(namespace) + backupList, err := backups.List(env.Ctx, env.Client, namespace) g.Expect(err).ToNot(HaveOccurred()) for _, backup := range backupList.Items { if backup.Name != backupName { @@ -480,13 +486,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, backup.Status.Error) g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(4)) } - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("checking that volumeSnapshots are properly labeled", func() { Eventually(func(g Gomega) { for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements { - volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name) + volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace, snapshot.Name) g.Expect(err).ToNot(HaveOccurred()) g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName)) g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name)) @@ -502,36 +508,38 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, err = os.Setenv("BACKUP_NAME", backupName) Expect(err).ToNot(HaveOccurred()) - clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterVolumesnapshoRestoreManifest) + clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, + clusterVolumesnapshoRestoreManifest) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot", func() { CreateResourceFromFile(namespace, clusterVolumesnapshoRestoreManifest) - AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow], + AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow], env) }) By("verifying that tablespaces and PVC were created", func() { - restoredCluster, err := env.GetCluster(namespace, clusterToRestoreName) + restoredCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, + clusterToRestoreName) Expect(err).ToNot(HaveOccurred()) AssertClusterHasMountPointsAndVolumesForTablespaces(restoredCluster, 2, - testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[testUtils.Short]) + testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[timeouts.Short]) }) By("verifying the correct data exists in the restored cluster", func() { tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToRestoreName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: table1, } AssertDataExpectedCount(env, tableLocator, 2) tableLocator = TableLocator{ Namespace: namespace, ClusterName: clusterToRestoreName, - DatabaseName: 
testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: table2, } AssertDataExpectedCount(env, tableLocator, 2) @@ -541,11 +549,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, It(fmt.Sprintf("can create the cluster by recovery from volume snapshot backup with pitr %v", backupName), func() { By("inserting test data and creating WALs on the cluster to be snapshotted", func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterName, - testUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -566,7 +577,10 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, // including the newly created data within the recovery_target_time time.Sleep(1 * time.Second) // Get the recovery_target_time and pass it to the template engine - recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterName, env) + recoveryTargetTime, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterName, + ) Expect(err).ToNot(HaveOccurred()) err = os.Setenv(recoveryTargetTimeEnv, recoveryTargetTime) Expect(err).ToNot(HaveOccurred()) @@ -586,45 +600,45 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) Expect(snapshotList.Items).To(HaveLen(len(backupObject.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, TablespaceSnapshotPrefix: snapshotTbsEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backupObject, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backupObject, envVars) Expect(err).ToNot(HaveOccurred()) }) - clusterToPITRName, err := env.GetResourceNameFromYAML(clusterVolumesnapshoPITRManifest) + clusterToPITRName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterVolumesnapshoPITRManifest) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot", func() { CreateResourceFromFile(namespace, clusterVolumesnapshoPITRManifest) - AssertClusterIsReady(namespace, clusterToPITRName, testTimeouts[testUtils.ClusterIsReadySlow], + AssertClusterIsReady(namespace, clusterToPITRName, testTimeouts[timeouts.ClusterIsReadySlow], env) }) By("can verify tablespaces and PVC were created", func() { - recoveryCluster, err := env.GetCluster(namespace, clusterToPITRName) + recoveryCluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterToPITRName) Expect(err).ToNot(HaveOccurred()) AssertClusterHasMountPointsAndVolumesForTablespaces(recoveryCluster, 2, - testTimeouts[testUtils.Short]) - AssertClusterHasPvcsAndDataDirsForTablespaces(recoveryCluster, testTimeouts[testUtils.Short]) - AssertDatabaseContainsTablespaces(recoveryCluster, testTimeouts[testUtils.Short]) + testTimeouts[timeouts.Short]) + AssertClusterHasPvcsAndDataDirsForTablespaces(recoveryCluster, testTimeouts[timeouts.Short]) + AssertDatabaseContainsTablespaces(recoveryCluster, testTimeouts[timeouts.Short]) }) By("verifying the correct data exists in the restored cluster", func() { tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToPITRName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: table1, } AssertDataExpectedCount(env, tableLocator, 4) 
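The PITR flow above hinges on capturing a recovery target time from the primary and exporting it so the manifest template can substitute it. A reduced sketch: in the real test the timestamp comes from GetCurrentTimestamp (a query against the primary pod), and since the value of the recoveryTargetTimeEnv constant is not visible here, RECOVERY_TARGET_TIME below is an assumed placeholder name:

package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// Stand-in for GetCurrentTimestamp: format the current time the way
	// PostgreSQL prints timestamps with time zone.
	recoveryTargetTime := time.Now().Format("2006-01-02 15:04:05.000000-07")

	// Export it for the template engine, as the test does with os.Setenv.
	if err := os.Setenv("RECOVERY_TARGET_TIME", recoveryTargetTime); err != nil {
		panic(err)
	}
	fmt.Println(os.Getenv("RECOVERY_TARGET_TIME"))
}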
tableLocator = TableLocator{ Namespace: namespace, ClusterName: clusterToPITRName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: table2, } AssertDataExpectedCount(env, tableLocator, 4) @@ -638,14 +652,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, BeforeAll(func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) clusterSetup(namespace, clusterManifest) }) It("can update cluster by adding tablespaces", func() { By("adding tablespaces to the spec and patching", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeFalse()) @@ -664,30 +678,30 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }, }) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) }) By("verify tablespaces and PVC were created", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) }) By("waiting for the cluster to be ready again", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) }) It("can hibernate via plugin a cluster with tablespaces", func() { - assertCanHibernateClusterWithTablespaces(namespace, clusterName, testUtils.HibernateImperatively, 2) + assertCanHibernateClusterWithTablespaces(namespace, clusterName, hibernateImperatively, 2) }) It("can hibernate via annotation a cluster with tablespaces", func() { - assertCanHibernateClusterWithTablespaces(namespace, clusterName, testUtils.HibernateDeclaratively, 6) + assertCanHibernateClusterWithTablespaces(namespace, clusterName, hibernateDeclaratively, 6) }) It("can fence a cluster with tablespaces using the plugin", func() { @@ -696,13 +710,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("fencing the cluster", func() { - err := testUtils.FencingOn(env, "*", namespace, clusterName, testUtils.UsingPlugin) + err := fencing.On(env.Ctx, env.Client, "*", namespace, clusterName, fencing.UsingPlugin) Expect(err).ToNot(HaveOccurred()) }) By("check all instances become not ready", func() { Eventually(func() (bool, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, 
clusterName) if err != nil { return false, err } @@ -721,13 +735,13 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("un-fencing the cluster", func() { - err := testUtils.FencingOff(env, "*", namespace, clusterName, testUtils.UsingPlugin) + err := fencing.Off(env.Ctx, env.Client, "*", namespace, clusterName, fencing.UsingPlugin) Expect(err).ToNot(HaveOccurred()) }) By("all instances become ready", func() { Eventually(func() (bool, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) if err != nil { return false, err } @@ -746,14 +760,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) By("verify tablespaces and PVC are there", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("verifying all PVCs for tablespaces are recreated", func() { @@ -768,14 +782,14 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, BeforeAll(func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) clusterSetup(namespace, clusterManifest) }) It("can update cluster adding tablespaces", func() { By("patch cluster with primaryUpdateMethod=switchover", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeFalse()) @@ -785,10 +799,10 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) }) By("waiting for the cluster to be ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("adding tablespaces to the spec and patching", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeFalse()) @@ -810,21 +824,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) 
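For reference, fencing.On and fencing.Off with fencing.UsingAnnotation (seen earlier in this patch) work by editing the cnpg.io/fencedInstances annotation on the Cluster, whose value is a JSON list of instance names and where "*" fences every instance; the plugin path used in this test drives the same mechanism. A sketch of just the annotation value construction:

package main

import (
	"encoding/json"
	"fmt"
)

// fencedInstancesValue renders the JSON list stored in the fencing annotation.
func fencedInstancesValue(instances ...string) (string, error) {
	b, err := json.Marshal(instances)
	return string(b), err
}

func main() {
	v, _ := fencedInstancesValue("*")
	fmt.Printf("metadata.annotations[%q] = %s\n", "cnpg.io/fencedInstances", v)
}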
Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) }) }) It("can verify tablespaces and PVC were created", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) }) }) @@ -855,7 +869,7 @@ func AssertTablespaceReconciled( ) { By(fmt.Sprintf("checking if tablespace %v is in reconciled status", tablespaceName), func() { Eventually(func(g Gomega) bool { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for _, state := range cluster.Status.TablespacesStatus { if state.State == apiv1.TablespaceStatusReconciled && state.Name == tablespaceName { @@ -874,7 +888,7 @@ func AssertRoleReconciled( ) { By(fmt.Sprintf("checking if role %v is in reconciled status", roleName), func() { Eventually(func(g Gomega) bool { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for state, names := range cluster.Status.ManagedRolesStatus.ByStatus { if state == apiv1.RoleStatusReconciled { @@ -911,7 +925,7 @@ func AssertClusterHasMountPointsAndVolumesForTablespaces( Eventually(func(g Gomega) { g.Expect(cluster.ContainsTablespaces()).To(BeTrue()) g.Expect(cluster.Spec.Tablespaces).To(HaveLen(numTablespaces)) - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { g.Expect(pod.Spec.Containers).ToNot(BeEmpty()) @@ -967,7 +981,7 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo clusterName := cluster.ObjectMeta.Name By("checking all the required PVCs were created", func() { Eventually(func(g Gomega) { - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) g.Expect(err).ShouldNot(HaveOccurred()) var tablespacePvcNames []string for _, pvc := range pvcList.Items { @@ -987,7 +1001,7 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo } } } - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { for _, tbsConfig := range cluster.Spec.Tablespaces { @@ -999,13 +1013,14 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo By("checking the data directory for the tablespaces is owned by 
postgres", func() { Eventually(func(g Gomega) { // minio may in the same namespace with cluster pod - pvcList, err := env.GetClusterPodList(namespace, clusterName) + pvcList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ShouldNot(HaveOccurred()) for _, pod := range pvcList.Items { for _, tbsConfig := range cluster.Spec.Tablespaces { dataDir := fmt.Sprintf("/var/lib/postgresql/tablespaces/%s/data", tbsConfig.Name) - owner, stdErr, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + owner, stdErr, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: pod.Name, }, nil, @@ -1030,17 +1045,18 @@ func AssertDatabaseContainsTablespaces(cluster *apiv1.Cluster, timeout int) { clusterName := cluster.ObjectMeta.Name By("checking the expected tablespaces are in the database", func() { Eventually(func(g Gomega) { - instances, err := env.GetClusterPodList(namespace, clusterName) + instances, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ShouldNot(HaveOccurred()) var tbsListing string for _, instance := range instances.Items { var stdErr string var err error - tbsListing, stdErr, err = env.ExecQueryInInstancePod( - testUtils.PodLocator{ + tbsListing, stdErr, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: instance.Name, - }, testUtils.AppDBName, + }, postgres.AppDBName, "SELECT oid, spcname, pg_get_userbyid(spcowner) FROM pg_tablespace;", ) g.Expect(stdErr).To(BeEmpty()) @@ -1059,16 +1075,17 @@ func AssertTempTablespaceContent(cluster *apiv1.Cluster, timeout int, content st clusterName := cluster.ObjectMeta.Name By("checking the expected setting in a new PG session", func() { Eventually(func(g Gomega) { - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { g.Expect(err).ShouldNot(HaveOccurred()) } - settingValue, stdErr, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ + settingValue, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, - }, testUtils.AppDBName, + }, postgres.AppDBName, "SHOW temp_tablespaces", ) g.Expect(stdErr).To(BeEmpty()) @@ -1083,17 +1100,18 @@ func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespace namespace := cluster.ObjectMeta.Namespace clusterName := cluster.ObjectMeta.Name - primary, err := env.GetClusterPrimary(namespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { Expect(err).ShouldNot(HaveOccurred()) } By("checking the temporary table is created into the temporary tablespace", func() { - commandOutput, stdErr, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ + commandOutput, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primary.Name, - }, testUtils.AppDBName, + }, postgres.AppDBName, "CREATE TEMPORARY TABLE cnp_e2e_test_table (i INTEGER); "+ "SELECT spcname FROM pg_tablespace WHERE OID="+ "(SELECT reltablespace FROM pg_class WHERE oid = 'cnp_e2e_test_table'::regclass)", @@ -1109,13 +1127,14 @@ func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, 
expectedTempTablespace func AssertTablespaceAndOwnerExist(cluster *apiv1.Cluster, tablespace, owner string) { namespace := cluster.ObjectMeta.Namespace clusterName := cluster.ObjectMeta.Name - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ShouldNot(HaveOccurred()) - result, stdErr, err := env.ExecQueryInInstancePod( - testUtils.PodLocator{ + result, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: primaryPod.Name, - }, testUtils.AppDBName, + }, postgres.AppDBName, fmt.Sprintf("SELECT 1 FROM pg_tablespace WHERE spcname = '%s' AND pg_get_userbyid(spcowner) = '%s';", tablespace, owner), @@ -1129,7 +1148,7 @@ func AssertTablespaceAndOwnerExist(cluster *apiv1.Cluster, tablespace, owner str func assertCanHibernateClusterWithTablespaces( namespace string, clusterName string, - method testUtils.HibernationMethod, + method hibernationMethod, keptPVCs int, ) { By("verifying expected PVCs for tablespaces before hibernate", func() { @@ -1137,13 +1156,13 @@ func assertCanHibernateClusterWithTablespaces( }) By("hibernate the cluster", func() { - err := testUtils.HibernateOn(env, namespace, clusterName, method) + err := hibernateOn(env.Ctx, env.Client, namespace, clusterName, method) Expect(err).ToNot(HaveOccurred()) }) By(fmt.Sprintf("verifying cluster %v pods are removed", clusterName), func() { Eventually(func(g Gomega) { - podList, _ := env.GetClusterPodList(namespace, clusterName) + podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) g.Expect(podList.Items).Should(BeEmpty()) }, 300).Should(Succeed()) }) @@ -1153,22 +1172,22 @@ func assertCanHibernateClusterWithTablespaces( }) By("hibernate off the cluster", func() { - err := testUtils.HibernateOff(env, namespace, clusterName, method) + err := hibernateOff(env.Ctx, env.Client, namespace, clusterName, method) Expect(err).ToNot(HaveOccurred()) }) By("waiting for the cluster to be ready", func() { - AssertClusterIsReady(namespace, clusterName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) }) By("verify tablespaces and PVC are there", func() { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) Expect(cluster.ContainsTablespaces()).To(BeTrue()) - AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[testUtils.PodRollout]) - AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[testUtils.PodRollout]) - AssertDatabaseContainsTablespaces(cluster, testTimeouts[testUtils.PodRollout]) + AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 2, testTimeouts[timeouts.PodRollout]) + AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) + AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) }) By("verifying all PVCs for tablespaces are recreated", func() { @@ -1179,7 +1198,7 @@ func assertCanHibernateClusterWithTablespaces( func eventuallyHasExpectedNumberOfPVCs(pvcCount int, namespace string) { By(fmt.Sprintf("checking cluster eventually has %d PVCs for tablespaces", pvcCount)) Eventually(func(g Gomega) { - pvcList, err := env.GetPVCList(namespace) + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, 
 		g.Expect(err).ShouldNot(HaveOccurred())
 		tbsPvc := 0
 		for _, pvc := range pvcList.Items {
@@ -1190,12 +1209,12 @@ func eventuallyHasExpectedNumberOfPVCs(pvcCount int, namespace string) {
 			tbsPvc++
 		}
 		g.Expect(tbsPvc).Should(Equal(pvcCount))
-	}, testTimeouts[testUtils.ClusterIsReady]).Should(Succeed())
+	}, testTimeouts[timeouts.ClusterIsReady]).Should(Succeed())
 }
 
 func eventuallyHasCompletedBackups(namespace string, numBackups int) {
 	Eventually(func(g Gomega) {
-		backups, err := env.GetBackupList(namespace)
+		backups, err := backups.List(env.Ctx, env.Client, namespace)
 		Expect(err).ShouldNot(HaveOccurred())
 		Expect(backups.Items).To(HaveLen(numBackups))
@@ -1252,3 +1271,78 @@ func getSnapshots(
 	return snapshotList, nil
 }
+
+type hibernationMethod string
+
+const (
+	// hibernateDeclaratively hibernates the cluster by setting the hibernation annotation on it
+	hibernateDeclaratively hibernationMethod = "annotation"
+	// hibernateImperatively hibernates the cluster through the kubectl cnpg plugin
+	hibernateImperatively hibernationMethod = "plugin"
+)
+
+func hibernateOn(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace,
+	clusterName string,
+	method hibernationMethod,
+) error {
+	switch method {
+	case hibernateImperatively:
+		_, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate on %v -n %v",
+			clusterName, namespace))
+		if err != nil {
+			return err
+		}
+		return nil
+	case hibernateDeclaratively:
+		cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
+		if err != nil {
+			return err
+		}
+		if cluster.Annotations == nil {
+			cluster.Annotations = make(map[string]string)
+		}
+		originCluster := cluster.DeepCopy()
+		cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOn
+
+		err = crudClient.Patch(ctx, cluster, client.MergeFrom(originCluster))
+		return err
+	default:
+		return fmt.Errorf("unknown method: %v", method)
+	}
+}
+
+func hibernateOff(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace,
+	clusterName string,
+	method hibernationMethod,
+) error {
+	switch method {
+	case hibernateImperatively:
+		_, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate off %v -n %v",
+			clusterName, namespace))
+		if err != nil {
+			return err
+		}
+		return nil
+	case hibernateDeclaratively:
+		cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
+		if err != nil {
+			return err
+		}
+		if cluster.Annotations == nil {
+			cluster.Annotations = make(map[string]string)
+		}
+		originCluster := cluster.DeepCopy()
+		cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOff
+
+		err = crudClient.Patch(ctx, cluster, client.MergeFrom(originCluster))
+		return err
+	default:
+		return fmt.Errorf("unknown method: %v", method)
+	}
+}
diff --git a/tests/e2e/tolerations_test.go b/tests/e2e/tolerations_test.go
index c5862874cf..16f81c6d52 100644
--- a/tests/e2e/tolerations_test.go
+++ b/tests/e2e/tolerations_test.go
@@ -20,7 +20,8 @@ import (
 	"fmt"
 
 	"github.com/cloudnative-pg/cloudnative-pg/tests"
-	"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" @@ -48,7 +49,7 @@ var _ = Describe("E2E Tolerations Node", Serial, Label(tests.LabelDisruptive, te AfterEach(func() { for _, node := range taintedNodes { cmd := fmt.Sprintf("kubectl taint node %v %s=test:NoSchedule-", node, tolerationKey) - _, _, err := utils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) } taintedNodes = nil @@ -57,16 +58,16 @@ var _ = Describe("E2E Tolerations Node", Serial, Label(tests.LabelDisruptive, te It("can create a cluster with tolerations", func() { var err error // Initialize empty global namespace variable - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("tainting all the nodes", func() { - nodes, _ := env.GetNodeList() + nodes, _ := nodes.List(env.Ctx, env.Client) // We taint all the nodes where we could run the workloads for _, node := range nodes.Items { if (node.Spec.Unschedulable != true) && (len(node.Spec.Taints) == 0) { cmd := fmt.Sprintf("kubectl taint node %v %s=test:NoSchedule", node.Name, tolerationKey) - _, _, err := utils.Run(cmd) + _, _, err := run.Run(cmd) Expect(err).ToNot(HaveOccurred()) taintedNodes = append(taintedNodes, node.Name) } diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go index b3824000c2..44d4784f70 100644 --- a/tests/e2e/update_user_test.go +++ b/tests/e2e/update_user_test.go @@ -27,7 +27,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -50,25 +55,25 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC It("can update the user application password", func() { var err error // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - rwService := testsUtils.GetReadWriteServiceName(clusterName) + rwService := services.GetReadWriteServiceName(clusterName) appSecretName := clusterName + apiv1.ApplicationUserSecretSuffix superUserSecretName := clusterName + apiv1.SuperUserSecretSuffix - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("update user application password", func() { const newPassword = "eeh2Zahohx" //nolint:gosec AssertUpdateSecret("password", newPassword, appSecretName, namespace, clusterName, 30, env) - AssertConnection(namespace, rwService, testsUtils.AppDBName, testsUtils.AppUser, newPassword, env) + AssertConnection(namespace, rwService, postgres.AppDBName, postgres.AppUser, newPassword, env) }) By("fail updating user application password with wrong user in secret", func() { @@ -79,21 +84,21 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC AssertUpdateSecret("username", newUser, appSecretName, namespace, clusterName, 30, env) timeout := time.Second * 10 - dsn := testsUtils.CreateDSN(rwService, newUser, testsUtils.AppDBName, newPassword, testsUtils.Require, 5432) + dsn := services.CreateDSN(rwService, newUser, postgres.AppDBName, newPassword, services.Require, 5432) - _, _, err := env.ExecCommand(env.Ctx, *primaryPod, + _, _, err := exec.Command(env.Ctx, env.Interface, env.RestClientConfig, *primaryPod, specs.PostgresContainerName, &timeout, "psql", dsn, "-tAc", "SELECT 1") Expect(err).To(HaveOccurred()) // Revert the username change - AssertUpdateSecret("username", testsUtils.AppUser, appSecretName, namespace, clusterName, 30, env) + AssertUpdateSecret("username", postgres.AppUser, appSecretName, namespace, clusterName, 30, env) }) By("update superuser password", func() { // Setting EnableSuperuserAccess to true Eventually(func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) cluster.Spec.EnableSuperuserAccess = ptr.To(true) return env.Client.Update(env.Ctx, cluster) @@ -112,7 +117,7 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC const newPassword = "fi6uCae7" //nolint:gosec AssertUpdateSecret("password", newPassword, superUserSecretName, namespace, clusterName, 30, env) - AssertConnection(namespace, rwService, testsUtils.PostgresDBName, testsUtils.PostgresUser, newPassword, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, postgres.PostgresUser, newPassword, env) }) }) }) @@ -134,13 +139,13 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi It("enable and disable superuser access", func() { var err error // Create a cluster in a namespace we'll 
delete after the test - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - clusterName, err := env.GetResourceNameFromYAML(sampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(namespace, clusterName, sampleFile, env) - rwService := testsUtils.GetReadWriteServiceName(clusterName) + rwService := services.GetReadWriteServiceName(clusterName) secretName := clusterName + apiv1.SuperUserSecretSuffix var secret corev1.Secret @@ -149,7 +154,7 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi Name: secretName, } - primaryPod, err := env.GetClusterPrimary(namespace, clusterName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) By("ensure superuser access is disabled by default", func() { @@ -162,12 +167,13 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi query := "SELECT rolpassword IS NULL FROM pg_authid WHERE rolname='postgres'" // We should have the `postgres` user with a null password Eventually(func() string { - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, query) if err != nil { return "" @@ -179,7 +185,7 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi By("enable superuser access", func() { // Setting EnableSuperuserAccess to true Eventually(func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) cluster.Spec.EnableSuperuserAccess = ptr.To(true) return env.Client.Update(env.Ctx, cluster) @@ -191,16 +197,18 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi g.Expect(err).ToNot(HaveOccurred()) }, 90).WithPolling(time.Second).Should(Succeed()) - superUser, superUserPass, err := testsUtils.GetCredentials(clusterName, namespace, - apiv1.SuperUserSecretSuffix, env) + superUser, superUserPass, err := secrets.GetCredentials( + env.Ctx, env.Client, + clusterName, namespace, apiv1.SuperUserSecretSuffix, + ) Expect(err).ToNot(HaveOccurred()) - AssertConnection(namespace, rwService, testsUtils.PostgresDBName, superUser, superUserPass, env) + AssertConnection(namespace, rwService, postgres.PostgresDBName, superUser, superUserPass, env) }) By("disable superuser access", func() { // Setting EnableSuperuserAccess to false Eventually(func() error { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).NotTo(HaveOccurred()) cluster.Spec.EnableSuperuserAccess = ptr.To(false) return env.Client.Update(env.Ctx, cluster) diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index f698d29c6f..c96704c38c 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -36,8 +36,16 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + 
"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -120,14 +128,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // Since the 'cnpg-system' namespace is deleted after each spec is completed, // we should create it and then create the pull image secret - err := env.EnsureNamespace(operatorNamespace) + err := namespaces.EnsureNamespace(env.Ctx, env.Client, operatorNamespace) Expect(err).NotTo(HaveOccurred()) dockerServer := os.Getenv("DOCKER_SERVER") dockerUsername := os.Getenv("DOCKER_USERNAME") dockerPassword := os.Getenv("DOCKER_PASSWORD") if dockerServer != "" && dockerUsername != "" && dockerPassword != "" { - _, _, err := testsUtils.Run(fmt.Sprintf(`kubectl -n %v create secret docker-registry + _, _, err := run.Run(fmt.Sprintf(`kubectl -n %v create secret docker-registry cnpg-pull-secret --docker-server="%v" --docker-username="%v" @@ -170,10 +178,10 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O databaseName := "appdb" By("checking basic functionality performing a configuration upgrade on the cluster", func() { - podList, err := env.GetClusterPodList(upgradeNamespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName) Expect(err).ToNot(HaveOccurred()) // Gather current primary - cluster, err := env.GetCluster(upgradeNamespace, clusterName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, upgradeNamespace, clusterName) Expect(cluster.Status.CurrentPrimary, err).To(BeEquivalentTo(cluster.Status.TargetPrimary)) oldPrimary := cluster.Status.CurrentPrimary @@ -191,12 +199,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // Check that both parameters have been modified in each pod for _, pod := range podList.Items { Eventually(func() (int, error) { - stdout, stderr, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, stderr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, "show max_replication_slots") if err != nil { return 0, err @@ -210,12 +219,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O "Pod %v should have updated its config", pod.Name) Eventually(func() (int, error, error) { - stdout, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.PostgresDBName, + postgres.PostgresDBName, "show maintenance_work_mem") value, atoiErr := strconv.Atoi(strings.Trim(stdout, "MB\n")) return value, err, atoiErr @@ -224,7 +234,7 @@ var _ = Describe("Upgrade", 
Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O } // Check that a switchover happened Eventually(func() (bool, error) { - c, err := env.GetCluster(upgradeNamespace, clusterName) + c, err := clusterutils.Get(env.Ctx, env.Client, upgradeNamespace, clusterName) Expect(err).ToNot(HaveOccurred()) GinkgoWriter.Printf("Current Primary: %s, Current Primary timestamp: %s\n", @@ -243,15 +253,16 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O By("verifying that all the standbys streams from the primary", func() { // To check this we find the primary and create a table on it. // The table should be replicated on the standbys. - primary, err := env.GetClusterPrimary(upgradeNamespace, clusterName) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, upgradeNamespace, clusterName) Expect(err).ToNot(HaveOccurred()) query := "CREATE TABLE IF NOT EXISTS postswitch(i int);" - _, _, err = env.EventuallyExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primary.Namespace, PodName: primary.Name, - }, testsUtils.DatabaseName(databaseName), + }, exec.DatabaseName(databaseName), query, RetryTimeout, PollingTime, @@ -270,12 +281,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O return "", err } - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - testsUtils.DatabaseName(databaseName), + exec.DatabaseName(databaseName), "SELECT count(*) = 0 FROM postswitch") return strings.TrimSpace(out), err }, 240).Should(BeEquivalentTo("t"), @@ -286,12 +298,12 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // getExecutableHashesFromInstances prints the manager's executable hash of each pod to a given IO writer getExecutableHashesFromInstances := func(upgradeNamespace, clusterName string, w io.Writer) error { - pods, err := env.GetClusterPodList(upgradeNamespace, clusterName) + pods, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName) if err != nil { return err } for _, pod := range pods.Items { - status, err := testsUtils.RetrievePgStatusFromInstance(env, pod, true) + status, err := proxy.RetrievePgStatusFromInstance(env.Ctx, env.Interface, pod, true) if err != nil { continue } @@ -367,7 +379,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O } err := retry.OnError(backoffCheckingPodRestarts, shouldRetry, func() error { var currentUIDs []types.UID - currentPodList, err := env.GetClusterPodList(namespace, clusterName) + currentPodList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) if err != nil { return err } @@ -388,15 +400,20 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O GinkgoWriter.Println("cleaning up") if CurrentSpecReport().Failed() { // Dump the minio namespace when failed - env.DumpNamespaceObjects(minioEnv.Namespace, "out/"+CurrentSpecReport().LeafNodeText+"minio.log") + namespaces.DumpNamespaceObjects( + env.Ctx, env.Client, + minioEnv.Namespace, "out/"+CurrentSpecReport().LeafNodeText+"minio.log", + ) // Dump the operator namespace, as operator is changing too - env.DumpOperator(operatorNamespace, + operator.Dump( + env.Ctx, env.Client, + 
operatorNamespace, "out/"+CurrentSpecReport().LeafNodeText+"operator.log") } // Delete the operator's namespace in case that the previous test make corrupted changes to // the operator's namespace so that affects subsequent test - if err := env.DeleteNamespaceAndWait(operatorNamespace, 60); err != nil { + if err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, operatorNamespace, 60); err != nil { return fmt.Errorf("could not cleanup, failed to delete operator namespace: %v", err) } @@ -419,7 +436,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O namespacePrefix), func() { var err error // Create a upgradeNamespace for all the resources - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating a upgradeNamespace should be quick @@ -440,18 +457,18 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O deployOperator := func(operatorManifestFile string) { By(fmt.Sprintf("applying manager manifest %s", operatorManifestFile), func() { // Upgrade to the new version - _, stderr, err := testsUtils.Run( + _, stderr, err := run.Run( fmt.Sprintf("kubectl apply --server-side --force-conflicts -f %v", operatorManifestFile)) Expect(err).NotTo(HaveOccurred(), "stderr: "+stderr) }) By("waiting for the deployment to be rolled out", func() { - deployment, err := env.GetOperatorDeployment() + deployment, err := operator.GetDeployment(env.Ctx, env.Client) Expect(err).NotTo(HaveOccurred()) timeout := 240 Eventually(func() error { - _, stderr, err := testsUtils.Run(fmt.Sprintf( + _, stderr, err := run.Run(fmt.Sprintf( "kubectl -n %v rollout status deployment %v -w --timeout=%vs", operatorNamespace, deployment.Name, @@ -465,7 +482,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O }, timeout).ShouldNot(HaveOccurred()) }) By("getting the operator info", func() { - pod, err := env.GetOperatorPod() + pod, err := operator.GetPod(env.Ctx, env.Client) Expect(err).NotTo(HaveOccurred()) GinkgoWriter.Println("image used for operator", pod.Spec.Containers[0].Image) }) @@ -481,12 +498,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O CreateResourceFromFile(upgradeNamespace, pgSecrets) }) By("creating the cloud storage credentials", func() { - _, err := testsUtils.CreateObjectStorageSecret( + _, err := secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, upgradeNamespace, "aws-creds", "minio", "minio123", - env, ) Expect(err).NotTo(HaveOccurred()) }) @@ -516,7 +534,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // By doing that we don't test that the online upgrade won't // trigger any Pod restart. We still test that the operator // is upgraded in this case too. 
- _, stderr, err := testsUtils.Run( + _, stderr, err := run.Run( fmt.Sprintf("kubectl annotate -n %s cluster/%s cnpg.io/reconcilePodSpec=disabled", upgradeNamespace, clusterName1)) Expect(err).NotTo(HaveOccurred(), "stderr: "+stderr) @@ -525,7 +543,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // Cluster ready happens after minio is ready By("having a Cluster with three instances ready", func() { - AssertClusterIsReady(upgradeNamespace, clusterName1, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(upgradeNamespace, clusterName1, testTimeouts[timeouts.ClusterIsReady], env) }) By("creating a Pooler with two instances", func() { @@ -535,15 +553,16 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // Now that everything is in place, we add a bit of data we'll use to // check if the backup is working By("creating data on the database", func() { - primary, err := env.GetClusterPrimary(upgradeNamespace, clusterName1) + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, upgradeNamespace, clusterName1) Expect(err).ToNot(HaveOccurred()) query := "CREATE TABLE IF NOT EXISTS to_restore AS VALUES (1),(2);" - _, _, err = env.EventuallyExecQueryInInstancePod( - testsUtils.PodLocator{ + _, _, err = exec.EventuallyExecQueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: primary.Namespace, PodName: primary.Name, - }, testsUtils.DatabaseName(databaseName), + }, exec.DatabaseName(databaseName), query, RetryTimeout, PollingTime, @@ -571,8 +590,9 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // A file called data.tar.gz should be available on minio Eventually(func() (int, error, error) { - out, _, err := env.ExecCommandInContainer( - testsUtils.ContainerLocator{ + out, _, err := exec.CommandInContainer( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.ContainerLocator{ Namespace: minioEnv.Namespace, PodName: minioEnv.Client.Name, ContainerName: "mc", @@ -592,7 +612,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O assertPGBouncerPodsAreReady(upgradeNamespace, pgBouncerSampleFile, 2) var podUIDs []types.UID - podList, err := env.GetClusterPodList(upgradeNamespace, clusterName1) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName1) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { podUIDs = append(podUIDs, pod.GetUID()) @@ -641,7 +661,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // the instance pods should not restart By("verifying that the instance pods are not restarted", func() { - podList, err := env.GetClusterPodList(upgradeNamespace, clusterName1) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, upgradeNamespace, clusterName1) Expect(err).ToNot(HaveOccurred()) for _, pod := range podList.Items { Expect(pod.Status.ContainerStatuses).NotTo(BeEmpty()) @@ -656,7 +676,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O err := os.Setenv("SERVER_NAME", serverName2) Expect(err).ToNot(HaveOccurred()) CreateResourceFromFile(upgradeNamespace, sampleFile2) - AssertClusterIsReady(upgradeNamespace, clusterName2, testTimeouts[testsUtils.ClusterIsReady], env) + AssertClusterIsReady(upgradeNamespace, clusterName2, testTimeouts[timeouts.ClusterIsReady], env) }) AssertConfUpgrade(clusterName2, upgradeNamespace) @@ -666,17 +686,18 @@ 
var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O By("restoring the backup taken from the first Cluster in a new cluster", func() { restoredClusterName := "cluster-restore" CreateResourceFromFile(upgradeNamespace, restoreFile) - AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[testsUtils.ClusterIsReadySlow], + AssertClusterIsReady(upgradeNamespace, restoredClusterName, testTimeouts[timeouts.ClusterIsReadySlow], env) // Test data should be present on restored primary primary := restoredClusterName + "-1" - out, _, err := env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName(databaseName), + exec.DatabaseName(databaseName), "SELECT count(*) FROM to_restore") Expect(strings.Trim(out, "\n"), err).To(BeEquivalentTo("2")) @@ -684,12 +705,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // we expect a promotion. We can't enforce "2" because the timeline // ID will also depend on the history files existing in the cloud // storage and we don't know the status of that. - out, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName(databaseName), + exec.DatabaseName(databaseName), "select substring(pg_walfile_name(pg_current_wal_lsn()), 1, 8)") Expect(err).NotTo(HaveOccurred()) Expect(strconv.Atoi(strings.Trim(out, "\n"))).To( @@ -697,12 +719,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O // Restored standbys should soon attach themselves to restored primary Eventually(func() (string, error) { - out, _, err = env.ExecQueryInInstancePod( - testsUtils.PodLocator{ + out, _, err = exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: upgradeNamespace, PodName: primary, }, - testsUtils.DatabaseName(databaseName), + exec.DatabaseName(databaseName), "SELECT count(*) FROM pg_stat_replication") return strings.Trim(out, "\n"), err }, 180).Should(BeEquivalentTo("2")) @@ -756,13 +779,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O It("keeps clusters working after a rolling upgrade", func() { upgradeNamespacePrefix := rollingUpgradeNamespace By("applying environment changes for current upgrade to be performed", func() { - testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, false, env) + operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, false) }) - mostRecentTag, err := testsUtils.GetMostRecentReleaseTag("../../releases") + mostRecentTag, err := operator.GetMostRecentReleaseTag("../../releases") Expect(err).NotTo(HaveOccurred()) GinkgoWriter.Printf("installing the recent CNPG tag %s\n", mostRecentTag) - testsUtils.InstallLatestCNPGOperator(mostRecentTag, env) + operator.InstallLatest(env.Client, mostRecentTag) DeferCleanup(cleanupOperatorAndMinio) upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix) @@ -772,14 +795,14 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O It("keeps clusters working after an online upgrade", func() { upgradeNamespacePrefix := onlineUpgradeNamespace By("applying environment 
changes for current upgrade to be performed", func() { - testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, true, env) + operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, true) }) - mostRecentTag, err := testsUtils.GetMostRecentReleaseTag("../../releases") + mostRecentTag, err := operator.GetMostRecentReleaseTag("../../releases") Expect(err).NotTo(HaveOccurred()) GinkgoWriter.Printf("installing the recent CNPG tag %s\n", mostRecentTag) - testsUtils.InstallLatestCNPGOperator(mostRecentTag, env) + operator.InstallLatest(env.Client, mostRecentTag) DeferCleanup(cleanupOperatorAndMinio) upgradeNamespace := assertCreateNamespace(upgradeNamespacePrefix) @@ -796,7 +819,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O It("keeps clusters working after an online upgrade", func() { upgradeNamespacePrefix := onlineUpgradeNamespace By("applying environment changes for current upgrade to be performed", func() { - testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, true, env) + operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, true) }) GinkgoWriter.Printf("installing the current operator %s\n", currentOperatorManifest) @@ -810,7 +833,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O It("keeps clusters working after a rolling upgrade", func() { upgradeNamespacePrefix := rollingUpgradeNamespace By("applying environment changes for current upgrade to be performed", func() { - testsUtils.CreateOperatorConfigurationMap(operatorNamespace, configName, false, env) + operator.CreateConfigMap(env.Ctx, env.Client, operatorNamespace, configName, false) }) GinkgoWriter.Printf("installing the current operator %s\n", currentOperatorManifest) deployOperator(currentOperatorManifest) diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index 74f32e8bca..fce9351788 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -31,8 +31,14 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" - testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -75,11 +81,11 @@ var _ = Describe("Verify Volume Snapshot", Skip("Test depth is lower than the amount requested for this test") } var err error - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) // Initializing namespace variable to be used in test case - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) // Creating a cluster with three nodes @@ -89,7 +95,7 @@ var _ = Describe("Verify Volume Snapshot", It("can create a Volume Snapshot", func() { var backupObject apiv1.Backup By("creating a volumeSnapshot and waiting until it's completed", func() { - err := testUtils.CreateOnDemandBackupViaKubectlPlugin( + err := backups.CreateOnDemandBackupViaKubectlPlugin( namespace, clusterName, "", @@ -101,7 +107,7 @@ var _ = Describe("Verify Volume Snapshot", // trigger a checkpoint as the backup may run on standby CheckPointAndSwitchWalOnPrimary(namespace, clusterName) Eventually(func(g Gomega) { - backupList, err := env.GetBackupList(namespace) + backupList, err := backups.List(env.Ctx, env.Client, namespace) g.Expect(err).ToNot(HaveOccurred()) for _, backup := range backupList.Items { if !strings.Contains(backup.Name, clusterName) { @@ -113,13 +119,14 @@ var _ = Describe("Verify Volume Snapshot", backup.Status.Error) g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) } - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("checking that volumeSnapshots are properly labeled", func() { Eventually(func(g Gomega) { for _, snapshot := range backupObject.Status.BackupSnapshotStatus.Elements { - volumeSnapshot, err := env.GetVolumeSnapshot(namespace, snapshot.Name) + volumeSnapshot, err := backups.GetVolumeSnapshot(env.Ctx, env.Client, namespace, + snapshot.Name) g.Expect(err).ToNot(HaveOccurred()) g.Expect(volumeSnapshot.Name).Should(ContainSubstring(clusterName)) g.Expect(volumeSnapshot.Labels[utils.BackupNameLabelName]).To(BeEquivalentTo(backupObject.Name)) @@ -157,10 +164,10 @@ var _ = Describe("Verify Volume Snapshot", } var err error - clusterToSnapshotName, err = env.GetResourceNameFromYAML(clusterToSnapshot) + clusterToSnapshotName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToSnapshot) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("create the certificates for MinIO", func() { @@ -168,12 +175,14 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) }) - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", "minio", "minio123", - env) + ) Expect(err).ToNot(HaveOccurred()) }) @@ -194,11 +203,13 @@ var _ = Describe("Verify Volume Snapshot", }) By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { connectionStatus, err := 
minio.TestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", + minioEnv.ServiceName) if err != nil { return false, err } @@ -210,13 +221,15 @@ var _ = Describe("Verify Volume Snapshot", By("creating a snapshot and waiting until it's completed", func() { var err error backupName := fmt.Sprintf("%s-example", clusterToSnapshotName) - backup, err = testUtils.CreateOnDemandBackup( + backup, err = backups.CreateOnDemand( + env.Ctx, + env.Client, namespace, clusterToSnapshotName, backupName, apiv1.BackupTargetStandby, apiv1.BackupMethodVolumeSnapshot, - env) + ) Expect(err).ToNot(HaveOccurred()) // trigger a checkpoint CheckPointAndSwitchWalOnPrimary(namespace, clusterToSnapshotName) @@ -231,7 +244,7 @@ var _ = Describe("Verify Volume Snapshot", "Backup should be completed correctly, error message is '%s'", backup.Status.Error) g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("fetching the volume snapshots", func() { @@ -239,11 +252,11 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) Expect(err).ToNot(HaveOccurred()) }) @@ -252,7 +265,7 @@ var _ = Describe("Verify Volume Snapshot", tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToSnapshotName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -262,16 +275,22 @@ var _ = Describe("Verify Volume Snapshot", // including the newly created data within the recovery_target_time time.Sleep(1 * time.Second) // Get the recovery_target_time and pass it to the template engine - recoveryTargetTime, err := testUtils.GetCurrentTimestamp(namespace, clusterToSnapshotName, env) + recoveryTargetTime, err := postgres.GetCurrentTimestamp( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + namespace, clusterToSnapshotName, + ) Expect(err).ToNot(HaveOccurred()) err = os.Setenv(recoveryTargetTimeEnv, recoveryTargetTime) Expect(err).ToNot(HaveOccurred()) - forward, conn, err := testUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterToSnapshotName, - testUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -287,19 +306,20 @@ var _ = Describe("Verify Volume Snapshot", AssertArchiveWalOnMinio(namespace, clusterToSnapshotName, clusterToSnapshotName) }) - clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterSnapshotRestoreFile) + clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterSnapshotRestoreFile) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot and PITR", func() { AssertCreateCluster(namespace, clusterToRestoreName, clusterSnapshotRestoreFile, env) - AssertClusterIsReady(namespace, 
clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow], + env) }) By("verifying the correct data exists in the restored cluster", func() { tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToRestoreName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) @@ -369,18 +389,18 @@ var _ = Describe("Verify Volume Snapshot", } var err error - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) DeferCleanup(func() { _ = os.Unsetenv(snapshotDataEnv) _ = os.Unsetenv(snapshotWalEnv) }) - clusterToBackupName, err = env.GetResourceNameFromYAML(clusterToBackupFilePath) + clusterToBackupName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToBackupFilePath) Expect(err).ToNot(HaveOccurred()) By("creating the cluster on which to execute the backup", func() { AssertCreateCluster(namespace, clusterToBackupName, clusterToBackupFilePath, env) - AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadySlow], env) }) }) @@ -389,13 +409,13 @@ var _ = Describe("Verify Volume Snapshot", tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToBackupName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) }) - backupName, err := env.GetResourceNameFromYAML(backupFileFilePath) + backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupFileFilePath) Expect(err).ToNot(HaveOccurred()) By("executing the backup", func() { @@ -406,13 +426,14 @@ var _ = Describe("Verify Volume Snapshot", var backup apiv1.Backup By("waiting the backup to complete", func() { Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup) + err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, + &backup) g.Expect(err).ToNot(HaveOccurred()) g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted), "Backup should be completed correctly, error message is '%s'", backup.Status.Error) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -427,26 +448,26 @@ var _ = Describe("Verify Volume Snapshot", var clusterToBackup *apiv1.Cluster By("fetching the created cluster", func() { - clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName) + clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName) Expect(err).ToNot(HaveOccurred()) }) snapshotList := getAndVerifySnapshots(clusterToBackup, backup) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, &backup, envVars) + err = 
storage.SetSnapshotNameAsEnv(&snapshotList, &backup, envVars) Expect(err).ToNot(HaveOccurred()) - clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterToRestoreFilePath) + clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterToRestoreFilePath) Expect(err).ToNot(HaveOccurred()) By("executing the restore", func() { CreateResourceFromFile(namespace, clusterToRestoreFilePath) AssertClusterIsReady(namespace, clusterToRestoreName, - testTimeouts[testUtils.ClusterIsReady], + testTimeouts[timeouts.ClusterIsReady], env, ) }) @@ -455,14 +476,14 @@ var _ = Describe("Verify Volume Snapshot", tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToRestoreName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 2) }) }) It("can take a snapshot targeting the primary", func() { - backupName, err := env.GetResourceNameFromYAML(backupPrimaryFilePath) + backupName, err := yaml.GetResourceNameFromYAML(env.Scheme, backupPrimaryFilePath) Expect(err).ToNot(HaveOccurred()) By("executing the backup", func() { @@ -473,14 +494,15 @@ var _ = Describe("Verify Volume Snapshot", var backup apiv1.Backup By("waiting the backup to complete", func() { Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup) + err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, + &backup) g.Expect(err).ToNot(HaveOccurred()) g.Expect(backup.Status.Phase).To( BeEquivalentTo(apiv1.BackupPhaseCompleted), "Backup should be completed correctly, error message is '%s'", backup.Status.Error) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -495,20 +517,21 @@ var _ = Describe("Verify Volume Snapshot", var clusterToBackup *apiv1.Cluster By("fetching the created cluster", func() { - clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName) + clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName) Expect(err).ToNot(HaveOccurred()) }) _ = getAndVerifySnapshots(clusterToBackup, backup) By("ensuring cluster resumes after snapshot", func() { - AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadyQuick], + env) }) }) It("can take a snapshot in a single instance cluster", func() { By("scaling down the cluster to a single instance", func() { - cluster, err := env.GetCluster(namespace, clusterToBackupName) + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName) Expect(err).ToNot(HaveOccurred()) updated := cluster.DeepCopy() @@ -519,21 +542,23 @@ var _ = Describe("Verify Volume Snapshot", By("ensuring there is only one pod", func() { Eventually(func(g Gomega) { - pods, err := env.GetClusterPodList(namespace, clusterToBackupName) + pods, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterToBackupName) g.Expect(err).NotTo(HaveOccurred()) g.Expect(pods.Items).To(HaveLen(1)) - }, 
testTimeouts[testUtils.ClusterIsReadyQuick]).Should(Succeed()) + }, testTimeouts[timeouts.ClusterIsReadyQuick]).Should(Succeed()) }) backupName := "single-instance-snap" By("taking a backup snapshot", func() { - _, err := testUtils.CreateOnDemandBackup( + _, err := backups.CreateOnDemand( + env.Ctx, + env.Client, namespace, clusterToBackupName, backupName, apiv1.BackupTargetStandby, apiv1.BackupMethodVolumeSnapshot, - env) + ) Expect(err).NotTo(HaveOccurred()) }) @@ -541,13 +566,14 @@ var _ = Describe("Verify Volume Snapshot", var backup apiv1.Backup By("waiting the backup to complete", func() { Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, &backup) + err := env.Client.Get(env.Ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, + &backup) g.Expect(err).ToNot(HaveOccurred()) g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted), "Backup should be completed correctly, error message is '%s'", backup.Status.Error) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) - testUtils.AssertBackupConditionInClusterStatus(env, namespace, clusterToBackupName) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) + backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterToBackupName) }) By("checking that the backup status is correctly populated", func() { @@ -562,14 +588,15 @@ var _ = Describe("Verify Volume Snapshot", var clusterToBackup *apiv1.Cluster By("fetching the created cluster", func() { var err error - clusterToBackup, err = env.GetCluster(namespace, clusterToBackupName) + clusterToBackup, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterToBackupName) Expect(err).ToNot(HaveOccurred()) }) _ = getAndVerifySnapshots(clusterToBackup, backup) By("ensuring cluster resumes after snapshot", func() { - AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[testUtils.ClusterIsReadyQuick], env) + AssertClusterIsReady(namespace, clusterToBackupName, testTimeouts[timeouts.ClusterIsReadyQuick], + env) }) }) }) @@ -591,17 +618,17 @@ var _ = Describe("Verify Volume Snapshot", ) var clusterToSnapshotName string - var backup *apiv1.Backup + var backupTaken *apiv1.Backup BeforeAll(func() { if testLevelEnv.Depth < int(level) { Skip("Test depth is lower than the amount requested for this test") } var err error - clusterToSnapshotName, err = env.GetResourceNameFromYAML(clusterToSnapshot) + clusterToSnapshotName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterToSnapshot) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("create the certificates for MinIO", func() { @@ -610,12 +637,13 @@ var _ = Describe("Verify Volume Snapshot", }) By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -625,11 +653,13 @@ var _ = Describe("Verify Volume Snapshot", }) By("verify test connectivity to minio using barman-cloud-wal-archive script", func() { - primaryPod, err := env.GetClusterPrimary(namespace, clusterToSnapshotName) + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterToSnapshotName) 
Expect(err).ToNot(HaveOccurred()) Eventually(func() (bool, error) { connectionStatus, err := minio.TestConnectivityUsingBarmanCloudWalArchive( - namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName) + namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", + minioEnv.ServiceName) if err != nil { return false, err } @@ -648,11 +678,14 @@ var _ = Describe("Verify Volume Snapshot", }) By("inserting test data and creating WALs on the cluster to be snapshotted", func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterToSnapshotName, - testUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -664,7 +697,7 @@ var _ = Describe("Verify Volume Snapshot", tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToSnapshotName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertCreateTestData(env, tableLocator) @@ -680,7 +713,8 @@ var _ = Describe("Verify Volume Snapshot", By("creating a snapshot and waiting until it's completed", func() { var err error backupName := fmt.Sprintf("%s-online", clusterToSnapshotName) - backup, err = testUtils.CreateBackup( + backupTaken, err = backups.Create( + env.Ctx, env.Client, apiv1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -692,7 +726,6 @@ var _ = Describe("Verify Volume Snapshot", Cluster: apiv1.LocalObjectReference{Name: clusterToSnapshotName}, }, }, - env, ) Expect(err).ToNot(HaveOccurred()) @@ -700,42 +733,43 @@ var _ = Describe("Verify Volume Snapshot", err = env.Client.Get(env.Ctx, types.NamespacedName{ Namespace: namespace, Name: backupName, - }, backup) + }, backupTaken) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted), + g.Expect(backupTaken.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseCompleted), "Backup should be completed correctly, error message is '%s'", - backup.Status.Error) - g.Expect(backup.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) - g.Expect(backup.Status.BackupLabelFile).ToNot(BeEmpty()) - }, testTimeouts[testUtils.VolumeSnapshotIsReady]).Should(Succeed()) + backupTaken.Status.Error) + g.Expect(backupTaken.Status.BackupSnapshotStatus.Elements).To(HaveLen(2)) + g.Expect(backupTaken.Status.BackupLabelFile).ToNot(BeEmpty()) + }, testTimeouts[timeouts.VolumeSnapshotIsReady]).Should(Succeed()) }) By("fetching the volume snapshots", func() { - snapshotList, err := getSnapshots(backup.Name, clusterToSnapshotName, namespace) + snapshotList, err := getSnapshots(backupTaken.Name, clusterToSnapshotName, namespace) Expect(err).ToNot(HaveOccurred()) - Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements))) + Expect(snapshotList.Items).To(HaveLen(len(backupTaken.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backupTaken, envVars) Expect(err).ToNot(HaveOccurred()) }) - clusterToRestoreName, err := env.GetResourceNameFromYAML(clusterSnapshotRestoreFile) + clusterToRestoreName, err := yaml.GetResourceNameFromYAML(env.Scheme, 
clusterSnapshotRestoreFile) Expect(err).ToNot(HaveOccurred()) By("creating the cluster to be restored through snapshot and PITR", func() { AssertCreateCluster(namespace, clusterToRestoreName, clusterSnapshotRestoreFile, env) - AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[testUtils.ClusterIsReadySlow], env) + AssertClusterIsReady(namespace, clusterToRestoreName, testTimeouts[timeouts.ClusterIsReadySlow], + env) }) By("verifying the correct data exists in the restored cluster", func() { tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToRestoreName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 4) @@ -746,11 +780,14 @@ var _ = Describe("Verify Volume Snapshot", // insert some data after the snapshot is taken, we want to verify the data exists in // the new pod when cluster scaled up By("inserting more test data and creating WALs on the cluster snapshotted", func() { - forward, conn, err := testUtils.ForwardPSQLConnection( - env, + forward, conn, err := postgres.ForwardPSQLConnection( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, namespace, clusterToSnapshotName, - testUtils.AppDBName, + postgres.AppDBName, apiv1.ApplicationUserSecretSuffix, ) defer func() { @@ -768,37 +805,38 @@ var _ = Describe("Verify Volume Snapshot", // reuse the snapshot taken from the clusterToSnapshot cluster By("fetching the volume snapshots", func() { - snapshotList, err := getSnapshots(backup.Name, clusterToSnapshotName, namespace) + snapshotList, err := getSnapshots(backupTaken.Name, clusterToSnapshotName, namespace) Expect(err).ToNot(HaveOccurred()) - Expect(snapshotList.Items).To(HaveLen(len(backup.Status.BackupSnapshotStatus.Elements))) + Expect(snapshotList.Items).To(HaveLen(len(backupTaken.Status.BackupSnapshotStatus.Elements))) - envVars := testUtils.EnvVarsForSnapshots{ + envVars := storage.EnvVarsForSnapshots{ DataSnapshot: snapshotDataEnv, WalSnapshot: snapshotWalEnv, } - err = testUtils.SetSnapshotNameAsEnv(&snapshotList, backup, envVars) + err = storage.SetSnapshotNameAsEnv(&snapshotList, backupTaken, envVars) Expect(err).ToNot(HaveOccurred()) }) By("scale up the cluster", func() { - err := env.ScaleClusterSize(namespace, clusterToSnapshotName, 3) + err := clusterutils.ScaleSize(env.Ctx, env.Client, namespace, clusterToSnapshotName, 3) Expect(err).ToNot(HaveOccurred()) }) By("checking the the cluster is working", func() { // Setting up a cluster with three pods is slow, usually 200-600s - AssertClusterIsReady(namespace, clusterToSnapshotName, testTimeouts[testUtils.ClusterIsReady], env) + AssertClusterIsReady(namespace, clusterToSnapshotName, testTimeouts[timeouts.ClusterIsReady], env) }) // we need to verify the streaming replica continue works By("verifying the correct data exists in the new pod of the scaled cluster", func() { - podList, err := env.GetClusterReplicas(namespace, clusterToSnapshotName) + podList, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, + clusterToSnapshotName) Expect(err).ToNot(HaveOccurred()) Expect(podList.Items).To(HaveLen(2)) tableLocator := TableLocator{ Namespace: namespace, ClusterName: clusterToSnapshotName, - DatabaseName: testUtils.AppDBName, + DatabaseName: postgres.AppDBName, TableName: tableName, } AssertDataExpectedCount(env, tableLocator, 6) diff --git a/tests/e2e/wal_restore_parallel_test.go b/tests/e2e/wal_restore_parallel_test.go index 03906b7152..5314ec6b7f 100644 --- 
a/tests/e2e/wal_restore_parallel_test.go +++ b/tests/e2e/wal_restore_parallel_test.go @@ -23,7 +23,11 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -63,19 +67,20 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun ) const namespacePrefix = "pg-backup-minio-wal-max-parallel" - clusterName, err := env.GetResourceNameFromYAML(clusterWithMinioSampleFile) + clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, clusterWithMinioSampleFile) Expect(err).ToNot(HaveOccurred()) - namespace, err = env.CreateUniqueTestNamespace(namespacePrefix) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) By("creating the credentials for minio", func() { - _, err = testUtils.CreateObjectStorageSecret( + _, err = secrets.CreateObjectStorageSecret( + env.Ctx, + env.Client, namespace, "backup-storage-creds", "minio", "minio123", - env, ) Expect(err).ToNot(HaveOccurred()) }) @@ -89,12 +94,12 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun AssertCreateCluster(namespace, clusterName, clusterWithMinioSampleFile, env) // Get the primary - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primary = pod.GetName() // Get the standby - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) for _, po := range podList.Items { if po.Name != primary { @@ -108,7 +113,7 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Make sure both Wal-archive and Minio work // Create a WAL on the primary and check if it arrives at minio, within a short time By("archiving WALs and verifying they exist", func() { - pod, err := env.GetClusterPrimary(namespace, clusterName) + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) primary := pod.GetName() latestWAL = switchWalAndGetLatestArchive(namespace, primary) @@ -146,8 +151,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun By("asserting the spool directory is empty on the standby", func() { if !testUtils.TestDirectoryEmpty(namespace, standby, SpoolDirectory) { purgeSpoolDirectoryCmd := "rm " + SpoolDirectory + "/*" - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -161,8 +167,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #1 is in the output location, #2 and #3 are in the spool directory. // The flag is unset. 
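		// A note on the mechanism exercised by the invocations below, inferred
		// from the expected outcomes this test spells out: wal-restore runs with
		// parallel fetching enabled, so each call returns the requested segment
		// and prefetches the following ones (here, up to two more) into the
		// spool directory; the end-of-WAL-stream flag is raised whenever fewer
		// segments than requested could be fetched from the archive, and later
		// requests are served from the spool when possible.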
By("invoking the wal-restore command requesting #1 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -194,8 +201,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #2 is in the output location, #3 is in the spool directory. // The flag is unset. By("invoking the wal-restore command requesting #2 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -223,8 +231,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #3 is in the output location, spool directory is empty. // The flag is unset. By("invoking the wal-restore command requesting #3 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -245,8 +254,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #4 is in the output location, #5 is in the spool directory. // The flag is set because #6 file not present. By("invoking the wal-restore command requesting #4 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -281,8 +291,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Expected outcome: // exit code 0, #5 is in the output location, no files in the spool directory. The flag is still present. By("invoking the wal-restore command requesting #5 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -309,8 +320,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // Expected outcome: // exit code 1, output location untouched, no files in the spool directory. The flag is unset. By("invoking the wal-restore command requesting #6 wal", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, @@ -332,8 +344,9 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun // exit code 0, #6 is in the output location, no files in the spool directory. // The flag is present again because #7 and #8 are unavailable. 
By("invoking the wal-restore command requesting #6 wal again", func() { - _, _, err := env.ExecCommandInInstancePod( - testUtils.PodLocator{ + _, _, err := exec.CommandInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ Namespace: namespace, PodName: standby, }, nil, diff --git a/tests/e2e/webhook_test.go b/tests/e2e/webhook_test.go index 6294f27a53..2171638e14 100644 --- a/tests/e2e/webhook_test.go +++ b/tests/e2e/webhook_test.go @@ -22,7 +22,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -57,7 +58,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper }) BeforeAll(func() { - clusterName, err = env.GetResourceNameFromYAML(sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) Expect(err).ToNot(HaveOccurred()) }) @@ -66,16 +67,16 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper clusterIsDefaulted = true By("having a deployment for the operator in state ready", func() { // Make sure that we have at least one operator already working - err := env.ScaleOperatorDeployment(1) + err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) - ready, err := env.IsOperatorDeploymentReady() + ready, err := operator.IsDeploymentReady(env.Ctx, env.Client) Expect(err).ShouldNot(HaveOccurred()) Expect(ready).To(BeTrue()) }) // Create a basic PG cluster - webhookNamespace, err := env.CreateUniqueTestNamespace(webhookNamespacePrefix) + webhookNamespace, err := env.CreateUniqueTestNamespace(env.Ctx, env.Client, webhookNamespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env) // Check if cluster is ready and the default values are populated @@ -86,7 +87,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper webhookNamespacePrefix := "no-webhook-test" clusterIsDefaulted = true - mWebhook, admissionNumber, err := utils.GetCNPGsMutatingWebhookByName(env, mutatingWebhook) + mWebhook, admissionNumber, err := operator.GetMutatingWebhookByName(env.Ctx, env.Client, mutatingWebhook) Expect(err).ToNot(HaveOccurred()) // Add a namespace selector to MutatingWebhooks and ValidatingWebhook, this will assign the webhooks @@ -96,11 +97,13 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper newWebhook.Webhooks[admissionNumber].NamespaceSelector = &metav1.LabelSelector{ MatchLabels: map[string]string{"test": "value"}, } - err := utils.UpdateCNPGsMutatingWebhookConf(env, newWebhook) + err := operator.UpdateMutatingWebhookConf(env.Ctx, env.Interface, newWebhook) Expect(err).ToNot(HaveOccurred()) }) - vWebhook, admissionNumber, err := utils.GetCNPGsValidatingWebhookByName(env, validatingWebhook) + vWebhook, admissionNumber, err := operator.GetValidatingWebhookByName( + env.Ctx, env.Client, validatingWebhook, + ) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("Disabling the validating webhook %v namespace", operatorNamespace), func() { @@ -108,12 +111,12 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper newWebhook.Webhooks[admissionNumber].NamespaceSelector = &metav1.LabelSelector{ MatchLabels: 
map[string]string{"test": "value"}, } - err := utils.UpdateCNPGsValidatingWebhookConf(env, newWebhook) + err := operator.UpdateValidatingWebhookConf(env.Ctx, env.Interface, newWebhook) Expect(err).ToNot(HaveOccurred()) }) // Create a basic PG cluster - webhookNamespace, err = env.CreateUniqueTestNamespace(webhookNamespacePrefix) + webhookNamespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, webhookNamespacePrefix) Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env) // Check if cluster is ready and has no default value in the object @@ -121,7 +124,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper // Make sure the operator is intact and not crashing By("having a deployment for the operator in state ready", func() { - ready, err := env.IsOperatorDeploymentReady() + ready, err := operator.IsDeploymentReady(env.Ctx, env.Client) Expect(err).ShouldNot(HaveOccurred()) Expect(ready).To(BeTrue()) }) diff --git a/tests/levels.go b/tests/levels.go index 2f6475755e..9209f49fd2 100644 --- a/tests/levels.go +++ b/tests/levels.go @@ -20,7 +20,7 @@ import ( "os" "strconv" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" ) // Level - Define test importance. Each test should define its own importance @@ -46,13 +46,13 @@ const defaultTestDepth = int(Medium) // TestEnvLevel struct for operator testing type TestEnvLevel struct { - *utils.TestingEnvironment + *environment.TestingEnvironment Depth int } // TestLevel creates the environment for testing func TestLevel() (*TestEnvLevel, error) { - env, err := utils.NewTestingEnvironment() + env, err := environment.NewTestingEnvironment() if err != nil { return nil, err } diff --git a/tests/utils/azurite.go b/tests/utils/backups/azurite.go similarity index 88% rename from tests/utils/azurite.go rename to tests/utils/backups/azurite.go index 7ea3ed7903..47bf7e22ce 100644 --- a/tests/utils/azurite.go +++ b/tests/utils/backups/azurite.go @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package utils +package backups import ( + "context" "encoding/json" "fmt" "os" @@ -29,9 +30,15 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" ) const ( @@ -46,7 +53,8 @@ type AzureConfiguration struct { BlobContainer string } -func newAzureConfigurationFromEnv() AzureConfiguration { +// NewAzureConfigurationFromEnv creates a new AzureConfiguration from the environment variables +func NewAzureConfigurationFromEnv() AzureConfiguration { return AzureConfiguration{ StorageAccount: os.Getenv("AZURE_STORAGE_ACCOUNT"), StorageKey: os.Getenv("AZURE_STORAGE_KEY"), @@ -56,14 +64,19 @@ func newAzureConfigurationFromEnv() AzureConfiguration { // CreateCertificateSecretsOnAzurite will create secrets for Azurite deployment func CreateCertificateSecretsOnAzurite( + ctx context.Context, + crudClient client.Client, namespace, clusterName, azuriteCaSecName, azuriteTLSSecName string, - env *TestingEnvironment, ) error { // create CA certificates - _, caPair, err := CreateSecretCA(namespace, clusterName, azuriteCaSecName, true, env) + _, caPair, err := secrets.CreateSecretCA( + ctx, crudClient, + namespace, clusterName, azuriteCaSecName, + true, + ) if err != nil { return err } @@ -75,7 +88,7 @@ func CreateCertificateSecretsOnAzurite( return err } serverSecret := serverPair.GenerateCertificateSecret(namespace, azuriteTLSSecName) - err = env.Client.Create(env.Ctx, serverSecret) + err = crudClient.Create(ctx, serverSecret) if err != nil { return err } @@ -83,15 +96,23 @@ func CreateCertificateSecretsOnAzurite( } // CreateStorageCredentialsOnAzurite will create credentials for Azurite -func CreateStorageCredentialsOnAzurite(namespace string, env *TestingEnvironment) error { +func CreateStorageCredentialsOnAzurite( + ctx context.Context, + crudClient client.Client, + namespace string, +) error { azuriteSecrets := getStorageCredentials(namespace) - return env.Client.Create(env.Ctx, &azuriteSecrets) + return crudClient.Create(ctx, &azuriteSecrets) } // InstallAzurite will set up Azurite in defined namespace and creates service -func InstallAzurite(namespace string, env *TestingEnvironment) error { +func InstallAzurite( + ctx context.Context, + crudClient client.Client, + namespace string, +) error { azuriteDeployment := getAzuriteDeployment(namespace) - err := env.Client.Create(env.Ctx, &azuriteDeployment) + err := crudClient.Create(ctx, &azuriteDeployment) if err != nil { return err } @@ -101,23 +122,27 @@ func InstallAzurite(namespace string, env *TestingEnvironment) error { Name: "azurite", } deployment := &apiv1.Deployment{} - err = env.Client.Get(env.Ctx, deploymentNamespacedName, deployment) + err = crudClient.Get(ctx, deploymentNamespacedName, deployment) if err != nil { return err } - err = DeploymentWaitForReady(env, deployment, 300) + err = deployments.WaitForReady(ctx, crudClient, deployment, 300) if err != nil { return err } azuriteService := getAzuriteService(namespace) - err = env.Client.Create(env.Ctx, &azuriteService) + err = crudClient.Create(ctx, &azuriteService) return err } 
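Taken together, these helpers illustrate the calling convention adopted across the migrated test utilities: an explicit context.Context and a controller-runtime client.Client replace the former *TestingEnvironment parameter. A minimal sketch of a caller, assuming an env value that exposes Ctx and Client as elsewhere in the e2e suite:

	// Provision Azurite in a test namespace with the package-level helpers
	// (sketch only; error handling abbreviated to plain propagation).
	if err := backups.CreateStorageCredentialsOnAzurite(env.Ctx, env.Client, namespace); err != nil {
		return err
	}
	if err := backups.InstallAzurite(env.Ctx, env.Client, namespace); err != nil {
		return err
	}
	// The az-cli client pod is only needed by tests inspecting the blob store.
	if err := backups.InstallAzCli(env.Ctx, env.Client, namespace); err != nil {
		return err
	}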
// InstallAzCli will install Az cli -func InstallAzCli(namespace string, env *TestingEnvironment) error { +func InstallAzCli( + ctx context.Context, + crudClient client.Client, + namespace string, +) error { azCLiPod := getAzuriteClientPod(namespace) - err := PodCreateAndWaitForReady(env, &azCLiPod, 180) + err := pods.CreateAndWaitForReady(ctx, crudClient, &azCLiPod, 180) if err != nil { return err } @@ -355,6 +380,8 @@ func getStorageCredentials(namespace string) corev1.Secret { // CreateClusterFromExternalClusterBackupWithPITROnAzure creates a cluster on Azure, starting from an external cluster // backup with PITR func CreateClusterFromExternalClusterBackupWithPITROnAzure( + ctx context.Context, + crudClient client.Client, namespace, externalClusterName, sourceClusterName, @@ -362,7 +389,6 @@ func CreateClusterFromExternalClusterBackupWithPITROnAzure( storageCredentialsSecretName, azStorageAccount, azBlobContainer string, - env *TestingEnvironment, ) (*v1.Cluster, error) { storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") destinationPath := fmt.Sprintf("https://%v.blob.core.windows.net/%v/", @@ -428,7 +454,7 @@ func CreateClusterFromExternalClusterBackupWithPITROnAzure( }, }, } - obj, err := CreateObject(env, restoreCluster) + obj, err := objects.Create(ctx, crudClient, restoreCluster) if err != nil { return nil, err } @@ -442,11 +468,12 @@ func CreateClusterFromExternalClusterBackupWithPITROnAzure( // CreateClusterFromExternalClusterBackupWithPITROnAzurite creates a cluster with Azurite, starting from an external // cluster backup with PITR func CreateClusterFromExternalClusterBackupWithPITROnAzurite( + ctx context.Context, + crudClient client.Client, namespace, externalClusterName, sourceClusterName, targetTime string, - env *TestingEnvironment, ) (*v1.Cluster, error) { storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") DestinationPath := fmt.Sprintf("https://azurite:10000/storageaccountname/%v", sourceClusterName) @@ -511,7 +538,7 @@ func CreateClusterFromExternalClusterBackupWithPITROnAzurite( }, }, } - obj, err := CreateObject(env, restoreCluster) + obj, err := objects.Create(ctx, crudClient, restoreCluster) if err != nil { return nil, err } @@ -550,7 +577,7 @@ func CountFilesOnAzureBlobStorage( path string, ) (int, error) { azBlobListCmd := ComposeAzBlobListCmd(configuration, clusterName, path) - out, _, err := RunUnchecked(azBlobListCmd) + out, _, err := run.Unchecked(azBlobListCmd) if err != nil { return -1, err } @@ -566,7 +593,7 @@ func CountFilesOnAzuriteBlobStorage( path string, ) (int, error) { azBlobListCmd := ComposeAzBlobListAzuriteCmd(clusterName, path) - out, _, err := RunUnchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+ + out, _, err := run.Unchecked(fmt.Sprintf("kubectl exec -n %v az-cli "+ "-- /bin/bash -c '%v'", namespace, azBlobListCmd)) if err != nil { return -1, err @@ -579,7 +606,7 @@ func CountFilesOnAzuriteBlobStorage( // verifySASTokenWriteActivity returns true if the given token has RW permissions, // otherwise it returns false func verifySASTokenWriteActivity(containerName string, id string, key string) bool { - _, _, err := RunUnchecked(fmt.Sprintf("az storage container create "+ + _, _, err := run.Unchecked(fmt.Sprintf("az storage container create "+ "--name %v --account-name %v "+ "--sas-token %v", containerName, id, key)) @@ -587,7 +614,11 @@ func verifySASTokenWriteActivity(containerName string, id string, key string) bo } // CreateSASTokenCredentials generates Secrets for the Azure Blob Storage -func 
CreateSASTokenCredentials(namespace string, id string, key string, env *TestingEnvironment) error { +func CreateSASTokenCredentials( + ctx context.Context, + crudClient client.Client, + namespace, id, key string, +) error { // Adding 24 hours to the current time date := time.Now().UTC().Add(time.Hour * 24) // Creating date time format for az command @@ -598,7 +629,7 @@ func CreateSASTokenCredentials(namespace string, id string, key string, env *Tes date.Hour(), date.Minute()) - out, _, err := Run(fmt.Sprintf( + out, _, err := run.Run(fmt.Sprintf( // SAS Token at Blob Container level does not currently work in Barman Cloud // https://github.com/EnterpriseDB/barman/issues/388 // we will use SAS Token at Storage Account level @@ -616,7 +647,7 @@ func CreateSASTokenCredentials(namespace string, id string, key string, env *Tes } SASTokenRW := strings.TrimRight(out, "\n") - out, _, err = Run(fmt.Sprintf( + out, _, err = run.Run(fmt.Sprintf( "az storage account generate-sas --account-name %v "+ "--https-only --permissions lr --account-key %v "+ "--resource-types co --services b --expiry %v -o tsv", @@ -631,12 +662,19 @@ func CreateSASTokenCredentials(namespace string, id string, key string, env *Tes return fmt.Errorf("expected token to be ready only") } - _, err = CreateObjectStorageSecret(namespace, "backup-storage-creds-sas", id, SASTokenRW, env) + _, err = secrets.CreateObjectStorageSecret( + ctx, crudClient, + namespace, "backup-storage-creds-sas", + id, SASTokenRW, + ) if err != nil { return err } - _, err = CreateObjectStorageSecret(namespace, "restore-storage-creds-sas", id, SASTokenRO, env) + _, err = secrets.CreateObjectStorageSecret(ctx, crudClient, + namespace, "restore-storage-creds-sas", + id, SASTokenRO, + ) if err != nil { return err } diff --git a/tests/utils/backup.go b/tests/utils/backups/backup.go similarity index 67% rename from tests/utils/backup.go rename to tests/utils/backups/backup.go index 9ef2aadd9c..67c04f10ec 100644 --- a/tests/utils/backup.go +++ b/tests/utils/backups/backup.go @@ -14,64 +14,191 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package backups import ( + "context" "fmt" "os" - volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + v2 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - - . "github.com/onsi/ginkgo/v2" // nolint - . 
"github.com/onsi/gomega" // nolint + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" ) -// ExecuteBackup performs a backup and checks the backup status -func ExecuteBackup( +// List gathers the current list of backup in namespace +func List( + ctx context.Context, + crudClient client.Client, + namespace string, +) (*apiv1.BackupList, error) { + backupList := &apiv1.BackupList{} + err := crudClient.List( + ctx, backupList, client.InNamespace(namespace), + ) + return backupList, err +} + +// Create creates a Backup resource for a given cluster name +func Create( + ctx context.Context, + crudClient client.Client, + targetBackup apiv1.Backup, +) (*apiv1.Backup, error) { + obj, err := objects.Create(ctx, crudClient, &targetBackup) + if err != nil { + return nil, err + } + backup, ok := obj.(*apiv1.Backup) + if !ok { + return nil, fmt.Errorf("created object is not of Backup type: %T %v", obj, obj) + } + return backup, nil +} + +// GetVolumeSnapshot gets a VolumeSnapshot given name and namespace +func GetVolumeSnapshot( + ctx context.Context, + crudClient client.Client, + namespace, name string, +) (*v1.VolumeSnapshot, error) { + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + volumeSnapshot := &v1.VolumeSnapshot{} + err := objects.Get(ctx, crudClient, namespacedName, volumeSnapshot) + if err != nil { + return nil, err + } + return volumeSnapshot, nil +} + +// AssertBackupConditionInClusterStatus check that the backup condition in the Cluster's Status +// eventually returns true +func AssertBackupConditionInClusterStatus( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, +) { + ginkgo.By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { + gomega.Eventually(func() (string, error) { + getBackupCondition, err := GetConditionsInClusterStatus( + ctx, crudClient, + namespace, clusterName, + apiv1.ConditionBackup, + ) + if err != nil { + return "", err + } + return string(getBackupCondition.Status), nil + }, 300, 5).Should(gomega.BeEquivalentTo("True")) + }) +} + +// CreateOnDemandBackupViaKubectlPlugin uses the kubectl plugin to create a backup +func CreateOnDemandBackupViaKubectlPlugin( + namespace, + clusterName, + backupName string, + target apiv1.BackupTarget, + method apiv1.BackupMethod, +) error { + command := fmt.Sprintf("kubectl cnpg backup %v -n %v", clusterName, namespace) + + if backupName != "" { + command = fmt.Sprintf("%v --backup-name %v", command, backupName) + } + if target != "" { + command = fmt.Sprintf("%v --backup-target %v", command, target) + } + if method != "" { + command = fmt.Sprintf("%v --method %v", command, method) + } + + _, _, err := run.Run(command) + return err +} + +// GetConditionsInClusterStatus get conditions values as given type from cluster object status +func GetConditionsInClusterStatus( + ctx context.Context, + crudClient client.Client, + namespace, + clusterName string, + conditionType apiv1.ClusterConditionType, +) (*v2.Condition, error) { + var cluster *apiv1.Cluster + var err error + + cluster, err = clusterutils.Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return nil, err + } + + for _, cond := range cluster.Status.Conditions { + if cond.Type == string(conditionType) { + return &cond, nil + } + } + + return nil, 
fmt.Errorf("no condition matching requested type found: %v", conditionType) +} + +// Execute performs a backup and checks the backup status +func Execute( + ctx context.Context, + crudClient client.Client, + scheme *runtime.Scheme, namespace, backupFile string, onlyTargetStandbys bool, timeoutSeconds int, - env *TestingEnvironment, ) *apiv1.Backup { - backupName, err := env.GetResourceNameFromYAML(backupFile) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() error { - _, stderr, err := RunUnchecked("kubectl apply -n " + namespace + " -f " + backupFile) + backupName, err := yaml.GetResourceNameFromYAML(scheme, backupFile) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(func() error { + _, stderr, err := run.Unchecked("kubectl apply -n " + namespace + " -f " + backupFile) if err != nil { return fmt.Errorf("could not create backup.\nStdErr: %v\nError: %v", stderr, err) } return nil - }, RetryTimeout, PollingTime).Should(Succeed()) + }, 60, objects.PollingTime).Should(gomega.Succeed()) backupNamespacedName := types.NamespacedName{ Namespace: namespace, Name: backupName, } backup := &apiv1.Backup{} // Verifying backup status - Eventually(func() (apiv1.BackupPhase, error) { - err = env.Client.Get(env.Ctx, backupNamespacedName, backup) + gomega.Eventually(func() (apiv1.BackupPhase, error) { + err = crudClient.Get(ctx, backupNamespacedName, backup) return backup.Status.Phase, err - }, timeoutSeconds).Should(BeEquivalentTo(apiv1.BackupPhaseCompleted)) - Eventually(func() (string, error) { - err = env.Client.Get(env.Ctx, backupNamespacedName, backup) + }, timeoutSeconds).Should(gomega.BeEquivalentTo(apiv1.BackupPhaseCompleted)) + gomega.Eventually(func() (string, error) { + err = crudClient.Get(ctx, backupNamespacedName, backup) if err != nil { return "", err } backupStatus := backup.GetStatus() return backupStatus.BeginLSN, err - }, timeoutSeconds).ShouldNot(BeEmpty()) + }, timeoutSeconds).ShouldNot(gomega.BeEmpty()) var cluster *apiv1.Cluster - Eventually(func() error { + gomega.Eventually(func() error { var err error - cluster, err = env.GetCluster(namespace, backup.Spec.Cluster.Name) + cluster, err = clusterutils.Get(ctx, crudClient, namespace, backup.Spec.Cluster.Name) return err - }, timeoutSeconds).ShouldNot(HaveOccurred()) + }, timeoutSeconds).ShouldNot(gomega.HaveOccurred()) backupStatus := backup.GetStatus() if cluster.Spec.Backup != nil { @@ -81,36 +208,38 @@ func ExecuteBackup( } switch backupTarget { case apiv1.BackupTargetPrimary, "": - Expect(backupStatus.InstanceID.PodName).To(BeEquivalentTo(cluster.Status.TargetPrimary)) + gomega.Expect(backupStatus.InstanceID.PodName).To(gomega.BeEquivalentTo(cluster.Status.TargetPrimary)) case apiv1.BackupTargetStandby: - Expect(backupStatus.InstanceID.PodName).To(BeElementOf(cluster.Status.InstanceNames)) + gomega.Expect(backupStatus.InstanceID.PodName).To(gomega.BeElementOf(cluster.Status.InstanceNames)) if onlyTargetStandbys { - Expect(backupStatus.InstanceID.PodName).NotTo(Equal(cluster.Status.TargetPrimary)) + gomega.Expect(backupStatus.InstanceID.PodName).NotTo(gomega.Equal(cluster.Status.TargetPrimary)) } } } - Expect(backupStatus.BeginWal).NotTo(BeEmpty()) - Expect(backupStatus.EndLSN).NotTo(BeEmpty()) - Expect(backupStatus.EndWal).NotTo(BeEmpty()) + gomega.Expect(backupStatus.BeginWal).NotTo(gomega.BeEmpty()) + gomega.Expect(backupStatus.EndLSN).NotTo(gomega.BeEmpty()) + gomega.Expect(backupStatus.EndWal).NotTo(gomega.BeEmpty()) return backup } // CreateClusterFromBackupUsingPITR creates a cluster from backup, 
using the PITR func CreateClusterFromBackupUsingPITR( + ctx context.Context, + crudClient client.Client, + scheme *runtime.Scheme, namespace, clusterName, backupFilePath, targetTime string, - env *TestingEnvironment, ) (*apiv1.Cluster, error) { - backupName, err := env.GetResourceNameFromYAML(backupFilePath) + backupName, err := yaml.GetResourceNameFromYAML(scheme, backupFilePath) if err != nil { return nil, err } storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ + ObjectMeta: v2.ObjectMeta{ Name: clusterName, Namespace: namespace, }, @@ -148,7 +277,7 @@ func CreateClusterFromBackupUsingPITR( }, }, } - obj, err := CreateObject(env, restoreCluster) + obj, err := objects.Create(ctx, crudClient, restoreCluster) if err != nil { return nil, err } @@ -162,16 +291,17 @@ func CreateClusterFromBackupUsingPITR( // CreateClusterFromExternalClusterBackupWithPITROnMinio creates a cluster on Minio, starting from an external cluster // backup with PITR func CreateClusterFromExternalClusterBackupWithPITROnMinio( + ctx context.Context, + crudClient client.Client, namespace, externalClusterName, sourceClusterName, targetTime string, - env *TestingEnvironment, ) (*apiv1.Cluster, error) { storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") restoreCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ + ObjectMeta: v2.ObjectMeta{ Name: externalClusterName, Namespace: namespace, }, @@ -237,7 +367,7 @@ func CreateClusterFromExternalClusterBackupWithPITROnMinio( }, }, } - obj, err := CreateObject(env, restoreCluster) + obj, err := objects.Create(ctx, crudClient, restoreCluster) if err != nil { return nil, err } @@ -248,67 +378,20 @@ func CreateClusterFromExternalClusterBackupWithPITROnMinio( return cluster, nil } -// GetConditionsInClusterStatus get conditions values as given type from cluster object status -func GetConditionsInClusterStatus( - namespace, - clusterName string, - env *TestingEnvironment, - conditionType apiv1.ClusterConditionType, -) (*metav1.Condition, error) { - var cluster *apiv1.Cluster - var err error - - cluster, err = env.GetCluster(namespace, clusterName) - if err != nil { - return nil, err - } - - for _, cond := range cluster.Status.Conditions { - if cond.Type == string(conditionType) { - return &cond, nil - } - } - - return nil, fmt.Errorf("no condition matching requested type found: %v", conditionType) -} - -// CreateOnDemandBackupViaKubectlPlugin uses the kubectl plugin to create a backup -func CreateOnDemandBackupViaKubectlPlugin( - namespace, - clusterName, - backupName string, - target apiv1.BackupTarget, - method apiv1.BackupMethod, -) error { - command := fmt.Sprintf("kubectl cnpg backup %v -n %v", clusterName, namespace) - - if backupName != "" { - command = fmt.Sprintf("%v --backup-name %v", command, backupName) - } - if target != "" { - command = fmt.Sprintf("%v --backup-target %v", command, target) - } - if method != "" { - command = fmt.Sprintf("%v --method %v", command, method) - } - - _, _, err := Run(command) - return err -} - -// CreateOnDemandBackup creates a Backup resource for a given cluster name -// Deprecated: Use CreateBackup. +// CreateOnDemand creates a Backup resource for a given cluster name +// Deprecated: Use Create. 
// TODO: eradicate -func CreateOnDemandBackup( +func CreateOnDemand( + ctx context.Context, + crudClient client.Client, namespace, clusterName, backupName string, target apiv1.BackupTarget, method apiv1.BackupMethod, - env *TestingEnvironment, ) (*apiv1.Backup, error) { targetBackup := &apiv1.Backup{ - ObjectMeta: metav1.ObjectMeta{ + ObjectMeta: v2.ObjectMeta{ Name: backupName, Namespace: namespace, }, @@ -326,23 +409,7 @@ func CreateOnDemandBackup( targetBackup.Spec.Method = method } - obj, err := CreateObject(env, targetBackup) - if err != nil { - return nil, err - } - backup, ok := obj.(*apiv1.Backup) - if !ok { - return nil, fmt.Errorf("created object is not of Backup type: %T %v", obj, obj) - } - return backup, nil -} - -// CreateBackup creates a Backup resource for a given cluster name -func CreateBackup( - targetBackup apiv1.Backup, - env *TestingEnvironment, -) (*apiv1.Backup, error) { - obj, err := CreateObject(env, &targetBackup) + obj, err := objects.Create(ctx, crudClient, targetBackup) if err != nil { return nil, err } @@ -352,35 +419,3 @@ func CreateBackup( } return backup, nil } - -// GetVolumeSnapshot gets a VolumeSnapshot given name and namespace -func (env TestingEnvironment) GetVolumeSnapshot( - namespace, - name string, -) (*volumesnapshot.VolumeSnapshot, error) { - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: name, - } - volumeSnapshot := &volumesnapshot.VolumeSnapshot{} - err := GetObject(&env, namespacedName, volumeSnapshot) - if err != nil { - return nil, err - } - return volumeSnapshot, nil -} - -// AssertBackupConditionInClusterStatus check that the backup condition in the Cluster's Status -// eventually returns true -func AssertBackupConditionInClusterStatus(env *TestingEnvironment, namespace, clusterName string) { - By(fmt.Sprintf("waiting for backup condition status in cluster '%v'", clusterName), func() { - Eventually(func() (string, error) { - getBackupCondition, err := GetConditionsInClusterStatus( - namespace, clusterName, env, apiv1.ConditionBackup) - if err != nil { - return "", err - } - return string(getBackupCondition.Status), nil - }, 300, 5).Should(BeEquivalentTo("True")) - }) -} diff --git a/tests/utils/backups/doc.go b/tests/utils/backups/doc.go new file mode 100644 index 0000000000..3064ea0449 --- /dev/null +++ b/tests/utils/backups/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package backups provides backup utilities +package backups diff --git a/tests/utils/certificates.go b/tests/utils/certificates.go deleted file mode 100644 index f6d08cdcad..0000000000 --- a/tests/utils/certificates.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "fmt" - - corev1 "k8s.io/api/core/v1" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// CreateClientCertificatesViaKubectlPlugin creates a certificate for a given user on a given cluster -func CreateClientCertificatesViaKubectlPlugin( - cluster apiv1.Cluster, - certName string, - userName string, - env *TestingEnvironment, -) error { - // clientCertName := "cluster-cert" - // user := "app" - // Create the certificate - _, _, err := Run(fmt.Sprintf( - "kubectl cnpg certificate %v --cnpg-cluster %v --cnpg-user %v -n %v", - certName, - cluster.Name, - userName, - cluster.Namespace)) - if err != nil { - return err - } - // Verifying client certificate secret existence - secret := &corev1.Secret{} - err = env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: cluster.Namespace, Name: certName}, secret) - return err -} diff --git a/tests/utils/cloud_vendor.go b/tests/utils/cloudvendors/cloud_vendor.go similarity index 96% rename from tests/utils/cloud_vendor.go rename to tests/utils/cloudvendors/cloud_vendor.go index 1f3062a15b..be50b780db 100644 --- a/tests/utils/cloud_vendor.go +++ b/tests/utils/cloudvendors/cloud_vendor.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +// Package cloudvendors provides the variables to define on which cloud vendor the e2e test is running +package cloudvendors import ( "fmt" diff --git a/tests/utils/cluster.go b/tests/utils/cluster.go deleted file mode 100644 index de6a301c83..0000000000 --- a/tests/utils/cluster.go +++ /dev/null @@ -1,409 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "text/tabwriter" - - "github.com/cheynewallace/tabby" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// AllClusterPodsHaveLabels verifies if the labels defined in a map are included -// in all the pods of a cluster -func AllClusterPodsHaveLabels( - env *TestingEnvironment, - namespace, clusterName string, - labels map[string]string, -) (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return false, err - } - podList, err := env.GetClusterPodList(namespace, clusterName) - if err != nil { - return false, err - } - if len(podList.Items) != cluster.Spec.Instances { - return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances) - } - for _, pod := range podList.Items { - if !PodHasLabels(pod, labels) { - return false, fmt.Errorf("%v found labels, expected %v", pod.Labels, labels) - } - } - return true, nil -} - -// AllClusterPodsHaveAnnotations verifies if the annotations defined in a map are included -// in all the pods of a cluster -func AllClusterPodsHaveAnnotations( - env *TestingEnvironment, - namespace, clusterName string, - annotations map[string]string, -) (bool, error) { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return false, err - } - podList, err := env.GetClusterPodList(namespace, clusterName) - if err != nil { - return false, err - } - if len(podList.Items) != cluster.Spec.Instances { - return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances) - } - for _, pod := range podList.Items { - if !PodHasAnnotations(pod, annotations) { - return false, fmt.Errorf("%v found annotations, %v expected", pod.Annotations, annotations) - } - } - return true, nil -} - -// ClusterHasLabels verifies that the labels of a cluster contain a specified -// labels map -func ClusterHasLabels( - cluster *apiv1.Cluster, - labels map[string]string, -) bool { - clusterLabels := cluster.Labels - for k, v := range labels { - val, ok := clusterLabels[k] - if !ok || (v != val) { - return false - } - } - return true -} - -// ClusterHasAnnotations verifies that the annotations of a cluster contain a specified -// annotations map -func ClusterHasAnnotations( - cluster *apiv1.Cluster, - annotations map[string]string, -) bool { - clusterAnnotations := cluster.Annotations - for k, v := range annotations { - val, ok := clusterAnnotations[k] - if !ok || (v != val) { - return false - } - } - return true -} - -// DumpNamespaceObjects logs the clusters, pods, pvcs etc. 
found in a namespace as JSON sections -func (env TestingEnvironment) DumpNamespaceObjects(namespace string, filename string) { - f, err := os.Create(filepath.Clean(filename)) - if err != nil { - fmt.Println(err) - return - } - defer func() { - _ = f.Sync() - _ = f.Close() - }() - w := bufio.NewWriter(f) - clusterList := &apiv1.ClusterList{} - _ = GetObjectList(&env, clusterList, client.InNamespace(namespace)) - - for _, cluster := range clusterList.Items { - out, _ := json.MarshalIndent(cluster, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v cluster\n", namespace, cluster.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - podList, _ := env.GetPodList(namespace) - for _, pod := range podList.Items { - out, _ := json.MarshalIndent(pod, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - pvcList, _ := env.GetPVCList(namespace) - for _, pvc := range pvcList.Items { - out, _ := json.MarshalIndent(pvc, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v PVC\n", namespace, pvc.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - jobList, _ := env.GetJobList(namespace) - for _, job := range jobList.Items { - out, _ := json.MarshalIndent(job, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v job\n", namespace, job.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - eventList, _ := env.GetEventList(namespace) - out, _ := json.MarshalIndent(eventList.Items, "", " ") - _, _ = fmt.Fprintf(w, "Dumping events for namespace %v\n", namespace) - _, _ = fmt.Fprintln(w, string(out)) - - serviceAccountList, _ := env.GetServiceAccountList(namespace) - for _, sa := range serviceAccountList.Items { - out, _ := json.MarshalIndent(sa, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v serviceaccount\n", namespace, sa.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - suffixes := []string{"-r", "-rw", "-any"} - for _, cluster := range clusterList.Items { - for _, suffix := range suffixes { - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: cluster.Name + suffix, - } - endpoint := &corev1.Endpoints{} - _ = env.Client.Get(env.Ctx, namespacedName, endpoint) - out, _ := json.MarshalIndent(endpoint, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v endpoint\n", namespace, endpoint.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - } - // dump backup info - backupList, _ := env.GetBackupList(namespace) - // dump backup object info if it's configure - for _, backup := range backupList.Items { - out, _ := json.MarshalIndent(backup, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v backup\n", namespace, backup.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - // dump scheduledbackup info - scheduledBackupList, _ := env.GetScheduledBackupList(namespace) - // dump backup object info if it's configure - for _, scheduledBackup := range scheduledBackupList.Items { - out, _ := json.MarshalIndent(scheduledBackup, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v scheduledbackup\n", namespace, scheduledBackup.Name) - _, _ = fmt.Fprintln(w, string(out)) - } - - err = w.Flush() - if err != nil { - fmt.Println(err) - return - } -} - -// GetCluster gets a cluster given name and namespace -func (env TestingEnvironment) GetCluster(namespace string, name string) (*apiv1.Cluster, error) { - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: name, - } - cluster := &apiv1.Cluster{} - err := GetObject(&env, namespacedName, cluster) - if err != nil { - return nil, err - } - return cluster, nil -} - -// GetClusterPodList gathers the current list 
of instance pods for a cluster in a namespace -func (env TestingEnvironment) GetClusterPodList(namespace string, clusterName string) (*corev1.PodList, error) { - podList := &corev1.PodList{} - err := GetObjectList(&env, podList, client.InNamespace(namespace), - client.MatchingLabels{ - utils.ClusterLabelName: clusterName, - utils.PodRoleLabelName: "instance", // this ensures we are getting instance pods only - }, - ) - return podList, err -} - -// GetClusterPrimary gets the primary pod of a cluster -func (env TestingEnvironment) GetClusterPrimary(namespace string, clusterName string) (*corev1.Pod, error) { - podList := &corev1.PodList{} - - err := GetObjectList(&env, podList, client.InNamespace(namespace), - client.MatchingLabels{ - utils.ClusterLabelName: clusterName, - utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelPrimary, - }, - ) - if err != nil { - return &corev1.Pod{}, err - } - if len(podList.Items) > 0 { - // if there are multiple, get the one without deletion timestamp - for _, pod := range podList.Items { - if pod.DeletionTimestamp == nil { - return &pod, nil - } - } - err = fmt.Errorf("all pod with primary role has deletion timestamp") - return &(podList.Items[0]), err - } - err = fmt.Errorf("no primary found") - return &corev1.Pod{}, err -} - -// GetClusterReplicas gets a slice containing all the replica pods of a cluster -func (env TestingEnvironment) GetClusterReplicas(namespace string, clusterName string) (*corev1.PodList, error) { - podList := &corev1.PodList{} - err := GetObjectList(&env, podList, client.InNamespace(namespace), - client.MatchingLabels{ - utils.ClusterLabelName: clusterName, - utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelReplica, - }, - ) - if err != nil { - return podList, err - } - if len(podList.Items) > 0 { - return podList, nil - } - err = fmt.Errorf("no replicas found") - return podList, err -} - -// ScaleClusterSize scales a cluster to the requested size -func (env TestingEnvironment) ScaleClusterSize(namespace, clusterName string, newClusterSize int) error { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return err - } - originalCluster := cluster.DeepCopy() - cluster.Spec.Instances = newClusterSize - err = env.Client.Patch(env.Ctx, cluster, client.MergeFrom(originalCluster)) - if err != nil { - return err - } - return nil -} - -// PrintClusterResources prints a summary of the cluster pods, jobs, pvcs etc. 
-func PrintClusterResources(namespace, clusterName string, env *TestingEnvironment) string { - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return fmt.Sprintf("Error while Getting Object %v", err) - } - - buffer := &bytes.Buffer{} - w := tabwriter.NewWriter(buffer, 0, 0, 4, ' ', 0) - clusterInfo := tabby.NewCustom(w) - clusterInfo.AddLine("Timeout while waiting for cluster ready, dumping more cluster information for analysis...") - clusterInfo.AddLine() - clusterInfo.AddLine() - clusterInfo.AddLine("Cluster information:") - clusterInfo.AddLine("Name", cluster.GetName()) - clusterInfo.AddLine("Namespace", cluster.GetNamespace()) - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - clusterInfo.AddLine("Spec.Instances", cluster.Spec.Instances) - clusterInfo.AddLine("Wal storage", cluster.ShouldCreateWalArchiveVolume()) - clusterInfo.AddLine("Cluster phase", cluster.Status.Phase) - clusterInfo.AddLine("Phase reason", cluster.Status.PhaseReason) - clusterInfo.AddLine("Cluster target primary", cluster.Status.TargetPrimary) - clusterInfo.AddLine("Cluster current primary", cluster.Status.CurrentPrimary) - clusterInfo.AddLine() - - podList, _ := env.GetClusterPodList(cluster.GetNamespace(), cluster.GetName()) - - clusterInfo.AddLine("Cluster Pods information:") - clusterInfo.AddLine("Ready pod number: ", utils.CountReadyPods(podList.Items)) - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - for _, pod := range podList.Items { - clusterInfo.AddLine("Pod name", pod.Name) - clusterInfo.AddLine("Pod phase", pod.Status.Phase) - if cluster.Status.InstancesReportedState != nil { - if instanceReportState, ok := cluster.Status.InstancesReportedState[apiv1.PodName(pod.Name)]; ok { - clusterInfo.AddLine("Is Primary", instanceReportState.IsPrimary) - clusterInfo.AddLine("TimeLineID", instanceReportState.TimeLineID) - clusterInfo.AddLine("---", "---") - } - } else { - clusterInfo.AddLine("InstanceReportState not reported", "") - } - } - - clusterInfo.AddLine("Jobs information:") - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - jobList, _ := env.GetJobList(cluster.GetNamespace()) - for _, job := range jobList.Items { - clusterInfo.AddLine("Job name", job.Name) - clusterInfo.AddLine("Job status", fmt.Sprintf("%#v", job.Status)) - } - - pvcList, _ := env.GetPVCList(cluster.GetNamespace()) - clusterInfo.AddLine() - clusterInfo.AddLine("Cluster PVC information: (dumping all pvc under the namespace)") - clusterInfo.AddLine("Available Cluster PVCCount", cluster.Status.PVCCount) - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - for _, pvc := range pvcList.Items { - clusterInfo.AddLine("PVC name", pvc.Name) - clusterInfo.AddLine("PVC phase", pvc.Status.Phase) - clusterInfo.AddLine("---", "---") - } - - snapshotList, _ := env.GetSnapshotList(cluster.Namespace) - clusterInfo.AddLine() - clusterInfo.AddLine("Cluster Snapshot information: (dumping all snapshot under the namespace)") - clusterInfo.AddLine() - clusterInfo.AddHeader("Items", "Values") - for _, snapshot := range snapshotList.Items { - clusterInfo.AddLine("Snapshot name", snapshot.Name) - if snapshot.Status.ReadyToUse != nil { - clusterInfo.AddLine("Snapshot ready to use", *snapshot.Status.ReadyToUse) - } else { - clusterInfo.AddLine("Snapshot ready to use", "false") - } - clusterInfo.AddLine("---", "---") - } - - // do not remove, this is needed to ensure that the writer cache is always flushed. 
- clusterInfo.Print() - - return buffer.String() -} - -// DescribeKubernetesNodes prints the `describe node` for each node in the -// kubernetes cluster -func (env TestingEnvironment) DescribeKubernetesNodes() (string, error) { - nodeList, err := env.GetNodeList() - if err != nil { - return "", err - } - var report strings.Builder - for _, node := range nodeList.Items { - command := fmt.Sprintf("kubectl describe node %v", node.Name) - stdout, _, err := Run(command) - if err != nil { - return "", err - } - report.WriteString("================================================\n") - report.WriteString(stdout) - report.WriteString("================================================\n") - } - return report.String(), nil -} diff --git a/tests/utils/clusterutils/cluster.go b/tests/utils/clusterutils/cluster.go new file mode 100644 index 0000000000..b237a0a9c3 --- /dev/null +++ b/tests/utils/clusterutils/cluster.go @@ -0,0 +1,227 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package clusterutils provides functions to handle cluster actions +package clusterutils + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" +) + +// AllPodsHaveLabels verifies if the labels defined in a map are included +// in all the pods of a cluster +func AllPodsHaveLabels( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, + labels map[string]string, +) (bool, error) { + cluster, err := Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return false, err + } + podList, err := ListPods(ctx, crudClient, namespace, clusterName) + if err != nil { + return false, err + } + if len(podList.Items) != cluster.Spec.Instances { + return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances) + } + for _, pod := range podList.Items { + if !pods.HasLabels(pod, labels) { + return false, fmt.Errorf("%v found labels, expected %v", pod.Labels, labels) + } + } + return true, nil +} + +// AllPodsHaveAnnotations verifies if the annotations defined in a map are included +// in all the pods of a cluster +func AllPodsHaveAnnotations( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, + annotations map[string]string, +) (bool, error) { + cluster, err := Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return false, err + } + podList, err := ListPods(ctx, crudClient, namespace, clusterName) + if err != nil { + return false, err + } + if len(podList.Items) != cluster.Spec.Instances { + return false, fmt.Errorf("%v found instances, %v expected", len(podList.Items), cluster.Spec.Instances) + } + for 
_, pod := range podList.Items {
+		if !pods.HasAnnotations(pod, annotations) {
+			return false, fmt.Errorf("%v found annotations, %v expected", pod.Annotations, annotations)
+		}
+	}
+	return true, nil
+}
+
+// HasLabels verifies that the labels of a cluster contain a specified
+// labels map
+func HasLabels(
+	cluster *apiv1.Cluster,
+	labels map[string]string,
+) bool {
+	clusterLabels := cluster.Labels
+	for k, v := range labels {
+		val, ok := clusterLabels[k]
+		if !ok || (v != val) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAnnotations verifies that the annotations of a cluster contain a specified
+// annotations map
+func HasAnnotations(
+	cluster *apiv1.Cluster,
+	annotations map[string]string,
+) bool {
+	clusterAnnotations := cluster.Annotations
+	for k, v := range annotations {
+		val, ok := clusterAnnotations[k]
+		if !ok || (v != val) {
+			return false
+		}
+	}
+	return true
+}
+
+// Get gets a cluster given name and namespace
+func Get(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, name string,
+) (*apiv1.Cluster, error) {
+	namespacedName := types.NamespacedName{
+		Namespace: namespace,
+		Name:      name,
+	}
+	cluster := &apiv1.Cluster{}
+	err := objects.Get(ctx, crudClient, namespacedName, cluster)
+	if err != nil {
+		return nil, err
+	}
+	return cluster, nil
+}
+
+// ListPods gathers the current list of instance pods for a cluster in a namespace
+func ListPods(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (*corev1.PodList, error) {
+	podList := &corev1.PodList{}
+	err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+		client.MatchingLabels{
+			utils.ClusterLabelName: clusterName,
+			utils.PodRoleLabelName: "instance", // this ensures we are getting instance pods only
+		},
+	)
+	return podList, err
+}
+
+// GetPrimary gets the primary pod of a cluster
+func GetPrimary(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (*corev1.Pod, error) {
+	podList := &corev1.PodList{}
+
+	err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+		client.MatchingLabels{
+			utils.ClusterLabelName:             clusterName,
+			utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelPrimary,
+		},
+	)
+	if err != nil {
+		return &corev1.Pod{}, err
+	}
+	if len(podList.Items) > 0 {
+		// if there are multiple, get the one without deletion timestamp
+		for _, pod := range podList.Items {
+			if pod.DeletionTimestamp == nil {
+				return &pod, nil
+			}
+		}
+		err = fmt.Errorf("all pods with the primary role have a deletion timestamp")
+		return &(podList.Items[0]), err
+	}
+	err = fmt.Errorf("no primary found")
+	return &corev1.Pod{}, err
+}
+
+// GetReplicas gets a slice containing all the replica pods of a cluster
+func GetReplicas(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (*corev1.PodList, error) {
+	podList := &corev1.PodList{}
+	err := objects.List(ctx, crudClient, podList, client.InNamespace(namespace),
+		client.MatchingLabels{
+			utils.ClusterLabelName:             clusterName,
+			utils.ClusterInstanceRoleLabelName: specs.ClusterRoleLabelReplica,
+		},
+	)
+	if err != nil {
+		return podList, err
+	}
+	if len(podList.Items) > 0 {
+		return podList, nil
+	}
+	err = fmt.Errorf("no replicas found")
+	return podList, err
+}
+
+// ScaleSize scales a cluster to the requested size
+func ScaleSize(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+	newClusterSize int,
+) error {
+	cluster, err := Get(ctx, crudClient, namespace, 
clusterName) + if err != nil { + return err + } + originalCluster := cluster.DeepCopy() + cluster.Spec.Instances = newClusterSize + err = crudClient.Patch(ctx, cluster, client.MergeFrom(originalCluster)) + if err != nil { + return err + } + return nil +} diff --git a/tests/utils/commons.go b/tests/utils/commons.go deleted file mode 100644 index d3c77a667e..0000000000 --- a/tests/utils/commons.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "fmt" - "time" - - "github.com/avast/retry-go/v4" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// ForgeArchiveWalOnMinio instead of using `switchWalCmd` to generate a real WAL archive, directly forges a WAL archive -// file on Minio by copying and renaming an existing WAL archive file for the sake of more control of testing. To make -// sure the forged one won't be a real WAL archive, we let the sequence in newWALName to be big enough so that it can't -// be a real WAL archive name in an idle postgresql. -func ForgeArchiveWalOnMinio(namespace, clusterName, miniClientPodName, existingWALName, newWALName string) error { - // Forge a WAL archive by copying and renaming the 1st WAL archive - minioWALBasePath := "minio/" + clusterName + "/" + clusterName + "/wals/0000000100000000" - existingWALPath := minioWALBasePath + "/" + existingWALName + ".gz" - newWALNamePath := minioWALBasePath + "/" + newWALName - forgeWALOnMinioCmd := "mc cp " + existingWALPath + " " + newWALNamePath - _, _, err := RunUncheckedRetry(fmt.Sprintf( - "kubectl exec -n %v %v -- %v", - namespace, - miniClientPodName, - forgeWALOnMinioCmd)) - - return err -} - -// TestFileExist tests if a file specified with `fileName` exist under directory `directoryPath`, on pod `podName` in -// namespace `namespace` -func TestFileExist(namespace, podName, directoryPath, fileName string) bool { - filePath := directoryPath + "/" + fileName - testFileExistCommand := "test -f " + filePath - _, _, err := RunUnchecked(fmt.Sprintf( - "kubectl exec -n %v %v -- %v", - namespace, - podName, - testFileExistCommand)) - - return err == nil -} - -// TestDirectoryEmpty tests if a directory `directoryPath` exists on pod `podName` in namespace `namespace` -func TestDirectoryEmpty(namespace, podName, directoryPath string) bool { - testDirectoryEmptyCommand := "test \"$(ls -A" + directoryPath + ")\"" - _, _, err := RunUnchecked(fmt.Sprintf( - "kubectl exec -n %v %v -- %v", - namespace, - podName, - testDirectoryEmptyCommand)) - - return err == nil -} - -// CreateObject create object in the Kubernetes cluster -func CreateObject(env *TestingEnvironment, object client.Object, opts ...client.CreateOption) (client.Object, error) { - err := retry.Do( - func() error { - return env.Client.Create(env.Ctx, object, opts...) 
- }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), - retry.DelayType(retry.FixedDelay), - retry.RetryIf(func(err error) bool { return !apierrs.IsAlreadyExists(err) }), - ) - return object, err -} - -// DeleteObject delete object in the Kubernetes cluster -func DeleteObject(env *TestingEnvironment, object client.Object, opts ...client.DeleteOption) error { - err := retry.Do( - func() error { - return env.Client.Delete(env.Ctx, object, opts...) - }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), - retry.DelayType(retry.FixedDelay), - retry.RetryIf(func(err error) bool { return !apierrs.IsNotFound(err) }), - ) - return err -} - -// GetObjectList retrieves list of objects for a given namespace and list options -func GetObjectList(env *TestingEnvironment, objectList client.ObjectList, opts ...client.ListOption) error { - err := retry.Do( - func() error { - err := env.Client.List(env.Ctx, objectList, opts...) - if err != nil { - return err - } - return nil - }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), - retry.DelayType(retry.FixedDelay), - ) - return err -} - -// GetObject retrieves an objects for the given object key from the Kubernetes Cluster -func GetObject(env *TestingEnvironment, objectKey client.ObjectKey, object client.Object) error { - err := retry.Do( - func() error { - err := env.Client.Get(env.Ctx, objectKey, object) - if err != nil { - return err - } - return nil - }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), - retry.DelayType(retry.FixedDelay), - ) - return err -} diff --git a/tests/utils/deployment.go b/tests/utils/deployments/deployment.go similarity index 72% rename from tests/utils/deployment.go rename to tests/utils/deployments/deployment.go index 31995afc7d..0f9c409136 100644 --- a/tests/utils/deployment.go +++ b/tests/utils/deployments/deployment.go @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +// Package deployments contains functions to control deployments +package deployments import ( + "context" "fmt" "time" @@ -25,22 +27,27 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// DeploymentIsReady checks if a Deployment is ready -func DeploymentIsReady(deployment appsv1.Deployment) bool { +// isReady checks if a Deployment is ready +func isReady(deployment appsv1.Deployment) bool { return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas } -// DeploymentWaitForReady waits for a Deployment to be ready -func DeploymentWaitForReady(env *TestingEnvironment, deployment *appsv1.Deployment, timeoutSeconds uint) error { +// WaitForReady waits for a Deployment to be ready +func WaitForReady( + ctx context.Context, + crudClient client.Client, + deployment *appsv1.Deployment, + timeoutSeconds uint, +) error { err := retry.Do( func() error { - if err := env.Client.Get(env.Ctx, client.ObjectKey{ + if err := crudClient.Get(ctx, client.ObjectKey{ Namespace: deployment.Namespace, Name: deployment.Name, }, deployment); err != nil { return err } - if !DeploymentIsReady(*deployment) { + if !isReady(*deployment) { return fmt.Errorf( "deployment not ready. 
Namespace: %v, Name: %v", deployment.Namespace, diff --git a/tests/utils/doc.go b/tests/utils/doc.go new file mode 100644 index 0000000000..72e13d50e2 --- /dev/null +++ b/tests/utils/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package utils contains helper functions/methods for e2e +package utils diff --git a/tests/utils/environment/doc.go b/tests/utils/environment/doc.go new file mode 100644 index 0000000000..5c0dbc857f --- /dev/null +++ b/tests/utils/environment/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package environment contains functions to handle the TestingEnvironment struct +package environment diff --git a/tests/utils/environment.go b/tests/utils/environment/environment.go similarity index 50% rename from tests/utils/environment.go rename to tests/utils/environment/environment.go index b93ea595d8..4a216075be 100644 --- a/tests/utils/environment.go +++ b/tests/utils/environment/environment.go @@ -14,14 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package utils +package environment import ( "context" "fmt" "os" - "path/filepath" - "strings" "sync" "time" @@ -32,14 +30,9 @@ import ( storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/thoas/go-funk" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - eventsv1 "k8s.io/api/events/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/utils/strings/slices" @@ -47,9 +40,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log/zap" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" // Import the client auth plugin package to allow use gke or ake to run tests _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -60,12 +54,6 @@ import ( const ( // RetryTimeout retry timeout (in seconds) when a client api call or kubectl cli request get failed RetryTimeout = 60 - // RetryAttempts maximum number of attempts when it fails in `retry`. Mainly used in `RunUncheckedRetry` - RetryAttempts = 5 - // PollingTime polling interval (in seconds) between retries - PollingTime = 5 - // sternLogDirectory contains the fixed path to store the cluster logs - sternLogDirectory = "cluster_logs/" ) // TestingEnvironment struct for operator testing @@ -76,12 +64,9 @@ type TestingEnvironment struct { APIExtensionClient apiextensionsclientset.Interface Ctx context.Context Scheme *runtime.Scheme - PreserveNamespaces []string Log logr.Logger PostgresVersion uint64 createdNamespaces *uniqueStringSlice - AzureConfiguration AzureConfiguration - SternLogDir string } type uniqueStringSlice struct { @@ -111,7 +96,6 @@ func NewTestingEnvironment() (*TestingEnvironment, error) { env.APIExtensionClient = apiextensionsclientset.NewForConfigOrDie(env.RestClientConfig) env.Ctx = context.Background() env.Scheme = runtime.NewScheme() - env.SternLogDir = sternLogDirectory if err := storagesnapshotv1.AddToScheme(env.Scheme); err != nil { return nil, err @@ -149,10 +133,6 @@ func NewTestingEnvironment() (*TestingEnvironment, error) { return nil, err } - if preserveNamespaces := os.Getenv("PRESERVE_NAMESPACES"); preserveNamespaces != "" { - env.PreserveNamespaces = strings.Fields(preserveNamespaces) - } - clientDiscovery, err := utils.GetDiscoveryClient() if err != nil { return nil, fmt.Errorf("could not get the discovery client: %w", err) @@ -163,8 +143,6 @@ func NewTestingEnvironment() (*TestingEnvironment, error) { return nil, fmt.Errorf("could not detect SeccompProfile support: %w", err) } - env.AzureConfiguration = newAzureConfigurationFromEnv() - return &env, nil } @@ -186,115 +164,20 @@ func (env TestingEnvironment) EventuallyExecCommand( return err } return nil - }, RetryTimeout, PollingTime).Should(Succeed()) + }, RetryTimeout, objects.PollingTime).Should(Succeed()) return stdOut, stdErr, err } -// ExecCommand wraps the utils.ExecCommand pre-setting values constant during -// tests -func (env TestingEnvironment) ExecCommand( +// 
CreateUniqueTestNamespace creates a namespace by using the passed prefix. +// Return the namespace name and any errors encountered. +// The namespace is automatically cleaned up at the end of the test. +func (env TestingEnvironment) CreateUniqueTestNamespace( ctx context.Context, - pod corev1.Pod, - containerName string, - timeout *time.Duration, - command ...string, -) (string, string, error) { - return utils.ExecCommand(ctx, env.Interface, env.RestClientConfig, - pod, containerName, timeout, command...) -} - -// GetPVCList gathers the current list of PVCs in a namespace -func (env TestingEnvironment) GetPVCList(namespace string) (*corev1.PersistentVolumeClaimList, error) { - pvcList := &corev1.PersistentVolumeClaimList{} - err := env.Client.List( - env.Ctx, pvcList, client.InNamespace(namespace), - ) - return pvcList, err -} - -// GetSnapshotList gathers the current list of VolumeSnapshots in a namespace -func (env TestingEnvironment) GetSnapshotList(namespace string) (*storagesnapshotv1.VolumeSnapshotList, error) { - list := &storagesnapshotv1.VolumeSnapshotList{} - err := env.Client.List(env.Ctx, list, client.InNamespace(namespace)) - - return list, err -} - -// GetJobList gathers the current list of jobs in a namespace -func (env TestingEnvironment) GetJobList(namespace string) (*batchv1.JobList, error) { - jobList := &batchv1.JobList{} - err := env.Client.List( - env.Ctx, jobList, client.InNamespace(namespace), - ) - return jobList, err -} - -// GetServiceAccountList gathers the current list of jobs in a namespace -func (env TestingEnvironment) GetServiceAccountList(namespace string) (*corev1.ServiceAccountList, error) { - serviceAccountList := &corev1.ServiceAccountList{} - err := env.Client.List( - env.Ctx, serviceAccountList, client.InNamespace(namespace), - ) - return serviceAccountList, err -} + crudClient client.Client, + namespacePrefix string, + opts ...client.CreateOption, +) (string, error) { + name := env.createdNamespaces.generateUniqueName(namespacePrefix) -// GetEventList gathers the current list of events in a namespace -func (env TestingEnvironment) GetEventList(namespace string) (*eventsv1.EventList, error) { - eventList := &eventsv1.EventList{} - err := env.Client.List( - env.Ctx, eventList, client.InNamespace(namespace), - ) - return eventList, err -} - -// GetNodeList gathers the current list of Nodes -func (env TestingEnvironment) GetNodeList() (*corev1.NodeList, error) { - nodeList := &corev1.NodeList{} - err := env.Client.List(env.Ctx, nodeList, client.InNamespace("")) - return nodeList, err -} - -// GetBackupList gathers the current list of backup in namespace -func (env TestingEnvironment) GetBackupList(namespace string) (*apiv1.BackupList, error) { - backupList := &apiv1.BackupList{} - err := env.Client.List( - env.Ctx, backupList, client.InNamespace(namespace), - ) - return backupList, err -} - -// GetScheduledBackupList gathers the current list of scheduledBackup in namespace -func (env TestingEnvironment) GetScheduledBackupList(namespace string) (*apiv1.ScheduledBackupList, error) { - scheduledBackupList := &apiv1.ScheduledBackupList{} - err := env.Client.List( - env.Ctx, scheduledBackupList, client.InNamespace(namespace), - ) - return scheduledBackupList, err -} - -// GetResourceNamespacedNameFromYAML returns the NamespacedName representing a resource in a YAML file -func (env TestingEnvironment) GetResourceNamespacedNameFromYAML(path string) (types.NamespacedName, error) { - data, err := os.ReadFile(filepath.Clean(path)) - if err != nil { - return 
types.NamespacedName{}, err - } - decoder := serializer.NewCodecFactory(env.Scheme).UniversalDeserializer() - obj, _, err := decoder.Decode(data, nil, nil) - if err != nil { - return types.NamespacedName{}, err - } - objectMeta, err := meta.Accessor(obj) - if err != nil { - return types.NamespacedName{}, err - } - return types.NamespacedName{Namespace: objectMeta.GetNamespace(), Name: objectMeta.GetName()}, nil -} - -// GetResourceNameFromYAML returns the name of a resource in a YAML file -func (env TestingEnvironment) GetResourceNameFromYAML(path string) (string, error) { - namespacedName, err := env.GetResourceNamespacedNameFromYAML(path) - if err != nil { - return "", err - } - return namespacedName.Name, err + return name, namespaces.CreateTestNamespace(ctx, crudClient, name, opts...) } diff --git a/tests/utils/namespace_test.go b/tests/utils/environment/environment_test.go similarity index 98% rename from tests/utils/namespace_test.go rename to tests/utils/environment/environment_test.go index 2919a8f915..914e8386a9 100644 --- a/tests/utils/namespace_test.go +++ b/tests/utils/environment/environment_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package environment import ( . "github.com/onsi/ginkgo/v2" diff --git a/tests/utils/job.go b/tests/utils/environment/suite_test.go similarity index 52% rename from tests/utils/job.go rename to tests/utils/environment/suite_test.go index a9ae454301..61c876f728 100644 --- a/tests/utils/job.go +++ b/tests/utils/environment/suite_test.go @@ -14,28 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package environment import ( - "errors" - "fmt" + "testing" - batchv1 "k8s.io/api/batch/v1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" ) -// GetJob gets a Job by namespace and name -func (env TestingEnvironment) GetJob(namespace, jobName string) (*batchv1.Job, error) { - wrapErr := func(err error) error { - return fmt.Errorf("while getting job '%s/%s': %w", namespace, jobName, err) - } - jobList, err := env.GetJobList(namespace) - if err != nil { - return nil, wrapErr(err) - } - for _, job := range jobList.Items { - if jobName == job.Name { - return &job, nil - } - } - return nil, wrapErr(errors.New("job not found")) +func TestUtils(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Utils test environment suite") } diff --git a/tests/utils/envsubst/doc.go b/tests/utils/envsubst/doc.go new file mode 100644 index 0000000000..a7d2676b31 --- /dev/null +++ b/tests/utils/envsubst/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package envsubst manages the replacement of env variables in a file
+package envsubst
diff --git a/tests/utils/envsubst.go b/tests/utils/envsubst/envsubst.go
similarity index 99%
rename from tests/utils/envsubst.go
rename to tests/utils/envsubst/envsubst.go
index 74e23f77a8..c4290b38b5 100644
--- a/tests/utils/envsubst.go
+++ b/tests/utils/envsubst/envsubst.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package utils
+package envsubst
 
 import (
 	"bytes"
diff --git a/tests/utils/envsubst_test.go b/tests/utils/envsubst/envsubst_test.go
similarity index 99%
rename from tests/utils/envsubst_test.go
rename to tests/utils/envsubst/envsubst_test.go
index 1557a2dccf..da5df755e0 100644
--- a/tests/utils/envsubst_test.go
+++ b/tests/utils/envsubst/envsubst_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package utils
+package envsubst
 
 import (
 	"errors"
diff --git a/tests/utils/lease.go b/tests/utils/envsubst/suite_test.go
similarity index 51%
rename from tests/utils/lease.go
rename to tests/utils/envsubst/suite_test.go
index a74de844be..4ac5c2b4c5 100644
--- a/tests/utils/lease.go
+++ b/tests/utils/envsubst/suite_test.go
@@ -14,20 +14,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package utils
+package envsubst
 
 import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"testing"
 
-	"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
 )
 
-// GetLeaderInfoFromLease gathers leader holderIdentity from the lease
-func GetLeaderInfoFromLease(operatorNamespace string, env *TestingEnvironment) (string, error) {
-	leaseInterface := env.Interface.CoordinationV1().Leases(operatorNamespace)
-	lease, err := leaseInterface.Get(env.Ctx, controller.LeaderElectionID, metav1.GetOptions{})
-	if err != nil {
-		return "", err
-	}
-	return *lease.Spec.HolderIdentity, nil
+func TestUtils(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Utils test envsubst suite")
 }
diff --git a/tests/utils/exec/exec.go b/tests/utils/exec/exec.go
new file mode 100644
index 0000000000..58ca134ca6
--- /dev/null
+++ b/tests/utils/exec/exec.go
@@ -0,0 +1,156 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package exec provides functions to execute commands inside pods or locally
+package exec
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+	pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+
+	. 
"github.com/onsi/gomega" // nolint +) + +// ContainerLocator contains the necessary data to find a container on a pod +type ContainerLocator struct { + Namespace string + PodName string + ContainerName string +} + +// CommandInContainer executes commands in a given instance pod, in the +// postgres container +func CommandInContainer( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + container ContainerLocator, + timeout *time.Duration, + command ...string, +) (string, string, error) { + wrapErr := func(err error) error { + return fmt.Errorf("while executing command in pod '%s/%s': %w", + container.Namespace, container.PodName, err) + } + pod, err := pods.Get(ctx, crudClient, container.Namespace, container.PodName) + if err != nil { + return "", "", wrapErr(err) + } + if !pkgutils.IsPodReady(*pod) { + return "", "", fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name) + } + return Command(ctx, kubeInterface, restConfig, *pod, container.ContainerName, timeout, command...) +} + +// PodLocator contains the necessary data to find a pod +type PodLocator struct { + Namespace string + PodName string +} + +// CommandInInstancePod executes commands in a given instance pod, in the +// postgres container +func CommandInInstancePod( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + podLocator PodLocator, + timeout *time.Duration, + command ...string, +) (string, string, error) { + return CommandInContainer( + ctx, crudClient, kubeInterface, restConfig, + ContainerLocator{ + Namespace: podLocator.Namespace, + PodName: podLocator.PodName, + ContainerName: specs.PostgresContainerName, + }, timeout, command...) 
+} + +// DatabaseName is a special type for the database argument in an Exec call +type DatabaseName string + +// QueryInInstancePod executes a query in an instance pod, by connecting to the pod +// and the postgres container, and using a local connection with the postgres user +func QueryInInstancePod( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + podLocator PodLocator, + dbname DatabaseName, + query string, +) (string, string, error) { + timeout := time.Second * 10 + return CommandInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + PodLocator{ + Namespace: podLocator.Namespace, + PodName: podLocator.PodName, + }, &timeout, "psql", "-U", "postgres", string(dbname), "-tAc", query) +} + +// EventuallyExecQueryInInstancePod wraps QueryInInstancePod with an Eventually clause +func EventuallyExecQueryInInstancePod( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + podLocator PodLocator, + dbname DatabaseName, + query string, + retryTimeout int, + pollingTime int, +) (string, string, error) { + var stdOut, stdErr string + var err error + + Eventually(func() error { + stdOut, stdErr, err = QueryInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + podLocator, dbname, query) + return err + }, retryTimeout, pollingTime).Should(Succeed()) + + return stdOut, stdErr, err +} + +// Command wraps the utils.ExecCommand pre-setting values constant during +// tests +func Command( + ctx context.Context, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + pod v1.Pod, + containerName string, + timeout *time.Duration, + command ...string, +) (string, string, error) { + return pkgutils.ExecCommand(ctx, kubeInterface, restConfig, + pod, containerName, timeout, command...) +} diff --git a/tests/utils/fence.go b/tests/utils/fencing/fencing.go similarity index 61% rename from tests/utils/fence.go rename to tests/utils/fencing/fencing.go index bbbc52491d..f48ffaf420 100644 --- a/tests/utils/fence.go +++ b/tests/utils/fencing/fencing.go @@ -14,47 +14,52 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package utils +// Package fencing provides functions to manage the fencing on cnpg clusters +package fencing import ( + "context" "fmt" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" ) -// FencingMethod will be one of the supported ways to trigger an instance fencing -type FencingMethod string +// Method will be one of the supported ways to trigger an instance fencing +type Method string const ( // UsingAnnotation it is a keyword to use while fencing on/off the instances using annotation method - UsingAnnotation FencingMethod = "annotation" + UsingAnnotation Method = "annotation" // UsingPlugin it is a keyword to use while fencing on/off the instances using plugin method - UsingPlugin FencingMethod = "plugin" + UsingPlugin Method = "plugin" ) -// FencingOn marks an instance in a cluster as fenced -func FencingOn( - env *TestingEnvironment, +// On marks an instance in a cluster as fenced +func On( + ctx context.Context, + crudClient client.Client, serverName, namespace, clusterName string, - fencingMethod FencingMethod, + fencingMethod Method, ) error { switch fencingMethod { case UsingPlugin: - _, _, err := Run(fmt.Sprintf("kubectl cnpg fencing on %v %v -n %v", + _, _, err := run.Run(fmt.Sprintf("kubectl cnpg fencing on %v %v -n %v", clusterName, serverName, namespace)) if err != nil { return err } case UsingAnnotation: - err := utils.NewFencingMetadataExecutor(env.Client). + err := utils.NewFencingMetadataExecutor(crudClient). AddFencing(). ForInstance(serverName). - Execute(env.Ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{}) + Execute(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{}) if err != nil { return err } @@ -64,26 +69,27 @@ func FencingOn( return nil } -// FencingOff marks an instance in a cluster as not fenced -func FencingOff( - env *TestingEnvironment, +// Off marks an instance in a cluster as not fenced +func Off( + ctx context.Context, + crudClient client.Client, serverName, namespace, clusterName string, - fencingMethod FencingMethod, + fencingMethod Method, ) error { switch fencingMethod { case UsingPlugin: - _, _, err := Run(fmt.Sprintf("kubectl cnpg fencing off %v %v -n %v", + _, _, err := run.Run(fmt.Sprintf("kubectl cnpg fencing off %v %v -n %v", clusterName, serverName, namespace)) if err != nil { return err } case UsingAnnotation: - err := utils.NewFencingMetadataExecutor(env.Client). + err := utils.NewFencingMetadataExecutor(crudClient). RemoveFencing(). ForInstance(serverName). - Execute(env.Ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{}) + Execute(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, &apiv1.Cluster{}) if err != nil { return err } diff --git a/tests/utils/hibernate.go b/tests/utils/hibernate.go deleted file mode 100644 index 3faee5a5a6..0000000000 --- a/tests/utils/hibernate.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "context" - "fmt" - - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// HibernationMethod will be one of the supported ways to trigger an instance fencing -type HibernationMethod string - -const ( - // HibernateDeclaratively it is a keyword to use while fencing on/off the instances using annotation method - HibernateDeclaratively HibernationMethod = "annotation" - // HibernateImperatively it is a keyword to use while fencing on/off the instances using plugin method - HibernateImperatively HibernationMethod = "plugin" -) - -// HibernateOn hibernate on a cluster -func HibernateOn( - env *TestingEnvironment, - namespace, - clusterName string, - method HibernationMethod, -) error { - switch method { - case HibernateImperatively: - _, _, err := Run(fmt.Sprintf("kubectl cnpg hibernate on %v -n %v", - clusterName, namespace)) - if err != nil { - return err - } - return nil - case HibernateDeclaratively: - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return err - } - if cluster.Annotations == nil { - cluster.Annotations = make(map[string]string) - } - originCluster := cluster.DeepCopy() - cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOn - - err = env.Client.Patch(context.Background(), cluster, ctrlclient.MergeFrom(originCluster)) - return err - default: - return fmt.Errorf("unknown method: %v", method) - } -} - -// HibernateOff hibernate off a cluster -func HibernateOff( - env *TestingEnvironment, - namespace, - clusterName string, - method HibernationMethod, -) error { - switch method { - case HibernateImperatively: - _, _, err := Run(fmt.Sprintf("kubectl cnpg hibernate off %v -n %v", - clusterName, namespace)) - if err != nil { - return err - } - return nil - case HibernateDeclaratively: - cluster, err := env.GetCluster(namespace, clusterName) - if err != nil { - return err - } - if cluster.Annotations == nil { - cluster.Annotations = make(map[string]string) - } - originCluster := cluster.DeepCopy() - cluster.Annotations[utils.HibernationAnnotationName] = hibernation.HibernationOff - - err = env.Client.Patch(context.Background(), cluster, ctrlclient.MergeFrom(originCluster)) - return err - default: - return fmt.Errorf("unknown method: %v", method) - } -} diff --git a/tests/utils/import_db.go b/tests/utils/importdb/import_db.go similarity index 84% rename from tests/utils/import_db.go rename to tests/utils/importdb/import_db.go index ccb5e62175..1316a76fab 100644 --- a/tests/utils/import_db.go +++ b/tests/utils/importdb/import_db.go @@ -14,17 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package utils +// Package importdb contains the functions to import a database +package importdb import ( + "context" "fmt" "os" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/services" ) // ImportDatabaseMicroservice creates a cluster, starting from an external cluster @@ -32,18 +38,19 @@ import ( // NOTE: the application user on the source Cluster needs to be granted with // REPLICATION permissions, which are not set by default func ImportDatabaseMicroservice( + ctx context.Context, + crudClient client.Client, namespace, sourceClusterName, importedClusterName, imageName, databaseName string, - env *TestingEnvironment, ) (*apiv1.Cluster, error) { if imageName == "" { imageName = os.Getenv("POSTGRES_IMG") } storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - host, err := GetHostName(namespace, sourceClusterName, env) + host, err := services.GetHostName(ctx, crudClient, namespace, sourceClusterName) if err != nil { return nil, err } @@ -82,8 +89,8 @@ func ImportDatabaseMicroservice( Name: sourceClusterName, ConnectionParameters: map[string]string{ "host": host, - "user": AppUser, - "dbname": AppDBName, + "user": postgres.AppUser, + "dbname": postgres.AppDBName, }, Password: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ @@ -96,7 +103,7 @@ func ImportDatabaseMicroservice( }, } - obj, err := CreateObject(env, restoreCluster) + obj, err := objects.Create(ctx, crudClient, restoreCluster) if err != nil { return nil, err } @@ -112,19 +119,20 @@ func ImportDatabaseMicroservice( // Imports all the specified `databaseNames` and `roles` from the source cluster // NOTE: enableSuperuserAccess needs to be enabled func ImportDatabasesMonolith( + ctx context.Context, + crudClient client.Client, namespace, sourceClusterName, importedClusterName, imageName string, databaseNames []string, roles []string, - env *TestingEnvironment, ) (*apiv1.Cluster, error) { if imageName == "" { imageName = os.Getenv("POSTGRES_IMG") } storageClassName := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") - host, err := GetHostName(namespace, sourceClusterName, env) + host, err := services.GetHostName(ctx, crudClient, namespace, sourceClusterName) if err != nil { return nil, err } @@ -161,8 +169,8 @@ func ImportDatabasesMonolith( Name: sourceClusterName, ConnectionParameters: map[string]string{ "host": host, - "user": PostgresUser, - "dbname": PostgresDBName, + "user": postgres.PostgresUser, + "dbname": postgres.PostgresDBName, }, Password: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ @@ -175,7 +183,7 @@ func ImportDatabasesMonolith( }, } - obj, err := CreateObject(env, targetCluster) + obj, err := objects.Create(ctx, crudClient, targetCluster) if err != nil { return nil, err } diff --git a/tests/utils/logs/doc.go b/tests/utils/logs/doc.go new file mode 100644 index 0000000000..4af5e3c745 --- /dev/null +++ b/tests/utils/logs/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package logs provides a way to parse and get the logs of a pod +package logs diff --git a/tests/utils/logs.go b/tests/utils/logs/logs.go similarity index 93% rename from tests/utils/logs.go rename to tests/utils/logs/logs.go index 04c7e2318c..f344713426 100644 --- a/tests/utils/logs.go +++ b/tests/utils/logs/logs.go @@ -14,21 +14,30 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package logs import ( + "context" "encoding/json" "fmt" "slices" "strings" "time" + + "k8s.io/client-go/kubernetes" + + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" ) // ParseJSONLogs returns the pod's logs of a given pod name, // in the form of a list of JSON entries -func ParseJSONLogs(namespace string, podName string, env *TestingEnvironment) ([]map[string]interface{}, error) { +func ParseJSONLogs( + ctx context.Context, + kubeInterface kubernetes.Interface, + namespace string, podName string, +) ([]map[string]interface{}, error) { // Gather pod logs - podLogs, err := env.GetPodLogs(namespace, podName) + podLogs, err := pods.Logs(ctx, kubeInterface, namespace, podName) if err != nil { return nil, err } diff --git a/tests/utils/logs_test.go b/tests/utils/logs/logs_test.go similarity index 99% rename from tests/utils/logs_test.go rename to tests/utils/logs/logs_test.go index d7fd064253..9f951bc06e 100644 --- a/tests/utils/logs_test.go +++ b/tests/utils/logs/logs_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package logs import ( "encoding/json" diff --git a/tests/utils/suite_test.go b/tests/utils/logs/suite_test.go similarity index 93% rename from tests/utils/suite_test.go rename to tests/utils/logs/suite_test.go index e15d55b783..329766c9e7 100644 --- a/tests/utils/suite_test.go +++ b/tests/utils/logs/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package utils +package logs import ( "testing" @@ -25,5 +25,5 @@ import ( func TestUtils(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Utils test suite") + RunSpecs(t, "Utils test logs suite") } diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go index a5f878ed85..27befd0a26 100644 --- a/tests/utils/minio/minio.go +++ b/tests/utils/minio/minio.go @@ -39,7 +39,10 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" ) const ( @@ -75,7 +78,7 @@ type TagSet struct { // installMinio installs minio in a given namespace func installMinio( - env *utils.TestingEnvironment, + env *environment.TestingEnvironment, minioSetup Setup, timeoutSeconds uint, ) error { @@ -453,7 +456,7 @@ func sslClient(namespace string) corev1.Pod { } // Deploy will create a full MinIO deployment defined inthe minioEnv variable -func Deploy(minioEnv *Env, env *utils.TestingEnvironment) (*corev1.Pod, error) { +func Deploy(minioEnv *Env, env *environment.TestingEnvironment) (*corev1.Pod, error) { var err error minioEnv.CaPair, err = certs.CreateRootCA(minioEnv.Namespace, "minio") if err != nil { @@ -461,7 +464,7 @@ func Deploy(minioEnv *Env, env *utils.TestingEnvironment) (*corev1.Pod, error) { } minioEnv.CaSecretObj = *minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName) - if _, err = utils.CreateObject(env, &minioEnv.CaSecretObj); err != nil { + if _, err = objects.Create(env.Ctx, env.Client, &minioEnv.CaSecretObj); err != nil { return nil, err } @@ -488,10 +491,10 @@ func Deploy(minioEnv *Env, env *utils.TestingEnvironment) (*corev1.Pod, error) { minioClient := sslClient(minioEnv.Namespace) - return &minioClient, utils.PodCreateAndWaitForReady(env, &minioClient, 240) + return &minioClient, pods.CreateAndWaitForReady(env.Ctx, env.Client, &minioClient, 240) } -func (m *Env) getCaSecret(env *utils.TestingEnvironment, namespace string) (*corev1.Secret, error) { +func (m *Env) getCaSecret(env *environment.TestingEnvironment, namespace string) (*corev1.Secret, error) { var certSecret corev1.Secret if err := env.Client.Get(env.Ctx, types.NamespacedName{ @@ -512,12 +515,12 @@ func (m *Env) getCaSecret(env *utils.TestingEnvironment, namespace string) (*cor } // CreateCaSecret creates the certificates required to authenticate against the the MinIO service -func (m *Env) CreateCaSecret(env *utils.TestingEnvironment, namespace string) error { +func (m *Env) CreateCaSecret(env *environment.TestingEnvironment, namespace string) error { caSecret, err := m.getCaSecret(env, namespace) if err != nil { return err } - _, err = utils.CreateObject(env, caSecret) + _, err = objects.Create(env.Ctx, env.Client, caSecret) return err } @@ -525,7 +528,7 @@ func (m *Env) CreateCaSecret(env *utils.TestingEnvironment, namespace string) er // amount of files matching the given `path` func CountFiles(minioEnv *Env, path string) (value int, err error) { var stdout string - stdout, _, err = utils.RunUnchecked(fmt.Sprintf( + stdout, _, err = run.Unchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, @@ -541,7 +544,7 @@ func CountFiles(minioEnv *Env, path string) (value int, err error) { // 
paths matching the given `path` func ListFiles(minioEnv *Env, path string) (string, error) { var stdout string - stdout, _, err := utils.RunUnchecked(fmt.Sprintf( + stdout, _, err := run.Unchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, @@ -571,7 +574,7 @@ func composeFindCmd(path string, serviceName string) string { func GetFileTags(minioEnv *Env, path string) (TagSet, error) { var output TagSet // Make sure we have a registered backup to access - out, _, err := utils.RunUncheckedRetry(fmt.Sprintf( + out, _, err := run.UncheckedRetry(fmt.Sprintf( "kubectl exec -n %v %v -- sh -c 'mc find minio --path %v | head -n1'", minioEnv.Namespace, minioEnv.Client.Name, @@ -582,7 +585,7 @@ func GetFileTags(minioEnv *Env, path string) (TagSet, error) { walFile := strings.Trim(out, "\n") - stdout, _, err := utils.RunUncheckedRetry(fmt.Sprintf( + stdout, _, err := run.UncheckedRetry(fmt.Sprintf( "kubectl exec -n %v %v -- sh -c 'mc --json tag list %v'", minioEnv.Namespace, minioEnv.Client.Name, @@ -613,7 +616,7 @@ func TestConnectivityUsingBarmanCloudWalArchive( "barman-cloud-wal-archive --cloud-provider aws-s3 --endpoint-url https://%s:9000 s3://cluster-backups/ %s "+ "000000010000000000000000 --test", postgres.BarmanBackupEndpointCACertificateLocation, id, key, minioSvcName, clusterName) - _, _, err := utils.RunUnchecked(fmt.Sprintf( + _, _, err := run.Unchecked(fmt.Sprintf( "kubectl exec -n %v %v -c postgres -- /bin/bash -c \"%v\"", namespace, podName, @@ -627,7 +630,7 @@ func TestConnectivityUsingBarmanCloudWalArchive( // CleanFiles clean files on minio for a given path func CleanFiles(minioEnv *Env, path string) (string, error) { var stdout string - stdout, _, err := utils.RunUnchecked(fmt.Sprintf( + stdout, _, err := run.Unchecked(fmt.Sprintf( "kubectl exec -n %v %v -- %v", minioEnv.Namespace, minioEnv.Client.Name, diff --git a/tests/utils/namespace.go b/tests/utils/namespace.go deleted file mode 100644 index 65a9513278..0000000000 --- a/tests/utils/namespace.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "bytes" - "context" - "errors" - "fmt" - "path" - "strings" - "time" - - "github.com/cloudnative-pg/machinery/pkg/fileutils" - "github.com/onsi/ginkgo/v2" - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" -) - -// GetOperatorLogs collects the operator logs -func (env TestingEnvironment) GetOperatorLogs(buf *bytes.Buffer) error { - operatorPod, err := env.GetOperatorPod() - if err != nil { - return err - } - - streamPodLog := logs.StreamingRequest{ - Pod: &operatorPod, - Options: &corev1.PodLogOptions{ - Timestamps: false, - Follow: false, - }, - Client: env.Interface, - } - return streamPodLog.Stream(env.Ctx, buf) -} - -// CleanupNamespace does cleanup duty related to the tear-down of a namespace, -// and is intended to be called in a DeferCleanup clause -func (env TestingEnvironment) CleanupNamespace( - namespace string, - testName string, - testFailed bool, -) error { - if testFailed { - env.DumpNamespaceObjects(namespace, "out/"+testName+".log") - } - - if len(namespace) == 0 { - return fmt.Errorf("namespace is empty") - } - exists, _ := fileutils.FileExists(path.Join(env.SternLogDir, namespace)) - if exists && !testFailed { - err := fileutils.RemoveDirectory(path.Join(env.SternLogDir, namespace)) - if err != nil { - return err - } - } - - return env.DeleteNamespace(namespace) -} - -// CreateUniqueTestNamespace creates a namespace by using the passed prefix. -// Return the namespace name and any errors encountered. -// The namespace is automatically cleaned up at the end of the test. -func (env TestingEnvironment) CreateUniqueTestNamespace( - namespacePrefix string, - opts ...client.CreateOption, -) (string, error) { - name := env.createdNamespaces.generateUniqueName(namespacePrefix) - - return name, env.CreateTestNamespace(name, opts...) -} - -// CreateTestNamespace creates a namespace creates a namespace. -// Prefer CreateUniqueTestNamespace instead, unless you need a -// specific namespace name. If so, make sure there is no collision -// potential. -// The namespace is automatically cleaned up at the end of the test. -func (env TestingEnvironment) CreateTestNamespace( - name string, - opts ...client.CreateOption, -) error { - err := env.CreateNamespace(name, opts...) - if err != nil { - return err - } - - ginkgo.DeferCleanup(func() error { - return env.CleanupNamespace( - name, - ginkgo.CurrentSpecReport().LeafNodeText, - ginkgo.CurrentSpecReport().Failed(), - ) - }) - - return nil -} - -// CreateNamespace creates a namespace. -func (env TestingEnvironment) CreateNamespace(name string, opts ...client.CreateOption) error { - // Exit immediately if the name is empty - if name == "" { - return errors.New("cannot create namespace with empty name") - } - - u := &unstructured.Unstructured{} - u.SetName(name) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "Namespace", - }) - _, err := CreateObject(&env, u, opts...) 
- return err -} - -// EnsureNamespace checks for the presence of a namespace, and if it does not -// exist, creates it -func (env TestingEnvironment) EnsureNamespace(namespace string) error { - var nsList corev1.NamespaceList - err := GetObjectList(&env, &nsList) - if err != nil { - return err - } - for _, ns := range nsList.Items { - if ns.Name == namespace { - return nil - } - } - return env.CreateNamespace(namespace) -} - -// DeleteNamespace deletes a namespace if existent -func (env TestingEnvironment) DeleteNamespace(name string, opts ...client.DeleteOption) error { - // Exit immediately if the name is empty - if name == "" { - return errors.New("cannot delete namespace with empty name") - } - - // Exit immediately if the namespace is listed in PreserveNamespaces - for _, v := range env.PreserveNamespaces { - if strings.HasPrefix(name, v) { - return nil - } - } - - u := &unstructured.Unstructured{} - u.SetName(name) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "Namespace", - }) - - return DeleteObject(&env, u, opts...) -} - -// DeleteNamespaceAndWait deletes a namespace if existent and returns when deletion is completed -func (env TestingEnvironment) DeleteNamespaceAndWait(name string, timeoutSeconds int) error { - // Exit immediately if the namespace is listed in PreserveNamespaces - for _, v := range env.PreserveNamespaces { - if strings.HasPrefix(name, v) { - return nil - } - } - - ctx, cancel := context.WithTimeout(env.Ctx, time.Duration(timeoutSeconds)*time.Second) - defer cancel() - - err := env.DeleteNamespace(name, client.PropagationPolicy("Background")) - if err != nil { - return err - } - - pods, err := env.GetPodList(name) - if err != nil { - return err - } - - for _, pod := range pods.Items { - err = env.DeletePod(name, pod.Name, client.GracePeriodSeconds(1), client.PropagationPolicy("Background")) - if err != nil && !apierrs.IsNotFound(err) { - return err - } - } - - return wait.PollUntilContextCancel(ctx, time.Second, true, - func(ctx context.Context) (bool, error) { - err := env.Client.Get(ctx, client.ObjectKey{Name: name}, &corev1.Namespace{}) - if apierrs.IsNotFound(err) { - return true, nil - } - return false, err - }, - ) -} diff --git a/tests/utils/namespaces/namespace.go b/tests/utils/namespaces/namespace.go new file mode 100644 index 0000000000..a4e27dc91e --- /dev/null +++ b/tests/utils/namespaces/namespace.go @@ -0,0 +1,377 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package namespaces provides utilities to manage namespaces
+package namespaces
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/cloudnative-pg/machinery/pkg/fileutils"
+	"github.com/onsi/ginkgo/v2"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/events/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+)
+
+// SternLogDirectory contains the fixed path to store the cluster logs
+const SternLogDirectory = "cluster_logs/"
+
+func getPreserveNamespaces() []string {
+	var preserveNamespacesList []string
+	_, ok := os.LookupEnv("PRESERVE_NAMESPACES")
+	if ok {
+		preserveNamespacesList = strings.Fields(os.Getenv("PRESERVE_NAMESPACES"))
+	}
+
+	return preserveNamespacesList
+}
+
+// cleanupNamespace does cleanup duty related to the tear-down of a namespace,
+// and is intended to be called in a DeferCleanup clause
+func cleanupNamespace(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, testName string,
+	testFailed bool,
+) error {
+	if testFailed {
+		DumpNamespaceObjects(ctx, crudClient, namespace, "out/"+testName+".log")
+	}
+
+	if len(namespace) == 0 {
+		return fmt.Errorf("namespace is empty")
+	}
+	exists, _ := fileutils.FileExists(path.Join(SternLogDirectory, namespace))
+	if exists && !testFailed {
+		err := fileutils.RemoveDirectory(path.Join(SternLogDirectory, namespace))
+		if err != nil {
+			return err
+		}
+	}
+
+	return deleteNamespace(ctx, crudClient, namespace)
+}
+
+// CreateTestNamespace creates a namespace with the given name.
+// Prefer CreateUniqueTestNamespace instead, unless you need a
+// specific namespace name. If so, make sure there is no collision
+// potential.
+// The namespace is automatically cleaned up at the end of the test.
+func CreateTestNamespace(
+	ctx context.Context,
+	crudClient client.Client,
+	name string,
+	opts ...client.CreateOption,
+) error {
+	err := CreateNamespace(ctx, crudClient, name, opts...)
+	if err != nil {
+		return err
+	}
+
+	ginkgo.DeferCleanup(func() error {
+		return cleanupNamespace(
+			ctx,
+			crudClient,
+			name,
+			ginkgo.CurrentSpecReport().LeafNodeText,
+			ginkgo.CurrentSpecReport().Failed(),
+		)
+	})
+
+	return nil
+}
+
+// CreateNamespace creates a namespace.
+func CreateNamespace(
+	ctx context.Context,
+	crudClient client.Client,
+	name string,
+	opts ...client.CreateOption,
+) error {
+	// Exit immediately if the name is empty
+	if name == "" {
+		return errors.New("cannot create namespace with empty name")
+	}
+
+	u := &unstructured.Unstructured{}
+	u.SetName(name)
+	u.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "",
+		Version: "v1",
+		Kind:    "Namespace",
+	})
+	_, err := objects.Create(ctx, crudClient, u, opts...)
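A minimal usage sketch of the namespace helpers introduced above, for review context only: it is not part of the patch, and the package name, the suite wiring (the crudClient variable), and the namespace name are all assumed.

package example

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces"
)

// crudClient is assumed to be initialized by the suite setup.
var crudClient client.Client

var _ = It("creates a disposable namespace", func(ctx SpecContext) {
	// CreateTestNamespace registers the namespace for deletion through
	// ginkgo.DeferCleanup, so the spec never tears it down explicitly.
	Expect(namespaces.CreateTestNamespace(ctx, crudClient, "sketch-ns")).To(Succeed())
})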
+ return err +} + +// EnsureNamespace checks for the presence of a namespace, and if it does not +// exist, creates it +func EnsureNamespace( + ctx context.Context, + crudClient client.Client, + namespace string, +) error { + var nsList corev1.NamespaceList + err := objects.List(ctx, crudClient, &nsList) + if err != nil { + return err + } + for _, ns := range nsList.Items { + if ns.Name == namespace { + return nil + } + } + return CreateNamespace(ctx, crudClient, namespace) +} + +// deleteNamespace deletes a namespace if existent +func deleteNamespace( + ctx context.Context, + crudClient client.Client, + name string, + opts ...client.DeleteOption, +) error { + // Exit immediately if the name is empty + if name == "" { + return errors.New("cannot delete namespace with empty name") + } + + // Exit immediately if the namespace is listed in PreserveNamespaces + for _, v := range getPreserveNamespaces() { + if strings.HasPrefix(name, v) { + return nil + } + } + + u := &unstructured.Unstructured{} + u.SetName(name) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Namespace", + }) + + return objects.Delete(ctx, crudClient, u, opts...) +} + +// DeleteNamespaceAndWait deletes a namespace if existent and returns when deletion is completed +func DeleteNamespaceAndWait( + ctx context.Context, + crudClient client.Client, + name string, + timeoutSeconds int, +) error { + // Exit immediately if the namespace is listed in PreserveNamespaces + for _, v := range getPreserveNamespaces() { + if strings.HasPrefix(name, v) { + return nil + } + } + + ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) + defer cancel() + + err := deleteNamespace(ctx, crudClient, name, client.PropagationPolicy("Background")) + if err != nil { + return err + } + + podList, err := pods.List(ctx, crudClient, name) + if err != nil { + return err + } + + for _, pod := range podList.Items { + err = pods.Delete( + ctx, crudClient, + name, pod.Name, + client.GracePeriodSeconds(1), client.PropagationPolicy("Background"), + ) + if err != nil && !apierrs.IsNotFound(err) { + return err + } + } + + return wait.PollUntilContextCancel(ctx, time.Second, true, + func(ctx context.Context) (bool, error) { + err := crudClient.Get(ctx, client.ObjectKey{Name: name}, &corev1.Namespace{}) + if apierrs.IsNotFound(err) { + return true, nil + } + return false, err + }, + ) +} + +// DumpNamespaceObjects logs the clusters, pods, pvcs etc. 
found in a namespace as JSON sections
+func DumpNamespaceObjects(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, filename string,
+) {
+	f, err := os.Create(filepath.Clean(filename))
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer func() {
+		_ = f.Sync()
+		_ = f.Close()
+	}()
+	w := bufio.NewWriter(f)
+	clusterList := &apiv1.ClusterList{}
+	_ = objects.List(ctx, crudClient, clusterList, client.InNamespace(namespace))
+
+	for _, cluster := range clusterList.Items {
+		out, _ := json.MarshalIndent(cluster, "", "    ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v cluster\n", namespace, cluster.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	podList, _ := pods.List(ctx, crudClient, namespace)
+	for _, pod := range podList.Items {
+		out, _ := json.MarshalIndent(pod, "", "    ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	pvcList, _ := storage.GetPVCList(ctx, crudClient, namespace)
+	for _, pvc := range pvcList.Items {
+		out, _ := json.MarshalIndent(pvc, "", "    ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v PVC\n", namespace, pvc.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	jobList := &batchv1.JobList{}
+	_ = crudClient.List(
+		ctx, jobList, client.InNamespace(namespace),
+	)
+	for _, job := range jobList.Items {
+		out, _ := json.MarshalIndent(job, "", "    ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v job\n", namespace, job.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	eventList, _ := GetEventList(ctx, crudClient, namespace)
+	out, _ := json.MarshalIndent(eventList.Items, "", "    ")
+	_, _ = fmt.Fprintf(w, "Dumping events for namespace %v\n", namespace)
+	_, _ = fmt.Fprintln(w, string(out))
+
+	serviceAccountList, _ := GetServiceAccountList(ctx, crudClient, namespace)
+	for _, sa := range serviceAccountList.Items {
+		out, _ := json.MarshalIndent(sa, "", "    ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v serviceaccount\n", namespace, sa.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	suffixes := []string{"-r", "-rw", "-any"}
+	for _, cluster := range clusterList.Items {
+		for _, suffix := range suffixes {
+			namespacedName := types.NamespacedName{
+				Namespace: namespace,
+				Name:      cluster.Name + suffix,
+			}
+			endpoint := &corev1.Endpoints{}
+			_ = crudClient.Get(ctx, namespacedName, endpoint)
+			out, _ := json.MarshalIndent(endpoint, "", "    ")
+			_, _ = fmt.Fprintf(w, "Dumping %v/%v endpoint\n", namespace, endpoint.Name)
+			_, _ = fmt.Fprintln(w, string(out))
+		}
+	}
+	// dump backup info
+	backupList, _ := backups.List(ctx, crudClient, namespace)
+	// dump backup object info if it's configured
+	for _, backup := range backupList.Items {
+		out, _ := json.MarshalIndent(backup, "", "    ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v backup\n", namespace, backup.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+	// dump scheduledbackup info
+	scheduledBackupList, _ := GetScheduledBackupList(ctx, crudClient, namespace)
+	// dump scheduledbackup object info if it's configured
+	for _, scheduledBackup := range scheduledBackupList.Items {
+		out, _ := json.MarshalIndent(scheduledBackup, "", "    ")
+		_, _ = fmt.Fprintf(w, "Dumping %v/%v scheduledbackup\n", namespace, scheduledBackup.Name)
+		_, _ = fmt.Fprintln(w, string(out))
+	}
+
+	err = w.Flush()
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+}
+
+// GetServiceAccountList gathers the current list of service accounts in a namespace
+func GetServiceAccountList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*corev1.ServiceAccountList, error) {
+	serviceAccountList := &corev1.ServiceAccountList{}
+	err := crudClient.List(
+		ctx, serviceAccountList, client.InNamespace(namespace),
+	)
+	return serviceAccountList, err
+}
+
+// GetEventList gathers the current list of events in a namespace
+func GetEventList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*v1.EventList, error) {
+	eventList := &v1.EventList{}
+	err := crudClient.List(
+		ctx, eventList, client.InNamespace(namespace),
+	)
+	return eventList, err
+}
+
+// GetScheduledBackupList gathers the current list of scheduled backups in a namespace
+func GetScheduledBackupList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*apiv1.ScheduledBackupList, error) {
+	scheduledBackupList := &apiv1.ScheduledBackupList{}
+	err := crudClient.List(
+		ctx, scheduledBackupList, client.InNamespace(namespace),
+	)
+	return scheduledBackupList, err
+}
diff --git a/tests/utils/nodes/drain.go b/tests/utils/nodes/nodes.go
similarity index 54%
rename from tests/utils/nodes/drain.go
rename to tests/utils/nodes/nodes.go
index dc2ede03c4..95d4bd3122 100644
--- a/tests/utils/nodes/drain.go
+++ b/tests/utils/nodes/nodes.go
@@ -18,31 +18,38 @@ limitations under the License.
 package nodes
 
 import (
+	"context"
 	"fmt"
+	"strings"
 
-	"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
+	v1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
 
 	. "github.com/onsi/ginkgo/v2" //nolint
 	. "github.com/onsi/gomega" //nolint
 )
 
-// DrainPrimaryNode drains the node containing the primary pod.
+// DrainPrimary drains the node containing the primary pod.
 // It returns the names of the pods that were running on that node
-func DrainPrimaryNode(
+func DrainPrimary(
+	ctx context.Context,
+	crudClient client.Client,
 	namespace,
 	clusterName string,
 	timeoutSeconds int,
-	env *utils.TestingEnvironment,
 ) []string {
 	var primaryNode string
 	var podNames []string
 	By("identifying primary node and draining", func() {
-		pod, err := env.GetClusterPrimary(namespace, clusterName)
+		pod, err := clusterutils.GetPrimary(ctx, crudClient, namespace, clusterName)
 		Expect(err).ToNot(HaveOccurred())
 		primaryNode = pod.Spec.NodeName
 
 		// Gather the pods running on this node
-		podList, err := env.GetClusterPodList(namespace, clusterName)
+		podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName)
 		Expect(err).ToNot(HaveOccurred())
 		for _, pod := range podList.Items {
 			if pod.Spec.NodeName == primaryNode {
@@ -55,14 +62,14 @@ func DrainPrimaryNode(
 		Eventually(func() error {
 			cmd := fmt.Sprintf("kubectl drain %v --ignore-daemonsets --delete-emptydir-data --force --timeout=%ds",
 				primaryNode, timeoutSeconds)
-			stdout, stderr, err = utils.RunUnchecked(cmd)
+			stdout, stderr, err = run.Unchecked(cmd)
 			return err
 		}, timeoutSeconds).ShouldNot(HaveOccurred(), fmt.Sprintf("stdout: %s, stderr: %s", stdout, stderr))
 	})
 	By("ensuring no cluster pod is still running on the drained node", func() {
 		Eventually(func() ([]string, error) {
 			var usedNodes []string
-			podList, err := env.GetClusterPodList(namespace, clusterName)
+			podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName)
 			for _, pod := range podList.Items {
 				usedNodes = append(usedNodes, pod.Spec.NodeName)
 			}
@@ -73,18 +80,52 @@ func DrainPrimaryNode(
 	return podNames
 }
 
-// UncordonAllNodes executes the 'kubectl uncordon' command on each node of the list
-func UncordonAllNodes(env *utils.TestingEnvironment) 
error { - nodeList, err := env.GetNodeList() +// UncordonAll executes the 'kubectl uncordon' command on each node of the list +func UncordonAll( + ctx context.Context, + crudClient client.Client, +) error { + nodeList, err := List(ctx, crudClient) if err != nil { return err } for _, node := range nodeList.Items { command := fmt.Sprintf("kubectl uncordon %v", node.Name) - _, _, err = utils.Run(command) + _, _, err = run.Run(command) if err != nil { return err } } return nil } + +// List gathers the current list of Nodes +func List( + ctx context.Context, + crudClient client.Client, +) (*v1.NodeList, error) { + nodeList := &v1.NodeList{} + err := crudClient.List(ctx, nodeList, client.InNamespace("")) + return nodeList, err +} + +// DescribeKubernetesNodes prints the `describe node` for each node in the +// kubernetes cluster +func DescribeKubernetesNodes(ctx context.Context, crudClient client.Client) (string, error) { + nodeList, err := List(ctx, crudClient) + if err != nil { + return "", err + } + var report strings.Builder + for _, node := range nodeList.Items { + command := fmt.Sprintf("kubectl describe node %v", node.Name) + stdout, _, err := run.Run(command) + if err != nil { + return "", err + } + report.WriteString("================================================\n") + report.WriteString(stdout) + report.WriteString("================================================\n") + } + return report.String(), nil +} diff --git a/tests/utils/objects/objects.go b/tests/utils/objects/objects.go new file mode 100644 index 0000000000..af956106be --- /dev/null +++ b/tests/utils/objects/objects.go @@ -0,0 +1,117 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package objects provides functions to manage pure objects in Kubernetes +package objects + +import ( + "context" + "time" + + "github.com/avast/retry-go/v4" + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // RetryAttempts maximum number of attempts when it fails in `retry`. Mainly used in `RunUncheckedRetry` + RetryAttempts = 5 + + // PollingTime polling interval (in seconds) between retries + PollingTime = 5 +) + +// Create creates object in the Kubernetes cluster +func Create( + ctx context.Context, + crudClient client.Client, + object client.Object, + opts ...client.CreateOption, +) (client.Object, error) { + err := retry.Do( + func() error { + return crudClient.Create(ctx, object, opts...) + }, + retry.Delay(PollingTime*time.Second), + retry.Attempts(RetryAttempts), + retry.DelayType(retry.FixedDelay), + retry.RetryIf(func(err error) bool { return !errors.IsAlreadyExists(err) }), + ) + return object, err +} + +// Delete deletes an object in the Kubernetes cluster +func Delete( + ctx context.Context, + crudClient client.Client, + object client.Object, + opts ...client.DeleteOption, +) error { + err := retry.Do( + func() error { + return crudClient.Delete(ctx, object, opts...) 
+ }, + retry.Delay(PollingTime*time.Second), + retry.Attempts(RetryAttempts), + retry.DelayType(retry.FixedDelay), + retry.RetryIf(func(err error) bool { return !errors.IsNotFound(err) }), + ) + return err +} + +// List retrieves a list of objects +func List( + ctx context.Context, + crudClient client.Client, + objectList client.ObjectList, + opts ...client.ListOption, +) error { + err := retry.Do( + func() error { + err := crudClient.List(ctx, objectList, opts...) + if err != nil { + return err + } + return nil + }, + retry.Delay(PollingTime*time.Second), + retry.Attempts(RetryAttempts), + retry.DelayType(retry.FixedDelay), + ) + return err +} + +// Get retrieves an object for the given object key from the Kubernetes Cluster +func Get( + ctx context.Context, + crudClient client.Client, + objectKey client.ObjectKey, + object client.Object, +) error { + err := retry.Do( + func() error { + err := crudClient.Get(ctx, objectKey, object) + if err != nil { + return err + } + return nil + }, + retry.Delay(PollingTime*time.Second), + retry.Attempts(RetryAttempts), + retry.DelayType(retry.FixedDelay), + ) + return err +} diff --git a/tests/utils/openshift.go b/tests/utils/openshift/openshift.go similarity index 70% rename from tests/utils/openshift.go rename to tests/utils/openshift/openshift.go index 769ff8c413..2901962e83 100644 --- a/tests/utils/openshift.go +++ b/tests/utils/openshift/openshift.go @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +// Package openshift provides functions to work with OLM CRDs +package openshift import ( + "context" "fmt" "strings" @@ -27,14 +29,20 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" ) -// GetSubscription returns an unstructured subscription object -func GetSubscription(env *TestingEnvironment) (*unstructured.Unstructured, error) { +// getSubscription returns an unstructured subscription object +func getSubscription( + ctx context.Context, + crudClient client.Client, +) (*unstructured.Unstructured, error) { subscription := &unstructured.Unstructured{} subscription.SetName("cloudnative-pg") subscription.SetNamespace("openshift-operators") @@ -43,13 +51,16 @@ func GetSubscription(env *TestingEnvironment) (*unstructured.Unstructured, error Version: "v1alpha1", Kind: "Subscription", }) - err := env.Client.Get(env.Ctx, ctrlclient.ObjectKeyFromObject(subscription), subscription) + err := crudClient.Get(ctx, client.ObjectKeyFromObject(subscription), subscription) return subscription, err } // GetSubscriptionVersion retrieves the current ClusterServiceVersion version of the operator -func GetSubscriptionVersion(env *TestingEnvironment) (string, error) { - subscription, err := GetSubscription(env) +func GetSubscriptionVersion( + ctx context.Context, + crudClient client.Client, +) (string, error) { + subscription, err := getSubscription(ctx, crudClient) if err != nil { return "", err } @@ -65,17 +76,21 @@ func GetSubscriptionVersion(env *TestingEnvironment) (string, error) { } // PatchStatusCondition removes status conditions on a given Cluster -func 
PatchStatusCondition(namespace, clusterName string, env *TestingEnvironment) error { +func PatchStatusCondition( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, +) error { cluster := &apiv1.Cluster{} var err error err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { - cluster, err = env.GetCluster(namespace, clusterName) + cluster, err = clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return err } clusterNoConditions := cluster.DeepCopy() clusterNoConditions.Status.Conditions = nil - return env.Client.Patch(env.Ctx, clusterNoConditions, ctrlclient.MergeFrom(cluster)) + return crudClient.Patch(ctx, clusterNoConditions, client.MergeFrom(cluster)) }) if err != nil { return err @@ -84,8 +99,8 @@ func PatchStatusCondition(namespace, clusterName string, env *TestingEnvironment } // GetOpenshiftVersion returns the current openshift version -func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) { - client, err := dynamic.NewForConfig(env.RestClientConfig) +func GetOpenshiftVersion(ctx context.Context, restConfig *rest.Config) (semver.Version, error) { + client, err := dynamic.NewForConfig(restConfig) if err != nil { return semver.Version{}, err } @@ -94,7 +109,7 @@ func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) { Group: "operator.openshift.io", Version: "v1", Resource: "openshiftcontrollermanagers", - }).Get(env.Ctx, "cluster", v1.GetOptions{}) + }).Get(ctx, "cluster", v1.GetOptions{}) if err != nil { return semver.Version{}, err } @@ -108,7 +123,11 @@ func GetOpenshiftVersion(env *TestingEnvironment) (semver.Version, error) { } // CreateSubscription creates a subscription object inside openshift with a fixed name -func CreateSubscription(env *TestingEnvironment, channel string) error { +func CreateSubscription( + ctx context.Context, + crudClient client.Client, + channel string, +) error { u := &unstructured.Unstructured{} u.SetName("cloudnative-pg") u.SetNamespace("openshift-operators") @@ -131,12 +150,15 @@ func CreateSubscription(env *TestingEnvironment, channel string) error { return err } - _, err = CreateObject(env, u) + _, err = objects.Create(ctx, crudClient, u) return err } // DeleteSubscription deletes the operator's subscription object -func DeleteSubscription(env *TestingEnvironment) error { +func DeleteSubscription( + ctx context.Context, + crudClient client.Client, +) error { u := &unstructured.Unstructured{} u.SetName("cloudnative-pg") u.SetNamespace("openshift-operators") @@ -146,7 +168,7 @@ func DeleteSubscription(env *TestingEnvironment) error { Kind: "Subscription", }) - err := DeleteObject(env, u) + err := objects.Delete(ctx, crudClient, u) if apierrors.IsNotFound(err) { return nil } @@ -155,7 +177,10 @@ func DeleteSubscription(env *TestingEnvironment) error { } // DeleteOperatorCRDs deletes the CRDs associated with the operator -func DeleteOperatorCRDs(env *TestingEnvironment) error { +func DeleteOperatorCRDs( + ctx context.Context, + crudClient client.Client, +) error { u := &unstructured.Unstructured{} u.SetName("clusters.postgresql.cnpg.io") u.SetGroupVersionKind(schema.GroupVersionKind{ @@ -163,22 +188,22 @@ func DeleteOperatorCRDs(env *TestingEnvironment) error { Version: "v1", Kind: "CustomResourceDefinition", }) - err := DeleteObject(env, u) + err := objects.Delete(ctx, crudClient, u) if !apierrors.IsNotFound(err) { return err } u.SetName("backups.postgresql.cnpg.io") - err = DeleteObject(env, u) + err = objects.Delete(ctx, crudClient, u) if 
!apierrors.IsNotFound(err) { return err } u.SetName("poolers.postgresql.cnpg.io") - err = DeleteObject(env, u) + err = objects.Delete(ctx, crudClient, u) if !apierrors.IsNotFound(err) { return err } u.SetName("scheduledbackups.postgresql.cnpg.io") - err = DeleteObject(env, u) + err = objects.Delete(ctx, crudClient, u) if apierrors.IsNotFound(err) { return nil } @@ -186,7 +211,10 @@ func DeleteOperatorCRDs(env *TestingEnvironment) error { } // DeleteCSV will delete all operator's CSVs -func DeleteCSV(env *TestingEnvironment) error { +func DeleteCSV( + ctx context.Context, + crudClient client.Client, +) error { ol := &unstructured.UnstructuredList{} ol.SetGroupVersionKind(schema.GroupVersionKind{ Group: "operators.coreos.com", @@ -196,12 +224,12 @@ func DeleteCSV(env *TestingEnvironment) error { labelSelector := labels.SelectorFromSet(map[string]string{ "operators.coreos.com/cloudnative-pg.openshift-operators": "", }) - err := GetObjectList(env, ol, ctrlclient.MatchingLabelsSelector{Selector: labelSelector}) + err := objects.List(ctx, crudClient, ol, client.MatchingLabelsSelector{Selector: labelSelector}) if err != nil { return err } for _, o := range ol.Items { - err = DeleteObject(env, &o) + err = objects.Delete(ctx, crudClient, &o) if err != nil { if apierrors.IsNotFound(err) { continue @@ -213,8 +241,12 @@ func DeleteCSV(env *TestingEnvironment) error { } // UpgradeSubscription patch an unstructured subscription object with target channel -func UpgradeSubscription(env *TestingEnvironment, channel string) error { - subscription, err := GetSubscription(env) +func UpgradeSubscription( + ctx context.Context, + crudClient client.Client, + channel string, +) error { + subscription, err := getSubscription(ctx, crudClient) if err != nil { return err } @@ -225,5 +257,5 @@ func UpgradeSubscription(env *TestingEnvironment, channel string) error { return err } - return env.Client.Patch(env.Ctx, newSubscription, ctrlclient.MergeFrom(subscription)) + return crudClient.Patch(ctx, newSubscription, client.MergeFrom(subscription)) } diff --git a/tests/utils/operator/doc.go b/tests/utils/operator/doc.go new file mode 100644 index 0000000000..a4e7050ee6 --- /dev/null +++ b/tests/utils/operator/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package operator provides functions to handle and manage the operator +package operator diff --git a/tests/utils/operator.go b/tests/utils/operator/operator.go similarity index 59% rename from tests/utils/operator.go rename to tests/utils/operator/operator.go index 7ab479b09c..ca705e9e41 100644 --- a/tests/utils/operator.go +++ b/tests/utils/operator/operator.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package operator import ( "bufio" + "context" "encoding/json" "fmt" "os" @@ -31,30 +32,40 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" ) -// ReloadOperatorDeployment finds and deletes the operator pod. Returns +// ReloadDeployment finds and deletes the operator pod. Returns // error if the new pod is not ready within a defined timeout -func ReloadOperatorDeployment(env *TestingEnvironment, timeoutSeconds uint) error { - operatorPod, err := env.GetOperatorPod() +func ReloadDeployment( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + timeoutSeconds uint, +) error { + operatorPod, err := GetPod(ctx, crudClient) if err != nil { return err } zero := int64(0) - err = env.Client.Delete(env.Ctx, &operatorPod, - &ctrlclient.DeleteOptions{GracePeriodSeconds: &zero}, + err = crudClient.Delete(ctx, &operatorPod, + &client.DeleteOptions{GracePeriodSeconds: &zero}, ) if err != nil { return err } err = retry.Do( func() error { - ready, err := env.IsOperatorReady() + ready, err := IsReady(ctx, crudClient, kubeInterface) if err != nil { return err } @@ -69,8 +80,8 @@ func ReloadOperatorDeployment(env *TestingEnvironment, timeoutSeconds uint) erro return err } -// DumpOperator logs the JSON for the deployment in an operator namespace, its pods and endpoints -func (env TestingEnvironment) DumpOperator(namespace string, filename string) { +// Dump logs the JSON for the deployment in an operator namespace, its pods and endpoints +func Dump(ctx context.Context, crudClient client.Client, namespace, filename string) { f, err := os.Create(filepath.Clean(filename)) if err != nil { fmt.Println(err) @@ -78,12 +89,12 @@ func (env TestingEnvironment) DumpOperator(namespace string, filename string) { } w := bufio.NewWriter(f) - deployment, _ := env.GetOperatorDeployment() + deployment, _ := GetDeployment(ctx, crudClient) out, _ := json.MarshalIndent(deployment, "", " ") _, _ = fmt.Fprintf(w, "Dumping %v/%v deployment\n", namespace, deployment.Name) _, _ = fmt.Fprintln(w, string(out)) - podList, _ := env.GetPodList(namespace) + podList, _ := pods.List(ctx, crudClient, namespace) for _, pod := range podList.Items { out, _ := json.MarshalIndent(pod, "", " ") _, _ = fmt.Fprintf(w, "Dumping %v/%v pod\n", namespace, pod.Name) @@ -99,11 +110,11 @@ func (env TestingEnvironment) DumpOperator(namespace string, filename string) { _ = f.Close() } -// GetOperatorDeployment returns the operator Deployment if there is a single one running, error otherwise -func (env TestingEnvironment) 
GetOperatorDeployment() (appsv1.Deployment, error) { +// GetDeployment returns the operator Deployment if there is a single one running, error otherwise +func GetDeployment(ctx context.Context, crudClient client.Client) (appsv1.Deployment, error) { deploymentList := &appsv1.DeploymentList{} - if err := GetObjectList(&env, deploymentList, - ctrlclient.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}, + if err := objects.List(ctx, crudClient, deploymentList, + client.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}, ); err != nil { return appsv1.Deployment{}, err } @@ -116,10 +127,11 @@ func (env TestingEnvironment) GetOperatorDeployment() (appsv1.Deployment, error) return deploymentList.Items[0], nil } - if err := GetObjectList( - &env, + if err := objects.List( + ctx, + crudClient, deploymentList, - ctrlclient.HasLabels{"operators.coreos.com/cloudnative-pg.openshift-operators"}, + client.HasLabels{"operators.coreos.com/cloudnative-pg.openshift-operators"}, ); err != nil { return appsv1.Deployment{}, err } @@ -136,14 +148,15 @@ func (env TestingEnvironment) GetOperatorDeployment() (appsv1.Deployment, error) return deploymentList.Items[0], nil } -// GetOperatorPod returns the operator pod if there is a single one running, error otherwise -func (env TestingEnvironment) GetOperatorPod() (corev1.Pod, error) { +// GetPod returns the operator pod if there is a single one running, error otherwise +func GetPod(ctx context.Context, crudClient client.Client) (corev1.Pod, error) { podList := &corev1.PodList{} // This will work for newer version of the operator, which are using // our custom label - if err := GetObjectList( - &env, podList, ctrlclient.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}); err != nil { + if err := objects.List( + ctx, crudClient, + podList, client.MatchingLabels{"app.kubernetes.io/name": "cloudnative-pg"}); err != nil { return corev1.Pod{}, err } activePods := utils.FilterActivePods(podList.Items) @@ -156,17 +169,17 @@ func (env TestingEnvironment) GetOperatorPod() (corev1.Pod, error) { return activePods[0], nil } - operatorNamespace, err := env.GetOperatorNamespaceName() + operatorNamespace, err := NamespaceName(ctx, crudClient) if err != nil { return corev1.Pod{}, err } // This will work for older version of the operator, which are using // the default label from kube-builder - if err := GetObjectList( - &env, podList, - ctrlclient.MatchingLabels{"control-plane": "controller-manager"}, - ctrlclient.InNamespace(operatorNamespace)); err != nil { + if err := objects.List( + ctx, crudClient, podList, + client.MatchingLabels{"control-plane": "controller-manager"}, + client.InNamespace(operatorNamespace)); err != nil { return corev1.Pod{}, err } activePods = utils.FilterActivePods(podList.Items) @@ -178,18 +191,22 @@ func (env TestingEnvironment) GetOperatorPod() (corev1.Pod, error) { return podList.Items[0], nil } -// GetOperatorNamespaceName returns the namespace the operator Deployment is running in -func (env TestingEnvironment) GetOperatorNamespaceName() (string, error) { - deployment, err := env.GetOperatorDeployment() +// NamespaceName returns the namespace the operator Deployment is running in +func NamespaceName(ctx context.Context, crudClient client.Client) (string, error) { + deployment, err := GetDeployment(ctx, crudClient) if err != nil { return "", err } return deployment.GetNamespace(), err } -// IsOperatorReady ensures that the operator will be ready. 
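For review context, a sketch of the new calling convention: the former TestingEnvironment methods become package-level functions with explicit dependencies. The crudClient and kubeInterface parameters are assumed to come from the suite setup; this is illustrative, not part of the patch.

package example

import (
	"context"

	. "github.com/onsi/gomega"
	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
)

// waitForOperator blocks until the operator answers readiness checks,
// polling operator.IsReady for up to 120 seconds.
func waitForOperator(ctx context.Context, crudClient client.Client, kubeInterface kubernetes.Interface) {
	Eventually(func() (bool, error) {
		return operator.IsReady(ctx, crudClient, kubeInterface)
	}, 120).Should(BeTrue())
}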
-func (env TestingEnvironment) IsOperatorReady() (bool, error) { - pod, err := env.GetOperatorPod() +// IsReady ensures that the operator will be ready. +func IsReady( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, +) (bool, error) { + pod, err := GetPod(ctx, crudClient) if err != nil { return false, err } @@ -211,7 +228,7 @@ func (env TestingEnvironment) IsOperatorReady() (bool, error) { // If the operator is managing certificates for webhooks, check that the setup is completed if !webhookManagedByOLM { - err = CheckWebhookReady(&env, namespace) + err = checkWebhookReady(ctx, crudClient, kubeInterface, namespace) if err != nil { return false, err } @@ -230,7 +247,12 @@ func (env TestingEnvironment) IsOperatorReady() (bool, error) { }, }, } - _, err = CreateObject(&env, testCluster, &ctrlclient.CreateOptions{DryRun: []string{metav1.DryRunAll}}) + _, err = objects.Create( + ctx, + crudClient, + testCluster, + &client.CreateOptions{DryRun: []string{metav1.DryRunAll}}, + ) if err != nil { return false, err } @@ -238,11 +260,11 @@ func (env TestingEnvironment) IsOperatorReady() (bool, error) { return true, err } -// IsOperatorDeploymentReady returns true if the operator deployment has the expected number +// IsDeploymentReady returns true if the operator deployment has the expected number // of ready pods. // It returns an error if there was a problem getting the operator deployment -func (env *TestingEnvironment) IsOperatorDeploymentReady() (bool, error) { - operatorDeployment, err := env.GetOperatorDeployment() +func IsDeploymentReady(ctx context.Context, crudClient client.Client) (bool, error) { + operatorDeployment, err := GetDeployment(ctx, crudClient) if err != nil { return false, err } @@ -257,8 +279,8 @@ func (env *TestingEnvironment) IsOperatorDeploymentReady() (bool, error) { } // ScaleOperatorDeployment will scale the operator to n replicas and return error in case of failure -func (env *TestingEnvironment) ScaleOperatorDeployment(replicas int32) error { - operatorDeployment, err := env.GetOperatorDeployment() +func ScaleOperatorDeployment(ctx context.Context, crudClient client.Client, replicas int32) error { + operatorDeployment, err := GetDeployment(ctx, crudClient) if err != nil { return err } @@ -267,14 +289,14 @@ func (env *TestingEnvironment) ScaleOperatorDeployment(replicas int32) error { updatedOperatorDeployment.Spec.Replicas = ptr.To(replicas) // Scale down operator deployment to zero replicas - err = env.Client.Patch(env.Ctx, &updatedOperatorDeployment, ctrlclient.MergeFrom(&operatorDeployment)) + err = crudClient.Patch(ctx, &updatedOperatorDeployment, client.MergeFrom(&operatorDeployment)) if err != nil { return err } return retry.Do( func() error { - _, err := env.IsOperatorDeploymentReady() + _, err := IsDeploymentReady(ctx, crudClient) return err }, retry.Delay(time.Second), @@ -282,13 +304,13 @@ func (env *TestingEnvironment) ScaleOperatorDeployment(replicas int32) error { ) } -// OperatorPodRenamed checks if the operator pod was renamed -func OperatorPodRenamed(operatorPod corev1.Pod, expectedOperatorPodName string) bool { +// PodRenamed checks if the operator pod was renamed +func PodRenamed(operatorPod corev1.Pod, expectedOperatorPodName string) bool { return operatorPod.GetName() != expectedOperatorPodName } -// OperatorPodRestarted checks if the operator pod was restarted -func OperatorPodRestarted(operatorPod corev1.Pod) bool { +// PodRestarted checks if the operator pod was restarted +func PodRestarted(operatorPod 
corev1.Pod) bool { restartCount := 0 for _, containerStatus := range operatorPod.Status.ContainerStatuses { if containerStatus.Name == "manager" { @@ -298,10 +320,10 @@ func OperatorPodRestarted(operatorPod corev1.Pod) bool { return restartCount != 0 } -// GetOperatorPodName returns the name of the current operator pod +// GetPodName returns the name of the current operator pod // NOTE: will return an error if the pod is being deleted -func GetOperatorPodName(env *TestingEnvironment) (string, error) { - pod, err := env.GetOperatorPod() +func GetPodName(ctx context.Context, crudClient client.Client) (string, error) { + pod, err := GetPod(ctx, crudClient) if err != nil { return "", err } @@ -312,16 +334,16 @@ func GetOperatorPodName(env *TestingEnvironment) (string, error) { return pod.GetName(), nil } -// HasOperatorBeenUpgraded determines if the operator has been upgraded by checking +// HasBeenUpgraded determines if the operator has been upgraded by checking // if there is a deletion timestamp. If there isn't, it returns true -func HasOperatorBeenUpgraded(env *TestingEnvironment) bool { - _, err := GetOperatorPodName(env) +func HasBeenUpgraded(ctx context.Context, crudClient client.Client) bool { + _, err := GetPodName(ctx, crudClient) return err == nil } -// GetOperatorVersion returns the current operator version -func GetOperatorVersion(namespace, podName string) (string, error) { - out, _, err := RunUnchecked(fmt.Sprintf( +// Version returns the current operator version +func Version(namespace, podName string) (string, error) { + out, _, err := run.Unchecked(fmt.Sprintf( "kubectl -n %v exec %v -c manager -- /manager version", namespace, podName, @@ -334,9 +356,9 @@ func GetOperatorVersion(namespace, podName string) (string, error) { return ver, nil } -// GetOperatorArchitectures returns all the supported operator architectures -func GetOperatorArchitectures(operatorPod *corev1.Pod) ([]string, error) { - out, _, err := RunUnchecked(fmt.Sprintf( +// Architectures returns all the supported operator architectures +func Architectures(operatorPod *corev1.Pod) ([]string, error) { + out, _, err := run.Unchecked(fmt.Sprintf( "kubectl -n %v exec %v -c manager -- /manager debug show-architectures", operatorPod.Namespace, operatorPod.Name, @@ -354,3 +376,17 @@ func GetOperatorArchitectures(operatorPod *corev1.Pod) ([]string, error) { return res, err } + +// GetLeaderInfoFromLease gathers leader holderIdentity from the lease +func GetLeaderInfoFromLease( + ctx context.Context, + kubeInterface kubernetes.Interface, + operatorNamespace string, +) (string, error) { + leaseInterface := kubeInterface.CoordinationV1().Leases(operatorNamespace) + lease, err := leaseInterface.Get(ctx, controller.LeaderElectionID, metav1.GetOptions{}) + if err != nil { + return "", err + } + return *lease.Spec.HolderIdentity, nil +} diff --git a/tests/utils/release.go b/tests/utils/operator/release.go similarity index 97% rename from tests/utils/release.go rename to tests/utils/operator/release.go index 6b480e1957..af372f0ffb 100644 --- a/tests/utils/release.go +++ b/tests/utils/operator/release.go @@ -14,8 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package utils contains helper functions/methods for e2e -package utils +package operator import ( "errors" diff --git a/tests/utils/release_test.go b/tests/utils/operator/release_test.go similarity index 95% rename from tests/utils/release_test.go rename to tests/utils/operator/release_test.go index a65ea5b58f..611141572e 100644 --- a/tests/utils/release_test.go +++ b/tests/utils/operator/release_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package operator import ( "os" @@ -27,6 +27,8 @@ import ( . "github.com/onsi/gomega" ) +const releaseDirectoryPath = "../../../releases" + var _ = Describe("Release tag extraction", func() { It("properly works with expected filename", func() { tag, err := extractTag("cnpg-0.5.0.yaml") @@ -42,7 +44,7 @@ var _ = Describe("Release tag extraction", func() { var _ = Describe("Most recent tag", func() { It("properly works with release branch", func() { - releasesDir, err := filepath.Abs("../../releases") + releasesDir, err := filepath.Abs(releaseDirectoryPath) Expect(err).ToNot(HaveOccurred()) versionList, err := GetAvailableReleases(releasesDir) @@ -60,7 +62,7 @@ var _ = Describe("Most recent tag", func() { }) It("properly works with dev branch", func() { - releasesDir, err := filepath.Abs("../../releases") + releasesDir, err := filepath.Abs(releaseDirectoryPath) Expect(err).ToNot(HaveOccurred()) GinkgoT().Setenv("BRANCH_NAME", "dev/"+versions.Version) diff --git a/tests/utils/monitoring.go b/tests/utils/operator/suite_test.go similarity index 51% rename from tests/utils/monitoring.go rename to tests/utils/operator/suite_test.go index 2a7c12b3b4..b49f44d833 100644 --- a/tests/utils/monitoring.go +++ b/tests/utils/operator/suite_test.go @@ -14,24 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package operator import ( - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "k8s.io/apimachinery/pkg/types" -) + "testing" -// GetPodMonitor gathers the current PodMonitor in a namespace -func (env TestingEnvironment) GetPodMonitor(namespace string, name string) (*monitoringv1.PodMonitor, error) { - podMonitor := &monitoringv1.PodMonitor{} - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: name, - } + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) - err := GetObject(&env, namespacedName, podMonitor) - if err != nil { - return nil, err - } - return podMonitor, nil +func TestUtils(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Utils test release suite") } diff --git a/tests/utils/upgrade.go b/tests/utils/operator/upgrade.go similarity index 70% rename from tests/utils/upgrade.go rename to tests/utils/operator/upgrade.go index c8fa832ffd..e8a2e7af21 100644 --- a/tests/utils/upgrade.go +++ b/tests/utils/operator/upgrade.go @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package utils +// Package operator provide functions to handle operator install/uninstall process +package operator import ( + "context" "fmt" corev1 "k8s.io/api/core/v1" @@ -24,25 +26,34 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/namespaces" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" . "github.com/onsi/ginkgo/v2" // nolint . "github.com/onsi/gomega" // nolint ) -// CreateOperatorConfigurationMap creates the operator namespace and enables/disable the online upgrade for +// CreateConfigMap creates the operator namespace and enables/disable the online upgrade for // the instance manager -func CreateOperatorConfigurationMap(pgOperatorNamespace, configName string, isOnline bool, env *TestingEnvironment) { +func CreateConfigMap( + ctx context.Context, + crudClient client.Client, + pgOperatorNamespace, configName string, + isOnline bool, +) { By("creating operator namespace", func() { // Create a upgradeNamespace for all the resources namespacedName := types.NamespacedName{ Name: pgOperatorNamespace, } namespaceResource := &corev1.Namespace{} - err := env.Client.Get(env.Ctx, namespacedName, namespaceResource) + err := crudClient.Get(ctx, namespacedName, namespaceResource) if apierrors.IsNotFound(err) { - err = env.CreateNamespace(pgOperatorNamespace) + err = namespaces.CreateNamespace(ctx, crudClient, pgOperatorNamespace) Expect(err).ToNot(HaveOccurred()) } else if err != nil { Expect(err).ToNot(HaveOccurred()) @@ -61,19 +72,22 @@ func CreateOperatorConfigurationMap(pgOperatorNamespace, configName string, isOn }, Data: map[string]string{"ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES": enable}, } - _, err := CreateObject(env, configMap) + _, err := objects.Create(ctx, crudClient, configMap) Expect(err).NotTo(HaveOccurred()) }) } -// InstallLatestCNPGOperator installs an operator version with the most recent release tag -func InstallLatestCNPGOperator(releaseTag string, env *TestingEnvironment) { +// InstallLatest installs an operator version with the most recent release tag +func InstallLatest( + crudClient client.Client, + releaseTag string, +) { mostRecentReleasePath := "../../releases/cnpg-" + releaseTag + ".yaml" Eventually(func() error { GinkgoWriter.Printf("installing: %s\n", mostRecentReleasePath) - _, stderr, err := RunUnchecked("kubectl apply --server-side --force-conflicts -f " + mostRecentReleasePath) + _, stderr, err := run.Unchecked("kubectl apply --server-side --force-conflicts -f " + mostRecentReleasePath) if err != nil { GinkgoWriter.Printf("stderr: %s\n", stderr) } @@ -82,14 +96,14 @@ func InstallLatestCNPGOperator(releaseTag string, env *TestingEnvironment) { }, 60).ShouldNot(HaveOccurred()) Eventually(func() error { - _, _, err := RunUnchecked( + _, _, err := run.Unchecked( "kubectl wait --for condition=established --timeout=60s " + "crd/clusters.postgresql.cnpg.io") return err }, 150).ShouldNot(HaveOccurred()) Eventually(func() error { - mapping, err := env.Client.RESTMapper().RESTMapping( + mapping, err := crudClient.RESTMapper().RESTMapping( schema.GroupKind{Group: apiv1.GroupVersion.Group, Kind: apiv1.ClusterKind}, apiv1.GroupVersion.Version) if err != nil { @@ -102,7 +116,7 @@ func InstallLatestCNPGOperator(releaseTag string, env *TestingEnvironment) { 
}, 150).ShouldNot(HaveOccurred()) Eventually(func() error { - _, _, err := RunUnchecked( + _, _, err := run.Unchecked( "kubectl wait --for=condition=Available --timeout=2m -n cnpg-system " + "deployments cnpg-controller-manager") return err diff --git a/tests/utils/webhooks.go b/tests/utils/operator/webhooks.go similarity index 65% rename from tests/utils/webhooks.go rename to tests/utils/operator/webhooks.go index 210e541aa2..b4d94462a5 100644 --- a/tests/utils/webhooks.go +++ b/tests/utils/operator/webhooks.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package operator import ( "bytes" @@ -25,17 +25,24 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" ) -// GetCNPGsMutatingWebhookByName get the MutatingWebhook filtered by the name of one +// GetMutatingWebhookByName get the MutatingWebhook filtered by the name of one // of the webhooks -func GetCNPGsMutatingWebhookByName(env *TestingEnvironment, name string) ( +func GetMutatingWebhookByName( + ctx context.Context, + crudClient client.Client, + name string, +) ( *admissionregistrationv1.MutatingWebhookConfiguration, int, error, ) { var mWebhooks admissionregistrationv1.MutatingWebhookConfigurationList - err := GetObjectList(env, &mWebhooks) + err := objects.List(ctx, crudClient, &mWebhooks) if err != nil { return nil, 0, err } @@ -50,12 +57,13 @@ func GetCNPGsMutatingWebhookByName(env *TestingEnvironment, name string) ( return nil, 0, fmt.Errorf("mutating webhook not found") } -// UpdateCNPGsMutatingWebhookConf update MutatingWebhookConfiguration object -func UpdateCNPGsMutatingWebhookConf(env *TestingEnvironment, +// UpdateMutatingWebhookConf update MutatingWebhookConfiguration object +func UpdateMutatingWebhookConf( + ctx context.Context, + kubeInterface kubernetes.Interface, wh *admissionregistrationv1.MutatingWebhookConfiguration, ) error { - ctx := context.Background() - _, err := env.Interface.AdmissionregistrationV1(). + _, err := kubeInterface.AdmissionregistrationV1(). 
MutatingWebhookConfigurations().Update(ctx, wh, metav1.UpdateOptions{}) if err != nil { return err @@ -63,12 +71,12 @@ func UpdateCNPGsMutatingWebhookConf(env *TestingEnvironment, return nil } -// GetCNPGsValidatingWebhookConf get the ValidatingWebhook linked to the operator -func GetCNPGsValidatingWebhookConf(env *TestingEnvironment) ( +// getCNPGsValidatingWebhookConf get the ValidatingWebhook linked to the operator +func getCNPGsValidatingWebhookConf(kubeInterface kubernetes.Interface) ( *admissionregistrationv1.ValidatingWebhookConfiguration, error, ) { ctx := context.Background() - validatingWebhookConfig, err := env.Interface.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get( + validatingWebhookConfig, err := kubeInterface.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get( ctx, controller.ValidatingWebhookConfigurationName, metav1.GetOptions{}) if err != nil { return nil, err @@ -76,13 +84,17 @@ func GetCNPGsValidatingWebhookConf(env *TestingEnvironment) ( return validatingWebhookConfig, nil } -// GetCNPGsValidatingWebhookByName get ValidatingWebhook by the name of one +// GetValidatingWebhookByName get ValidatingWebhook by the name of one // of the webhooks -func GetCNPGsValidatingWebhookByName(env *TestingEnvironment, name string) ( +func GetValidatingWebhookByName( + ctx context.Context, + crudClient client.Client, + name string, +) ( *admissionregistrationv1.ValidatingWebhookConfiguration, int, error, ) { var vWebhooks admissionregistrationv1.ValidatingWebhookConfigurationList - err := GetObjectList(env, &vWebhooks) + err := objects.List(ctx, crudClient, &vWebhooks) if err != nil { return nil, 0, err } @@ -97,12 +109,13 @@ func GetCNPGsValidatingWebhookByName(env *TestingEnvironment, name string) ( return nil, 0, fmt.Errorf("validating webhook not found") } -// UpdateCNPGsValidatingWebhookConf update the ValidatingWebhook object -func UpdateCNPGsValidatingWebhookConf(env *TestingEnvironment, +// UpdateValidatingWebhookConf update the ValidatingWebhook object +func UpdateValidatingWebhookConf( + ctx context.Context, + kubeInterface kubernetes.Interface, wh *admissionregistrationv1.ValidatingWebhookConfiguration, ) error { - ctx := context.Background() - _, err := env.Interface.AdmissionregistrationV1(). + _, err := kubeInterface.AdmissionregistrationV1(). ValidatingWebhookConfigurations().Update(ctx, wh, metav1.UpdateOptions{}) if err != nil { return err @@ -110,22 +123,27 @@ func UpdateCNPGsValidatingWebhookConf(env *TestingEnvironment, return nil } -// CheckWebhookReady ensures that the operator has finished the webhook setup. -func CheckWebhookReady(env *TestingEnvironment, namespace string) error { +// checkWebhookReady ensures that the operator has finished the webhook setup. 
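A sketch of how a spec can combine these helpers to fetch one webhook entry and write the configuration back; the webhook name and the mutated field are placeholders, not values mandated by this patch:

package example

import (
	"context"

	. "github.com/onsi/gomega"
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator"
)

// flipFailurePolicy fetches a single webhook entry by name (a placeholder
// name is used here) and updates its failure policy in place.
func flipFailurePolicy(ctx context.Context, crudClient client.Client, kubeInterface kubernetes.Interface) {
	whConf, idx, err := operator.GetMutatingWebhookByName(ctx, crudClient, "mcluster.example.invalid")
	Expect(err).ToNot(HaveOccurred())
	whConf.Webhooks[idx].FailurePolicy = ptr.To(admissionregistrationv1.Fail)
	Expect(operator.UpdateMutatingWebhookConf(ctx, kubeInterface, whConf)).To(Succeed())
}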
+func checkWebhookReady( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + namespace string, +) error { // Check CA secret := &corev1.Secret{} secretNamespacedName := types.NamespacedName{ Namespace: namespace, Name: controller.WebhookSecretName, } - err := GetObject(env, secretNamespacedName, secret) + err := objects.Get(ctx, crudClient, secretNamespacedName, secret) if err != nil { return err } ca := secret.Data["tls.crt"] - mutatingWebhookConfig, err := env.GetCNPGsMutatingWebhookConf() + mutatingWebhookConfig, err := getCNPGsMutatingWebhookConf(ctx, kubeInterface) if err != nil { return err } @@ -137,7 +155,7 @@ func CheckWebhookReady(env *TestingEnvironment, namespace string) error { } } - validatingWebhookConfig, err := GetCNPGsValidatingWebhookConf(env) + validatingWebhookConfig, err := getCNPGsValidatingWebhookConf(kubeInterface) if err != nil { return err } @@ -152,12 +170,14 @@ func CheckWebhookReady(env *TestingEnvironment, namespace string) error { return nil } -// GetCNPGsMutatingWebhookConf get the MutatingWebhook linked to the operator -func (env TestingEnvironment) GetCNPGsMutatingWebhookConf() ( +// getCNPGsMutatingWebhookConf get the MutatingWebhook linked to the operator +func getCNPGsMutatingWebhookConf( + ctx context.Context, + kubeInterface kubernetes.Interface, +) ( *admissionregistrationv1.MutatingWebhookConfiguration, error, ) { - ctx := context.Background() - return env.Interface.AdmissionregistrationV1(). + return kubeInterface.AdmissionregistrationV1(). MutatingWebhookConfigurations(). Get(ctx, controller.MutatingWebhookConfigurationName, metav1.GetOptions{}) } diff --git a/tests/utils/pod.go b/tests/utils/pod.go deleted file mode 100644 index e439d0e00f..0000000000 --- a/tests/utils/pod.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "bytes" - "errors" - "fmt" - "io" - "regexp" - "strings" - "time" - - "github.com/avast/retry-go/v4" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - pkgutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - - . 
"github.com/onsi/gomega" // nolint -) - -// PodCreateAndWaitForReady creates a given pod object and wait for it to be ready -func PodCreateAndWaitForReady(env *TestingEnvironment, pod *corev1.Pod, timeoutSeconds uint) error { - _, err := CreateObject(env, pod) - if err != nil { - return err - } - return PodWaitForReady(env, pod, timeoutSeconds) -} - -// PodWaitForReady waits for a pod to be ready -func PodWaitForReady(env *TestingEnvironment, pod *corev1.Pod, timeoutSeconds uint) error { - err := retry.Do( - func() error { - if err := env.Client.Get(env.Ctx, client.ObjectKey{ - Namespace: pod.Namespace, - Name: pod.Name, - }, pod); err != nil { - return err - } - if !pkgutils.IsPodReady(*pod) { - return fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name) - } - return nil - }, - retry.Attempts(timeoutSeconds), - retry.Delay(time.Second), - retry.DelayType(retry.FixedDelay), - ) - return err -} - -// PodHasLabels verifies that the labels of a pod contain a specified -// labels map -func PodHasLabels(pod corev1.Pod, labels map[string]string) bool { - podLabels := pod.Labels - for k, v := range labels { - val, ok := podLabels[k] - if !ok || (v != val) { - return false - } - } - return true -} - -// PodHasAnnotations verifies that the annotations of a pod contain a specified -// annotations map -func PodHasAnnotations(pod corev1.Pod, annotations map[string]string) bool { - podAnnotations := pod.Annotations - for k, v := range annotations { - val, ok := podAnnotations[k] - if !ok || (v != val) { - return false - } - } - return true -} - -// PodHasCondition verifies that a pod has a specified condition -func PodHasCondition(pod *corev1.Pod, conditionType corev1.PodConditionType, status corev1.ConditionStatus) bool { - for _, cond := range pod.Status.Conditions { - if cond.Type == conditionType && cond.Status == status { - return true - } - } - return false -} - -// DeletePod deletes a pod if existent -func (env TestingEnvironment) DeletePod(namespace string, name string, opts ...client.DeleteOption) error { - u := &unstructured.Unstructured{} - u.SetName(name) - u.SetNamespace(namespace) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "Pod", - }) - - return DeleteObject(&env, u, opts...) 
-} - -// GetPodLogs gathers pod logs -func (env TestingEnvironment) GetPodLogs(namespace string, podName string) (string, error) { - req := env.Interface.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{}) - podLogs, err := req.Stream(env.Ctx) - if err != nil { - return "", err - } - defer func() { - innerErr := podLogs.Close() - if err == nil && innerErr != nil { - err = innerErr - } - }() - - // Create a buffer to hold JSON data - buf := new(bytes.Buffer) - _, err = io.Copy(buf, podLogs) - if err != nil { - return "", err - } - return buf.String(), nil -} - -// GetPodList gathers the current list of pods in a namespace -func (env TestingEnvironment) GetPodList(namespace string) (*corev1.PodList, error) { - podList := &corev1.PodList{} - err := GetObjectList( - &env, podList, client.InNamespace(namespace), - ) - return podList, err -} - -// GetManagerVersion returns the current manager version of a given pod -func GetManagerVersion(namespace, podName string) (string, error) { - out, _, err := RunUnchecked(fmt.Sprintf( - "kubectl -n %v exec %v -c postgres -- /controller/manager version", - namespace, - podName, - )) - if err != nil { - return "", err - } - versionRegexp := regexp.MustCompile(`^Build: {Version:(\d+.*) Commit.*}$`) - ver := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))[1] - return ver, nil -} - -// GetPod gets a pod by namespace and name -func (env TestingEnvironment) GetPod(namespace, podName string) (*corev1.Pod, error) { - wrapErr := func(err error) error { - return fmt.Errorf("while getting pod '%s/%s': %w", namespace, podName, err) - } - podList, err := env.GetPodList(namespace) - if err != nil { - return nil, wrapErr(err) - } - for _, pod := range podList.Items { - if podName == pod.Name { - return &pod, nil - } - } - return nil, wrapErr(errors.New("pod not found")) -} - -// ContainerLocator contains the necessary data to find a container on a pod -type ContainerLocator struct { - Namespace string - PodName string - ContainerName string -} - -// ExecCommandInContainer executes commands in a given instance pod, in the -// postgres container -func (env TestingEnvironment) ExecCommandInContainer( - container ContainerLocator, - timeout *time.Duration, - command ...string, -) (string, string, error) { - wrapErr := func(err error) error { - return fmt.Errorf("while executing command in pod '%s/%s': %w", - container.Namespace, container.PodName, err) - } - pod, err := env.GetPod(container.Namespace, container.PodName) - if err != nil { - return "", "", wrapErr(err) - } - if !pkgutils.IsPodReady(*pod) { - return "", "", fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name) - } - return env.ExecCommand(env.Ctx, *pod, container.ContainerName, timeout, command...) -} - -// PodLocator contains the necessary data to find a pod -type PodLocator struct { - Namespace string - PodName string -} - -// ExecCommandInInstancePod executes commands in a given instance pod, in the -// postgres container -func (env TestingEnvironment) ExecCommandInInstancePod( - podLocator PodLocator, - timeout *time.Duration, - command ...string, -) (string, string, error) { - return env.ExecCommandInContainer( - ContainerLocator{ - Namespace: podLocator.Namespace, - PodName: podLocator.PodName, - ContainerName: specs.PostgresContainerName, - }, timeout, command...) 
-} - -// DatabaseName is a special type for the database argument in an Exec call -type DatabaseName string - -// ExecQueryInInstancePod executes a query in an instance pod, by connecting to the pod -// and the postgres container, and using a local connection with the postgres user -func (env TestingEnvironment) ExecQueryInInstancePod( - podLocator PodLocator, - dbname DatabaseName, - query string, -) (string, string, error) { - timeout := time.Second * 10 - return env.ExecCommandInInstancePod( - PodLocator{ - Namespace: podLocator.Namespace, - PodName: podLocator.PodName, - }, &timeout, "psql", "-U", "postgres", string(dbname), "-tAc", query) -} - -// EventuallyExecQueryInInstancePod wraps ExecQueryInInstancePod with an Eventually clause -func (env TestingEnvironment) EventuallyExecQueryInInstancePod( - podLocator PodLocator, - dbname DatabaseName, - query string, - retryTimeout int, - pollingTime int, -) (string, string, error) { - var stdOut, stdErr string - var err error - - Eventually(func() error { - stdOut, stdErr, err = env.ExecQueryInInstancePod( - PodLocator{ - Namespace: podLocator.Namespace, - PodName: podLocator.PodName, - }, dbname, query) - return err - }, retryTimeout, pollingTime).Should(Succeed()) - - return stdOut, stdErr, err -} diff --git a/tests/utils/pods/pod.go b/tests/utils/pods/pod.go new file mode 100644 index 0000000000..1db187e89d --- /dev/null +++ b/tests/utils/pods/pod.go @@ -0,0 +1,194 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package pods provides pod utilities to manage pods inside K8s +package pods + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/avast/retry-go/v4" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" +) + +// List gathers the current list of pods in a namespace +func List( + ctx context.Context, + crudClient client.Client, + namespace string, +) (*v1.PodList, error) { + podList := &v1.PodList{} + err := objects.List( + ctx, crudClient, podList, client.InNamespace(namespace), + ) + return podList, err +} + +// Delete deletes a pod if existent +func Delete( + ctx context.Context, + crudClient client.Client, + namespace, name string, + opts ...client.DeleteOption, +) error { + u := &unstructured.Unstructured{} + u.SetName(name) + u.SetNamespace(namespace) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + + return objects.Delete(ctx, crudClient, u, opts...) 
+}
+
+// CreateAndWaitForReady creates a given pod object and waits for it to be ready
+func CreateAndWaitForReady(
+	ctx context.Context,
+	crudClient client.Client,
+	pod *v1.Pod,
+	timeoutSeconds uint,
+) error {
+	_, err := objects.Create(ctx, crudClient, pod)
+	if err != nil {
+		return err
+	}
+	return waitForReady(ctx, crudClient, pod, timeoutSeconds)
+}
+
+// waitForReady waits for a pod to be ready
+func waitForReady(
+	ctx context.Context,
+	crudClient client.Client,
+	pod *v1.Pod,
+	timeoutSeconds uint,
+) error {
+	err := retry.Do(
+		func() error {
+			if err := crudClient.Get(ctx, client.ObjectKey{
+				Namespace: pod.Namespace,
+				Name:      pod.Name,
+			}, pod); err != nil {
+				return err
+			}
+			if !utils.IsPodReady(*pod) {
+				return fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name)
+			}
+			return nil
+		},
+		retry.Attempts(timeoutSeconds),
+		retry.Delay(time.Second),
+		retry.DelayType(retry.FixedDelay),
+	)
+	return err
+}
+
+// Logs gathers pod logs
+func Logs(
+	ctx context.Context,
+	kubeInterface kubernetes.Interface,
+	namespace, podName string,
+) (string, error) {
+	req := kubeInterface.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{})
+	podLogs, err := req.Stream(ctx)
+	if err != nil {
+		return "", err
+	}
+	defer func() {
+		innerErr := podLogs.Close()
+		if err == nil && innerErr != nil {
+			err = innerErr
+		}
+	}()
+
+	// Create a buffer to hold the log data
+	buf := new(bytes.Buffer)
+	_, err = io.Copy(buf, podLogs)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// Get gets a pod by namespace and name
+func Get(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, podName string,
+) (*v1.Pod, error) {
+	wrapErr := func(err error) error {
+		return fmt.Errorf("while getting pod '%s/%s': %w", namespace, podName, err)
+	}
+	podList, err := List(ctx, crudClient, namespace)
+	if err != nil {
+		return nil, wrapErr(err)
+	}
+	for _, pod := range podList.Items {
+		if podName == pod.Name {
+			return &pod, nil
+		}
+	}
+	return nil, wrapErr(errors.New("pod not found"))
+}
+
+// HasLabels verifies that the labels of a pod contain a specified
+// labels map
+func HasLabels(pod v1.Pod, labels map[string]string) bool {
+	podLabels := pod.Labels
+	for k, v := range labels {
+		val, ok := podLabels[k]
+		if !ok || (v != val) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAnnotations verifies that the annotations of a pod contain a specified
+// annotations map
+func HasAnnotations(pod v1.Pod, annotations map[string]string) bool {
+	podAnnotations := pod.Annotations
+	for k, v := range annotations {
+		val, ok := podAnnotations[k]
+		if !ok || (v != val) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasCondition verifies that a pod has a specified condition
+func HasCondition(pod *v1.Pod, conditionType v1.PodConditionType, status v1.ConditionStatus) bool {
+	for _, cond := range pod.Status.Conditions {
+		if cond.Type == conditionType && cond.Status == status {
+			return true
+		}
+	}
+	return false
+}
diff --git a/tests/utils/postgres.go b/tests/utils/postgres.go
deleted file mode 100644
index 9c4011c9f1..0000000000
--- a/tests/utils/postgres.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
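// A short usage sketch for the new pods helpers above. This is hypothetical
// test code: the namespace, pod spec, and timeout are illustrative
// assumptions, while the pods.* signatures come from the patch itself.
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods"
)

func createProbePod(ctx context.Context, crudClient client.Client) error {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "probe"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{Name: "probe", Image: "busybox", Command: []string{"sleep", "3600"}},
			},
		},
	}
	// Create the pod and allow up to 120 one-second retries for readiness
	if err := pods.CreateAndWaitForReady(ctx, crudClient, pod, 120); err != nil {
		return err
	}
	got, err := pods.Get(ctx, crudClient, "default", "probe")
	if err != nil {
		return err
	}
	fmt.Println("pod phase:", got.Status.Phase)
	return nil
}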
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "strconv" - "strings" - - corev1 "k8s.io/api/core/v1" -) - -const ( - // PGLocalSocketDir is the directory containing the PostgreSQL local socket - PGLocalSocketDir = "/controller/run" - // AppUser for app user - AppUser = "app" - // PostgresUser for postgres user - PostgresUser = "postgres" - // AppDBName database name app - AppDBName = "app" - // PostgresDBName database name postgres - PostgresDBName = "postgres" - // TablespaceDefaultName is the default tablespace location - TablespaceDefaultName = "pg_default" -) - -// CountReplicas counts the number of replicas attached to an instance -func CountReplicas(env *TestingEnvironment, pod *corev1.Pod) (int, error) { - query := "SELECT count(*) FROM pg_stat_replication" - stdOut, _, err := env.EventuallyExecQueryInInstancePod( - PodLocator{ - Namespace: pod.Namespace, - PodName: pod.Name, - }, AppDBName, - query, - RetryTimeout, - PollingTime, - ) - if err != nil { - return 0, nil - } - return strconv.Atoi(strings.Trim(stdOut, "\n")) -} diff --git a/tests/utils/postgres/doc.go b/tests/utils/postgres/doc.go new file mode 100644 index 0000000000..f394238a09 --- /dev/null +++ b/tests/utils/postgres/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package postgres provides functions to handle postgres in cnpg clusters +package postgres diff --git a/tests/utils/postgres/postgres.go b/tests/utils/postgres/postgres.go new file mode 100644 index 0000000000..5db3eb088d --- /dev/null +++ b/tests/utils/postgres/postgres.go @@ -0,0 +1,133 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package postgres
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/cloudnative-pg/machinery/pkg/image/reference"
+	"github.com/cloudnative-pg/machinery/pkg/postgres/version"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
+)
+
+const (
+	// PGLocalSocketDir is the directory containing the PostgreSQL local socket
+	PGLocalSocketDir = "/controller/run"
+	// AppUser for app user
+	AppUser = "app"
+	// PostgresUser for postgres user
+	PostgresUser = "postgres"
+	// AppDBName database name app
+	AppDBName = "app"
+	// PostgresDBName database name postgres
+	PostgresDBName = "postgres"
+	// TablespaceDefaultName is the default tablespace location
+	TablespaceDefaultName = "pg_default"
+)
+
+// CountReplicas counts the number of replicas attached to an instance
+func CountReplicas(
+	ctx context.Context,
+	crudClient client.Client,
+	kubeInterface kubernetes.Interface,
+	restConfig *rest.Config,
+	pod *corev1.Pod,
+	retryTimeout int,
+) (int, error) {
+	query := "SELECT count(*) FROM pg_stat_replication"
+	stdOut, _, err := exec.EventuallyExecQueryInInstancePod(
+		ctx, crudClient, kubeInterface, restConfig,
+		exec.PodLocator{
+			Namespace: pod.Namespace,
+			PodName:   pod.Name,
+		}, AppDBName,
+		query,
+		retryTimeout,
+		objects.PollingTime,
+	)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.Atoi(strings.Trim(stdOut, "\n"))
+}
+
+// GetCurrentTimestamp gets the current timestamp from the postgres server
+func GetCurrentTimestamp(
+	ctx context.Context,
+	crudClient client.Client,
+	kubeInterface kubernetes.Interface,
+	restConfig *rest.Config,
+	namespace, clusterName string,
+) (string, error) {
+	row, err := RunQueryRowOverForward(
+		ctx,
+		crudClient,
+		kubeInterface,
+		restConfig,
+		namespace,
+		clusterName,
+		AppDBName,
+		v1.ApplicationUserSecretSuffix,
+		"select TO_CHAR(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS.US');",
+	)
+	if err != nil {
+		return "", err
+	}
+
+	var currentTimestamp string
+	if err = row.Scan(&currentTimestamp); err != nil {
+		return "", err
+	}
+
+	return currentTimestamp, nil
+}
+
+// BumpPostgresImageMajorVersion returns a postgresImage incrementing the major version of the argument (if available)
+func BumpPostgresImageMajorVersion(postgresImage string) (string, error) {
+	imageReference := reference.New(postgresImage)
+
+	postgresImageVersion, err := version.FromTag(imageReference.Tag)
+	if err != nil {
+		return "", err
+	}
+
+	targetPostgresImageMajorVersionInt := postgresImageVersion.Major() + 1
+
+	defaultImageVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag)
+	if err != nil {
+		return "", err
+	}
+
+	if targetPostgresImageMajorVersionInt >= defaultImageVersion.Major() {
+		return postgresImage, nil
+	}
+
+	imageReference.Tag = fmt.Sprintf("%d", postgresImageVersion.Major()+1)
+
+	return imageReference.GetNormalizedName(), nil
+}
diff --git a/tests/utils/version_test.go b/tests/utils/postgres/postgres_test.go
similarity index 98%
rename from tests/utils/version_test.go
rename to tests/utils/postgres/postgres_test.go
index 64c7ca13e8..bc449cd4e5 100644
--- a/tests/utils/version_test.go
+++ b/tests/utils/postgres/postgres_test.go
@@ -14,7 +14,7 @@ See the License for the specific language
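// A sketch of calling the relocated postgres helpers with the unbundled
// TestingEnvironment dependencies passed explicitly. The primary pod and
// the retry timeout value (330) are illustrative assumptions.
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
)

func checkReplicas(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
	restConfig *rest.Config,
	primary *corev1.Pod,
) error {
	count, err := postgres.CountReplicas(ctx, crudClient, kubeInterface, restConfig, primary, 330)
	if err != nil {
		return err
	}
	fmt.Printf("%d replicas attached to %s/%s\n", count, primary.Namespace, primary.Name)
	return nil
}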
governing permissions and limitations under the License. */ -package utils +package postgres import ( "bytes" diff --git a/tests/utils/psql_connection.go b/tests/utils/postgres/psql_connection.go similarity index 79% rename from tests/utils/psql_connection.go rename to tests/utils/postgres/psql_connection.go index d3a24cc40a..051a9e234e 100644 --- a/tests/utils/psql_connection.go +++ b/tests/utils/postgres/psql_connection.go @@ -14,19 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +package postgres import ( + "context" "database/sql" "io" "time" "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/portforward" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/forwardconnection" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" ) // PSQLForwardConnection manages the creation of a port-forwarding to open a new database connection @@ -103,38 +109,50 @@ func startForwardConnection( // ForwardPSQLConnection simplifies the creation of forwarded connection to PostgreSQL cluster func ForwardPSQLConnection( - env *TestingEnvironment, + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, namespace, clusterName, dbname, secretSuffix string, ) (*PSQLForwardConnection, *sql.DB, error) { - user, pass, err := GetCredentials(clusterName, namespace, secretSuffix, env) + user, pass, err := secrets.GetCredentials(ctx, crudClient, clusterName, namespace, secretSuffix) if err != nil { return nil, nil, err } - return ForwardPSQLConnectionWithCreds(env, namespace, clusterName, dbname, user, pass) + return ForwardPSQLConnectionWithCreds( + ctx, + crudClient, + kubeInterface, + restConfig, + namespace, clusterName, dbname, user, pass, + ) } // ForwardPSQLConnectionWithCreds creates a forwarded connection to a PostgreSQL cluster // using the given credentials func ForwardPSQLConnectionWithCreds( - env *TestingEnvironment, + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, namespace, clusterName, dbname, userApp, passApp string, ) (*PSQLForwardConnection, *sql.DB, error) { - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return nil, nil, err } dialer, err := forwardconnection.NewDialer( - env.Interface, - env.RestClientConfig, + kubeInterface, + restConfig, namespace, cluster.Status.CurrentPrimary, ) @@ -159,7 +177,9 @@ func ForwardPSQLConnectionWithCreds( // ForwardPSQLServiceConnection creates a forwarded connection to a PostgreSQL service // using the given credentials func ForwardPSQLServiceConnection( - env *TestingEnvironment, + ctx context.Context, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, namespace, serviceName, dbname, @@ -167,9 +187,9 @@ func ForwardPSQLServiceConnection( passApp string, ) (*PSQLForwardConnection, *sql.DB, error) { dialer, portMap, err := forwardconnection.NewDialerFromService( - env.Ctx, - env.Interface, - env.RestClientConfig, + ctx, + kubeInterface, + restConfig, namespace, serviceName, ) @@ -187,7 +207,10 @@ func 
ForwardPSQLServiceConnection( // RunQueryRowOverForward runs QueryRow with a given query, returning the Row of the SQL command func RunQueryRowOverForward( - env *TestingEnvironment, + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, namespace, clusterName, dbname, @@ -195,7 +218,10 @@ func RunQueryRowOverForward( query string, ) (*sql.Row, error) { forward, conn, err := ForwardPSQLConnection( - env, + ctx, + crudClient, + kubeInterface, + restConfig, namespace, clusterName, dbname, @@ -214,7 +240,10 @@ func RunQueryRowOverForward( // RunExecOverForward runs Exec with a given query, returning the Result of the SQL command func RunExecOverForward( - env *TestingEnvironment, + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, namespace, clusterName, dbname, @@ -222,7 +251,10 @@ func RunExecOverForward( query string, ) (sql.Result, error) { forward, conn, err := ForwardPSQLConnection( - env, + ctx, + crudClient, + kubeInterface, + restConfig, namespace, clusterName, dbname, diff --git a/tests/utils/postgres/suite_test.go b/tests/utils/postgres/suite_test.go new file mode 100644 index 0000000000..70d4a52fcb --- /dev/null +++ b/tests/utils/postgres/suite_test.go @@ -0,0 +1,29 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package postgres + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestUtils(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Utils test postgres suite") +} diff --git a/tests/utils/proxy.go b/tests/utils/proxy/proxy.go similarity index 65% rename from tests/utils/proxy.go rename to tests/utils/proxy/proxy.go index d17b477eb4..f4e0aded9f 100644 --- a/tests/utils/proxy.go +++ b/tests/utils/proxy/proxy.go @@ -14,18 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. 
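// A sketch of opening a forwarded SQL connection with the refactored
// signature above. Namespace, cluster name, and the query are illustrative;
// the teardown of the returned forward handle is managed by the test
// helpers and is omitted here.
package example

import (
	"context"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres"
)

func queryThroughForward(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
	restConfig *rest.Config,
) (string, error) {
	forward, conn, err := postgres.ForwardPSQLConnection(
		ctx, crudClient, kubeInterface, restConfig,
		"default", "cluster-example", postgres.AppDBName, v1.ApplicationUserSecretSuffix,
	)
	if err != nil {
		return "", err
	}
	_ = forward // keeps the port-forward tunnel alive while conn is in use

	var out string
	err = conn.QueryRowContext(ctx, "SELECT version()").Scan(&out)
	return out, err
}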
*/ -package utils +// Package proxy provides functions to use the proxy subresource to call a pod +package proxy import ( + "context" "strconv" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" ) // runProxyRequest makes a GET call on the pod interface proxy, and returns the raw response -func runProxyRequest(env *TestingEnvironment, pod *corev1.Pod, tlsEnabled bool, path string, port int) ([]byte, error) { +func runProxyRequest( + ctx context.Context, + kubeInterface kubernetes.Interface, + pod *corev1.Pod, + tlsEnabled bool, + path string, + port int, +) ([]byte, error) { portString := strconv.Itoa(port) schema := "http" @@ -33,40 +43,43 @@ func runProxyRequest(env *TestingEnvironment, pod *corev1.Pod, tlsEnabled bool, schema = "https" } - req := env.Interface.CoreV1().Pods(pod.Namespace).ProxyGet( + req := kubeInterface.CoreV1().Pods(pod.Namespace).ProxyGet( schema, pod.Name, portString, path, map[string]string{}) - return req.DoRaw(env.Ctx) + return req.DoRaw(ctx) } // RetrieveMetricsFromInstance aims to retrieve the metrics from a PostgreSQL instance pod // using a GET request on the pod interface proxy func RetrieveMetricsFromInstance( - env *TestingEnvironment, + ctx context.Context, + kubeInterface kubernetes.Interface, pod corev1.Pod, tlsEnabled bool, ) (string, error) { - body, err := runProxyRequest(env, &pod, tlsEnabled, url.PathMetrics, int(url.PostgresMetricsPort)) + body, err := runProxyRequest(ctx, kubeInterface, &pod, tlsEnabled, url.PathMetrics, int(url.PostgresMetricsPort)) return string(body), err } // RetrieveMetricsFromPgBouncer aims to retrieve the metrics from a PgBouncer pod // using a GET request on the pod interface proxy func RetrieveMetricsFromPgBouncer( - env *TestingEnvironment, + ctx context.Context, + kubeInterface kubernetes.Interface, pod corev1.Pod, ) (string, error) { - body, err := runProxyRequest(env, &pod, false, url.PathMetrics, int(url.PgBouncerMetricsPort)) + body, err := runProxyRequest(ctx, kubeInterface, &pod, false, url.PathMetrics, int(url.PgBouncerMetricsPort)) return string(body), err } // RetrievePgStatusFromInstance aims to retrieve the pgStatus from a PostgreSQL instance pod // using a GET request on the pod interface proxy func RetrievePgStatusFromInstance( - env *TestingEnvironment, + ctx context.Context, + kubeInterface kubernetes.Interface, pod corev1.Pod, tlsEnabled bool, ) (string, error) { - body, err := runProxyRequest(env, &pod, tlsEnabled, url.PathPgStatus, int(url.StatusPort)) + body, err := runProxyRequest(ctx, kubeInterface, &pod, tlsEnabled, url.PathPgStatus, int(url.StatusPort)) return string(body), err } diff --git a/tests/utils/replication_slots.go b/tests/utils/replicationslot/replication_slots.go similarity index 69% rename from tests/utils/replication_slots.go rename to tests/utils/replicationslot/replication_slots.go index dab55b9e9e..6268e27eb9 100644 --- a/tests/utils/replication_slots.go +++ b/tests/utils/replicationslot/replication_slots.go @@ -14,36 +14,48 @@ See the License for the specific language governing permissions and limitations under the License. 
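// A sketch of scraping instance metrics through the pod proxy subresource
// with the explicit parameters introduced above. The pod value, TLS flag,
// and metric name are illustrative assumptions.
package example

import (
	"context"
	"strings"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy"
)

func hasCollectorMetric(
	ctx context.Context,
	kubeInterface kubernetes.Interface,
	pod corev1.Pod,
) (bool, error) {
	raw, err := proxy.RetrieveMetricsFromInstance(ctx, kubeInterface, pod, true)
	if err != nil {
		return false, err
	}
	return strings.Contains(raw, "cnpg_collector_up"), nil
}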
*/ -package utils +// Package replicationslot provides functions to manage the replication slot of a +// cnpg cluster +package replicationslot import ( + "context" "fmt" "sort" "strings" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/utils/ptr" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" ) // PrintReplicationSlots prints replications slots with their restart_lsn func PrintReplicationSlots( - namespace, - clusterName string, - env *TestingEnvironment, + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, clusterName, dbName string, ) string { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName) if err != nil { return fmt.Sprintf("Couldn't retrieve the cluster's podlist: %v\n", err) } var output strings.Builder for i, pod := range podList.Items { - slots, err := GetReplicationSlotsOnPod(namespace, pod.GetName(), env) + slots, err := GetReplicationSlotsOnPod( + ctx, crudClient, kubeInterface, restConfig, + namespace, pod.GetName(), dbName, + ) if err != nil { return fmt.Sprintf("Couldn't retrieve slots for pod %v: %v\n", pod.GetName(), err) } @@ -55,12 +67,13 @@ func PrintReplicationSlots( m := make(map[string]string) for _, slot := range slots { query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", slot) - restartLsn, _, err := env.ExecQueryInInstancePod( - PodLocator{ + restartLsn, _, err := exec.QueryInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + exec.PodLocator{ Namespace: podList.Items[i].Namespace, PodName: podList.Items[i].Name, }, - AppDBName, + exec.DatabaseName(dbName), query) if err != nil { output.WriteString(fmt.Sprintf("Couldn't retrieve restart_lsn for slot %v: %v\n", slot, err)) @@ -89,15 +102,16 @@ func AreSameLsn(lsnList []string) bool { // GetExpectedHAReplicationSlotsOnPod returns a slice of replication slot names which should be present // in a given pod func GetExpectedHAReplicationSlotsOnPod( + ctx context.Context, + crudClient client.Client, namespace, clusterName, podName string, - env *TestingEnvironment, ) ([]string, error) { - podList, err := env.GetClusterPodList(namespace, clusterName) + podList, err := clusterutils.ListPods(ctx, crudClient, namespace, clusterName) if err != nil { return nil, err } - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return nil, err } @@ -115,24 +129,31 @@ func GetExpectedHAReplicationSlotsOnPod( // GetReplicationSlotsOnPod returns a slice containing the names of the current replication slots present in // a given pod -func GetReplicationSlotsOnPod(namespace, podName string, env *TestingEnvironment) ([]string, error) { +func GetReplicationSlotsOnPod( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, podName, dbName string, +) ([]string, error) { namespacedName := types.NamespacedName{ Namespace: namespace, Name: 
podName, } targetPod := &corev1.Pod{} - err := env.Client.Get(env.Ctx, namespacedName, targetPod) + err := crudClient.Get(ctx, namespacedName, targetPod) if err != nil { return nil, err } query := "SELECT slot_name FROM pg_replication_slots WHERE temporary = 'f' AND slot_type = 'physical'" - stdout, _, err := env.ExecQueryInInstancePod( - PodLocator{ + stdout, _, err := exec.QueryInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + exec.PodLocator{ Namespace: targetPod.Namespace, PodName: targetPod.Name, }, - AppDBName, + exec.DatabaseName(dbName), query) if err != nil { return nil, err @@ -150,11 +171,14 @@ func GetReplicationSlotsOnPod(namespace, podName string, env *TestingEnvironment // GetReplicationSlotLsnsOnPod returns a slice containing the current restart_lsn values of each // replication slot present in a given pod func GetReplicationSlotLsnsOnPod( - namespace, clusterName string, + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + namespace, clusterName, dbName string, pod corev1.Pod, - env *TestingEnvironment, ) ([]string, error) { - slots, err := GetExpectedHAReplicationSlotsOnPod(namespace, clusterName, pod.GetName(), env) + slots, err := GetExpectedHAReplicationSlotsOnPod(ctx, crudClient, namespace, clusterName, pod.GetName()) if err != nil { return nil, err } @@ -163,12 +187,13 @@ func GetReplicationSlotLsnsOnPod( for _, slot := range slots { query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", slot) - restartLsn, _, err := env.ExecQueryInInstancePod( - PodLocator{ + restartLsn, _, err := exec.QueryInInstancePod( + ctx, crudClient, kubeInterface, restConfig, + exec.PodLocator{ Namespace: pod.Namespace, PodName: pod.Name, }, - AppDBName, + exec.DatabaseName(dbName), query) if err != nil { return nil, err @@ -179,8 +204,13 @@ func GetReplicationSlotLsnsOnPod( } // ToggleHAReplicationSlots sets the HA Replication Slot feature on/off depending on `enable` -func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *TestingEnvironment) error { - cluster, err := env.GetCluster(namespace, clusterName) +func ToggleHAReplicationSlots( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, + enable bool, +) error { + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return err } @@ -194,7 +224,7 @@ func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *T } clusterToggle.Spec.ReplicationSlots.HighAvailability.Enabled = ptr.To(enable) - err = env.Client.Patch(env.Ctx, clusterToggle, ctrlclient.MergeFrom(cluster)) + err = crudClient.Patch(ctx, clusterToggle, client.MergeFrom(cluster)) if err != nil { return err } @@ -202,8 +232,13 @@ func ToggleHAReplicationSlots(namespace, clusterName string, enable bool, env *T } // ToggleSynchronizeReplicationSlots sets the Synchronize Replication Slot feature on/off depending on `enable` -func ToggleSynchronizeReplicationSlots(namespace, clusterName string, enable bool, env *TestingEnvironment) error { - cluster, err := env.GetCluster(namespace, clusterName) +func ToggleSynchronizeReplicationSlots( + ctx context.Context, + crudClient client.Client, + namespace, clusterName string, + enable bool, +) error { + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return err } @@ -217,7 +252,7 @@ func ToggleSynchronizeReplicationSlots(namespace, clusterName string, enable boo } 
clusterToggle.Spec.ReplicationSlots.SynchronizeReplicas.Enabled = ptr.To(enable) - err = env.Client.Patch(env.Ctx, clusterToggle, ctrlclient.MergeFrom(cluster)) + err = crudClient.Patch(ctx, clusterToggle, client.MergeFrom(cluster)) if err != nil { return err } diff --git a/tests/utils/run.go b/tests/utils/run/run.go similarity index 71% rename from tests/utils/run.go rename to tests/utils/run/run.go index 7ae3092cbd..d1baa7d80e 100644 --- a/tests/utils/run.go +++ b/tests/utils/run/run.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +// Package run contains functions to execute commands locally +package run import ( "bytes" @@ -26,10 +27,12 @@ import ( "github.com/avast/retry-go/v4" "github.com/google/shlex" "github.com/onsi/ginkgo/v2" + + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" ) -// RunUnchecked executes a command and process the information -func RunUnchecked(command string) (stdout string, stderr string, err error) { +// Unchecked executes a command and process the information +func Unchecked(command string) (stdout string, stderr string, err error) { tokens, err := shlex.Split(command) if err != nil { ginkgo.GinkgoWriter.Printf("Error parsing command `%v`: %v\n", command, err) @@ -48,8 +51,8 @@ func RunUnchecked(command string) (stdout string, stderr string, err error) { return } -// RunUncheckedRetry executes a command and process the information with retry -func RunUncheckedRetry(command string) (stdout string, stderr string, err error) { +// UncheckedRetry executes a command and process the information with retry +func UncheckedRetry(command string) (stdout string, stderr string, err error) { var tokens []string tokens, err = shlex.Split(command) if err != nil { @@ -64,8 +67,8 @@ func RunUncheckedRetry(command string) (stdout string, stderr string, err error) cmd.Stdout, cmd.Stderr = &outBuffer, &errBuffer return cmd.Run() }, - retry.Delay(PollingTime*time.Second), - retry.Attempts(RetryAttempts), + retry.Delay(objects.PollingTime*time.Second), + retry.Attempts(objects.RetryAttempts), retry.DelayType(retry.FixedDelay), ) stdout = outBuffer.String() @@ -78,19 +81,7 @@ func RunUncheckedRetry(command string) (stdout string, stderr string, err error) // Run executes a command and prints the output when terminates with an error func Run(command string) (stdout string, stderr string, err error) { - stdout, stderr, err = RunUnchecked(command) - - var exerr *exec.ExitError - if errors.As(err, &exerr) { - ginkgo.GinkgoWriter.Printf("RunCheck: %v\nExitCode: %v\n Out:\n%v\nErr:\n%v\n", - command, exerr.ExitCode(), stdout, stderr) - } - return -} - -// RunRetry executes a command with retry and prints the output when terminates with an error -func RunRetry(command string) (stdout string, stderr string, err error) { - stdout, stderr, err = RunUncheckedRetry(command) + stdout, stderr, err = Unchecked(command) var exerr *exec.ExitError if errors.As(err, &exerr) { diff --git a/tests/utils/secrets.go b/tests/utils/secrets/secrets.go similarity index 77% rename from tests/utils/secrets.go rename to tests/utils/secrets/secrets.go index c6f01b3f10..856e0d1ff2 100644 --- a/tests/utils/secrets.go +++ b/tests/utils/secrets/secrets.go @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. 
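// A sketch that compares the expected HA replication slots with the slots
// actually present on a pod, using the relocated replicationslot helpers
// above. Cluster coordinates and the database name are illustrative.
package example

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/replicationslot"
)

func compareSlots(
	ctx context.Context,
	crudClient client.Client,
	kubeInterface kubernetes.Interface,
	restConfig *rest.Config,
) error {
	expected, err := replicationslot.GetExpectedHAReplicationSlotsOnPod(
		ctx, crudClient, "default", "cluster-example", "cluster-example-1",
	)
	if err != nil {
		return err
	}
	current, err := replicationslot.GetReplicationSlotsOnPod(
		ctx, crudClient, kubeInterface, restConfig,
		"default", "cluster-example-1", "app",
	)
	if err != nil {
		return err
	}
	fmt.Println("expected:", expected, "current:", current)
	return nil
}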
*/ -package utils +// Package secrets provides functions to manage and handle secrets +package secrets import ( + "context" "fmt" corev1 "k8s.io/api/core/v1" @@ -27,15 +29,17 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" ) // CreateSecretCA generates a CA for the cluster and return the cluster and the key pair func CreateSecretCA( - namespace string, - clusterName string, - caSecName string, + ctx context.Context, + crudClient client.Client, + namespace, clusterName, caSecName string, includeCAPrivateKey bool, - env *TestingEnvironment) ( +) ( *apiv1.Cluster, *certs.KeyPair, error, ) { // creating root CA certificates @@ -43,7 +47,7 @@ func CreateSecretCA( cluster.Namespace = namespace cluster.Name = clusterName secret := &corev1.Secret{} - err := env.Client.Get(env.Ctx, client.ObjectKey{Namespace: namespace, Name: caSecName}, secret) + err := crudClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: caSecName}, secret) if !apierrors.IsNotFound(err) { return cluster, nil, err } @@ -58,7 +62,7 @@ func CreateSecretCA( if !includeCAPrivateKey { delete(caSecret.Data, certs.CAPrivateKeyKey) } - _, err = CreateObject(env, caSecret) + _, err = objects.Create(ctx, crudClient, caSecret) if err != nil { return cluster, caPair, err } @@ -67,13 +71,14 @@ func CreateSecretCA( // GetCredentials retrieve username and password from secrets and return it as per user suffix func GetCredentials( - clusterName, namespace string, - secretSuffix string, - env *TestingEnvironment) ( + ctx context.Context, + crudClient client.Client, + clusterName, namespace, secretSuffix string, +) ( string, string, error, ) { // Get the cluster - cluster, err := env.GetCluster(namespace, clusterName) + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { return "", "", err } @@ -94,7 +99,7 @@ func GetCredentials( Namespace: namespace, Name: secretName, } - err = env.Client.Get(env.Ctx, secretNamespacedName, secret) + err = crudClient.Get(ctx, secretNamespacedName, secret) if err != nil { return "", "", err } @@ -105,11 +110,10 @@ func GetCredentials( // CreateObjectStorageSecret generates an Opaque Secret with a given ID and Key func CreateObjectStorageSecret( - namespace string, - secretName string, - id string, - key string, - env *TestingEnvironment, + ctx context.Context, + crudClient client.Client, + namespace, secretName string, + id, key string, ) (*corev1.Secret, error) { targetSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -122,7 +126,7 @@ func CreateObjectStorageSecret( }, Type: corev1.SecretTypeOpaque, } - obj, err := CreateObject(env, targetSecret) + obj, err := objects.Create(ctx, crudClient, targetSecret) if err != nil { return nil, err } diff --git a/tests/utils/service.go b/tests/utils/services/service.go similarity index 72% rename from tests/utils/service.go rename to tests/utils/services/service.go index cce93ca126..32acea4382 100644 --- a/tests/utils/service.go +++ b/tests/utils/services/service.go @@ -14,13 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
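// A sketch of the refactored secrets helpers above. The secret name, the
// object-storage credentials, and the cluster coordinates are illustrative.
package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets"
)

func demoSecrets(ctx context.Context, crudClient client.Client) error {
	// Store object-storage credentials for a backup test
	if _, err := secrets.CreateObjectStorageSecret(
		ctx, crudClient, "default", "backup-creds", "minio", "minio123",
	); err != nil {
		return err
	}

	// Read back the application user credentials of a cluster; note that
	// the helper takes the cluster name before the namespace
	user, password, err := secrets.GetCredentials(
		ctx, crudClient, "cluster-example", "default", apiv1.ApplicationUserSecretSuffix,
	)
	if err != nil {
		return err
	}
	fmt.Println("app user:", user, "password length:", len(password))
	return nil
}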
 */
-package utils
+// Package services provides functions to manage services inside K8s
+package services
 
 import (
+	"context"
 	"fmt"
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 )
@@ -45,29 +48,19 @@ func GetReadWriteServiceName(clusterName string) string {
 	return fmt.Sprintf("%v%v", clusterName, apiv1.ServiceReadWriteSuffix)
 }
 
-// GetService gets a service given name and namespace
-func GetService(namespace, name string, env *TestingEnvironment) (*corev1.Service, error) {
-	namespacedName := types.NamespacedName{
-		Namespace: namespace,
-		Name:      name,
-	}
-	service := &corev1.Service{}
-	err := GetObject(env, namespacedName, service)
-	if err != nil {
-		return nil, err
-	}
-	return service, nil
-}
-
-// GetRwServiceObject return read write service object
-func GetRwServiceObject(namespace, clusterName string, env *TestingEnvironment) (*corev1.Service, error) {
+// getRwServiceObject returns the read-write service object
+func getRwServiceObject(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (*corev1.Service, error) {
 	svcName := GetReadWriteServiceName(clusterName)
 	service := &corev1.Service{}
 	namespacedName := types.NamespacedName{
 		Namespace: namespace,
 		Name:      svcName,
 	}
-	err := env.Client.Get(env.Ctx, namespacedName, service)
+	err := crudClient.Get(ctx, namespacedName, service)
 	if err != nil {
 		return service, err
 	}
@@ -82,8 +75,12 @@ func CreateDSN(host, user, dbname, password string, sslmode SSLMode, port int) s
 }
 
 // GetHostName returns the fully qualified domain name for the read-write service
-func GetHostName(namespace, clusterName string, env *TestingEnvironment) (string, error) {
-	rwService, err := GetRwServiceObject(namespace, clusterName, env)
+func GetHostName(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (string, error) {
+	rwService, err := getRwServiceObject(ctx, crudClient, namespace, clusterName)
 	if err != nil {
 		return "", err
 	}
diff --git a/tests/utils/storage.go b/tests/utils/storage/storage.go
similarity index 73%
rename from tests/utils/storage.go
rename to tests/utils/storage/storage.go
index 16883a5f05..3bc2eab59d 100644
--- a/tests/utils/storage.go
+++ b/tests/utils/storage/storage.go
@@ -14,9 +14,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
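// A sketch of resolving the read-write service FQDN and building a DSN with
// the relocated services helpers above. The SSLMode conversion from a plain
// string and the credentials are assumptions made for illustration only.
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/services"
)

func rwDSN(ctx context.Context, crudClient client.Client) (string, error) {
	host, err := services.GetHostName(ctx, crudClient, "default", "cluster-example")
	if err != nil {
		return "", err
	}
	// CreateDSN builds the connection string for the given service endpoint
	return services.CreateDSN(host, "app", "app", "password", services.SSLMode("require"), 5432), nil
}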
 */
-package utils
+// Package storage provides functions to manage anything related to storage
+package storage
 
 import (
+	"context"
 	"fmt"
 	"os"
 
@@ -27,18 +29,28 @@ import (
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects"
 )
 
 // GetStorageAllowExpansion returns the 'AllowVolumeExpansion' value of the storage class
-func GetStorageAllowExpansion(defaultStorageClass string, env *TestingEnvironment) (*bool, error) {
+func GetStorageAllowExpansion(
+	ctx context.Context,
+	crudClient client.Client,
+	defaultStorageClass string,
+) (*bool, error) {
 	storageClass := &storagev1.StorageClass{}
-	err := GetObject(env, client.ObjectKey{Name: defaultStorageClass}, storageClass)
+	err := objects.Get(ctx, crudClient, client.ObjectKey{Name: defaultStorageClass}, storageClass)
 	return storageClass.AllowVolumeExpansion, err
 }
 
 // IsWalStorageEnabled returns true if 'WalStorage' is being used
-func IsWalStorageEnabled(namespace, clusterName string, env *TestingEnvironment) (bool, error) {
-	cluster, err := env.GetCluster(namespace, clusterName)
+func IsWalStorageEnabled(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace, clusterName string,
+) (bool, error) {
+	cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName)
 	if cluster.Spec.WalStorage == nil {
 		return false, err
 	}
@@ -136,3 +148,28 @@ func SetSnapshotNameAsEnv(
 	}
 	return nil
 }
+
+// GetPVCList gathers the current list of PVCs in a namespace
+func GetPVCList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*corev1.PersistentVolumeClaimList, error) {
+	pvcList := &corev1.PersistentVolumeClaimList{}
+	err := crudClient.List(
+		ctx, pvcList, client.InNamespace(namespace),
+	)
+	return pvcList, err
+}
+
+// GetSnapshotList gathers the current list of VolumeSnapshots in a namespace
+func GetSnapshotList(
+	ctx context.Context,
+	crudClient client.Client,
+	namespace string,
+) (*volumesnapshot.VolumeSnapshotList, error) {
+	list := &volumesnapshot.VolumeSnapshotList{}
+	err := crudClient.List(ctx, list, client.InNamespace(namespace))
+
+	return list, err
+}
diff --git a/tests/utils/time.go b/tests/utils/time.go
deleted file mode 100644
index ecce38b9bd..0000000000
--- a/tests/utils/time.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
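// A sketch of the storage helpers above; the storage class and namespace
// names are illustrative.
package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
)

func dumpStorageInfo(ctx context.Context, crudClient client.Client) error {
	expandable, err := storage.GetStorageAllowExpansion(ctx, crudClient, "standard")
	if err != nil {
		return err
	}
	if expandable != nil && *expandable {
		fmt.Println("volume expansion is allowed")
	}

	pvcs, err := storage.GetPVCList(ctx, crudClient, "default")
	if err != nil {
		return err
	}
	fmt.Println("PVCs in namespace:", len(pvcs.Items))
	return nil
}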
-*/
-
-package utils
-
-import (
-	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-)
-
-// GetCurrentTimestamp getting current time stamp from postgres server
-func GetCurrentTimestamp(namespace, clusterName string, env *TestingEnvironment) (string, error) {
-	row, err := RunQueryRowOverForward(
-		env,
-		namespace,
-		clusterName,
-		AppDBName,
-		apiv1.ApplicationUserSecretSuffix,
-		"select TO_CHAR(CURRENT_TIMESTAMP,'YYYY-MM-DD HH24:MI:SS.US');",
-	)
-	if err != nil {
-		return "", err
-	}
-
-	var currentTimestamp string
-	if err = row.Scan(&currentTimestamp); err != nil {
-		return "", err
-	}
-
-	return currentTimestamp, nil
-}
diff --git a/tests/utils/timeouts.go b/tests/utils/timeouts/timeouts.go
similarity index 97%
rename from tests/utils/timeouts.go
rename to tests/utils/timeouts/timeouts.go
index 8edf62d1de..860a5e9df0 100644
--- a/tests/utils/timeouts.go
+++ b/tests/utils/timeouts/timeouts.go
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package utils
+// Package timeouts contains the timeouts for the E2E test suite
+package timeouts
 
 import (
 	"encoding/json"
diff --git a/tests/utils/utils.go b/tests/utils/utils.go
new file mode 100644
index 0000000000..58a81c8894
--- /dev/null
+++ b/tests/utils/utils.go
@@ -0,0 +1,170 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"text/tabwriter"
+
+	"github.com/cheynewallace/tabby"
+	batchv1 "k8s.io/api/batch/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	utils2 "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/run"
+	"github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage"
+)
+
+// PrintClusterResources prints a summary of the cluster pods, jobs, pvcs etc.
+func PrintClusterResources(ctx context.Context, crudClient client.Client, namespace, clusterName string) string { + cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) + if err != nil { + return fmt.Sprintf("Error while Getting Object %v", err) + } + + buffer := &bytes.Buffer{} + w := tabwriter.NewWriter(buffer, 0, 0, 4, ' ', 0) + clusterInfo := tabby.NewCustom(w) + clusterInfo.AddLine("Timeout while waiting for cluster ready, dumping more cluster information for analysis...") + clusterInfo.AddLine() + clusterInfo.AddLine() + clusterInfo.AddLine("Cluster information:") + clusterInfo.AddLine("Name", cluster.GetName()) + clusterInfo.AddLine("Namespace", cluster.GetNamespace()) + clusterInfo.AddLine() + clusterInfo.AddHeader("Items", "Values") + clusterInfo.AddLine("Spec.Instances", cluster.Spec.Instances) + clusterInfo.AddLine("Wal storage", cluster.ShouldCreateWalArchiveVolume()) + clusterInfo.AddLine("Cluster phase", cluster.Status.Phase) + clusterInfo.AddLine("Phase reason", cluster.Status.PhaseReason) + clusterInfo.AddLine("Cluster target primary", cluster.Status.TargetPrimary) + clusterInfo.AddLine("Cluster current primary", cluster.Status.CurrentPrimary) + clusterInfo.AddLine() + + podList, _ := clusterutils.ListPods(ctx, crudClient, cluster.GetNamespace(), cluster.GetName()) + + clusterInfo.AddLine("Cluster Pods information:") + clusterInfo.AddLine("Ready pod number: ", utils2.CountReadyPods(podList.Items)) + clusterInfo.AddLine() + clusterInfo.AddHeader("Items", "Values") + for _, pod := range podList.Items { + clusterInfo.AddLine("Pod name", pod.Name) + clusterInfo.AddLine("Pod phase", pod.Status.Phase) + if cluster.Status.InstancesReportedState != nil { + if instanceReportState, ok := cluster.Status.InstancesReportedState[v1.PodName(pod.Name)]; ok { + clusterInfo.AddLine("Is Primary", instanceReportState.IsPrimary) + clusterInfo.AddLine("TimeLineID", instanceReportState.TimeLineID) + clusterInfo.AddLine("---", "---") + } + } else { + clusterInfo.AddLine("InstanceReportState not reported", "") + } + } + + clusterInfo.AddLine("Jobs information:") + clusterInfo.AddLine() + clusterInfo.AddHeader("Items", "Values") + jobList := &batchv1.JobList{} + _ = crudClient.List( + ctx, jobList, client.InNamespace(namespace), + ) + for _, job := range jobList.Items { + clusterInfo.AddLine("Job name", job.Name) + clusterInfo.AddLine("Job status", fmt.Sprintf("%#v", job.Status)) + } + + pvcList, _ := storage.GetPVCList(ctx, crudClient, cluster.GetNamespace()) + clusterInfo.AddLine() + clusterInfo.AddLine("Cluster PVC information: (dumping all pvc under the namespace)") + clusterInfo.AddLine("Available Cluster PVCCount", cluster.Status.PVCCount) + clusterInfo.AddLine() + clusterInfo.AddHeader("Items", "Values") + for _, pvc := range pvcList.Items { + clusterInfo.AddLine("PVC name", pvc.Name) + clusterInfo.AddLine("PVC phase", pvc.Status.Phase) + clusterInfo.AddLine("---", "---") + } + + snapshotList, _ := storage.GetSnapshotList(ctx, crudClient, cluster.Namespace) + clusterInfo.AddLine() + clusterInfo.AddLine("Cluster Snapshot information: (dumping all snapshot under the namespace)") + clusterInfo.AddLine() + clusterInfo.AddHeader("Items", "Values") + for _, snapshot := range snapshotList.Items { + clusterInfo.AddLine("Snapshot name", snapshot.Name) + if snapshot.Status.ReadyToUse != nil { + clusterInfo.AddLine("Snapshot ready to use", *snapshot.Status.ReadyToUse) + } else { + clusterInfo.AddLine("Snapshot ready to use", "false") + } + clusterInfo.AddLine("---", "---") + } + + 
// do not remove, this is needed to ensure that the writer cache is always flushed.
+	clusterInfo.Print()
+
+	return buffer.String()
+}
+
+// ForgeArchiveWalOnMinio instead of using `switchWalCmd` to generate a real WAL archive, directly forges a WAL archive
+// file on Minio by copying and renaming an existing WAL archive file for the sake of more control of testing. To make
+// sure the forged one won't be a real WAL archive, we let the sequence in newWALName be big enough that it can't
+// be a real WAL archive name in an idle postgresql.
+func ForgeArchiveWalOnMinio(namespace, clusterName, miniClientPodName, existingWALName, newWALName string) error {
+	// Forge a WAL archive by copying and renaming the 1st WAL archive
+	minioWALBasePath := "minio/" + clusterName + "/" + clusterName + "/wals/0000000100000000"
+	existingWALPath := minioWALBasePath + "/" + existingWALName + ".gz"
+	newWALNamePath := minioWALBasePath + "/" + newWALName
+	forgeWALOnMinioCmd := "mc cp " + existingWALPath + " " + newWALNamePath
+	_, _, err := run.UncheckedRetry(fmt.Sprintf(
+		"kubectl exec -n %v %v -- %v",
+		namespace,
+		miniClientPodName,
+		forgeWALOnMinioCmd))
+
+	return err
+}
+
+// TestFileExist tests if a file specified with `fileName` exists under directory `directoryPath`, on pod `podName` in
+// namespace `namespace`
+func TestFileExist(namespace, podName, directoryPath, fileName string) bool {
+	filePath := directoryPath + "/" + fileName
+	testFileExistCommand := "test -f " + filePath
+	_, _, err := run.Unchecked(fmt.Sprintf(
+		"kubectl exec -n %v %v -- %v",
+		namespace,
+		podName,
+		testFileExistCommand))
+
+	return err == nil
+}
+
+// TestDirectoryEmpty tests whether the directory `directoryPath` on pod `podName` in namespace `namespace` contains
+// any entry; it returns true when the directory is not empty
+func TestDirectoryEmpty(namespace, podName, directoryPath string) bool {
+	testDirectoryEmptyCommand := "test \"$(ls -A " + directoryPath + ")\""
+	_, _, err := run.Unchecked(fmt.Sprintf(
+		"kubectl exec -n %v %v -- %v",
+		namespace,
+		podName,
+		testDirectoryEmptyCommand))
+
+	return err == nil
+}
diff --git a/tests/utils/version.go b/tests/utils/version.go
deleted file mode 100644
index 2416df4b08..0000000000
--- a/tests/utils/version.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright The CloudNativePG Contributors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
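// A sketch of the diagnostic helpers defined above; the namespace, pod, and
// path values are illustrative assumptions.
package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/cloudnative-pg/cloudnative-pg/tests/utils"
)

func dumpOnTimeout(ctx context.Context, crudClient client.Client) {
	// On a readiness timeout, dump the whole cluster state for analysis
	fmt.Println(utils.PrintClusterResources(ctx, crudClient, "default", "cluster-example"))

	// Check that a marker file landed inside the instance data directory
	if utils.TestFileExist("default", "cluster-example-1", "/var/lib/postgresql/data", "standby.signal") {
		fmt.Println("file found")
	}
}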
-*/ - -package utils - -import ( - "fmt" - - "github.com/cloudnative-pg/machinery/pkg/image/reference" - "github.com/cloudnative-pg/machinery/pkg/postgres/version" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" -) - -// BumpPostgresImageMajorVersion returns a postgresImage incrementing the major version of the argument (if available) -func BumpPostgresImageMajorVersion(postgresImage string) (string, error) { - imageReference := reference.New(postgresImage) - - postgresImageVersion, err := version.FromTag(imageReference.Tag) - if err != nil { - return "", err - } - - targetPostgresImageMajorVersionInt := postgresImageVersion.Major() + 1 - - defaultImageVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) - if err != nil { - return "", err - } - - if targetPostgresImageMajorVersionInt >= defaultImageVersion.Major() { - return postgresImage, nil - } - - imageReference.Tag = fmt.Sprintf("%d", postgresImageVersion.Major()+1) - - return imageReference.GetNormalizedName(), nil -} diff --git a/tests/utils/webapp.go b/tests/utils/webapp.go deleted file mode 100644 index ec2cc1b4bb..0000000000 --- a/tests/utils/webapp.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" -) - -// DefaultWebapp returns a struct representing a -func DefaultWebapp(namespace string, name string, rootCASecretName string, tlsSecretName string) corev1.Pod { - var secretMode int32 = 0o600 - seccompProfile := &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - } - - return corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "secret-volume-root-ca", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: rootCASecretName, - DefaultMode: &secretMode, - }, - }, - }, - { - Name: "secret-volume-tls", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: tlsSecretName, - DefaultMode: &secretMode, - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: name, - Image: "ghcr.io/cloudnative-pg/webtest:1.6.0", - Ports: []corev1.ContainerPort{ - { - ContainerPort: 8080, - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "secret-volume-root-ca", - MountPath: "/etc/secrets/ca", - }, - { - Name: "secret-volume-tls", - MountPath: "/etc/secrets/tls", - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - SeccompProfile: seccompProfile, - }, - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - SeccompProfile: seccompProfile, - }, - }, - } -} diff --git a/tests/utils/yaml.go b/tests/utils/yaml/yaml.go similarity index 59% rename from tests/utils/yaml.go rename to tests/utils/yaml/yaml.go index d418643470..c04f844978 100644 --- a/tests/utils/yaml.go +++ 
b/tests/utils/yaml/yaml.go @@ -14,13 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. */ -package utils +// Package yaml provides functions to handle yaml files +package yaml import ( "bytes" "fmt" "log" + "os" + "path/filepath" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" @@ -61,3 +68,33 @@ func ParseObjectsFromYAML(data []byte, namespace string) ([]client.Object, error } return objects, nil } + +// GetResourceNameFromYAML returns the name of a resource in a YAML file +func GetResourceNameFromYAML(scheme *runtime.Scheme, path string) (string, error) { + namespacedName, err := getResourceNamespacedNameFromYAML(scheme, path) + if err != nil { + return "", err + } + return namespacedName.Name, err +} + +// getResourceNamespacedNameFromYAML returns the NamespacedName representing a resource in a YAML file +func getResourceNamespacedNameFromYAML( + scheme *runtime.Scheme, + path string, +) (types.NamespacedName, error) { + data, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return types.NamespacedName{}, err + } + decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer() + obj, _, err := decoder.Decode(data, nil, nil) + if err != nil { + return types.NamespacedName{}, err + } + objectMeta, err := meta.Accessor(obj) + if err != nil { + return types.NamespacedName{}, err + } + return types.NamespacedName{Namespace: objectMeta.GetNamespace(), Name: objectMeta.GetName()}, nil +} From 895748f0dac002ffcf292cd6088bf53d5fcbcc8d Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 3 Jan 2025 13:35:01 +0100 Subject: [PATCH 277/836] test(e2e): ensure unique namespaces during parallel tests (#6456) Closes #6395 Signed-off-by: Armando Ruocco --- tests/utils/environment/environment.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/utils/environment/environment.go b/tests/utils/environment/environment.go index 4a216075be..cbd9c4c971 100644 --- a/tests/utils/environment/environment.go +++ b/tests/utils/environment/environment.go @@ -48,7 +48,8 @@ import ( // Import the client auth plugin package to allow use gke or ake to run tests _ "k8s.io/client-go/plugin/pkg/client/auth" - . "github.com/onsi/gomega" // nolint + . "github.com/onsi/ginkgo/v2" // nolint + . 
"github.com/onsi/gomega" // nolint ) const ( @@ -77,9 +78,10 @@ type uniqueStringSlice struct { func (a *uniqueStringSlice) generateUniqueName(prefix string) string { a.mu.Lock() defer a.mu.Unlock() + process := GinkgoParallelProcess() for { - potentialUniqueName := fmt.Sprintf("%s-%d", prefix, funk.RandomInt(0, 9999)) + potentialUniqueName := fmt.Sprintf("%s-%d-%d", prefix, process, funk.RandomInt(0, 9999)) if !slices.Contains(a.values, potentialUniqueName) { a.values = append(a.values, potentialUniqueName) return potentialUniqueName From f306052e1f827acb05161123bf38ee24e7162260 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 3 Jan 2025 17:02:00 +0100 Subject: [PATCH 278/836] test(e2e): higher timeout and better error descriptions for `switchWalAndGetLatestArchive` (#6448) Closes #6413 Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- tests/e2e/asserts_test.go | 16 +++++++++++----- tests/utils/exec/exec.go | 15 +++++++++++++++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 97c5758b96..c35de5258e 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -1999,15 +1999,18 @@ func AssertArchiveConditionMet(namespace, clusterName, timeout string) { // switchWalAndGetLatestArchive trigger a new wal and get the name of latest wal file func switchWalAndGetLatestArchive(namespace, podName string) string { - _, _, err := exec.QueryInInstancePod( + _, _, err := exec.QueryInInstancePodWithTimeout( env.Ctx, env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{ Namespace: namespace, PodName: podName, }, postgres.PostgresDBName, - "CHECKPOINT;") - Expect(err).ToNot(HaveOccurred()) + "CHECKPOINT", + 300*time.Second, + ) + Expect(err).ToNot(HaveOccurred(), + "failed to trigger a new wal while executing 'switchWalAndGetLatestArchive'") out, _, err := exec.QueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, @@ -2016,8 +2019,11 @@ func switchWalAndGetLatestArchive(namespace, podName string) string { PodName: podName, }, postgres.PostgresDBName, - "SELECT pg_walfile_name(pg_switch_wal());") - Expect(err).ToNot(HaveOccurred()) + "SELECT pg_walfile_name(pg_switch_wal());", + ) + Expect(err).ToNot( + HaveOccurred(), + "failed to get latest wal file name while executing 'switchWalAndGetLatestArchive") return strings.TrimSpace(out) } diff --git a/tests/utils/exec/exec.go b/tests/utils/exec/exec.go index 58ca134ca6..7196a273da 100644 --- a/tests/utils/exec/exec.go +++ b/tests/utils/exec/exec.go @@ -107,6 +107,21 @@ func QueryInInstancePod( query string, ) (string, string, error) { timeout := time.Second * 10 + return QueryInInstancePodWithTimeout(ctx, crudClient, kubeInterface, restConfig, podLocator, dbname, query, timeout) +} + +// QueryInInstancePodWithTimeout executes a query in an instance pod, by connecting to the pod +// and the postgres container, and using a local connection with the postgres user +func QueryInInstancePodWithTimeout( + ctx context.Context, + crudClient client.Client, + kubeInterface kubernetes.Interface, + restConfig *rest.Config, + podLocator PodLocator, + dbname DatabaseName, + query string, + timeout time.Duration, +) (string, string, error) { return CommandInInstancePod( ctx, crudClient, kubeInterface, restConfig, PodLocator{ From f2e5a04a6a5e2bc2e21bbf2b0994f9826123e9c9 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 3 Jan 2025 18:16:17 +0100 Subject: [PATCH 279/836] fix(managed-services): 
preserve user specified port settings (#6474) Closes #6389 Signed-off-by: Armando Ruocco --- pkg/specs/services.go | 3 ++- pkg/specs/services_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/pkg/specs/services.go b/pkg/specs/services.go index e7978e6c6f..454f828709 100644 --- a/pkg/specs/services.go +++ b/pkg/specs/services.go @@ -155,7 +155,8 @@ func BuildManagedServices(cluster apiv1.Cluster) ([]corev1.Service, error) { SetSelectors(defaultService.Spec.Selector) for idx := range defaultService.Spec.Ports { - builder = builder.WithServicePort(&defaultService.Spec.Ports[idx]) + // we preserve the user settings over the default configuration, issue: #6389 + builder = builder.WithServicePortNoOverwrite(&defaultService.Spec.Ports[idx]) } for key, value := range defaultService.Labels { diff --git a/pkg/specs/services_test.go b/pkg/specs/services_test.go index dd146fde25..86c108c7a6 100644 --- a/pkg/specs/services_test.go +++ b/pkg/specs/services_test.go @@ -152,6 +152,30 @@ var _ = Describe("BuildManagedServices", func() { Expect(services[0].ObjectMeta.Labels).To(HaveKeyWithValue(utils.IsManagedLabelName, "true")) Expect(services[0].ObjectMeta.Labels).To(HaveKeyWithValue("test-label", "test-value")) Expect(services[0].ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation", "test-value")) + Expect(services[0].Spec.Ports).To(ContainElement(corev1.ServicePort{ + Name: PostgresContainerName, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt32(postgres.ServerPort), + Port: postgres.ServerPort, + NodePort: 0, + })) + }) + + It("should not overwrite the user specified service port with the default one", func() { + cluster.Spec.Managed.Services.Additional[0].ServiceTemplate.Spec.Ports = []corev1.ServicePort{ + { + Name: PostgresContainerName, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt32(postgres.ServerPort), + Port: postgres.ServerPort, + NodePort: 5533, + }, + } + services, err := BuildManagedServices(cluster) + Expect(err).NotTo(HaveOccurred()) + Expect(services).NotTo(BeNil()) + Expect(services).To(HaveLen(1)) + Expect(services[0].Spec.Ports[0].NodePort).To(Equal(int32(5533))) }) }) }) From 62014067abdc85f87ddfeb842a133c89e08ba8c4 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 3 Jan 2025 22:40:04 +0100 Subject: [PATCH 280/836] test(e2e): retry `AssertWritesToPrimarySucceeds` and `AssertWritesToReplicaFails` (#6477) Closes #6415 Signed-off-by: Armando Ruocco --- tests/e2e/asserts_test.go | 82 ++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 39 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index c35de5258e..db514be314 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -1146,54 +1146,58 @@ func AssertWritesToReplicaFails( namespace, service, appDBName, appDBUser, appDBPass string, ) { By(fmt.Sprintf("Verifying %v service doesn't allow writes", service), func() { - forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( - env.Ctx, env.Interface, env.RestClientConfig, - namespace, service, appDBName, appDBUser, appDBPass, - ) - defer func() { - _ = conn.Close() - forwardConn.Close() - }() - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) { + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, service, + appDBName, appDBUser, appDBPass) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + 
g.Expect(err).ToNot(HaveOccurred()) - var rawValue string - // Expect to be connected to a replica - row := conn.QueryRow("SELECT pg_is_in_recovery()") - err = row.Scan(&rawValue) - Expect(err).ToNot(HaveOccurred()) - isReplica := strings.TrimSpace(rawValue) - Expect(isReplica).To(BeEquivalentTo("true")) + var rawValue string + // Expect to be connected to a replica + row := conn.QueryRow("SELECT pg_is_in_recovery()") + err = row.Scan(&rawValue) + g.Expect(err).ToNot(HaveOccurred()) + isReplica := strings.TrimSpace(rawValue) + g.Expect(isReplica).To(BeEquivalentTo("true")) - // Expect to be in a read-only transaction - _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)") - Expect(err).To(HaveOccurred()) - Expect(err.Error()).Should(ContainSubstring("cannot execute CREATE TABLE in a read-only transaction")) + // Expect to be in a read-only transaction + _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)") + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).Should(ContainSubstring("cannot execute CREATE TABLE in a read-only transaction")) + }, RetryTimeout).Should(Succeed()) }) } func AssertWritesToPrimarySucceeds(namespace, service, appDBName, appDBUser, appDBPass string) { By(fmt.Sprintf("Verifying %v service correctly manages writes", service), func() { - forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( - env.Ctx, env.Interface, env.RestClientConfig, - namespace, service, appDBName, appDBUser, appDBPass, - ) - defer func() { - _ = conn.Close() - forwardConn.Close() - }() - Expect(err).ToNot(HaveOccurred()) + Eventually(func(g Gomega) { + forwardConn, conn, err := postgres.ForwardPSQLServiceConnection( + env.Ctx, env.Interface, env.RestClientConfig, + namespace, service, + appDBName, appDBUser, appDBPass) + defer func() { + _ = conn.Close() + forwardConn.Close() + }() + g.Expect(err).ToNot(HaveOccurred()) - var rawValue string - // Expect to be connected to a primary - row := conn.QueryRow("SELECT pg_is_in_recovery()") - err = row.Scan(&rawValue) - Expect(err).ToNot(HaveOccurred()) - isReplica := strings.TrimSpace(rawValue) - Expect(isReplica).To(BeEquivalentTo("false")) + var rawValue string + // Expect to be connected to a primary + row := conn.QueryRow("SELECT pg_is_in_recovery()") + err = row.Scan(&rawValue) + g.Expect(err).ToNot(HaveOccurred()) + isReplica := strings.TrimSpace(rawValue) + g.Expect(isReplica).To(BeEquivalentTo("false")) - // Expect to be able to write - _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)") - Expect(err).ToNot(HaveOccurred()) + // Expect to be able to write + _, err = conn.Exec("CREATE TABLE IF NOT EXISTS table1(var1 text)") + g.Expect(err).ToNot(HaveOccurred()) + }, RetryTimeout).Should(Succeed()) }) } From e59b3d32a6a9b00a3024b43a261978f372d6a689 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 4 Jan 2025 09:28:03 +0100 Subject: [PATCH 281/836] chore(deps): update dependency golangci/golangci-lint to v1.63.4 (main) (#6482) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 4e52d12902..d5c39b2e61 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -17,7 +17,7 @@ on: # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.23.x" - GOLANGCI_LINT_VERSION: "v1.62.2" + 
GOLANGCI_LINT_VERSION: "v1.63.4" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.26.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" From 92fbc45ec9f7b2cc7443191e3c8740af5793eb5c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 4 Jan 2025 10:26:46 +0100 Subject: [PATCH 282/836] chore(deps): update dependency rook/rook to v1.16.1 (main) (#6502) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 639cb9b9c4..6ccdc1b64b 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ env: GOLANG_VERSION: "1.23.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.26.0" - ROOK_VERSION: "v1.16.0" + ROOK_VERSION: "v1.16.1" EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" From 8cd4415a3d3724eaa916702f7d0b17ca51301ffb Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Sat, 4 Jan 2025 11:29:00 +0100 Subject: [PATCH 283/836] test(e2e): remove redudant `AssertPostgresNoPendingRestart` calls (#6435) Closes #6410 Signed-off-by: Armando Ruocco Signed-off-by: Francesco Canovai Co-authored-by: Francesco Canovai --- tests/e2e/asserts_test.go | 34 -------------------------- tests/e2e/configuration_update_test.go | 14 ----------- 2 files changed, 48 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index db514be314..2c4806bc06 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -2684,40 +2684,6 @@ func DeleteResourcesFromFile(namespace, sampleFilePath string) error { return nil } -// Assert in the giving cluster, all the postgres db has no pending restart -func AssertPostgresNoPendingRestart(namespace, clusterName string, timeout int) { - By("waiting for all pods have no pending restart", func() { - podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - query := "SELECT EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)" - // Check that the new parameter has been modified in every pod - Eventually(func() (bool, error) { - noPendingRestart := true - for _, pod := range podList.Items { - stdout, _, err := exec.QueryInInstancePod( - env.Ctx, env.Client, env.Interface, env.RestClientConfig, - exec.PodLocator{ - Namespace: pod.Namespace, - PodName: pod.Name, - }, - postgres.PostgresDBName, - query) - if err != nil { - return false, nil - } - if strings.Trim(stdout, "\n") == "f" { - continue - } - - noPendingRestart = false - break - } - return noPendingRestart, nil - }, timeout, 2).Should(BeEquivalentTo(true), - "all pods in cluster has no pending restart") - }) -} - func AssertBackupConditionTimestampChangedInClusterStatus( namespace, clusterName string, diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go index 4918d6d755..46b7117b41 100644 --- a/tests/e2e/configuration_update_test.go +++ b/tests/e2e/configuration_update_test.go @@ -156,9 +156,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada Expect(err).ToNot(HaveOccurred()) By("apply configuration update", func() { - // Update the configuration updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, 300) }) By("verify that work_mem result as expected", 
func() { @@ -196,9 +194,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada }) By("apply configuration update", func() { - // Update the configuration updateClusterPostgresPgHBA(namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, 300) }) By("verify that connections succeed after pg_hba_reload", func() { @@ -239,10 +235,8 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada oldPrimary := cluster.Status.CurrentPrimary By("apply configuration update", func() { - // Update the configuration postgresParams["shared_buffers"] = "256MB" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, timeout) }) By("verify that shared_buffers setting changed", func() { @@ -282,11 +276,9 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada oldPrimary := cluster.Status.CurrentPrimary By("apply configuration update", func() { - // Update the configuration postgresParams["max_replication_slots"] = "16" postgresParams["maintenance_work_mem"] = "128MB" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, timeout) }) By("verify that both parameters have been modified in each pod", func() { @@ -358,7 +350,6 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada delete(postgresParams, "port") postgresParams["max_connections"] = "105" updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, timeout) }) By("verify that max_connections has been decreased in every pod", func() { @@ -402,10 +393,8 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada oldPrimary := cluster.Status.CurrentPrimary By("apply configuration update", func() { - // Update the configuration delete(postgresParams, "max_connections") updateClusterPostgresParams(postgresParams, namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, timeout) }) By("verify that the max_connections has been set to default in every pod", func() { @@ -457,9 +446,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada }) By("apply configuration update", func() { - // Update the configuration updateClusterPostgresPgIdent(namespace) - AssertPostgresNoPendingRestart(namespace, clusterName, 300) }) By("verify that there are now two entries in pg_ident_file_mappings", func() { @@ -661,7 +648,6 @@ var _ = Describe("Configuration update with primaryUpdateMethod", Label(tests.La }, 160).Should(BeEquivalentTo(10)) } }) - AssertPostgresNoPendingRestart(namespace, clusterName, 120) }) }) }) From cd3e74a8aca6a3309813078432bd11c46a587008 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 4 Jan 2025 14:27:31 +0100 Subject: [PATCH 284/836] chore(deps): update module github.com/goreleaser/goreleaser to v2.5.1 (main) (#6506) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0ebe229185..d08b303300 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.5 -GORELEASER_VERSION ?= v2.5.0 +GORELEASER_VERSION ?= v2.5.1 SPELLCHECK_VERSION ?= 0.45.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.38.0 From 
d783035474ef63a4bdf36470211bec0f5a7e014d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 4 Jan 2025 19:20:06 +0100 Subject: [PATCH 285/836] fix(deps): update github.com/cloudnative-pg/machinery digest to 95c37fe (main) (#6490) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8ddc9591f2..279ca436cd 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc - github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607 + github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index 22f177486f..3572a565e4 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrE github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A= github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY= -github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607 h1:Jymgt/H6iNoUZCqF6YtOqE2GgQIM1e1tWjT42B6vPJs= -github.com/cloudnative-pg/machinery v0.0.0-20241223154527-66cd032ef607/go.mod h1:n6br6GuNXcwYI5SgRArt9rM2hgZ1ElZr4vkJCWfiC/U= +github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 h1:RvwDA4W8K8NQNVQTOzrf9o8P328N7NXztvnq3cUncww= +github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0/go.mod h1:pitcj6ztiuxfSFH5EbVHv8iCVxF+yQkzf9o9A1KoDvI= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= From 995f3e247d8826687d556567488fa01fd9010d44 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 18:08:27 +0100 Subject: [PATCH 286/836] fix(deps): update module golang.org/x/term to v0.28.0 (main) (#6520) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 279ca436cd..f163185fe6 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/term v0.27.0 + golang.org/x/term v0.28.0 google.golang.org/grpc v1.69.2 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.0 @@ -104,7 +104,7 @@ require ( golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.28.0 // indirect diff --git a/go.sum b/go.sum index 3572a565e4..db30e2b287 100644 --- a/go.sum +++ b/go.sum @@ -240,10 +240,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= From 17d5bd1c45a14da35e7e55c86a30c870a8d5be6e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 21:16:14 +0100 Subject: [PATCH 287/836] fix(deps): update github.com/cloudnative-pg/barman-cloud digest to c147262 (main) (#6516) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f163185fe6..5058c8e429 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/avast/retry-go/v4 v4.6.0 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a + github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450 github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc diff --git a/go.sum b/go.sum index db30e2b287..ff99792126 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a h1:VrEa9P/HfA6csNOh0DRlUyeUoKuByV57tLnf2rTIqfU= -github.com/cloudnative-pg/barman-cloud v0.0.0-20241218093921-134c7de4954a/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= +github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450 h1:u11mKIHmbEGQWLsAb5hguwgGOOddA8lpPFAViBpbkt8= +github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A= github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY= github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 h1:RvwDA4W8K8NQNVQTOzrf9o8P328N7NXztvnq3cUncww= From 1d0f8ca652a4b8a8bb70625ca45432a862bc8e04 Mon Sep 17 00:00:00 2001 From: Brad Holland Date: Tue, 7 Jan 2025 03:08:06 -0500 Subject: [PATCH 288/836] docs: fix typo 
in prometheus rules example (#6473) * fix typo in sample alert description * remove trailing white spaces Closes #6499 Signed-off-by: Brad Holland --- docs/src/samples/monitoring/prometheusrule.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/samples/monitoring/prometheusrule.yaml b/docs/src/samples/monitoring/prometheusrule.yaml index b74c66590b..eb344769af 100644 --- a/docs/src/samples/monitoring/prometheusrule.yaml +++ b/docs/src/samples/monitoring/prometheusrule.yaml @@ -26,7 +26,7 @@ spec: severity: warning - alert: PGDatabaseXidAge annotations: - description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }} + description: Over 300,000,000 transactions from frozen xid on pod {{ $labels.pod }} summary: Number of transactions from the frozen XID to the current one expr: |- cnpg_pg_database_xid_age > 300000000 @@ -42,7 +42,7 @@ spec: for: 1m labels: severity: warning - - alert: LastFailedArchiveTime + - alert: LastFailedArchiveTime annotations: description: Archiving failed for {{ $labels.pod }} summary: Checks the last time archiving failed. Will be < 0 when it has not failed. @@ -51,7 +51,7 @@ spec: for: 1m labels: severity: warning - - alert: DatabaseDeadlockConflicts + - alert: DatabaseDeadlockConflicts annotations: description: There are over 10 deadlock conflicts in {{ $labels.pod }} summary: Checks the number of database conflicts From a3369be679a818477bec0d99840bd1c205be8651 Mon Sep 17 00:00:00 2001 From: samtoxie Date: Tue, 7 Jan 2025 09:56:21 +0100 Subject: [PATCH 289/836] doc: add kubectl plugin instructions for Arch Linux User Repository (AUR) (#4745) Add instructions to install the kubectl-cnpg plugin for Arch users using the AUR infrastructure. Signed-off-by: Sam Toxopeus --- docs/src/kubectl-plugin.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index d001a3397e..2637b250a3 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -80,6 +80,22 @@ Installed size: 78 M Is this ok [y/N]: y ``` +### Using the Arch Linux User Repository (AUR) Package + +To install the plugin from the [AUR](https://aur.archlinux.org/packages/kubectl-cnpg), follow these steps: + +```sh +git clone https://aur.archlinux.org/kubectl-cnpg.git +cd kubectl-cnpg +makepkg -si +``` + +Or use your favourite AUR-helper, for example [paru](https://github.com/Morganamilo/paru): + +```sh +paru -S kubectl-cnpg +``` + ### Using Krew If you already have [Krew](https://krew.sigs.k8s.io/) installed, you can simply From f115e4816689a5b5817e29635129fb8edd79e233 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 7 Jan 2025 10:19:28 +0100 Subject: [PATCH 290/836] chore: fix spell checker (#6526) Signed-off-by: Leonardo Cecchi --- .wordlist-en-custom.txt | 2 ++ docs/src/kubectl-plugin.md | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 16a65fde9c..bb773940fd 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -2,6 +2,7 @@ AES API's APIs ARMv +AUR AZ AZs AcolumnName @@ -1026,6 +1027,7 @@ ownerMetadata ownerReference packagemanifests parseable +paru passfile passwd passwordSecret diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 2637b250a3..485e1d9aed 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -90,7 +90,7 @@ cd kubectl-cnpg makepkg -si ``` -Or use your favourite AUR-helper, for example 
[paru](https://github.com/Morganamilo/paru): +Or use your favorite AUR-helper, for example [paru](https://github.com/Morganamilo/paru): ```sh paru -S kubectl-cnpg From 1f115523d03f77d5b1e3e276039b534636bff867 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 7 Jan 2025 10:40:18 +0100 Subject: [PATCH 291/836] chore(doc): improve compatibility pkg documentation (#6495) The `SetCoredumpFilter` for Darwin OS had an incorrect reference to Windows. Also, take the chance to add the doc.go file Signed-off-by: Armando Ruocco --- pkg/system/compatibility/darwin.go | 3 +-- pkg/system/compatibility/doc.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 pkg/system/compatibility/doc.go diff --git a/pkg/system/compatibility/darwin.go b/pkg/system/compatibility/darwin.go index 83efd17ac3..64c76e9e24 100644 --- a/pkg/system/compatibility/darwin.go +++ b/pkg/system/compatibility/darwin.go @@ -17,10 +17,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package compatibility provides a layer to cross-compile with other OS than Linux package compatibility -// SetCoredumpFilter for Windows compatibility +// SetCoredumpFilter for darwin compatibility func SetCoredumpFilter(_ string) error { return nil } diff --git a/pkg/system/compatibility/doc.go b/pkg/system/compatibility/doc.go new file mode 100644 index 0000000000..5777cdce6d --- /dev/null +++ b/pkg/system/compatibility/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package compatibility provides a layer to cross-compile with other OS than Linux +package compatibility From cc91f9380f2f787955d2b0d482c70ec8d5773608 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 7 Jan 2025 10:49:22 +0100 Subject: [PATCH 292/836] chore: introduce `stale` GitHub action (#6525) Closes #6524 Signed-off-by: Gabriele Bartolini --- .github/workflows/stale.yml | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000..6e3ae0010c --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,23 @@ +# See https://github.com/marketplace/actions/close-stale-issues +name: Close inactive issues +on: + schedule: + - cron: "30 1 * * *" + +jobs: + close-issues: + runs-on: ubuntu-latest + permissions: + issues: write + #pull-requests: write + steps: + - uses: actions/stale@v5 + with: + days-before-issue-stale: 60 + days-before-issue-close: 14 + stale-issue-message: "This issue is stale because it has been open for 30 days with no activity." + close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." 
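+          # Note: per the documented actions/stale semantics, a value of -1
+          # disables the corresponding timer, so the two settings below opt
+          # pull requests out of stale handling entirely.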
+ days-before-pr-stale: -1 + days-before-pr-close: -1 + # Comment next line before going to production + debug-only: true From b43d7738ee6425420b4bc404fda0fd7f64d01898 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 7 Jan 2025 11:31:25 +0100 Subject: [PATCH 293/836] ci: update stale github action (#6529) Signed-off-by: Leonardo Cecchi --- .github/workflows/{stale.yml => close-inactive-issues.yml} | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) rename .github/workflows/{stale.yml => close-inactive-issues.yml} (93%) diff --git a/.github/workflows/stale.yml b/.github/workflows/close-inactive-issues.yml similarity index 93% rename from .github/workflows/stale.yml rename to .github/workflows/close-inactive-issues.yml index 6e3ae0010c..9200682e65 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/close-inactive-issues.yml @@ -1,6 +1,7 @@ # See https://github.com/marketplace/actions/close-stale-issues name: Close inactive issues on: + workflow_dispatch: schedule: - cron: "30 1 * * *" @@ -11,7 +12,7 @@ jobs: issues: write #pull-requests: write steps: - - uses: actions/stale@v5 + - uses: actions/stale@v9 with: days-before-issue-stale: 60 days-before-issue-close: 14 From b336f16314f2bd0c994beb48f089d64c8a9205b3 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 7 Jan 2025 11:53:04 +0100 Subject: [PATCH 294/836] chore(ci): remove debug only flag from stale github action (#6531) Signed-off-by: Leonardo Cecchi --- .github/workflows/close-inactive-issues.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml index 9200682e65..df969a7bb4 100644 --- a/.github/workflows/close-inactive-issues.yml +++ b/.github/workflows/close-inactive-issues.yml @@ -20,5 +20,3 @@ jobs: close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." days-before-pr-stale: -1 days-before-pr-close: -1 - # Comment next line before going to production - debug-only: true From 65d27ffcf17c16a0e1e83caa626879c634cc46b2 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 7 Jan 2025 14:01:36 +0100 Subject: [PATCH 295/836] ci(workflow): set right comment in stale issues (#6533) Signed-off-by: Gabriele Bartolini --- .github/workflows/close-inactive-issues.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml index df969a7bb4..3ee8af2c20 100644 --- a/.github/workflows/close-inactive-issues.yml +++ b/.github/workflows/close-inactive-issues.yml @@ -16,7 +16,8 @@ jobs: with: days-before-issue-stale: 60 days-before-issue-close: 14 - stale-issue-message: "This issue is stale because it has been open for 30 days with no activity." + stale-issue-message: "This issue is stale because it has been open for 60 days with no activity." close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." 
days-before-pr-stale: -1 days-before-pr-close: -1 + ascending: true From 14427a6bee09ff9be9ae3f54673e22361301c6b0 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Wed, 8 Jan 2025 10:35:32 +0100 Subject: [PATCH 296/836] change color to green if replication slots are enabled (#6534) Fix #6459 Signed-off-by: Pierrick --- internal/cmd/plugin/status/status.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index c27018f386..4d07622063 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -655,7 +655,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatus(verbosity int) { } if fullStatus.areReplicationSlotsEnabled() { - fmt.Println(aurora.Yellow("Replication Slots Enabled").String()) + fmt.Println(aurora.Green("Replication Slots Enabled").String()) } status := tabby.New() From ec6a00eec201bdfb4c41b5e46ed2d7ff80732a5d Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Wed, 8 Jan 2025 16:07:00 +0100 Subject: [PATCH 297/836] fix: restore functionality of promote plugin command (#6476) This pull request addresses an issue introduced in the latest plugin release, which rendered the `promote` command ineffective. The patch restores the command functionality and adds a unit test to prevent the issue from happening again. Closes #6475 ## Release notes Resolved an issue causing the `promote` plugin command not to work. Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Co-authored-by: Armando Ruocco Co-authored-by: Leonardo Cecchi Co-authored-by: Gabriele Quaresima --- internal/cmd/plugin/promote/cmd.go | 2 +- internal/cmd/plugin/promote/promote.go | 36 +++---- internal/cmd/plugin/promote/promote_test.go | 76 +++++++++++++ internal/cmd/plugin/promote/suite_test.go | 30 ++++++ internal/cmd/plugin/restart/restart.go | 9 +- internal/controller/cluster_status.go | 8 +- .../controller/instance_controller.go | 14 ++- pkg/resources/status/phase.go | 101 ------------------ pkg/resources/status/transactions.go | 58 ++++++++++ pkg/resources/status/update.go | 20 +++- 10 files changed, 220 insertions(+), 134 deletions(-) create mode 100644 internal/cmd/plugin/promote/promote_test.go create mode 100644 internal/cmd/plugin/promote/suite_test.go delete mode 100644 pkg/resources/status/phase.go create mode 100644 pkg/resources/status/transactions.go diff --git a/internal/cmd/plugin/promote/cmd.go b/internal/cmd/plugin/promote/cmd.go index 111c4291b0..c8e0db5bdc 100644 --- a/internal/cmd/plugin/promote/cmd.go +++ b/internal/cmd/plugin/promote/cmd.go @@ -40,7 +40,7 @@ func NewCmd() *cobra.Command { if _, err := strconv.Atoi(args[1]); err == nil { node = fmt.Sprintf("%s-%s", clusterName, node) } - return Promote(ctx, clusterName, node) + return Promote(ctx, plugin.Client, plugin.Namespace, clusterName, node) }, } diff --git a/internal/cmd/plugin/promote/promote.go b/internal/cmd/plugin/promote/promote.go index daba43bf78..a2e160cb37 100644 --- a/internal/cmd/plugin/promote/promote.go +++ b/internal/cmd/plugin/promote/promote.go @@ -26,18 +26,19 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" ) -// Promote command implementation -func Promote(ctx context.Context, clusterName string, serverName string) error { +// 
Promote promotes an instance in a cluster +func Promote(ctx context.Context, cli client.Client, + namespace, clusterName, serverName string, +) error { var cluster apiv1.Cluster // Get the Cluster object - err := plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, &cluster) + err := cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, &cluster) if err != nil { - return fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, plugin.Namespace, err) + return fmt.Errorf("cluster %s not found in namespace %s: %w", clusterName, namespace, err) } // If server name is equal to target primary, there is no need to promote @@ -49,22 +50,21 @@ func Promote(ctx context.Context, clusterName string, serverName string) error { // Check if the Pod exist var pod v1.Pod - err = plugin.Client.Get(ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: serverName}, &pod) + err = cli.Get(ctx, client.ObjectKey{Namespace: namespace, Name: serverName}, &pod) if err != nil { - return fmt.Errorf("new primary node %s not found in namespace %s: %w", serverName, plugin.Namespace, err) + return fmt.Errorf("new primary node %s not found in namespace %s: %w", serverName, namespace, err) } - // The Pod exists, let's update status fields - origCluster := cluster.DeepCopy() - cluster.Status.TargetPrimary = serverName - cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp() - if err := status.RegisterPhaseWithOrigCluster( - ctx, - plugin.Client, - &cluster, - origCluster, - apiv1.PhaseSwitchover, - fmt.Sprintf("Switching over to %v", serverName), + // The Pod exists, let's update the cluster's status with the new target primary + reconcileTargetPrimaryFunc := func(cluster *apiv1.Cluster) { + cluster.Status.TargetPrimary = serverName + cluster.Status.TargetPrimaryTimestamp = pgTime.GetCurrentTimestamp() + cluster.Status.Phase = apiv1.PhaseSwitchover + cluster.Status.PhaseReason = fmt.Sprintf("Switching over to %v", serverName) + } + if err := status.PatchWithOptimisticLock(ctx, cli, &cluster, + reconcileTargetPrimaryFunc, + status.SetClusterReadyConditionTX, ); err != nil { return err } diff --git a/internal/cmd/plugin/promote/promote_test.go b/internal/cmd/plugin/promote/promote_test.go new file mode 100644 index 0000000000..46ba5ff1a6 --- /dev/null +++ b/internal/cmd/plugin/promote/promote_test.go @@ -0,0 +1,76 @@ +package promote + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + k8client "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("promote subcommand tests", func() { + var client k8client.Client + const namespace = "theNamespace" + BeforeEach(func() { + cluster1 := apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: namespace, + }, + Spec: apiv1.ClusterSpec{}, + Status: apiv1.ClusterStatus{ + CurrentPrimary: "cluster1-1", + TargetPrimary: "cluster1-1", + Phase: apiv1.PhaseHealthy, + Conditions: []metav1.Condition{ + { + Type: string(apiv1.ConditionClusterReady), + Status: metav1.ConditionTrue, + Reason: string(apiv1.ClusterReady), + Message: "Cluster is Ready", + }, + }, + }, + } + newPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1-2", + Namespace: namespace, + }, + } + client = fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()). + WithObjects(&cluster1, &newPod).WithStatusSubresource(&cluster1).Build() + }) + + It("correctly sets the target primary and the phase if the target pod is present", func(ctx SpecContext) { + Expect(Promote(ctx, client, namespace, "cluster1", "cluster1-2")). + To(Succeed()) + var cl apiv1.Cluster + Expect(client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: "cluster1"}, &cl)). + To(Succeed()) + Expect(cl.Status.TargetPrimary).To(Equal("cluster1-2")) + Expect(cl.Status.Phase).To(Equal(apiv1.PhaseSwitchover)) + Expect(cl.Status.PhaseReason).To(Equal("Switching over to cluster1-2")) + Expect(meta.IsStatusConditionTrue(cl.Status.Conditions, string(apiv1.ConditionClusterReady))). + To(BeFalse()) + }) + + It("ignores the promotion if the target pod is missing", func(ctx SpecContext) { + err := Promote(ctx, client, namespace, "cluster1", "cluster1-missingPod") + Expect(err).To(HaveOccurred()) + var cl apiv1.Cluster + Expect(client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: "cluster1"}, &cl)). + To(Succeed()) + Expect(cl.Status.TargetPrimary).To(Equal("cluster1-1")) + Expect(cl.Status.Phase).To(Equal(apiv1.PhaseHealthy)) + Expect(meta.IsStatusConditionTrue(cl.Status.Conditions, string(apiv1.ConditionClusterReady))). + To(BeTrue()) + }) +}) diff --git a/internal/cmd/plugin/promote/suite_test.go b/internal/cmd/plugin/promote/suite_test.go new file mode 100644 index 0000000000..3c0e363370 --- /dev/null +++ b/internal/cmd/plugin/promote/suite_test.go @@ -0,0 +1,30 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package promote + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestPlugin(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Promote plugin Suite") +} diff --git a/internal/cmd/plugin/restart/restart.go b/internal/cmd/plugin/restart/restart.go index ba2484546a..4b863e426e 100644 --- a/internal/cmd/plugin/restart/restart.go +++ b/internal/cmd/plugin/restart/restart.go @@ -66,17 +66,14 @@ func instanceRestart(ctx context.Context, clusterName, node string) error { if err != nil { return err } - originalCluster := cluster.DeepCopy() if cluster.Status.CurrentPrimary == node { - cluster.ManagedFields = nil - if err := status.RegisterPhaseWithOrigCluster( + if err := status.PatchWithOptimisticLock( ctx, plugin.Client, &cluster, - originalCluster, - apiv1.PhaseInplacePrimaryRestart, - "Requested by the user", + status.SetPhaseTX(apiv1.PhaseInplacePrimaryRestart, "Requested by the user"), + status.SetClusterReadyConditionTX, ); err != nil { return fmt.Errorf("while requesting restart on primary POD for cluster %v: %w", clusterName, err) } diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index 4f5a2e673a..a092f955c2 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -732,7 +732,13 @@ func (r *ClusterReconciler) RegisterPhase(ctx context.Context, phase string, reason string, ) error { - return status.RegisterPhase(ctx, r.Client, cluster, phase, reason) + return status.PatchWithOptimisticLock( + ctx, + r.Client, + cluster, + status.SetPhaseTX(phase, reason), + status.SetClusterReadyConditionTX, + ) } // updateClusterStatusThatRequiresInstancesState updates all the cluster status fields that require the instances status diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index dda32b0920..d6900697c1 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -327,12 +327,12 @@ func (r *InstanceReconciler) restartPrimaryInplaceIfRequested( return true, err } - return true, clusterstatus.RegisterPhase( + return true, clusterstatus.PatchWithOptimisticLock( ctx, r.client, cluster, - apiv1.PhaseHealthy, - "Primary instance restarted in-place", + clusterstatus.SetPhaseTX(apiv1.PhaseHealthy, "Primary instance restarted in-place"), + clusterstatus.SetClusterReadyConditionTX, ) } return false, nil @@ -1065,7 +1065,13 @@ func (r *InstanceReconciler) processConfigReloadAndManageRestart(ctx context.Con return nil } - return clusterstatus.RegisterPhase(ctx, r.client, cluster, phase, phaseReason) + return clusterstatus.PatchWithOptimisticLock( + ctx, + r.client, + cluster, + clusterstatus.SetPhaseTX(phase, phaseReason), + clusterstatus.SetClusterReadyConditionTX, + ) } // refreshCertificateFilesFromSecret receive a secret and rewrite the file diff --git a/pkg/resources/status/phase.go b/pkg/resources/status/phase.go deleted file mode 100644 index bac80933c5..0000000000 --- a/pkg/resources/status/phase.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "context" - "fmt" - - "github.com/cloudnative-pg/machinery/pkg/log" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" -) - -// RegisterPhase update phase in the status cluster with the -// proper reason -func RegisterPhase( - ctx context.Context, - cli client.Client, - cluster *apiv1.Cluster, - phase string, - reason string, -) error { - existingCluster := cluster.DeepCopy() - return RegisterPhaseWithOrigCluster(ctx, cli, cluster, existingCluster, phase, reason) -} - -// RegisterPhaseWithOrigCluster update phase in the status cluster with the -// proper reason, it also receives an origCluster to preserve other modifications done to the status -func RegisterPhaseWithOrigCluster( - ctx context.Context, - cli client.Client, - modifiedCluster *apiv1.Cluster, - origCluster *apiv1.Cluster, - phase string, - reason string, -) error { - if err := PatchWithOptimisticLock( - ctx, - cli, - modifiedCluster, - func(cluster *apiv1.Cluster) { - if cluster.Status.Conditions == nil { - cluster.Status.Conditions = []metav1.Condition{} - } - - cluster.Status.Phase = phase - cluster.Status.PhaseReason = reason - - condition := metav1.Condition{ - Type: string(apiv1.ConditionClusterReady), - Status: metav1.ConditionFalse, - Reason: string(apiv1.ClusterIsNotReady), - Message: "Cluster Is Not Ready", - } - - if cluster.Status.Phase == apiv1.PhaseHealthy { - condition = metav1.Condition{ - Type: string(apiv1.ConditionClusterReady), - Status: metav1.ConditionTrue, - Reason: string(apiv1.ClusterReady), - Message: "Cluster is Ready", - } - } - - meta.SetStatusCondition(&cluster.Status.Conditions, condition) - }, - ); err != nil { - return fmt.Errorf("while updating phase: %w", err) - } - - contextLogger := log.FromContext(ctx) - - modifiedPhase := modifiedCluster.Status.Phase - origPhase := origCluster.Status.Phase - - if modifiedPhase != apiv1.PhaseHealthy && origPhase == apiv1.PhaseHealthy { - contextLogger.Info("Cluster is not healthy") - } - if modifiedPhase == apiv1.PhaseHealthy && origPhase != apiv1.PhaseHealthy { - contextLogger.Info("Cluster is healthy") - } - - return nil -} diff --git a/pkg/resources/status/transactions.go b/pkg/resources/status/transactions.go new file mode 100644 index 0000000000..ca04d437a9 --- /dev/null +++ b/pkg/resources/status/transactions.go @@ -0,0 +1,58 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package status + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// SetClusterReadyConditionTX updates the cluster's readiness condition +// according to the cluster phase +func SetClusterReadyConditionTX(cluster *apiv1.Cluster) { + if cluster.Status.Conditions == nil { + cluster.Status.Conditions = []metav1.Condition{} + } + + condition := metav1.Condition{ + Type: string(apiv1.ConditionClusterReady), + Status: metav1.ConditionFalse, + Reason: string(apiv1.ClusterIsNotReady), + Message: "Cluster Is Not Ready", + } + + if cluster.Status.Phase == apiv1.PhaseHealthy { + condition = metav1.Condition{ + Type: string(apiv1.ConditionClusterReady), + Status: metav1.ConditionTrue, + Reason: string(apiv1.ClusterReady), + Message: "Cluster is Ready", + } + } + + meta.SetStatusCondition(&cluster.Status.Conditions, condition) +} + +// SetPhaseTX is a transaction that sets the cluster phase and reason +func SetPhaseTX(phase string, reason string) func(cluster *apiv1.Cluster) { + return func(cluster *apiv1.Cluster) { + cluster.Status.Phase = phase + cluster.Status.PhaseReason = reason + } +} diff --git a/pkg/resources/status/update.go b/pkg/resources/status/update.go index 0543292d9e..3f916cd0e1 100644 --- a/pkg/resources/status/update.go +++ b/pkg/resources/status/update.go @@ -20,6 +20,7 @@ import ( "context" "fmt" + "github.com/cloudnative-pg/machinery/pkg/log" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,19 +29,23 @@ import ( ) // PatchWithOptimisticLock updates the status of the cluster using the passed -// transaction function. +// transaction functions (in the given order). 
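+// A minimal usage sketch (hypothetical caller, shown for illustration only;
+// the transaction helpers referenced here live in transactions.go):
+//
+//	err := status.PatchWithOptimisticLock(ctx, cli, cluster,
+//		status.SetPhaseTX(apiv1.PhaseHealthy, "primary restarted"),
+//		status.SetClusterReadyConditionTX,
+//	)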
// Important: after successfully updating the status, this // function refreshes it into the passed cluster func PatchWithOptimisticLock( ctx context.Context, c client.Client, cluster *apiv1.Cluster, - tx func(cluster *apiv1.Cluster), + txs ...func(cluster *apiv1.Cluster), ) error { if cluster == nil { return nil } + contextLogger := log.FromContext(ctx) + + origCluster := cluster.DeepCopy() + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var currentCluster apiv1.Cluster if err := c.Get(ctx, client.ObjectKeyFromObject(cluster), ¤tCluster); err != nil { @@ -48,7 +53,9 @@ func PatchWithOptimisticLock( } updatedCluster := currentCluster.DeepCopy() - tx(updatedCluster) + for _, tx := range txs { + tx(updatedCluster) + } if equality.Semantic.DeepEqual(currentCluster.Status, updatedCluster.Status) { return nil @@ -69,5 +76,12 @@ func PatchWithOptimisticLock( return fmt.Errorf("while updating conditions: %w", err) } + if cluster.Status.Phase != apiv1.PhaseHealthy && origCluster.Status.Phase == apiv1.PhaseHealthy { + contextLogger.Info("Cluster has become unhealthy") + } + if cluster.Status.Phase == apiv1.PhaseHealthy && origCluster.Status.Phase != apiv1.PhaseHealthy { + contextLogger.Info("Cluster has become healthy") + } + return nil } From 70bf6aad6fed4ac078c9ba87cfe64b89b4b64d86 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 8 Jan 2025 16:57:11 +0100 Subject: [PATCH 298/836] chore: correct typos in the `resource/status` pkg (#6543) Signed-off-by: Armando Ruocco --- pkg/resources/status/conditions.go | 2 +- pkg/resources/status/{update.go => patch.go} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename pkg/resources/status/{update.go => patch.go} (97%) diff --git a/pkg/resources/status/conditions.go b/pkg/resources/status/conditions.go index 54b09a056b..c8b814aded 100644 --- a/pkg/resources/status/conditions.go +++ b/pkg/resources/status/conditions.go @@ -74,7 +74,7 @@ func PatchConditionsWithOptimisticLock( return nil }); err != nil { - return fmt.Errorf("while updating conditions: %w", err) + return fmt.Errorf("while patching conditions: %w", err) } return nil diff --git a/pkg/resources/status/update.go b/pkg/resources/status/patch.go similarity index 97% rename from pkg/resources/status/update.go rename to pkg/resources/status/patch.go index 3f916cd0e1..3613a46f43 100644 --- a/pkg/resources/status/update.go +++ b/pkg/resources/status/patch.go @@ -73,7 +73,7 @@ func PatchWithOptimisticLock( return nil }); err != nil { - return fmt.Errorf("while updating conditions: %w", err) + return fmt.Errorf("while patching status: %w", err) } if cluster.Status.Phase != apiv1.PhaseHealthy && origCluster.Status.Phase == apiv1.PhaseHealthy { From 651b4cf749086b94d5781e8d9e2f73e68bbef0f7 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 9 Jan 2025 12:02:14 +0100 Subject: [PATCH 299/836] refactor: move webhooks in a dedicated package (#6472) This refactoring enhances code organization by moving all webhook-related functionalities into a separate package, aligning with the `go/v4` Kubebuilder project layout. 
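Under the go/v4 layout, the admission logic moves off the API types and into
custom defaulters/validators registered from the dedicated package. A minimal
sketch of that registration pattern, assuming illustrative type names (the
exact names in internal/webhook/v1 may differ):

    func SetupBackupWebhookWithManager(mgr ctrl.Manager) error {
        return ctrl.NewWebhookManagedBy(mgr).
            For(&apiv1.Backup{}).
            WithDefaulter(&BackupCustomDefaulter{}).
            WithValidator(&BackupCustomValidator{}).
            Complete()
    }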
Closes #6471 Signed-off-by: Marco Nenciarini --- api/v1/backup_webhook.go | 120 - api/v1/cluster_defaults.go | 263 ++ api/v1/cluster_defaults_test.go | 316 ++ api/v1/cluster_funcs.go | 9 +- api/v1/cluster_funcs_test.go | 12 +- api/v1/cluster_types.go | 8 +- api/v1/pooler_webhook.go | 223 -- api/v1/scheduledbackup_funcs_test.go | 30 - api/v1/scheduledbackup_webhook.go | 130 - api/v1/scheduledbackup_webhook_test.go | 90 - internal/cmd/manager/controller/controller.go | 11 +- internal/webhook/v1/backup_webhook.go | 172 + .../webhook}/v1/backup_webhook_test.go | 40 +- .../webhook}/v1/cluster_webhook.go | 661 ++-- .../webhook}/v1/cluster_webhook_test.go | 3118 ++++++++--------- internal/webhook/v1/doc.go | 18 + internal/webhook/v1/pooler_webhook.go | 254 ++ .../webhook}/v1/pooler_webhook_test.go | 83 +- .../webhook/v1/scheduledbackup_webhook.go | 190 + .../v1/scheduledbackup_webhook_test.go | 126 + internal/webhook/v1/suite_test.go | 30 + tests/e2e/asserts_test.go | 12 +- tests/e2e/webhook_test.go | 7 +- 23 files changed, 3214 insertions(+), 2709 deletions(-) delete mode 100644 api/v1/backup_webhook.go create mode 100644 api/v1/cluster_defaults.go create mode 100644 api/v1/cluster_defaults_test.go delete mode 100644 api/v1/pooler_webhook.go delete mode 100644 api/v1/scheduledbackup_webhook.go delete mode 100644 api/v1/scheduledbackup_webhook_test.go create mode 100644 internal/webhook/v1/backup_webhook.go rename {api => internal/webhook}/v1/backup_webhook_test.go (66%) rename {api => internal/webhook}/v1/cluster_webhook.go (77%) rename {api => internal/webhook}/v1/cluster_webhook_test.go (55%) create mode 100644 internal/webhook/v1/doc.go create mode 100644 internal/webhook/v1/pooler_webhook.go rename {api => internal/webhook}/v1/pooler_webhook_test.go (52%) create mode 100644 internal/webhook/v1/scheduledbackup_webhook.go create mode 100644 internal/webhook/v1/scheduledbackup_webhook_test.go create mode 100644 internal/webhook/v1/suite_test.go diff --git a/api/v1/backup_webhook.go b/api/v1/backup_webhook.go deleted file mode 100644 index aec1fd54f9..0000000000 --- a/api/v1/backup_webhook.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "github.com/cloudnative-pg/machinery/pkg/log" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// backupLog is for logging in this package. -var backupLog = log.WithName("backup-resource").WithValues("version", "v1") - -// SetupWebhookWithManager setup the webhook inside the controller manager -func (r *Backup) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). 
- Complete() -} - -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-backup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,verbs=create;update,versions=v1,name=mbackup.cnpg.io,sideEffects=None - -var _ webhook.Defaulter = &Backup{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type -func (r *Backup) Default() { - backupLog.Info("default", "name", r.Name, "namespace", r.Namespace) -} - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-backup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,versions=v1,name=vbackup.cnpg.io,sideEffects=None - -var _ webhook.Validator = &Backup{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *Backup) ValidateCreate() (admission.Warnings, error) { - backupLog.Info("validate create", "name", r.Name, "namespace", r.Namespace) - allErrs := r.validate() - if len(allErrs) == 0 { - return nil, nil - } - - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"}, - r.Name, allErrs) -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *Backup) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { - backupLog.Info("validate update", "name", r.Name, "namespace", r.Namespace) - return r.ValidateCreate() -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *Backup) ValidateDelete() (admission.Warnings, error) { - backupLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace) - return nil, nil -} - -func (r *Backup) validate() field.ErrorList { - var result field.ErrorList - - if r.Spec.Method == BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { - result = append(result, field.Invalid( - field.NewPath("spec", "method"), - r.Spec.Method, - "Cannot use volumeSnapshot backup method due to missing "+ - "VolumeSnapshot CRD. 
If you installed the CRD after having "+ - "started the operator, please restart it to enable "+ - "VolumeSnapshot support", - )) - } - - if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.Online != nil { - result = append(result, field.Invalid( - field.NewPath("spec", "online"), - r.Spec.Online, - "Online parameter can be specified only if the backup method is volumeSnapshot", - )) - } - - if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil { - result = append(result, field.Invalid( - field.NewPath("spec", "onlineConfiguration"), - r.Spec.OnlineConfiguration, - "OnlineConfiguration parameter can be specified only if the backup method is volumeSnapshot", - )) - } - - if r.Spec.Method == BackupMethodPlugin && r.Spec.PluginConfiguration.IsEmpty() { - result = append(result, field.Invalid( - field.NewPath("spec", "pluginConfiguration"), - r.Spec.OnlineConfiguration, - "cannot be empty when the backup method is plugin", - )) - } - - return result -} diff --git a/api/v1/cluster_defaults.go b/api/v1/cluster_defaults.go new file mode 100644 index 0000000000..e368655cb8 --- /dev/null +++ b/api/v1/cluster_defaults.go @@ -0,0 +1,263 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + "k8s.io/utils/ptr" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +const ( + // DefaultMonitoringKey is the key that should be used in the default metrics configmap to store the queries + DefaultMonitoringKey = "queries" + // DefaultMonitoringConfigMapName is the name of the target configmap with the default monitoring queries, + // if configured + DefaultMonitoringConfigMapName = "cnpg-default-monitoring" + // DefaultMonitoringSecretName is the name of the target secret with the default monitoring queries, + // if configured + DefaultMonitoringSecretName = DefaultMonitoringConfigMapName + // DefaultApplicationDatabaseName is the name of application database if not specified + DefaultApplicationDatabaseName = "app" + // DefaultApplicationUserName is the name of application database owner if not specified + DefaultApplicationUserName = DefaultApplicationDatabaseName +) + +// Default apply the defaults to undefined values in a Cluster preserving the user settings +func (r *Cluster) Default() { + r.setDefaults(true) +} + +// SetDefaults apply the defaults to undefined values in a Cluster +func (r *Cluster) SetDefaults() { + r.setDefaults(false) +} + +func (r *Cluster) setDefaults(preserveUserSettings bool) { + // Defaulting the image name if not specified + if r.Spec.ImageName == "" && r.Spec.ImageCatalogRef == nil { + r.Spec.ImageName = configuration.Current.PostgresImageName + } + + // Defaulting the bootstrap method if not specified + if r.Spec.Bootstrap == nil { + r.Spec.Bootstrap = 
&BootstrapConfiguration{} + } + + // Defaulting initDB if no other bootstrap method was passed + switch { + case r.Spec.Bootstrap.Recovery != nil: + r.defaultRecovery() + case r.Spec.Bootstrap.PgBaseBackup != nil: + r.defaultPgBaseBackup() + default: + r.defaultInitDB() + } + + // Defaulting the pod anti-affinity type if podAntiAffinity + if (r.Spec.Affinity.EnablePodAntiAffinity == nil || *r.Spec.Affinity.EnablePodAntiAffinity) && + r.Spec.Affinity.PodAntiAffinityType == "" { + r.Spec.Affinity.PodAntiAffinityType = PodAntiAffinityTypePreferred + } + + if r.Spec.Backup != nil && r.Spec.Backup.Target == "" { + r.Spec.Backup.Target = DefaultBackupTarget + } + + psqlVersion, err := r.GetPostgresqlVersion() + if err == nil { + // The validation error will be already raised by the + // validateImageName function + info := postgres.ConfigurationInfo{ + Settings: postgres.CnpgConfigurationSettings, + Version: psqlVersion, + UserSettings: r.Spec.PostgresConfiguration.Parameters, + IsReplicaCluster: r.IsReplica(), + PreserveFixedSettingsFromUser: preserveUserSettings, + IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta), + IsAlterSystemEnabled: r.Spec.PostgresConfiguration.EnableAlterSystem, + } + sanitizedParameters := postgres.CreatePostgresqlConfiguration(info).GetConfigurationParameters() + r.Spec.PostgresConfiguration.Parameters = sanitizedParameters + } + + if r.Spec.LogLevel == "" { + r.Spec.LogLevel = log.InfoLevelString + } + + // we inject the defaultMonitoringQueries if the MonitoringQueriesConfigmap parameter is not empty + // and defaultQueries not disabled on cluster crd + if !r.Spec.Monitoring.AreDefaultQueriesDisabled() { + r.defaultMonitoringQueries(configuration.Current) + } + + // If the ReplicationSlots or HighAvailability stanzas are nil, we create them and enable slots + if r.Spec.ReplicationSlots == nil { + r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{} + } + if r.Spec.ReplicationSlots.HighAvailability == nil { + r.Spec.ReplicationSlots.HighAvailability = &ReplicationSlotsHAConfiguration{ + Enabled: ptr.To(true), + SlotPrefix: "_cnpg_", + } + } + if r.Spec.ReplicationSlots.SynchronizeReplicas == nil { + r.Spec.ReplicationSlots.SynchronizeReplicas = &SynchronizeReplicasConfiguration{ + Enabled: ptr.To(true), + } + } + + if len(r.Spec.Tablespaces) > 0 { + r.defaultTablespaces() + } + + r.setDefaultPlugins(configuration.Current) +} + +func (r *Cluster) setDefaultPlugins(config *configuration.Data) { + // Add the list of pre-defined plugins + foundPlugins := stringset.New() + for _, plugin := range r.Spec.Plugins { + foundPlugins.Put(plugin.Name) + } + + for _, pluginName := range config.GetIncludePlugins() { + if !foundPlugins.Has(pluginName) { + r.Spec.Plugins = append(r.Spec.Plugins, PluginConfiguration{ + Name: pluginName, + Enabled: ptr.To(true), + }) + } + } +} + +// defaultTablespaces adds the tablespace owner where the +// user didn't specify it +func (r *Cluster) defaultTablespaces() { + defaultOwner := r.GetApplicationDatabaseOwner() + if len(defaultOwner) == 0 { + defaultOwner = "postgres" + } + + for name, tablespaceConfiguration := range r.Spec.Tablespaces { + if len(tablespaceConfiguration.Owner.Name) == 0 { + tablespaceConfiguration.Owner.Name = defaultOwner + } + r.Spec.Tablespaces[name] = tablespaceConfiguration + } +} + +// defaultMonitoringQueries adds the default monitoring queries configMap +// if not already present in CustomQueriesConfigMap +func (r *Cluster) defaultMonitoringQueries(config *configuration.Data) { + if 
r.Spec.Monitoring == nil { + r.Spec.Monitoring = &MonitoringConfiguration{} + } + + if config.MonitoringQueriesConfigmap != "" { + var defaultConfigMapQueriesAlreadyPresent bool + // We check if the default queries are already inserted in the monitoring configuration + for _, monitoringConfigMap := range r.Spec.Monitoring.CustomQueriesConfigMap { + if monitoringConfigMap.Name == DefaultMonitoringConfigMapName { + defaultConfigMapQueriesAlreadyPresent = true + break + } + } + + // If the default queries are already present there is no need to re-add them. + // Please note that in this case that the default configMap could overwrite user existing queries + // depending on the order. This is an accepted behavior because the user willingly defined the order of his array + if !defaultConfigMapQueriesAlreadyPresent { + r.Spec.Monitoring.CustomQueriesConfigMap = append([]ConfigMapKeySelector{ + { + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: DefaultMonitoringKey, + }, + }, r.Spec.Monitoring.CustomQueriesConfigMap...) + } + } + + if config.MonitoringQueriesSecret != "" { + var defaultSecretQueriesAlreadyPresent bool + // we check if the default queries are already inserted in the monitoring configuration + for _, monitoringSecret := range r.Spec.Monitoring.CustomQueriesSecret { + if monitoringSecret.Name == DefaultMonitoringSecretName { + defaultSecretQueriesAlreadyPresent = true + break + } + } + + if !defaultSecretQueriesAlreadyPresent { + r.Spec.Monitoring.CustomQueriesSecret = append([]SecretKeySelector{ + { + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, + Key: DefaultMonitoringKey, + }, + }, r.Spec.Monitoring.CustomQueriesSecret...) + } + } +} + +// defaultInitDB enriches the initDB with defaults if not all the required arguments were passed +func (r *Cluster) defaultInitDB() { + if r.Spec.Bootstrap.InitDB == nil { + r.Spec.Bootstrap.InitDB = &BootstrapInitDB{ + Database: DefaultApplicationDatabaseName, + Owner: DefaultApplicationUserName, + } + } + + if r.Spec.Bootstrap.InitDB.Database == "" { + r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName + } + if r.Spec.Bootstrap.InitDB.Owner == "" { + r.Spec.Bootstrap.InitDB.Owner = r.Spec.Bootstrap.InitDB.Database + } + if r.Spec.Bootstrap.InitDB.Encoding == "" { + r.Spec.Bootstrap.InitDB.Encoding = "UTF8" + } + if r.Spec.Bootstrap.InitDB.LocaleCollate == "" { + r.Spec.Bootstrap.InitDB.LocaleCollate = "C" + } + if r.Spec.Bootstrap.InitDB.LocaleCType == "" { + r.Spec.Bootstrap.InitDB.LocaleCType = "C" + } +} + +// defaultRecovery enriches the recovery with defaults if not all the required arguments were passed +func (r *Cluster) defaultRecovery() { + if r.Spec.Bootstrap.Recovery.Database == "" { + r.Spec.Bootstrap.Recovery.Database = DefaultApplicationDatabaseName + } + if r.Spec.Bootstrap.Recovery.Owner == "" { + r.Spec.Bootstrap.Recovery.Owner = r.Spec.Bootstrap.Recovery.Database + } +} + +// defaultPgBaseBackup enriches the pg_basebackup with defaults if not all the required arguments were passed +func (r *Cluster) defaultPgBaseBackup() { + if r.Spec.Bootstrap.PgBaseBackup.Database == "" { + r.Spec.Bootstrap.PgBaseBackup.Database = DefaultApplicationDatabaseName + } + if r.Spec.Bootstrap.PgBaseBackup.Owner == "" { + r.Spec.Bootstrap.PgBaseBackup.Owner = r.Spec.Bootstrap.PgBaseBackup.Database + } +} diff --git a/api/v1/cluster_defaults_test.go b/api/v1/cluster_defaults_test.go new file mode 100644 index 0000000000..4ff95bb8ca --- /dev/null +++ 
b/api/v1/cluster_defaults_test.go @@ -0,0 +1,316 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/utils/ptr" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("cluster default configuration", func() { + It("defaults to creating an application database", func() { + cluster := Cluster{} + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app")) + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app")) + }) + + It("defaults the owner user with the database name", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + InitDB: &BootstrapInitDB{ + Database: "appdb", + }, + }, + }, + } + + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("appdb")) + }) + + It("defaults to create an application database if recovery is used", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + Recovery: &BootstrapRecovery{}, + }, + }, + } + cluster.Default() + Expect(cluster.ShouldRecoveryCreateApplicationDatabase()).Should(BeTrue()) + Expect(cluster.Spec.Bootstrap.Recovery.Database).ShouldNot(BeEmpty()) + Expect(cluster.Spec.Bootstrap.Recovery.Owner).ShouldNot(BeEmpty()) + Expect(cluster.Spec.Bootstrap.Recovery.Secret).Should(BeNil()) + }) + + It("defaults the owner user with the database name for recovery", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + Recovery: &BootstrapRecovery{ + Database: "appdb", + }, + }, + }, + } + + cluster.Default() + Expect(cluster.Spec.Bootstrap.Recovery.Owner).To(Equal("appdb")) + }) + + It("defaults to create an application database if pg_basebackup is used", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + PgBaseBackup: &BootstrapPgBaseBackup{}, + }, + }, + } + cluster.Default() + Expect(cluster.ShouldPgBaseBackupCreateApplicationDatabase()).Should(BeTrue()) + Expect(cluster.Spec.Bootstrap.PgBaseBackup.Database).ShouldNot(BeEmpty()) + Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).ShouldNot(BeEmpty()) + Expect(cluster.Spec.Bootstrap.PgBaseBackup.Secret).Should(BeNil()) + }) + + It("defaults the owner user with the database name for pg_basebackup", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + PgBaseBackup: &BootstrapPgBaseBackup{ + Database: "appdb", + }, + }, + }, + } + + cluster.Default() + Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).To(Equal("appdb")) + }) + + It("defaults the PostgreSQL configuration with parameters from the operator", func() { + cluster := Cluster{} + cluster.Default() + Expect(cluster.Spec.PostgresConfiguration.Parameters).ToNot(BeEmpty()) + }) + + It("defaults the anti-affinity", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Affinity: AffinityConfiguration{}, + }, + } + cluster.Default() + 
Expect(cluster.Spec.Affinity.PodAntiAffinityType).To(BeEquivalentTo(PodAntiAffinityTypePreferred)) + Expect(cluster.Spec.Affinity.EnablePodAntiAffinity).To(BeNil()) + }) + + It("should fill the image name if isn't already set", func() { + cluster := Cluster{} + cluster.Default() + Expect(cluster.Spec.ImageName).To(Equal(configuration.Current.PostgresImageName)) + }) + + It("shouldn't set the image name if already present", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + ImageName: "test:13", + }, + } + cluster.Default() + Expect(cluster.Spec.ImageName).To(Equal("test:13")) + }) + + It("should setup the application database name", func() { + cluster := Cluster{} + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app")) + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app")) + }) + + It("should set the owner name as the database name", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + InitDB: &BootstrapInitDB{ + Database: "test", + }, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("test")) + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("test")) + }) + + It("should not overwrite application database and owner settings", func() { + cluster := Cluster{ + Spec: ClusterSpec{ + Bootstrap: &BootstrapConfiguration{ + InitDB: &BootstrapInitDB{ + Database: "testdb", + Owner: "testuser", + }, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("testdb")) + Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("testuser")) + }) +}) + +var _ = Describe("Default monitoring queries", func() { + It("correctly set the default monitoring queries configmap and secret when none is already specified", func() { + cluster := &Cluster{} + cluster.defaultMonitoringQueries(&configuration.Data{ + MonitoringQueriesSecret: "test-secret", + MonitoringQueriesConfigmap: "test-configmap", + }) + Expect(cluster.Spec.Monitoring).NotTo(BeNil()) + Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) + Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap). + To(ContainElement(ConfigMapKeySelector{ + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: DefaultMonitoringKey, + })) + Expect(cluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) + Expect(cluster.Spec.Monitoring.CustomQueriesSecret). 
+ To(ContainElement(SecretKeySelector{ + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, + Key: DefaultMonitoringKey, + })) + }) + testCluster := &Cluster{Spec: ClusterSpec{Monitoring: &MonitoringConfiguration{ + CustomQueriesConfigMap: []ConfigMapKeySelector{ + { + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: "test2", + }, + }, + CustomQueriesSecret: []SecretKeySelector{ + { + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: "test3", + }, + }, + }}} + It("correctly set the default monitoring queries configmap when other metrics are already specified", func() { + modifiedCluster := testCluster.DeepCopy() + modifiedCluster.defaultMonitoringQueries(&configuration.Data{ + MonitoringQueriesConfigmap: "test-configmap", + }) + + Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). + To(ContainElement(ConfigMapKeySelector{ + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, + Key: "test2", + })) + + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). + To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesSecret)) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). + To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesConfigMap)) + }) + It("correctly set the default monitoring queries secret when other metrics are already specified", func() { + modifiedCluster := testCluster.DeepCopy() + modifiedCluster.defaultMonitoringQueries(&configuration.Data{ + MonitoringQueriesSecret: "test-secret", + }) + + Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). + To(ContainElement(SecretKeySelector{ + LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, + Key: "test3", + })) + + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). + To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesConfigMap)) + Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). 
+ To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesSecret)) + }) +}) + +var _ = Describe("setDefaultPlugins", func() { + It("adds pre-defined plugins if not already present", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Plugins: []PluginConfiguration{ + {Name: "existing-plugin", Enabled: ptr.To(true)}, + }, + }, + } + config := &configuration.Data{ + IncludePlugins: "predefined-plugin1,predefined-plugin2", + } + + cluster.setDefaultPlugins(config) + + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "existing-plugin", Enabled: ptr.To(true)})) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)})) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)})) + }) + + It("does not add pre-defined plugins if already present", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Plugins: []PluginConfiguration{ + {Name: "predefined-plugin1", Enabled: ptr.To(false)}, + }, + }, + } + config := &configuration.Data{ + IncludePlugins: "predefined-plugin1,predefined-plugin2", + } + + cluster.setDefaultPlugins(config) + + Expect(cluster.Spec.Plugins).To(HaveLen(2)) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(false)})) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)})) + }) + + It("handles empty plugin list gracefully", func() { + cluster := &Cluster{} + config := &configuration.Data{ + IncludePlugins: "predefined-plugin1", + } + + cluster.setDefaultPlugins(config) + + Expect(cluster.Spec.Plugins).To(HaveLen(1)) + Expect(cluster.Spec.Plugins).To( + ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)})) + }) +}) diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index a116aee695..132656c4db 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -182,6 +182,11 @@ func (r *SynchronizeReplicasConfiguration) GetEnabled() bool { return true } +// ValidateRegex returns all the errors that happened during the regex compilation +func (r *SynchronizeReplicasConfiguration) ValidateRegex() []error { + return r.compileRegex() +} + // IsExcludedByUser returns if a replication slot should not be reconciled on the replicas func (r *SynchronizeReplicasConfiguration) IsExcludedByUser(slotName string) (bool, error) { if r == nil { @@ -940,7 +945,7 @@ func (cluster *Cluster) ContainsTablespaces() bool { // user func (cluster Cluster) GetPostgresUID() int64 { if cluster.Spec.PostgresUID == 0 { - return defaultPostgresUID + return DefaultPostgresUID } return cluster.Spec.PostgresUID } @@ -949,7 +954,7 @@ func (cluster Cluster) GetPostgresUID() int64 { // user func (cluster Cluster) GetPostgresGID() int64 { if cluster.Spec.PostgresGID == 0 { - return defaultPostgresGID + return DefaultPostgresGID } return cluster.Spec.PostgresGID } diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index 34c67f363f..d0126362f5 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -1159,10 +1159,10 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { synchronizeReplicas = &SynchronizeReplicasConfiguration{} }) - Context("compileRegex", func() { + Context("CompileRegex", func() { It("should return no errors when SynchronizeReplicasConfiguration is nil", func() { synchronizeReplicas = nil - 
Expect(synchronizeReplicas.compileRegex()).To(BeEmpty()) + Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty()) }) Context("when SynchronizeReplicasConfiguration is not nil", func() { @@ -1171,7 +1171,7 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { }) It("should compile patterns without errors", func() { - Expect(synchronizeReplicas.compileRegex()).To(BeEmpty()) + Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty()) }) Context("when a pattern fails to compile", func() { @@ -1180,15 +1180,15 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { }) It("should return errors for the invalid pattern", func() { - errors := synchronizeReplicas.compileRegex() + errors := synchronizeReplicas.ValidateRegex() Expect(errors).To(HaveLen(1)) }) }) }) It("should return no errors on subsequent calls when compile is called multiple times", func() { - Expect(synchronizeReplicas.compileRegex()).To(BeEmpty()) - Expect(synchronizeReplicas.compileRegex()).To(BeEmpty()) + Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty()) + Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty()) }) }) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 57dae17184..cc14768848 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -87,11 +87,11 @@ const ( // streaming replication purposes StreamingReplicationUser = "streaming_replica" - // defaultPostgresUID is the default UID which is used by PostgreSQL - defaultPostgresUID = 26 + // DefaultPostgresUID is the default UID which is used by PostgreSQL + DefaultPostgresUID = 26 - // defaultPostgresGID is the default GID which is used by PostgreSQL - defaultPostgresGID = 26 + // DefaultPostgresGID is the default GID which is used by PostgreSQL + DefaultPostgresGID = 26 // PodAntiAffinityTypeRequired is the label for required anti-affinity type PodAntiAffinityTypeRequired = "required" diff --git a/api/v1/pooler_webhook.go b/api/v1/pooler_webhook.go deleted file mode 100644 index b86ac2622b..0000000000 --- a/api/v1/pooler_webhook.go +++ /dev/null @@ -1,223 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/cloudnative-pg/machinery/pkg/stringset" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" -) - -var ( - // poolerLog is for logging in this package. 
- poolerLog = log.WithName("pooler-resource").WithValues("version", "v1") - - // AllowedPgbouncerGenericConfigurationParameters is the list of allowed parameters for PgBouncer - AllowedPgbouncerGenericConfigurationParameters = stringset.From([]string{ - "application_name_add_host", - "autodb_idle_timeout", - "cancel_wait_timeout", - "client_idle_timeout", - "client_login_timeout", - "default_pool_size", - "disable_pqexec", - "dns_max_ttl", - "dns_nxdomain_ttl", - "idle_transaction_timeout", - "ignore_startup_parameters", - "listen_backlog", - "log_connections", - "log_disconnections", - "log_pooler_errors", - "log_stats", - "max_client_conn", - "max_db_connections", - "max_packet_size", - "max_prepared_statements", - "max_user_connections", - "min_pool_size", - "pkt_buf", - "query_timeout", - "query_wait_timeout", - "reserve_pool_size", - "reserve_pool_timeout", - "sbuf_loopcnt", - "server_check_delay", - "server_check_query", - "server_connect_timeout", - "server_fast_close", - "server_idle_timeout", - "server_lifetime", - "server_login_retry", - "server_reset_query", - "server_reset_query_always", - "server_round_robin", - "server_tls_ciphers", - "server_tls_protocols", - "stats_period", - "suspend_timeout", - "tcp_defer_accept", - "tcp_socket_buffer", - "tcp_keepalive", - "tcp_keepcnt", - "tcp_keepidle", - "tcp_keepintvl", - "tcp_user_timeout", - "track_extra_parameters", - "verbose", - }) -) - -// SetupWebhookWithManager setup the webhook inside the controller manager -func (r *Pooler) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-pooler,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=poolers,versions=v1,name=vpooler.cnpg.io,sideEffects=None - -var _ webhook.Validator = &Pooler{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *Pooler) ValidateCreate() (warns admission.Warnings, err error) { - poolerLog.Info("validate create", "name", r.Name, "namespace", r.Namespace) - - if !r.IsAutomatedIntegration() { - poolerLog.Info("Pooler not automatically configured, manual configuration required", - "name", r.Name, "namespace", r.Namespace, "cluster", r.Spec.Cluster.Name) - warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). 
"+ - "Manually configure it as described in the docs.", r.Name, r.Spec.Cluster.Name, r.Namespace)) - } - - allErrs := r.Validate() - - if len(allErrs) == 0 { - return warns, nil - } - - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"}, - r.Name, allErrs) -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *Pooler) ValidateUpdate(old runtime.Object) (warns admission.Warnings, err error) { - poolerLog.Info("validate update", "name", r.Name, "namespace", r.Namespace) - - oldPooler := old.(*Pooler) - - if oldPooler.IsAutomatedIntegration() && !r.IsAutomatedIntegration() { - poolerLog.Info("Pooler not automatically configured, manual configuration required", - "name", r.Name, "namespace", r.Namespace, "cluster", r.Spec.Cluster.Name) - warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+ - "Manually configure it as described in the docs.", r.Name, r.Spec.Cluster.Name, r.Namespace)) - } - - allErrs := r.Validate() - if len(allErrs) == 0 { - return nil, nil - } - - return warns, apierrors.NewInvalid( - schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"}, - r.Name, allErrs) -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *Pooler) ValidateDelete() (admission.Warnings, error) { - poolerLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace) - return nil, nil -} - -func (r *Pooler) validatePgBouncer() field.ErrorList { - var result field.ErrorList - switch { - case r.Spec.PgBouncer == nil: - result = append(result, - field.Invalid( - field.NewPath("spec", "pgbouncer"), - "", "required pgbouncer configuration")) - case r.Spec.PgBouncer.AuthQuerySecret != nil && r.Spec.PgBouncer.AuthQuerySecret.Name != "" && - r.Spec.PgBouncer.AuthQuery == "": - result = append(result, - field.Invalid( - field.NewPath("spec", "pgbouncer", "authQuery"), - "", "must specify an auth query when providing an auth query secret")) - case (r.Spec.PgBouncer.AuthQuerySecret == nil || r.Spec.PgBouncer.AuthQuerySecret.Name == "") && - r.Spec.PgBouncer.AuthQuery != "": - result = append(result, - field.Invalid( - field.NewPath("spec", "pgbouncer", "authQuerySecret", "name"), - "", "must specify an existing auth query secret when providing an auth query secret")) - } - - if r.Spec.PgBouncer != nil && len(r.Spec.PgBouncer.Parameters) > 0 { - result = append(result, r.validatePgbouncerGenericParameters()...) - } - - return result -} - -func (r *Pooler) validateCluster() field.ErrorList { - var result field.ErrorList - if r.Spec.Cluster.Name == "" { - result = append(result, - field.Invalid( - field.NewPath("spec", "cluster", "name"), - "", "must specify a cluster name")) - } - if r.Spec.Cluster.Name == r.Name { - result = append(result, - field.Invalid( - field.NewPath("metadata", "name"), - r.Name, "the pooler resource cannot have the same name of a cluster")) - } - return result -} - -// Validate validates the configuration of a Pooler, returning -// a list of errors -func (r *Pooler) Validate() (allErrs field.ErrorList) { - allErrs = append(allErrs, r.validatePgBouncer()...) - allErrs = append(allErrs, r.validateCluster()...) 
- return allErrs -} - -// validatePgbouncerGenericParameters validates pgbouncer parameters -func (r *Pooler) validatePgbouncerGenericParameters() field.ErrorList { - var result field.ErrorList - - for param := range r.Spec.PgBouncer.Parameters { - if !AllowedPgbouncerGenericConfigurationParameters.Has(param) { - result = append(result, - field.Invalid( - field.NewPath("spec", "cluster", "parameters"), - param, "Invalid or reserved parameter")) - } - } - return result -} diff --git a/api/v1/scheduledbackup_funcs_test.go b/api/v1/scheduledbackup_funcs_test.go index d4da0915ea..150192d8d4 100644 --- a/api/v1/scheduledbackup_funcs_test.go +++ b/api/v1/scheduledbackup_funcs_test.go @@ -17,8 +17,6 @@ limitations under the License. package v1 import ( - "k8s.io/utils/ptr" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" . "github.com/onsi/ginkgo/v2" @@ -68,32 +66,4 @@ var _ = Describe("Scheduled backup", func() { Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName)) Expect(backup.Spec.Target).To(BeEquivalentTo(BackupTargetPrimary)) }) - - It("complains if online is set on a barman backup", func() { - scheduledBackup := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Method: BackupMethodBarmanObjectStore, - Online: ptr.To(true), - Schedule: "* * * * * *", - }, - } - warnings, result := scheduledBackup.validate() - Expect(warnings).To(BeEmpty()) - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.online")) - }) - - It("complains if onlineConfiguration is set on a barman backup", func() { - scheduledBackup := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Method: BackupMethodBarmanObjectStore, - OnlineConfiguration: &OnlineConfiguration{}, - Schedule: "* * * * * *", - }, - } - warnings, result := scheduledBackup.validate() - Expect(warnings).To(BeEmpty()) - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) - }) }) diff --git a/api/v1/scheduledbackup_webhook.go b/api/v1/scheduledbackup_webhook.go deleted file mode 100644 index 30be039614..0000000000 --- a/api/v1/scheduledbackup_webhook.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "strings" - - "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/robfig/cron" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// scheduledBackupLog is for logging in this package. 
-var scheduledBackupLog = log.WithName("scheduledbackup-resource").WithValues("version", "v1") - -// SetupWebhookWithManager setup the webhook inside the controller manager -func (r *ScheduledBackup) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} - -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-scheduledbackup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,verbs=create;update,versions=v1,name=mscheduledbackup.cnpg.io,sideEffects=None - -var _ webhook.Defaulter = &ScheduledBackup{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type -func (r *ScheduledBackup) Default() { - scheduledBackupLog.Info("default", "name", r.Name, "namespace", r.Namespace) -} - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-scheduledbackup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,versions=v1,name=vscheduledbackup.cnpg.io,sideEffects=None - -var _ webhook.Validator = &ScheduledBackup{} - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *ScheduledBackup) ValidateCreate() (admission.Warnings, error) { - scheduledBackupLog.Info("validate create", "name", r.Name, "namespace", r.Namespace) - - warnings, allErrs := r.validate() - if len(allErrs) == 0 { - return warnings, nil - } - - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "scheduledbackup.cnpg.io", Kind: "Backup"}, - r.Name, allErrs) -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *ScheduledBackup) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { - scheduledBackupLog.Info("validate update", "name", r.Name, "namespace", r.Namespace) - return nil, nil -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ScheduledBackup) ValidateDelete() (admission.Warnings, error) { - scheduledBackupLog.Info("validate delete", "name", r.Name, "namespace", r.Namespace) - return nil, nil -} - -func (r *ScheduledBackup) validate() (admission.Warnings, field.ErrorList) { - var result field.ErrorList - var warnings admission.Warnings - - if _, err := cron.Parse(r.GetSchedule()); err != nil { - result = append(result, - field.Invalid( - field.NewPath("spec", "schedule"), - r.Spec.Schedule, err.Error())) - } else if len(strings.Fields(r.Spec.Schedule)) != 6 { - warnings = append( - warnings, - "Schedule parameter may not have the right number of arguments "+ - "(usually six arguments are needed)", - ) - } - - if r.Spec.Method == BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { - result = append(result, field.Invalid( - field.NewPath("spec", "method"), - r.Spec.Method, - "Cannot use volumeSnapshot backup method due to missing "+ - "VolumeSnapshot CRD. 
If you installed the CRD after having "+ - "started the operator, please restart it to enable "+ - "VolumeSnapshot support", - )) - } - - if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.Online != nil { - result = append(result, field.Invalid( - field.NewPath("spec", "online"), - r.Spec.Online, - "Online parameter can be specified only if the method is volumeSnapshot", - )) - } - - if r.Spec.Method == BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil { - result = append(result, field.Invalid( - field.NewPath("spec", "onlineConfiguration"), - r.Spec.OnlineConfiguration, - "OnlineConfiguration parameter can be specified only if the method is volumeSnapshot", - )) - } - - return warnings, result -} diff --git a/api/v1/scheduledbackup_webhook_test.go b/api/v1/scheduledbackup_webhook_test.go deleted file mode 100644 index 0ef5043a97..0000000000 --- a/api/v1/scheduledbackup_webhook_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Validate schedule", func() { - It("doesn't complain if there's a schedule", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "0 0 0 * * *", - }, - } - - warnings, result := schedule.validate() - Expect(warnings).To(BeEmpty()) - Expect(result).To(BeEmpty()) - }) - - It("warn the user if the schedule has a wrong number of arguments", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "1 2 3 4 5", - }, - } - - warnings, result := schedule.validate() - Expect(warnings).To(HaveLen(1)) - Expect(result).To(BeEmpty()) - }) - - It("complain with a wrong time", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "0 0 0 * * * 1996", - }, - } - - warnings, result := schedule.validate() - Expect(warnings).To(BeEmpty()) - Expect(result).To(HaveLen(1)) - }) - - It("doesn't complain if VolumeSnapshot CRD is present", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "0 0 0 * * *", - Method: BackupMethodVolumeSnapshot, - }, - } - utils.SetVolumeSnapshot(true) - - warnings, result := schedule.validate() - Expect(warnings).To(BeEmpty()) - Expect(result).To(BeEmpty()) - }) - - It("complains if VolumeSnapshot CRD is not present", func() { - schedule := &ScheduledBackup{ - Spec: ScheduledBackupSpec{ - Schedule: "0 0 0 * * *", - Method: BackupMethodVolumeSnapshot, - }, - } - utils.SetVolumeSnapshot(false) - warnings, result := schedule.validate() - Expect(warnings).To(BeEmpty()) - Expect(result).To(HaveLen(1)) - Expect(result[0].Field).To(Equal("spec.method")) - }) -}) diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index 84b452c908..01ccec5111 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ 
-32,12 +32,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - // +kubebuilder:scaffold:imports - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + webhookv1 "github.com/cloudnative-pg/cloudnative-pg/internal/webhook/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/multicache" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -262,22 +261,22 @@ func RunController( return err } - if err = (&apiv1.Cluster{}).SetupWebhookWithManager(mgr); err != nil { + if err = webhookv1.SetupClusterWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Cluster", "version", "v1") return err } - if err = (&apiv1.Backup{}).SetupWebhookWithManager(mgr); err != nil { + if err = webhookv1.SetupBackupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Backup", "version", "v1") return err } - if err = (&apiv1.ScheduledBackup{}).SetupWebhookWithManager(mgr); err != nil { + if err = webhookv1.SetupScheduledBackupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "ScheduledBackup", "version", "v1") return err } - if err = (&apiv1.Pooler{}).SetupWebhookWithManager(mgr); err != nil { + if err = webhookv1.SetupPoolerWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Pooler", "version", "v1") return err } diff --git a/internal/webhook/v1/backup_webhook.go b/internal/webhook/v1/backup_webhook.go new file mode 100644 index 0000000000..87ed87d1f6 --- /dev/null +++ b/internal/webhook/v1/backup_webhook.go @@ -0,0 +1,172 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/log" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// backupLog is for logging in this package. +var backupLog = log.WithName("backup-resource").WithValues("version", "v1") + +// SetupBackupWebhookWithManager registers the webhook for Backup in the manager. +func SetupBackupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Backup{}). + WithValidator(&BackupCustomValidator{}). + WithDefaulter(&BackupCustomDefaulter{}). 
+		Complete()
+}
+
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing it to an invalid value can cause API server errors when locating the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-backup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,verbs=create;update,versions=v1,name=mbackup.cnpg.io,sideEffects=None
+
+// BackupCustomDefaulter struct is responsible for setting default values on the custom resource of the
+// Kind Backup when those are created or updated.
+type BackupCustomDefaulter struct{}
+
+var _ webhook.CustomDefaulter = &BackupCustomDefaulter{}
+
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Backup.
+func (d *BackupCustomDefaulter) Default(_ context.Context, obj runtime.Object) error {
+	backup, ok := obj.(*apiv1.Backup)
+	if !ok {
+		return fmt.Errorf("expected a Backup object but got %T", obj)
+	}
+	backupLog.Info("Defaulting for Backup", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+	// TODO(user): fill in your defaulting logic.
+
+	return nil
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing it to an invalid value can cause API server errors when locating the webhook.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-backup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=backups,versions=v1,name=vbackup.cnpg.io,sideEffects=None
+
+// BackupCustomValidator struct is responsible for validating the Backup resource
+// when it is created, updated, or deleted.
+type BackupCustomValidator struct{}
+
+var _ webhook.CustomValidator = &BackupCustomValidator{}
+
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Backup.
+func (v *BackupCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+	backup, ok := obj.(*apiv1.Backup)
+	if !ok {
+		return nil, fmt.Errorf("expected a Backup object but got %T", obj)
+	}
+	backupLog.Info("Validation for Backup upon creation", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+	allErrs := v.validate(backup)
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"},
+		backup.Name, allErrs)
+}
+
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Backup.
+func (v *BackupCustomValidator) ValidateUpdate(
+	_ context.Context,
+	_, newObj runtime.Object,
+) (admission.Warnings, error) {
+	backup, ok := newObj.(*apiv1.Backup)
+	if !ok {
+		return nil, fmt.Errorf("expected a Backup object for the newObj but got %T", newObj)
+	}
+	backupLog.Info("Validation for Backup upon update", "name", backup.GetName(), "namespace", backup.GetNamespace())
+
+	allErrs := v.validate(backup)
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"},
+		backup.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Backup.
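+//
+// Deletion validation is currently a pass-through: the validating marker
+// above registers only the create and update verbs, so the API server never
+// sends DELETE admission reviews to this handler. A sketch of the marker
+// change that would enable it, mirroring the TODO above (other attributes
+// unchanged and elided here):
+//
+//	// +kubebuilder:webhook:...,verbs=create;update;delete,...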
+func (v *BackupCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + backup, ok := obj.(*apiv1.Backup) + if !ok { + return nil, fmt.Errorf("expected a Backup object but got %T", obj) + } + backupLog.Info("Validation for Backup upon deletion", "name", backup.GetName(), "namespace", backup.GetNamespace()) + + // TODO(user): fill in your validation logic upon object deletion. + + return nil, nil +} + +func (v *BackupCustomValidator) validate(r *apiv1.Backup) field.ErrorList { + var result field.ErrorList + + if r.Spec.Method == apiv1.BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { + result = append(result, field.Invalid( + field.NewPath("spec", "method"), + r.Spec.Method, + "Cannot use volumeSnapshot backup method due to missing "+ + "VolumeSnapshot CRD. If you installed the CRD after having "+ + "started the operator, please restart it to enable "+ + "VolumeSnapshot support", + )) + } + + if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.Online != nil { + result = append(result, field.Invalid( + field.NewPath("spec", "online"), + r.Spec.Online, + "Online parameter can be specified only if the backup method is volumeSnapshot", + )) + } + + if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil { + result = append(result, field.Invalid( + field.NewPath("spec", "onlineConfiguration"), + r.Spec.OnlineConfiguration, + "OnlineConfiguration parameter can be specified only if the backup method is volumeSnapshot", + )) + } + + if r.Spec.Method == apiv1.BackupMethodPlugin && r.Spec.PluginConfiguration.IsEmpty() { + result = append(result, field.Invalid( + field.NewPath("spec", "pluginConfiguration"), + r.Spec.OnlineConfiguration, + "cannot be empty when the backup method is plugin", + )) + } + + return result +} diff --git a/api/v1/backup_webhook_test.go b/internal/webhook/v1/backup_webhook_test.go similarity index 66% rename from api/v1/backup_webhook_test.go rename to internal/webhook/v1/backup_webhook_test.go index d7de85abe3..2ac2fbf883 100644 --- a/api/v1/backup_webhook_test.go +++ b/internal/webhook/v1/backup_webhook_test.go @@ -19,6 +19,7 @@ package v1 import ( "k8s.io/utils/ptr" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . 
"github.com/onsi/ginkgo/v2" @@ -26,49 +27,54 @@ import ( ) var _ = Describe("Backup webhook validate", func() { + var v *BackupCustomValidator + BeforeEach(func() { + v = &BackupCustomValidator{} + }) + It("doesn't complain if VolumeSnapshot CRD is present", func() { - backup := &Backup{ - Spec: BackupSpec{ - Method: BackupMethodVolumeSnapshot, + backup := &apiv1.Backup{ + Spec: apiv1.BackupSpec{ + Method: apiv1.BackupMethodVolumeSnapshot, }, } utils.SetVolumeSnapshot(true) - result := backup.validate() + result := v.validate(backup) Expect(result).To(BeEmpty()) }) It("complains if VolumeSnapshot CRD is not present", func() { - backup := &Backup{ - Spec: BackupSpec{ - Method: BackupMethodVolumeSnapshot, + backup := &apiv1.Backup{ + Spec: apiv1.BackupSpec{ + Method: apiv1.BackupMethodVolumeSnapshot, }, } utils.SetVolumeSnapshot(false) - result := backup.validate() + result := v.validate(backup) Expect(result).To(HaveLen(1)) Expect(result[0].Field).To(Equal("spec.method")) }) It("complains if online is set on a barman backup", func() { - backup := &Backup{ - Spec: BackupSpec{ - Method: BackupMethodBarmanObjectStore, + backup := &apiv1.Backup{ + Spec: apiv1.BackupSpec{ + Method: apiv1.BackupMethodBarmanObjectStore, Online: ptr.To(true), }, } - result := backup.validate() + result := v.validate(backup) Expect(result).To(HaveLen(1)) Expect(result[0].Field).To(Equal("spec.online")) }) It("complains if onlineConfiguration is set on a barman backup", func() { - backup := &Backup{ - Spec: BackupSpec{ - Method: BackupMethodBarmanObjectStore, - OnlineConfiguration: &OnlineConfiguration{}, + backup := &apiv1.Backup{ + Spec: apiv1.BackupSpec{ + Method: apiv1.BackupMethodBarmanObjectStore, + OnlineConfiguration: &apiv1.OnlineConfiguration{}, }, } - result := backup.validate() + result := v.validate(backup) Expect(result).To(HaveLen(1)) Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) }) diff --git a/api/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go similarity index 77% rename from api/v1/cluster_webhook.go rename to internal/webhook/v1/cluster_webhook.go index eec7a01f68..fcba1b0621 100644 --- a/api/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -17,6 +17,7 @@ limitations under the License. 
package v1
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"slices"
@@ -30,7 +31,7 @@ import (
 	"github.com/cloudnative-pg/machinery/pkg/stringset"
 	"github.com/cloudnative-pg/machinery/pkg/types"
 	storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
@@ -43,397 +44,207 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
-	"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
 )
 
-const (
-	// DefaultMonitoringKey is the key that should be used in the default metrics configmap to store the queries
-	DefaultMonitoringKey = "queries"
-	// DefaultMonitoringConfigMapName is the name of the target configmap with the default monitoring queries,
-	// if configured
-	DefaultMonitoringConfigMapName = "cnpg-default-monitoring"
-	// DefaultMonitoringSecretName is the name of the target secret with the default monitoring queries,
-	// if configured
-	DefaultMonitoringSecretName = DefaultMonitoringConfigMapName
-	// DefaultApplicationDatabaseName is the name of application database if not specified
-	DefaultApplicationDatabaseName = "app"
-	// DefaultApplicationUserName is the name of application database owner if not specified
-	DefaultApplicationUserName = DefaultApplicationDatabaseName
-)
-
 const sharedBuffersParameter = "shared_buffers"
 
 // clusterLog is for logging in this package.
 var clusterLog = log.WithName("cluster-resource").WithValues("version", "v1")
 
-// SetupWebhookWithManager setup the webhook inside the controller manager
-func (r *Cluster) SetupWebhookWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewWebhookManagedBy(mgr).
-		For(r).
+// SetupClusterWebhookWithManager registers the webhook for Cluster in the manager.
+func SetupClusterWebhookWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Cluster{}).
+		WithValidator(&ClusterCustomValidator{}).
+		WithDefaulter(&ClusterCustomDefaulter{}).
 		Complete()
 }
 
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing it to an invalid value can cause API server errors, since the webhook can no longer be located.
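+//
+// For illustration only (not part of the change itself): with the wiring above, a
+// request that creates a Cluster is first mutated by ClusterCustomDefaulter and
+// then checked by ClusterCustomValidator, roughly equivalent to:
+//
+//	d := &ClusterCustomDefaulter{}
+//	v := &ClusterCustomValidator{}
+//	_ = d.Default(ctx, cluster)                     // apply defaults in place
+//	warnings, err := v.ValidateCreate(ctx, cluster) // then validate
+//
+// where ctx and cluster stand for the admission request's context and object.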
 // +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-cluster,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,verbs=create;update,versions=v1,name=mcluster.cnpg.io,sideEffects=None
 
-var _ webhook.Defaulter = &Cluster{}
-
-// Default implements webhook.Defaulter so a webhook will be registered for the type
-func (r *Cluster) Default() {
-	clusterLog.Info("default", "name", r.Name, "namespace", r.Namespace)
-
-	r.setDefaults(true)
-}
-
-// SetDefaults apply the defaults to undefined values in a Cluster
-func (r *Cluster) SetDefaults() {
-	r.setDefaults(false)
-}
-
-func (r *Cluster) setDefaults(preserveUserSettings bool) {
-	// Defaulting the image name if not specified
-	if r.Spec.ImageName == "" && r.Spec.ImageCatalogRef == nil {
-		r.Spec.ImageName = configuration.Current.PostgresImageName
-	}
+// ClusterCustomDefaulter struct is responsible for setting default values on the custom resource of the
+// Kind Cluster when those are created or updated.
+type ClusterCustomDefaulter struct{}
 
-	// Defaulting the bootstrap method if not specified
-	if r.Spec.Bootstrap == nil {
-		r.Spec.Bootstrap = &BootstrapConfiguration{}
-	}
+var _ webhook.CustomDefaulter = &ClusterCustomDefaulter{}
 
-	// Defaulting initDB if no other bootstrap method was passed
-	switch {
-	case r.Spec.Bootstrap.Recovery != nil:
-		r.defaultRecovery()
-	case r.Spec.Bootstrap.PgBaseBackup != nil:
-		r.defaultPgBaseBackup()
-	default:
-		r.defaultInitDB()
+// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Cluster.
+func (d *ClusterCustomDefaulter) Default(_ context.Context, obj runtime.Object) error {
+	cluster, ok := obj.(*apiv1.Cluster)
+	if !ok {
+		return fmt.Errorf("expected a Cluster object but got %T", obj)
 	}
+	clusterLog.Info("Defaulting for Cluster", "name", cluster.GetName(), "namespace", cluster.GetNamespace())
 
-	// Defaulting the pod anti-affinity type if podAntiAffinity
-	if (r.Spec.Affinity.EnablePodAntiAffinity == nil || *r.Spec.Affinity.EnablePodAntiAffinity) &&
-		r.Spec.Affinity.PodAntiAffinityType == "" {
-		r.Spec.Affinity.PodAntiAffinityType = PodAntiAffinityTypePreferred
-	}
+	cluster.Default()
 
-	if r.Spec.Backup != nil && r.Spec.Backup.Target == "" {
-		r.Spec.Backup.Target = DefaultBackupTarget
-	}
-
-	psqlVersion, err := r.GetPostgresqlVersion()
-	if err == nil {
-		// The validation error will be already raised by the
-		// validateImageName function
-		info := postgres.ConfigurationInfo{
-			Settings:                      postgres.CnpgConfigurationSettings,
-			Version:                       psqlVersion,
-			UserSettings:                  r.Spec.PostgresConfiguration.Parameters,
-			IsReplicaCluster:              r.IsReplica(),
-			PreserveFixedSettingsFromUser: preserveUserSettings,
-			IsWalArchivingDisabled:        utils.IsWalArchivingDisabled(&r.ObjectMeta),
-			IsAlterSystemEnabled:          r.Spec.PostgresConfiguration.EnableAlterSystem,
-		}
-		sanitizedParameters := postgres.CreatePostgresqlConfiguration(info).GetConfigurationParameters()
-		r.Spec.PostgresConfiguration.Parameters = sanitizedParameters
-	}
-
-	if r.Spec.LogLevel == "" {
-		r.Spec.LogLevel = log.InfoLevelString
-	}
-
-	// we inject the defaultMonitoringQueries if the MonitoringQueriesConfigmap parameter is not empty
-	// and defaultQueries not disabled on cluster crd
-	if !r.Spec.Monitoring.AreDefaultQueriesDisabled() {
-		r.defaultMonitoringQueries(configuration.Current)
-	}
+	return nil
+}
 
-	// If the ReplicationSlots or HighAvailability stanzas are nil, we create them and enable slots
-	if r.Spec.ReplicationSlots ==
nil {
-		r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{}
-	}
-	if r.Spec.ReplicationSlots.HighAvailability == nil {
-		r.Spec.ReplicationSlots.HighAvailability = &ReplicationSlotsHAConfiguration{
-			Enabled:    ptr.To(true),
-			SlotPrefix: "_cnpg_",
-		}
-	}
-	if r.Spec.ReplicationSlots.SynchronizeReplicas == nil {
-		r.Spec.ReplicationSlots.SynchronizeReplicas = &SynchronizeReplicasConfiguration{
-			Enabled: ptr.To(true),
-		}
-	}
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Changing it to an invalid value can cause API server errors, since the webhook can no longer be located.
+// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-cluster,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,versions=v1,name=vcluster.cnpg.io,sideEffects=None
 
-	if len(r.Spec.Tablespaces) > 0 {
-		r.defaultTablespaces()
-	}
+// ClusterCustomValidator struct is responsible for validating the Cluster resource
+// when it is created, updated, or deleted.
+type ClusterCustomValidator struct{}
 
-	r.setDefaultPlugins(configuration.Current)
-}
+var _ webhook.CustomValidator = &ClusterCustomValidator{}
 
-func (r *Cluster) setDefaultPlugins(config *configuration.Data) {
-	// Add the list of pre-defined plugins
-	foundPlugins := stringset.New()
-	for _, plugin := range r.Spec.Plugins {
-		foundPlugins.Put(plugin.Name)
+// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Cluster.
+func (v *ClusterCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+	cluster, ok := obj.(*apiv1.Cluster)
+	if !ok {
+		return nil, fmt.Errorf("expected a Cluster object but got %T", obj)
 	}
+	clusterLog.Info("Validation for Cluster upon creation", "name", cluster.GetName(), "namespace", cluster.GetNamespace())
 
-	for _, pluginName := range config.GetIncludePlugins() {
-		if !foundPlugins.Has(pluginName) {
-			r.Spec.Plugins = append(r.Spec.Plugins, PluginConfiguration{
-				Name:    pluginName,
-				Enabled: ptr.To(true),
-			})
-		}
-	}
-}
+	allErrs := v.validate(cluster)
+	allWarnings := v.getAdmissionWarnings(cluster)
 
-// defaultTablespaces adds the tablespace owner where the
-// user didn't specify it
-func (r *Cluster) defaultTablespaces() {
-	defaultOwner := r.GetApplicationDatabaseOwner()
-	if len(defaultOwner) == 0 {
-		defaultOwner = "postgres"
+	if len(allErrs) == 0 {
+		return allWarnings, nil
 	}
 
-	for name, tablespaceConfiguration := range r.Spec.Tablespaces {
-		if len(tablespaceConfiguration.Owner.Name) == 0 {
-			tablespaceConfiguration.Owner.Name = defaultOwner
-		}
-		r.Spec.Tablespaces[name] = tablespaceConfiguration
-	}
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"},
+		cluster.Name, allErrs)
 }
 
-// defaultMonitoringQueries adds the default monitoring queries configMap
-// if not already present in CustomQueriesConfigMap
-func (r *Cluster) defaultMonitoringQueries(config *configuration.Data) {
-	if r.Spec.Monitoring == nil {
-		r.Spec.Monitoring = &MonitoringConfiguration{}
+// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Cluster.
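+//
+// Note that defaults are applied to the old object before the comparison (see the
+// call to oldCluster.SetDefaults() below), so fields that were nil in the stored
+// Cluster are compared in their defaulted form rather than as zero values.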
+func (v *ClusterCustomValidator) ValidateUpdate( + _ context.Context, + oldObj, newObj runtime.Object, +) (admission.Warnings, error) { + cluster, ok := newObj.(*apiv1.Cluster) + if !ok { + return nil, fmt.Errorf("expected a Cluster object for the newObj but got %T", newObj) } - if config.MonitoringQueriesConfigmap != "" { - var defaultConfigMapQueriesAlreadyPresent bool - // We check if the default queries are already inserted in the monitoring configuration - for _, monitoringConfigMap := range r.Spec.Monitoring.CustomQueriesConfigMap { - if monitoringConfigMap.Name == DefaultMonitoringConfigMapName { - defaultConfigMapQueriesAlreadyPresent = true - break - } - } - - // If the default queries are already present there is no need to re-add them. - // Please note that in this case that the default configMap could overwrite user existing queries - // depending on the order. This is an accepted behavior because the user willingly defined the order of his array - if !defaultConfigMapQueriesAlreadyPresent { - r.Spec.Monitoring.CustomQueriesConfigMap = append([]ConfigMapKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: DefaultMonitoringKey, - }, - }, r.Spec.Monitoring.CustomQueriesConfigMap...) - } + oldCluster, ok := oldObj.(*apiv1.Cluster) + if !ok { + return nil, fmt.Errorf("expected a Cluster object for the oldObj but got %T", oldObj) } - if config.MonitoringQueriesSecret != "" { - var defaultSecretQueriesAlreadyPresent bool - // we check if the default queries are already inserted in the monitoring configuration - for _, monitoringSecret := range r.Spec.Monitoring.CustomQueriesSecret { - if monitoringSecret.Name == DefaultMonitoringSecretName { - defaultSecretQueriesAlreadyPresent = true - break - } - } + clusterLog.Info("Validation for Cluster upon update", "name", cluster.GetName(), "namespace", cluster.GetNamespace()) - if !defaultSecretQueriesAlreadyPresent { - r.Spec.Monitoring.CustomQueriesSecret = append([]SecretKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, - Key: DefaultMonitoringKey, - }, - }, r.Spec.Monitoring.CustomQueriesSecret...) 
-		}
-	}
-}
 
+	// applying defaults before validating updates to set any new default
+	oldCluster.SetDefaults()
 
-// defaultInitDB enriches the initDB with defaults if not all the required arguments were passed
-func (r *Cluster) defaultInitDB() {
-	if r.Spec.Bootstrap.InitDB == nil {
-		r.Spec.Bootstrap.InitDB = &BootstrapInitDB{
-			Database: DefaultApplicationDatabaseName,
-			Owner:    DefaultApplicationUserName,
-		}
-	}
+	allErrs := append(
+		v.validate(cluster),
+		v.validateClusterChanges(cluster, oldCluster)...,
+	)
 
-	if r.Spec.Bootstrap.InitDB.Database == "" {
-		r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName
-	}
-	if r.Spec.Bootstrap.InitDB.Owner == "" {
-		r.Spec.Bootstrap.InitDB.Owner = r.Spec.Bootstrap.InitDB.Database
-	}
-	if r.Spec.Bootstrap.InitDB.Encoding == "" {
-		r.Spec.Bootstrap.InitDB.Encoding = "UTF8"
-	}
-	if r.Spec.Bootstrap.InitDB.LocaleCollate == "" {
-		r.Spec.Bootstrap.InitDB.LocaleCollate = "C"
-	}
-	if r.Spec.Bootstrap.InitDB.LocaleCType == "" {
-		r.Spec.Bootstrap.InitDB.LocaleCType = "C"
+	if len(allErrs) == 0 {
+		return v.getAdmissionWarnings(cluster), nil
 	}
-}
 
-// defaultRecovery enriches the recovery with defaults if not all the required arguments were passed
-func (r *Cluster) defaultRecovery() {
-	if r.Spec.Bootstrap.Recovery.Database == "" {
-		r.Spec.Bootstrap.Recovery.Database = DefaultApplicationDatabaseName
-	}
-	if r.Spec.Bootstrap.Recovery.Owner == "" {
-		r.Spec.Bootstrap.Recovery.Owner = r.Spec.Bootstrap.Recovery.Database
-	}
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"},
+		cluster.Name, allErrs)
 }
 
-// defaultPgBaseBackup enriches the pg_basebackup with defaults if not all the required arguments were passed
-func (r *Cluster) defaultPgBaseBackup() {
-	if r.Spec.Bootstrap.PgBaseBackup.Database == "" {
-		r.Spec.Bootstrap.PgBaseBackup.Database = DefaultApplicationDatabaseName
+// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Cluster.
+func (v *ClusterCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+	cluster, ok := obj.(*apiv1.Cluster)
+	if !ok {
+		return nil, fmt.Errorf("expected a Cluster object but got %T", obj)
 	}
-	if r.Spec.Bootstrap.PgBaseBackup.Owner == "" {
-		r.Spec.Bootstrap.PgBaseBackup.Owner = r.Spec.Bootstrap.PgBaseBackup.Database
-	}
-}
+	clusterLog.Info("Validation for Cluster upon deletion", "name", cluster.GetName(), "namespace", cluster.GetNamespace())
 
-// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
-// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-cluster,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=clusters,versions=v1,name=vcluster.cnpg.io,sideEffects=None
-
-var _ webhook.Validator = &Cluster{}
-
-// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (r *Cluster) ValidateCreate() (admission.Warnings, error) {
-	clusterLog.Info("validate create", "name", r.Name, "namespace", r.Namespace)
-	allErrs := r.Validate()
-	allWarnings := r.getAdmissionWarnings()
-
-	if len(allErrs) == 0 {
-		return allWarnings, nil
-	}
+	// TODO(user): fill in your validation logic upon object deletion.
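+	//
+	// A possible sketch of such logic (hypothetical, not part of this change; it
+	// would also require adding "delete" to the verbs of the kubebuilder marker
+	// above):
+	//
+	//	if cluster.Annotations[utils.HibernationAnnotationName] == string(utils.HibernationAnnotationValueOn) {
+	//		return nil, fmt.Errorf("refusing to delete hibernated cluster %q", cluster.Name)
+	//	}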
-	return nil, apierrors.NewInvalid(
-		schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"},
-		r.Name, allErrs)
+	return nil, nil
 }
 
-// Validate groups the validation logic for clusters returning a list of all encountered errors
-func (r *Cluster) Validate() (allErrs field.ErrorList) {
-	type validationFunc func() field.ErrorList
+// validate groups the validation logic for clusters, returning a list of all encountered errors
+func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.ErrorList) {
+	type validationFunc func(*apiv1.Cluster) field.ErrorList
 	validations := []validationFunc{
-		r.validateInitDB,
-		r.validateRecoveryApplicationDatabase,
-		r.validatePgBaseBackupApplicationDatabase,
-		r.validateImport,
-		r.validateSuperuserSecret,
-		r.validateCerts,
-		r.validateBootstrapMethod,
-		r.validateImageName,
-		r.validateImagePullPolicy,
-		r.validateRecoveryTarget,
-		r.validatePrimaryUpdateStrategy,
-		r.validateMinSyncReplicas,
-		r.validateMaxSyncReplicas,
-		r.validateStorageSize,
-		r.validateWalStorageSize,
-		r.validateEphemeralVolumeSource,
-		r.validateTablespaceStorageSize,
-		r.validateName,
-		r.validateTablespaceNames,
-		r.validateBootstrapPgBaseBackupSource,
-		r.validateTablespaceBackupSnapshot,
-		r.validateBootstrapRecoverySource,
-		r.validateBootstrapRecoveryDataSource,
-		r.validateExternalClusters,
-		r.validateTolerations,
-		r.validateAntiAffinity,
-		r.validateReplicaMode,
-		r.validateBackupConfiguration,
-		r.validateRetentionPolicy,
-		r.validateConfiguration,
-		r.validateSynchronousReplicaConfiguration,
-		r.validateLDAP,
-		r.validateReplicationSlots,
-		r.validateEnv,
-		r.validateManagedServices,
-		r.validateManagedRoles,
-		r.validateManagedExtensions,
-		r.validateResources,
-		r.validateHibernationAnnotation,
-		r.validatePromotionToken,
+		v.validateInitDB,
+		v.validateRecoveryApplicationDatabase,
+		v.validatePgBaseBackupApplicationDatabase,
+		v.validateImport,
+		v.validateSuperuserSecret,
+		v.validateCerts,
+		v.validateBootstrapMethod,
+		v.validateImageName,
+		v.validateImagePullPolicy,
+		v.validateRecoveryTarget,
+		v.validatePrimaryUpdateStrategy,
+		v.validateMinSyncReplicas,
+		v.validateMaxSyncReplicas,
+		v.validateStorageSize,
+		v.validateWalStorageSize,
+		v.validateEphemeralVolumeSource,
+		v.validateTablespaceStorageSize,
+		v.validateName,
+		v.validateTablespaceNames,
+		v.validateBootstrapPgBaseBackupSource,
+		v.validateTablespaceBackupSnapshot,
+		v.validateBootstrapRecoverySource,
+		v.validateBootstrapRecoveryDataSource,
+		v.validateExternalClusters,
+		v.validateTolerations,
+		v.validateAntiAffinity,
+		v.validateReplicaMode,
+		v.validateBackupConfiguration,
+		v.validateRetentionPolicy,
+		v.validateConfiguration,
+		v.validateSynchronousReplicaConfiguration,
+		v.validateLDAP,
+		v.validateReplicationSlots,
+		v.validateEnv,
+		v.validateManagedServices,
+		v.validateManagedRoles,
+		v.validateManagedExtensions,
+		v.validateResources,
+		v.validateHibernationAnnotation,
+		v.validatePromotionToken,
 	}
 
 	for _, validate := range validations {
-		allErrs = append(allErrs, validate()...)
+		allErrs = append(allErrs, validate(r)...)
} return allErrs } -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *Cluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { - clusterLog.Info("validate update", "name", r.Name, "namespace", r.Namespace) - oldCluster := old.(*Cluster) - - // applying defaults before validating updates to set any new default - oldCluster.SetDefaults() - - allErrs := append( - r.Validate(), - r.ValidateChanges(oldCluster)..., - ) - - if len(allErrs) == 0 { - return r.getAdmissionWarnings(), nil - } - - return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "cluster.cnpg.io", Kind: "Cluster"}, - r.Name, allErrs) -} - -// ValidateChanges groups the validation logic for cluster changes checking the differences between +// validateClusterChanges groups the validation logic for cluster changes checking the differences between // the previous version and the new one of the cluster, returning a list of all encountered errors -func (r *Cluster) ValidateChanges(old *Cluster) (allErrs field.ErrorList) { +func (v *ClusterCustomValidator) validateClusterChanges(r, old *apiv1.Cluster) (allErrs field.ErrorList) { if old == nil { clusterLog.Info("Received invalid old object, skipping old object validation", "old", old) return nil } - type validationFunc func(old *Cluster) field.ErrorList + type validationFunc func(*apiv1.Cluster, *apiv1.Cluster) field.ErrorList validations := []validationFunc{ - r.validateImageChange, - r.validateConfigurationChange, - r.validateStorageChange, - r.validateWalStorageChange, - r.validateTablespacesChange, - r.validateUnixPermissionIdentifierChange, - r.validateReplicationSlotsChange, - r.validateWALLevelChange, - r.validateReplicaClusterChange, + v.validateImageChange, + v.validateConfigurationChange, + v.validateStorageChange, + v.validateWalStorageChange, + v.validateTablespacesChange, + v.validateUnixPermissionIdentifierChange, + v.validateReplicationSlotsChange, + v.validateWALLevelChange, + v.validateReplicaClusterChange, } for _, validate := range validations { - allErrs = append(allErrs, validate(old)...) + allErrs = append(allErrs, validate(r, old)...) } return allErrs } -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *Cluster) ValidateDelete() (admission.Warnings, error) { - clusterLog.Info("validate delete", "name", r.Name) - - // TODO(user): fill in your validation logic upon object deletion. 
- return nil, nil -} - // validateLDAP validates the ldap postgres configuration -func (r *Cluster) validateLDAP() field.ErrorList { +func (v *ClusterCustomValidator) validateLDAP(r *apiv1.Cluster) field.ErrorList { // No validating if not specified if r.Spec.PostgresConfiguration.LDAP == nil { return nil @@ -460,7 +271,7 @@ func (r *Cluster) validateLDAP() field.ErrorList { } // validateEnv validate the environment variables settings proposed by the user -func (r *Cluster) validateEnv() field.ErrorList { +func (v *ClusterCustomValidator) validateEnv(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList for i := range r.Spec.Env { @@ -501,7 +312,7 @@ func isReservedEnvironmentVariable(name string) bool { // validateInitDB validate the bootstrapping options when initdb // method is used -func (r *Cluster) validateInitDB() field.ErrorList { +func (v *ClusterCustomValidator) validateInitDB(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If it's not configured, everything is ok @@ -516,7 +327,7 @@ func (r *Cluster) validateInitDB() field.ErrorList { // If you specify the database name, then you need also to specify the // owner user and vice-versa initDBOptions := r.Spec.Bootstrap.InitDB - result = r.validateApplicationDatabase(initDBOptions.Database, initDBOptions.Owner, + result = v.validateApplicationDatabase(initDBOptions.Database, initDBOptions.Owner, "initdb") if initDBOptions.WalSegmentSize != 0 && !utils.IsPowerOfTwo(initDBOptions.WalSegmentSize) { @@ -555,7 +366,7 @@ func (r *Cluster) validateInitDB() field.ErrorList { return result } -func (r *Cluster) validateImport() field.ErrorList { +func (v *ClusterCustomValidator) validateImport(r *apiv1.Cluster) field.ErrorList { // If it's not configured, everything is ok if r.Spec.Bootstrap == nil { return nil @@ -571,10 +382,10 @@ func (r *Cluster) validateImport() field.ErrorList { } switch importSpec.Type { - case MicroserviceSnapshotType: - return importSpec.validateMicroservice() - case MonolithSnapshotType: - return importSpec.validateMonolith() + case apiv1.MicroserviceSnapshotType: + return v.validateMicroservice(importSpec) + case apiv1.MonolithSnapshotType: + return v.validateMonolith(importSpec) default: return field.ErrorList{ field.Invalid( @@ -585,7 +396,7 @@ func (r *Cluster) validateImport() field.ErrorList { } } -func (s Import) validateMicroservice() field.ErrorList { +func (v *ClusterCustomValidator) validateMicroservice(s *apiv1.Import) field.ErrorList { var result field.ErrorList if len(s.Databases) != 1 { @@ -621,7 +432,7 @@ func (s Import) validateMicroservice() field.ErrorList { return result } -func (s Import) validateMonolith() field.ErrorList { +func (v *ClusterCustomValidator) validateMonolith(s *apiv1.Import) field.ErrorList { var result field.ErrorList if len(s.Databases) < 1 { @@ -669,7 +480,7 @@ func (s Import) validateMonolith() field.ErrorList { // validateRecovery validate the bootstrapping options when Recovery // method is used -func (r *Cluster) validateRecoveryApplicationDatabase() field.ErrorList { +func (v *ClusterCustomValidator) validateRecoveryApplicationDatabase(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If it's not configured, everything is ok @@ -682,13 +493,12 @@ func (r *Cluster) validateRecoveryApplicationDatabase() field.ErrorList { } recoveryOptions := r.Spec.Bootstrap.Recovery - return r.validateApplicationDatabase(recoveryOptions.Database, recoveryOptions.Owner, - "recovery") + return v.validateApplicationDatabase(recoveryOptions.Database, 
recoveryOptions.Owner, "recovery") } // validatePgBaseBackup validate the bootstrapping options when pg_basebackup // method is used -func (r *Cluster) validatePgBaseBackupApplicationDatabase() field.ErrorList { +func (v *ClusterCustomValidator) validatePgBaseBackupApplicationDatabase(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If it's not configured, everything is ok @@ -701,19 +511,19 @@ func (r *Cluster) validatePgBaseBackupApplicationDatabase() field.ErrorList { } pgBaseBackupOptions := r.Spec.Bootstrap.PgBaseBackup - return r.validateApplicationDatabase(pgBaseBackupOptions.Database, pgBaseBackupOptions.Owner, + return v.validateApplicationDatabase(pgBaseBackupOptions.Database, pgBaseBackupOptions.Owner, "pg_basebackup") } // validateApplicationDatabase validate the configuration for application database -func (r *Cluster) validateApplicationDatabase( +func (v *ClusterCustomValidator) validateApplicationDatabase( database string, owner string, command string, ) field.ErrorList { var result field.ErrorList // If you specify the database name, then you need also to specify the - // owner user and vice-versa + // owner user and vice versa if database != "" && owner == "" { result = append( result, @@ -734,7 +544,7 @@ func (r *Cluster) validateApplicationDatabase( } // validateCerts validate all the provided certs -func (r *Cluster) validateCerts() field.ErrorList { +func (v *ClusterCustomValidator) validateCerts(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList certificates := r.Spec.Certificates @@ -778,7 +588,7 @@ func (r *Cluster) validateCerts() field.ErrorList { } // ValidateSuperuserSecret validate super user secret value -func (r *Cluster) validateSuperuserSecret() field.ErrorList { +func (v *ClusterCustomValidator) validateSuperuserSecret(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If empty, we're ok! 
@@ -801,7 +611,7 @@ func (r *Cluster) validateSuperuserSecret() field.ErrorList { // validateBootstrapMethod is used to ensure we have only one // bootstrap methods active -func (r *Cluster) validateBootstrapMethod() field.ErrorList { +func (v *ClusterCustomValidator) validateBootstrapMethod(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // If it's not configured, everything is ok @@ -834,7 +644,7 @@ func (r *Cluster) validateBootstrapMethod() field.ErrorList { // validateBootstrapPgBaseBackupSource is used to ensure that the source // server is correctly defined -func (r *Cluster) validateBootstrapPgBaseBackupSource() field.ErrorList { +func (v *ClusterCustomValidator) validateBootstrapPgBaseBackupSource(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // This validation is only applicable for physical backup @@ -858,7 +668,7 @@ func (r *Cluster) validateBootstrapPgBaseBackupSource() field.ErrorList { // validateBootstrapRecoverySource is used to ensure that the source // server is correctly defined -func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList { +func (v *ClusterCustomValidator) validateBootstrapRecoverySource(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // This validation is only applicable for recovery based bootstrap @@ -895,7 +705,7 @@ func (r *Cluster) validateBootstrapRecoverySource() field.ErrorList { // validateBootstrapRecoveryDataSource is used to ensure that the data // source is correctly defined -func (r *Cluster) validateBootstrapRecoveryDataSource() field.ErrorList { +func (v *ClusterCustomValidator) validateBootstrapRecoveryDataSource(r *apiv1.Cluster) field.ErrorList { // This validation is only applicable for datasource-based recovery based bootstrap if r.Spec.Bootstrap == nil || r.Spec.Bootstrap.Recovery == nil || r.Spec.Bootstrap.Recovery.VolumeSnapshots == nil { return nil @@ -951,7 +761,7 @@ func (r *Cluster) validateBootstrapRecoveryDataSource() field.ErrorList { // validateVolumeSnapshotSource validates a source of a recovery snapshot. 
// The supported resources are VolumeSnapshots and PersistentVolumeClaim func validateVolumeSnapshotSource( - value v1.TypedLocalObjectReference, + value corev1.TypedLocalObjectReference, path *field.Path, ) field.ErrorList { apiGroup := "" @@ -961,7 +771,7 @@ func validateVolumeSnapshotSource( switch { case apiGroup == storagesnapshotv1.GroupName && value.Kind == "VolumeSnapshot": - case apiGroup == v1.GroupName && value.Kind == "PersistentVolumeClaim": + case apiGroup == corev1.GroupName && value.Kind == "PersistentVolumeClaim": default: return field.ErrorList{ field.Invalid(path, value, "Only VolumeSnapshots and PersistentVolumeClaims are supported"), @@ -973,7 +783,7 @@ func validateVolumeSnapshotSource( // validateImageName validates the image name ensuring we aren't // using the "latest" tag -func (r *Cluster) validateImageName() field.ErrorList { +func (v *ClusterCustomValidator) validateImageName(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.ImageName == "" { @@ -1014,11 +824,11 @@ func (r *Cluster) validateImageName() field.ErrorList { // validateImagePullPolicy validates the image pull policy, // ensuring it is one of "Always", "Never" or "IfNotPresent" when defined -func (r *Cluster) validateImagePullPolicy() field.ErrorList { +func (v *ClusterCustomValidator) validateImagePullPolicy(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList switch r.Spec.ImagePullPolicy { - case v1.PullAlways, v1.PullNever, v1.PullIfNotPresent, "": + case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent, "": return result default: return append( @@ -1027,11 +837,11 @@ func (r *Cluster) validateImagePullPolicy() field.ErrorList { field.NewPath("spec", "imagePullPolicy"), r.Spec.ImagePullPolicy, fmt.Sprintf("invalid imagePullPolicy, if defined must be one of '%s', '%s' or '%s'", - v1.PullAlways, v1.PullNever, v1.PullIfNotPresent))) + corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent))) } } -func (r *Cluster) validateResources() field.ErrorList { +func (v *ClusterCustomValidator) validateResources(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList cpuRequest := r.Spec.Resources.Requests.Cpu() @@ -1089,7 +899,7 @@ func (r *Cluster) validateResources() field.ErrorList { return result } -func (r *Cluster) validateSynchronousReplicaConfiguration() field.ErrorList { +func (v *ClusterCustomValidator) validateSynchronousReplicaConfiguration(r *apiv1.Cluster) field.ErrorList { if r.Spec.PostgresConfiguration.Synchronous == nil { return nil } @@ -1112,7 +922,7 @@ func (r *Cluster) validateSynchronousReplicaConfiguration() field.ErrorList { } // validateConfiguration determines whether a PostgreSQL configuration is valid -func (r *Cluster) validateConfiguration() field.ErrorList { +func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList // We cannot have both old-style synchronous replica configuration @@ -1249,7 +1059,7 @@ func (r *Cluster) validateConfiguration() field.ErrorList { // validateWalSizeConfiguration verifies that min_wal_size < max_wal_size < wal volume size func validateWalSizeConfiguration( - postgresConfig PostgresConfiguration, walVolumeSize *resource.Quantity, + postgresConfig apiv1.PostgresConfiguration, walVolumeSize *resource.Quantity, ) field.ErrorList { const ( minWalSizeKey = "min_wal_size" @@ -1365,7 +1175,7 @@ func parsePostgresQuantityValue(value string) (resource.Quantity, error) { // validateConfigurationChange determines whether a PostgreSQL 
configuration // change can be applied -func (r *Cluster) validateConfigurationChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateConfigurationChange(r, old *apiv1.Cluster) field.ErrorList { var result field.ErrorList if old.Spec.ImageName != r.Spec.ImageName { @@ -1387,7 +1197,7 @@ func (r *Cluster) validateConfigurationChange(old *Cluster) field.ErrorList { return result } -func validateSyncReplicaElectionConstraint(constraints SyncReplicaElectionConstraints) *field.Error { +func validateSyncReplicaElectionConstraint(constraints apiv1.SyncReplicaElectionConstraints) *field.Error { if !constraints.Enabled { return nil } @@ -1406,7 +1216,7 @@ func validateSyncReplicaElectionConstraint(constraints SyncReplicaElectionConstr // validateImageChange validate the change from a certain image name // to a new one. -func (r *Cluster) validateImageChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateImageChange(r, old *apiv1.Cluster) field.ErrorList { var result field.ErrorList var newVersion, oldVersion version.Data var err error @@ -1451,7 +1261,7 @@ func (r *Cluster) validateImageChange(old *Cluster) field.ErrorList { // Validate the recovery target to ensure that the mutual exclusivity // of options is respected and plus validating the format of targetTime // if specified -func (r *Cluster) validateRecoveryTarget() field.ErrorList { +func (v *ClusterCustomValidator) validateRecoveryTarget(r *apiv1.Cluster) field.ErrorList { if r.Spec.Bootstrap == nil || r.Spec.Bootstrap.Recovery == nil { return nil } @@ -1516,7 +1326,7 @@ func (r *Cluster) validateRecoveryTarget() field.ErrorList { return result } -func validateTargetExclusiveness(recoveryTarget *RecoveryTarget) field.ErrorList { +func validateTargetExclusiveness(recoveryTarget *apiv1.RecoveryTarget) field.ErrorList { targets := 0 if recoveryTarget.TargetImmediate != nil { targets++ @@ -1547,15 +1357,15 @@ func validateTargetExclusiveness(recoveryTarget *RecoveryTarget) field.ErrorList // Validate the update strategy related to the number of required // instances -func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList { +func (v *ClusterCustomValidator) validatePrimaryUpdateStrategy(r *apiv1.Cluster) field.ErrorList { if r.Spec.PrimaryUpdateStrategy == "" { return nil } var result field.ErrorList - if r.Spec.PrimaryUpdateStrategy != PrimaryUpdateStrategySupervised && - r.Spec.PrimaryUpdateStrategy != PrimaryUpdateStrategyUnsupervised { + if r.Spec.PrimaryUpdateStrategy != apiv1.PrimaryUpdateStrategySupervised && + r.Spec.PrimaryUpdateStrategy != apiv1.PrimaryUpdateStrategyUnsupervised { result = append(result, field.Invalid( field.NewPath("spec", "primaryUpdateStrategy"), r.Spec.PrimaryUpdateStrategy, @@ -1563,7 +1373,7 @@ func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList { return result } - if r.Spec.PrimaryUpdateStrategy == PrimaryUpdateStrategySupervised && r.Spec.Instances == 1 { + if r.Spec.PrimaryUpdateStrategy == apiv1.PrimaryUpdateStrategySupervised && r.Spec.Instances == 1 { result = append(result, field.Invalid( field.NewPath("spec", "primaryUpdateStrategy"), r.Spec.PrimaryUpdateStrategy, @@ -1576,7 +1386,7 @@ func (r *Cluster) validatePrimaryUpdateStrategy() field.ErrorList { // Validate the maximum number of synchronous instances // that should be kept in sync with the primary server -func (r *Cluster) validateMaxSyncReplicas() field.ErrorList { +func (v *ClusterCustomValidator) validateMaxSyncReplicas(r *apiv1.Cluster) field.ErrorList { var result 
field.ErrorList if r.Spec.MaxSyncReplicas < 0 { @@ -1597,7 +1407,7 @@ func (r *Cluster) validateMaxSyncReplicas() field.ErrorList { } // Validate the minimum number of synchronous instances -func (r *Cluster) validateMinSyncReplicas() field.ErrorList { +func (v *ClusterCustomValidator) validateMinSyncReplicas(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.MinSyncReplicas < 0 { @@ -1617,11 +1427,11 @@ func (r *Cluster) validateMinSyncReplicas() field.ErrorList { return result } -func (r *Cluster) validateStorageSize() field.ErrorList { +func (v *ClusterCustomValidator) validateStorageSize(r *apiv1.Cluster) field.ErrorList { return validateStorageConfigurationSize(*field.NewPath("spec", "storage"), r.Spec.StorageConfiguration) } -func (r *Cluster) validateWalStorageSize() field.ErrorList { +func (v *ClusterCustomValidator) validateWalStorageSize(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.ShouldCreateWalArchiveVolume() { @@ -1632,7 +1442,7 @@ func (r *Cluster) validateWalStorageSize() field.ErrorList { return result } -func (r *Cluster) validateEphemeralVolumeSource() field.ErrorList { +func (v *ClusterCustomValidator) validateEphemeralVolumeSource(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.EphemeralVolumeSource != nil && (r.Spec.EphemeralVolumesSizeLimit != nil && @@ -1647,7 +1457,7 @@ func (r *Cluster) validateEphemeralVolumeSource() field.ErrorList { return result } -func (r *Cluster) validateTablespaceStorageSize() field.ErrorList { +func (v *ClusterCustomValidator) validateTablespaceStorageSize(r *apiv1.Cluster) field.ErrorList { if r.Spec.Tablespaces == nil { return nil } @@ -1666,7 +1476,7 @@ func (r *Cluster) validateTablespaceStorageSize() field.ErrorList { func validateStorageConfigurationSize( structPath field.Path, - storageConfiguration StorageConfiguration, + storageConfiguration apiv1.StorageConfiguration, ) field.ErrorList { var result field.ErrorList @@ -1692,7 +1502,7 @@ func validateStorageConfigurationSize( } // Validate a change in the storage -func (r *Cluster) validateStorageChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateStorageChange(r, old *apiv1.Cluster) field.ErrorList { return validateStorageConfigurationChange( field.NewPath("spec", "storage"), old.Spec.StorageConfiguration, @@ -1700,7 +1510,7 @@ func (r *Cluster) validateStorageChange(old *Cluster) field.ErrorList { ) } -func (r *Cluster) validateWalStorageChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateWalStorageChange(r, old *apiv1.Cluster) field.ErrorList { if old.Spec.WalStorage == nil { return nil } @@ -1723,7 +1533,7 @@ func (r *Cluster) validateWalStorageChange(old *Cluster) field.ErrorList { // validateTablespacesChange checks that no tablespaces have been deleted, and that // no tablespaces have an invalid storage update -func (r *Cluster) validateTablespacesChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateTablespacesChange(r, old *apiv1.Cluster) field.ErrorList { if old.Spec.Tablespaces == nil { return nil } @@ -1760,8 +1570,8 @@ func (r *Cluster) validateTablespacesChange(old *Cluster) field.ErrorList { // validateStorageConfigurationChange generates an error list by comparing two StorageConfiguration func validateStorageConfigurationChange( structPath *field.Path, - oldStorage StorageConfiguration, - newStorage StorageConfiguration, + oldStorage apiv1.StorageConfiguration, + newStorage apiv1.StorageConfiguration, ) 
field.ErrorList { oldSize := oldStorage.GetSizeOrNil() if oldSize == nil { @@ -1791,7 +1601,7 @@ func validateStorageConfigurationChange( // Validate the cluster name. This is important to avoid issues // while generating services, which don't support having dots in // their name -func (r *Cluster) validateName() field.ErrorList { +func (v *ClusterCustomValidator) validateName(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if errs := validationutil.IsDNS1035Label(r.Name); len(errs) > 0 { @@ -1811,7 +1621,7 @@ func (r *Cluster) validateName() field.ErrorList { return result } -func (r *Cluster) validateTablespaceNames() field.ErrorList { +func (v *ClusterCustomValidator) validateTablespaceNames(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.Tablespaces == nil { return nil @@ -1842,7 +1652,7 @@ func (r *Cluster) validateTablespaceNames() field.ErrorList { return result } -func (r *Cluster) validateTablespaceBackupSnapshot() field.ErrorList { +func (v *ClusterCustomValidator) validateTablespaceBackupSnapshot(r *apiv1.Cluster) field.ErrorList { if r.Spec.Backup == nil || r.Spec.Backup.VolumeSnapshot == nil || len(r.Spec.Backup.VolumeSnapshot.TablespaceClassName) == 0 { return nil @@ -1864,7 +1674,7 @@ func (r *Cluster) validateTablespaceBackupSnapshot() field.ErrorList { } // Check if the external clusters list contains two servers with the same name -func (r *Cluster) validateExternalClusters() field.ErrorList { +func (v *ClusterCustomValidator) validateExternalClusters(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList stringSet := stringset.New() @@ -1873,7 +1683,7 @@ func (r *Cluster) validateExternalClusters() field.ErrorList { stringSet.Put(externalCluster.Name) result = append( result, - r.validateExternalCluster(&r.Spec.ExternalClusters[idx], path)...) + v.validateExternalCluster(&r.Spec.ExternalClusters[idx], path)...) 
} if stringSet.Len() != len(r.Spec.ExternalClusters) { @@ -1887,7 +1697,10 @@ func (r *Cluster) validateExternalClusters() field.ErrorList { } // validateExternalCluster check the validity of a certain ExternalCluster -func (r *Cluster) validateExternalCluster(externalCluster *ExternalCluster, path *field.Path) field.ErrorList { +func (v *ClusterCustomValidator) validateExternalCluster( + externalCluster *apiv1.ExternalCluster, + path *field.Path, +) field.ErrorList { var result field.ErrorList if externalCluster.ConnectionParameters == nil && @@ -1903,7 +1716,7 @@ func (r *Cluster) validateExternalCluster(externalCluster *ExternalCluster, path return result } -func (r *Cluster) validateReplicaClusterChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateReplicaClusterChange(r, old *apiv1.Cluster) field.ErrorList { // If the replication role didn't change then everything // is fine if r.IsReplica() == old.IsReplica() { @@ -1924,7 +1737,7 @@ func (r *Cluster) validateReplicaClusterChange(old *Cluster) field.ErrorList { return nil } -func (r *Cluster) validateUnixPermissionIdentifierChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateUnixPermissionIdentifierChange(r, old *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.PostgresGID != old.Spec.PostgresGID { @@ -1944,7 +1757,7 @@ func (r *Cluster) validateUnixPermissionIdentifierChange(old *Cluster) field.Err return result } -func (r *Cluster) validatePromotionToken() field.ErrorList { +func (v *ClusterCustomValidator) validatePromotionToken(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.ReplicaCluster == nil { @@ -2000,7 +1813,7 @@ func (r *Cluster) validatePromotionToken() field.ErrorList { // Check if the replica mode is used with an incompatible bootstrap // method -func (r *Cluster) validateReplicaMode() field.ErrorList { +func (v *ClusterCustomValidator) validateReplicaMode(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList replicaClusterConf := r.Spec.ReplicaCluster @@ -2037,12 +1850,12 @@ func (r *Cluster) validateReplicaMode() field.ErrorList { } } - result = append(result, r.validateReplicaClusterExternalClusters()...) + result = append(result, v.validateReplicaClusterExternalClusters(r)...) 
return result } -func (r *Cluster) validateReplicaClusterExternalClusters() field.ErrorList { +func (v *ClusterCustomValidator) validateReplicaClusterExternalClusters(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList replicaClusterConf := r.Spec.ReplicaCluster if replicaClusterConf == nil { @@ -2089,7 +1902,7 @@ func (r *Cluster) validateReplicaClusterExternalClusters() field.ErrorList { // validateTolerations check and validate the tolerations field // This code is almost a verbatim copy of // https://github.com/kubernetes/kubernetes/blob/4d38d21/pkg/apis/core/validation/validation.go#L3147 -func (r *Cluster) validateTolerations() field.ErrorList { +func (v *ClusterCustomValidator) validateTolerations(r *apiv1.Cluster) field.ErrorList { path := field.NewPath("spec", "affinity", "toleration") allErrors := field.ErrorList{} for i, toleration := range r.Spec.Affinity.Tolerations { @@ -2100,14 +1913,14 @@ func (r *Cluster) validateTolerations() field.ErrorList { } // empty toleration key with Exists operator and empty value means match all taints - if len(toleration.Key) == 0 && toleration.Operator != v1.TolerationOpExists { + if len(toleration.Key) == 0 && toleration.Operator != corev1.TolerationOpExists { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator, "operator must be Exists when `key` is empty, which means \"match all values and all keys\"")) } - if toleration.TolerationSeconds != nil && toleration.Effect != v1.TaintEffectNoExecute { + if toleration.TolerationSeconds != nil && toleration.Effect != corev1.TaintEffectNoExecute { allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect, @@ -2117,20 +1930,20 @@ func (r *Cluster) validateTolerations() field.ErrorList { // validate toleration operator and value switch toleration.Operator { // empty operator means Equal - case v1.TolerationOpEqual, "": + case corev1.TolerationOpEqual, "": if errs := validationutil.IsValidLabelValue(toleration.Value); len(errs) != 0 { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) } - case v1.TolerationOpExists: + case corev1.TolerationOpExists: if len(toleration.Value) > 0 { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) } default: - validValues := []string{string(v1.TolerationOpEqual), string(v1.TolerationOpExists)} + validValues := []string{string(corev1.TolerationOpEqual), string(corev1.TolerationOpExists)} allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) @@ -2147,7 +1960,7 @@ func (r *Cluster) validateTolerations() field.ErrorList { // validateTaintEffect is used from validateTollerations and is a verbatim copy of the code // at https://github.com/kubernetes/kubernetes/blob/4d38d21/pkg/apis/core/validation/validation.go#L3087 -func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { +func validateTaintEffect(effect *corev1.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { if !allowEmpty && len(*effect) == 0 { return field.ErrorList{field.Required(fldPath, "")} } @@ -2155,14 +1968,14 @@ func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field allErrors := field.ErrorList{} switch *effect { // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit. 
- case v1.TaintEffectNoSchedule, v1.TaintEffectPreferNoSchedule, v1.TaintEffectNoExecute: + case corev1.TaintEffectNoSchedule, corev1.TaintEffectPreferNoSchedule, corev1.TaintEffectNoExecute: // case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, // core.TaintEffectNoExecute: default: validValues := []string{ - string(v1.TaintEffectNoSchedule), - string(v1.TaintEffectPreferNoSchedule), - string(v1.TaintEffectNoExecute), + string(corev1.TaintEffectNoSchedule), + string(corev1.TaintEffectPreferNoSchedule), + string(corev1.TaintEffectNoExecute), // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. // string(core.TaintEffectNoScheduleNoAdmit), } @@ -2172,25 +1985,25 @@ func validateTaintEffect(effect *v1.TaintEffect, allowEmpty bool, fldPath *field } // validateAntiAffinity checks and validates the anti-affinity fields. -func (r *Cluster) validateAntiAffinity() field.ErrorList { +func (v *ClusterCustomValidator) validateAntiAffinity(r *apiv1.Cluster) field.ErrorList { path := field.NewPath("spec", "affinity", "podAntiAffinityType") allErrors := field.ErrorList{} - if r.Spec.Affinity.PodAntiAffinityType != PodAntiAffinityTypePreferred && - r.Spec.Affinity.PodAntiAffinityType != PodAntiAffinityTypeRequired && + if r.Spec.Affinity.PodAntiAffinityType != apiv1.PodAntiAffinityTypePreferred && + r.Spec.Affinity.PodAntiAffinityType != apiv1.PodAntiAffinityTypeRequired && r.Spec.Affinity.PodAntiAffinityType != "" { allErrors = append(allErrors, field.Invalid( path, r.Spec.Affinity.PodAntiAffinityType, fmt.Sprintf("pod anti-affinity type must be '%s' (default if empty) or '%s'", - PodAntiAffinityTypePreferred, PodAntiAffinityTypeRequired), + apiv1.PodAntiAffinityTypePreferred, apiv1.PodAntiAffinityTypeRequired), )) } return allErrors } // validateBackupConfiguration validates the backup configuration -func (r *Cluster) validateBackupConfiguration() field.ErrorList { +func (v *ClusterCustomValidator) validateBackupConfiguration(r *apiv1.Cluster) field.ErrorList { if r.Spec.Backup == nil { return nil } @@ -2201,7 +2014,7 @@ func (r *Cluster) validateBackupConfiguration() field.ErrorList { } // validateRetentionPolicy validates the retention policy configuration -func (r *Cluster) validateRetentionPolicy() field.ErrorList { +func (v *ClusterCustomValidator) validateRetentionPolicy(r *apiv1.Cluster) field.ErrorList { if r.Spec.Backup == nil { return nil } @@ -2211,13 +2024,13 @@ func (r *Cluster) validateRetentionPolicy() field.ErrorList { ) } -func (r *Cluster) validateReplicationSlots() field.ErrorList { +func (v *ClusterCustomValidator) validateReplicationSlots(r *apiv1.Cluster) field.ErrorList { if r.Spec.ReplicationSlots == nil { - r.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + r.Spec.ReplicationSlots = &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), }, - SynchronizeReplicas: &SynchronizeReplicasConfiguration{ + SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{ Enabled: ptr.To(true), }, } @@ -2228,7 +2041,7 @@ func (r *Cluster) validateReplicationSlots() field.ErrorList { return nil } - if errs := r.Spec.ReplicationSlots.SynchronizeReplicas.compileRegex(); len(errs) > 0 { + if errs := r.Spec.ReplicationSlots.SynchronizeReplicas.ValidateRegex(); len(errs) > 0 { return field.ErrorList{ field.Invalid( field.NewPath("spec", "replicationSlots", "synchronizeReplicas", "excludePatterns"), 
@@ -2240,7 +2053,7 @@ func (r *Cluster) validateReplicationSlots() field.ErrorList { return nil } -func (r *Cluster) validateReplicationSlotsChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateReplicationSlotsChange(r, old *apiv1.Cluster) field.ErrorList { newReplicationSlots := r.Spec.ReplicationSlots oldReplicationSlots := old.Spec.ReplicationSlots @@ -2276,7 +2089,7 @@ func (r *Cluster) validateReplicationSlotsChange(old *Cluster) field.ErrorList { return errs } -func (r *Cluster) validateWALLevelChange(old *Cluster) field.ErrorList { +func (v *ClusterCustomValidator) validateWALLevelChange(r, old *apiv1.Cluster) field.ErrorList { var errs field.ErrorList newWALLevel := r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLevel] @@ -2293,7 +2106,7 @@ func (r *Cluster) validateWALLevelChange(old *Cluster) field.ErrorList { return errs } -func (r *Cluster) validateManagedServices() field.ErrorList { +func (v *ClusterCustomValidator) validateManagedServices(r *apiv1.Cluster) field.ErrorList { reservedNames := []string{ r.GetServiceReadWriteName(), r.GetServiceReadOnlyName(), @@ -2319,10 +2132,10 @@ func (r *Cluster) validateManagedServices() field.ErrorList { basePath := field.NewPath("spec", "managed", "services") var errs field.ErrorList - if slices.Contains(managedServices.DisabledDefaultServices, ServiceSelectorTypeRW) { + if slices.Contains(managedServices.DisabledDefaultServices, apiv1.ServiceSelectorTypeRW) { errs = append(errs, field.Invalid( basePath.Child("disabledDefaultServices"), - ServiceSelectorTypeRW, + apiv1.ServiceSelectorTypeRW, "service of type RW cannot be disabled.", )) } @@ -2366,7 +2179,7 @@ func (r *Cluster) validateManagedServices() field.ErrorList { func validateServiceTemplate( path *field.Path, nameRequired bool, - template ServiceTemplateSpec, + template apiv1.ServiceTemplateSpec, ) field.ErrorList { var errs field.ErrorList @@ -2386,7 +2199,7 @@ func validateServiceTemplate( } // validateManagedRoles validate the environment variables settings proposed by the user -func (r *Cluster) validateManagedRoles() field.ErrorList { +func (v *ClusterCustomValidator) validateManagedRoles(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList if r.Spec.Managed == nil { @@ -2435,14 +2248,14 @@ func (r *Cluster) validateManagedRoles() field.ErrorList { } // validateManagedExtensions validate the managed extensions parameters set by the user -func (r *Cluster) validateManagedExtensions() field.ErrorList { +func (v *ClusterCustomValidator) validateManagedExtensions(r *apiv1.Cluster) field.ErrorList { allErrors := field.ErrorList{} - allErrors = append(allErrors, r.validatePgFailoverSlots()...) + allErrors = append(allErrors, v.validatePgFailoverSlots(r)...) 
return allErrors } -func (r *Cluster) validatePgFailoverSlots() field.ErrorList { +func (v *ClusterCustomValidator) validatePgFailoverSlots(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList var pgFailoverSlots postgres.ManagedExtension @@ -2502,11 +2315,11 @@ func (r *Cluster) validatePgFailoverSlots() field.ErrorList { return result } -func (r *Cluster) getAdmissionWarnings() admission.Warnings { - return r.getMaintenanceWindowsAdmissionWarnings() +func (v *ClusterCustomValidator) getAdmissionWarnings(r *apiv1.Cluster) admission.Warnings { + return getMaintenanceWindowsAdmissionWarnings(r) } -func (r *Cluster) getMaintenanceWindowsAdmissionWarnings() admission.Warnings { +func getMaintenanceWindowsAdmissionWarnings(r *apiv1.Cluster) admission.Warnings { var result admission.Warnings if r.Spec.NodeMaintenanceWindow != nil { @@ -2518,7 +2331,7 @@ func (r *Cluster) getMaintenanceWindowsAdmissionWarnings() admission.Warnings { } // validate whether the hibernation configuration is valid -func (r *Cluster) validateHibernationAnnotation() field.ErrorList { +func (v *ClusterCustomValidator) validateHibernationAnnotation(r *apiv1.Cluster) field.ErrorList { value, ok := r.Annotations[utils.HibernationAnnotationName] isKnownValue := value == string(utils.HibernationAnnotationValueOn) || value == string(utils.HibernationAnnotationValueOff) diff --git a/api/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go similarity index 55% rename from api/v1/cluster_webhook_test.go rename to internal/webhook/v1/cluster_webhook_test.go index bd08a26511..faa1f0f0d7 100644 --- a/api/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/ptr" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" @@ -42,136 +42,154 @@ import ( ) var _ = Describe("bootstrap methods validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("doesn't complain if there isn't a configuration", func() { - emptyCluster := &Cluster{} - result := emptyCluster.validateBootstrapMethod() + emptyCluster := &apiv1.Cluster{} + result := v.validateBootstrapMethod(emptyCluster) Expect(result).To(BeEmpty()) }) It("doesn't complain if we are using initdb", func() { - initdbCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, + initdbCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, }, }, } - result := initdbCluster.validateBootstrapMethod() + result := v.validateBootstrapMethod(initdbCluster) Expect(result).To(BeEmpty()) }) It("doesn't complain if we are using recovery", func() { - recoveryCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{}, + recoveryCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{}, }, }, } - result := recoveryCluster.validateBootstrapMethod() + result := v.validateBootstrapMethod(recoveryCluster) Expect(result).To(BeEmpty()) }) It("complains where there are two active bootstrap methods", func() { - invalidCluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: 
&BootstrapConfiguration{ - Recovery: &BootstrapRecovery{}, - InitDB: &BootstrapInitDB{}, + invalidCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{}, + InitDB: &apiv1.BootstrapInitDB{}, }, }, } - result := invalidCluster.validateBootstrapMethod() + result := v.validateBootstrapMethod(invalidCluster) Expect(result).To(HaveLen(1)) }) }) var _ = Describe("certificates options validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("doesn't complain if there isn't a configuration", func() { - emptyCluster := &Cluster{} - result := emptyCluster.validateCerts() + emptyCluster := &apiv1.Cluster{} + result := v.validateCerts(emptyCluster) Expect(result).To(BeEmpty()) }) + It("doesn't complain if you specify some valid secret names", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Certificates: &CertificatesConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Certificates: &apiv1.CertificatesConfiguration{ ServerCASecret: "test-server-ca", ServerTLSSecret: "test-server-tls", }, }, } - result := cluster.validateCerts() + result := v.validateCerts(cluster) Expect(result).To(BeEmpty()) }) + It("does complain if you specify the TLS secret and not the CA", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Certificates: &CertificatesConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Certificates: &apiv1.CertificatesConfiguration{ ServerTLSSecret: "test-server-tls", }, }, } - result := cluster.validateCerts() + result := v.validateCerts(cluster) Expect(result).To(HaveLen(1)) }) + It("does complain if you specify the TLS secret and AltDNSNames is not empty", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Certificates: &CertificatesConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Certificates: &apiv1.CertificatesConfiguration{ ServerCASecret: "test-server-ca", ServerTLSSecret: "test-server-tls", ServerAltDNSNames: []string{"dns-name"}, }, }, } - result := cluster.validateCerts() + result := v.validateCerts(cluster) Expect(result).To(HaveLen(1)) }) }) var _ = Describe("initdb options validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("doesn't complain if there isn't a configuration", func() { - emptyCluster := &Cluster{} - result := emptyCluster.validateInitDB() + emptyCluster := &apiv1.Cluster{} + result := v.validateInitDB(emptyCluster) Expect(result).To(BeEmpty()) }) It("complains if you specify the database name but not the owner", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", }, }, }, } - result := cluster.validateInitDB() + result := v.validateInitDB(cluster) Expect(result).To(HaveLen(1)) }) It("complains if you specify the owner but not the database name", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Owner: "app", }, }, }, } - result := cluster.validateInitDB() + result := v.validateInitDB(cluster) Expect(result).To(HaveLen(1)) }) It("doesn't complain if you specify both database name 
and owner user", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", }, @@ -179,21 +197,21 @@ var _ = Describe("initdb options validation", func() { }, } - result := cluster.validateInitDB() + result := v.validateInitDB(cluster) Expect(result).To(BeEmpty()) }) It("complains if key is missing in the secretRefs", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - SecretRefs: []SecretKeySelector{ + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + SecretRefs: []apiv1.SecretKeySelector{ { - LocalObjectReference: LocalObjectReference{Name: "secret1"}, + LocalObjectReference: apiv1.LocalObjectReference{Name: "secret1"}, }, }, }, @@ -202,19 +220,19 @@ var _ = Describe("initdb options validation", func() { }, } - result := cluster.validateInitDB() + result := v.validateInitDB(cluster) Expect(result).To(HaveLen(1)) }) It("complains if name is missing in the secretRefs", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - SecretRefs: []SecretKeySelector{ + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + SecretRefs: []apiv1.SecretKeySelector{ { Key: "key", }, @@ -225,21 +243,21 @@ var _ = Describe("initdb options validation", func() { }, } - result := cluster.validateInitDB() + result := v.validateInitDB(cluster) Expect(result).To(HaveLen(1)) }) It("complains if key is missing in the configMapRefs", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - ConfigMapRefs: []ConfigMapKeySelector{ + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + ConfigMapRefs: []apiv1.ConfigMapKeySelector{ { - LocalObjectReference: LocalObjectReference{Name: "configmap1"}, + LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap1"}, }, }, }, @@ -248,19 +266,19 @@ var _ = Describe("initdb options validation", func() { }, } - result := cluster.validateInitDB() + result := v.validateInitDB(cluster) Expect(result).To(HaveLen(1)) }) It("complains if name is missing in the configMapRefs", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - ConfigMapRefs: []ConfigMapKeySelector{ + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + ConfigMapRefs: []apiv1.ConfigMapKeySelector{ { Key: "key", }, @@ -271,35 +289,35 @@ var _ = Describe("initdb options validation", func() { }, } - result := cluster.validateInitDB() + result := 
v.validateInitDB(cluster) Expect(result).To(HaveLen(1)) }) It("doesn't complain if configmapRefs and secretRefs are valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - PostInitApplicationSQLRefs: &SQLRefs{ - ConfigMapRefs: []ConfigMapKeySelector{ + PostInitApplicationSQLRefs: &apiv1.SQLRefs{ + ConfigMapRefs: []apiv1.ConfigMapKeySelector{ { - LocalObjectReference: LocalObjectReference{Name: "configmap1"}, + LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap1"}, Key: "key", }, { - LocalObjectReference: LocalObjectReference{Name: "configmap2"}, + LocalObjectReference: apiv1.LocalObjectReference{Name: "configmap2"}, Key: "key", }, }, - SecretRefs: []SecretKeySelector{ + SecretRefs: []apiv1.SecretKeySelector{ { - LocalObjectReference: LocalObjectReference{Name: "secret1"}, + LocalObjectReference: apiv1.LocalObjectReference{Name: "secret1"}, Key: "key", }, { - LocalObjectReference: LocalObjectReference{Name: "secret2"}, + LocalObjectReference: apiv1.LocalObjectReference{Name: "secret2"}, Key: "key", }, }, @@ -309,303 +327,157 @@ var _ = Describe("initdb options validation", func() { }, }, }, } - result := cluster.validateInitDB() + result := v.validateInitDB(cluster) Expect(result).To(BeEmpty()) }) It("doesn't complain if superuser secret is empty", func() { - cluster := Cluster{ - Spec: ClusterSpec{}, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - result := cluster.validateSuperuserSecret() + result := v.validateSuperuserSecret(cluster) Expect(result).To(BeEmpty()) }) It("complains if superuser secret name is empty", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - SuperuserSecret: &LocalObjectReference{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + SuperuserSecret: &apiv1.LocalObjectReference{ Name: "", }, }, } - result := cluster.validateSuperuserSecret() + result := v.validateSuperuserSecret(cluster) Expect(result).To(HaveLen(1)) }) }) -var _ = Describe("cluster configuration", func() { - It("defaults to creating an application database", func() { - cluster := Cluster{} - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app")) - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app")) - }) - - It("defaults the owner user with the database name", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "appdb", - }, - }, - }, - } - - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("appdb")) - }) - - It("defaults to create an application database if recovery is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{}, - }, - }, - } - cluster.Default() - Expect(cluster.ShouldRecoveryCreateApplicationDatabase()).Should(BeTrue()) - Expect(cluster.Spec.Bootstrap.Recovery.Database).ShouldNot(BeEmpty()) - Expect(cluster.Spec.Bootstrap.Recovery.Owner).ShouldNot(BeEmpty()) - Expect(cluster.Spec.Bootstrap.Recovery.Secret).Should(BeNil()) - }) - - It("defaults the owner user with the database name for recovery", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - Database: "appdb", - }, - }, - }, - } - - cluster.Default() - 
Expect(cluster.Spec.Bootstrap.Recovery.Owner).To(Equal("appdb")) - }) - - It("defaults to create an application database if pg_basebackup is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{}, - }, - }, - } - cluster.Default() - Expect(cluster.ShouldPgBaseBackupCreateApplicationDatabase()).Should(BeTrue()) - Expect(cluster.Spec.Bootstrap.PgBaseBackup.Database).ShouldNot(BeEmpty()) - Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).ShouldNot(BeEmpty()) - Expect(cluster.Spec.Bootstrap.PgBaseBackup.Secret).Should(BeNil()) - }) - - It("defaults the owner user with the database name for pg_basebackup", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{ - Database: "appdb", - }, - }, - }, - } - - cluster.Default() - Expect(cluster.Spec.Bootstrap.PgBaseBackup.Owner).To(Equal("appdb")) - }) - - It("defaults the PostgreSQL configuration with parameters from the operator", func() { - cluster := Cluster{} - cluster.Default() - Expect(cluster.Spec.PostgresConfiguration.Parameters).ToNot(BeEmpty()) - }) - - It("defaults the anti-affinity", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Affinity: AffinityConfiguration{}, - }, - } - cluster.Default() - Expect(cluster.Spec.Affinity.PodAntiAffinityType).To(BeEquivalentTo(PodAntiAffinityTypePreferred)) - Expect(cluster.Spec.Affinity.EnablePodAntiAffinity).To(BeNil()) +var _ = Describe("ImagePullPolicy validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} }) -}) -var _ = Describe("ImagePullPolicy validation", func() { It("complains if the imagePullPolicy isn't valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImagePullPolicy: "wrong", }, } - result := cluster.validateImagePullPolicy() + result := v.validateImagePullPolicy(cluster) Expect(result).To(HaveLen(1)) }) + It("does not complain if the imagePullPolicy is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImagePullPolicy: "Always", }, } - result := cluster.validateImagePullPolicy() + result := v.validateImagePullPolicy(cluster) Expect(result).To(BeEmpty()) }) }) -var _ = Describe("Defaulting webhook", func() { - It("should fill the image name if isn't already set", func() { - cluster := Cluster{} - cluster.Default() - Expect(cluster.Spec.ImageName).To(Equal(configuration.Current.PostgresImageName)) - }) - - It("shouldn't set the image name if already present", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ImageName: "test:13", - }, - } - cluster.Default() - Expect(cluster.Spec.ImageName).To(Equal("test:13")) - }) - - It("should setup the application database name", func() { - cluster := Cluster{} - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("app")) - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("app")) - }) - - It("should set the owner name as the database name", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "test", - }, - }, - }, - } - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("test")) - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("test")) - }) - - It("should not overwrite application database and owner settings", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - 
Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ - Database: "testdb", - Owner: "testuser", - }, - }, - }, - } - cluster.Default() - Expect(cluster.Spec.Bootstrap.InitDB.Database).To(Equal("testdb")) - Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(Equal("testuser")) +var _ = Describe("Image name validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} }) -}) -var _ = Describe("Image name validation", func() { It("doesn't complain if the user simply accepts the default", func() { - var cluster Cluster - Expect(cluster.validateImageName()).To(BeEmpty()) + var cluster apiv1.Cluster + Expect(v.validateImageName(&cluster)).To(BeEmpty()) // Let's apply the defaulting webhook, too cluster.Default() - Expect(cluster.validateImageName()).To(BeEmpty()) + Expect(v.validateImageName(&cluster)).To(BeEmpty()) }) It("complains when the 'latest' tag is detected", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:latest", }, } - Expect(cluster.validateImageName()).To(HaveLen(1)) + Expect(v.validateImageName(cluster)).To(HaveLen(1)) }) It("doesn't complain when an alpha tag is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:15alpha1", }, } - Expect(cluster.validateImageName()).To(BeEmpty()) + Expect(v.validateImageName(cluster)).To(BeEmpty()) }) It("doesn't complain when a beta tag is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:15beta1", }, } - Expect(cluster.validateImageName()).To(BeEmpty()) + Expect(v.validateImageName(cluster)).To(BeEmpty()) }) It("doesn't complain when a release candidate tag is used", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:15rc1", }, } - Expect(cluster.validateImageName()).To(BeEmpty()) + Expect(v.validateImageName(cluster)).To(BeEmpty()) }) It("complains when only the sha is passed", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866", }, } - Expect(cluster.validateImageName()).To(HaveLen(1)) + Expect(v.validateImageName(cluster)).To(HaveLen(1)) }) It("doesn't complain if the tag is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:10.4", }, } - Expect(cluster.validateImageName()).To(BeEmpty()) + Expect(v.validateImageName(cluster)).To(BeEmpty()) }) It("doesn't complain if the tag is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:14.4-1", }, } - Expect(cluster.validateImageName()).To(BeEmpty()) + Expect(v.validateImageName(cluster)).To(BeEmpty()) }) It("doesn't complain if the tag is valid and has sha", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:10.4@sha256:cff94de382ca538861622bbe84cfe03f44f307a9846a5c5eda672cf4dc692866", }, } - Expect(cluster.validateImageName()).To(BeEmpty()) + Expect(v.validateImageName(cluster)).To(BeEmpty()) }) It("complains when the tag name is not a PostgreSQL version", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ 
+ Spec: apiv1.ClusterSpec{ ImageName: "postgres:test_12", }, } - Expect(cluster.validateImageName()).To(HaveLen(1)) + Expect(v.validateImageName(cluster)).To(HaveLen(1)) }) }) @@ -634,329 +506,334 @@ var _ = DescribeTable("parsePostgresQuantityValue", ) var _ = Describe("configuration change validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("doesn't complain when the configuration is exactly the same", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:10.4", }, } - clusterNew := clusterOld - Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(BeEmpty()) + clusterNew := clusterOld.DeepCopy() + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty()) }) It("doesn't complain when we change a setting which is not fixed", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:10.4", }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:10.4", - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "shared_buffers": "4G", }, }, }, } - Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty()) }) It("complains when changing postgres major version and settings", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:10.4", }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:10.5", - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "shared_buffers": "4G", }, }, }, } - Expect(clusterNew.validateConfigurationChange(&clusterOld)).To(HaveLen(1)) + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(HaveLen(1)) }) It("produces no error when WAL size settings are correct", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "80MB", "max_wal_size": "1024", }, }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "1500", "max_wal_size": "2 GB", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "3Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: 
apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "1.5GB", "max_wal_size": "2000", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "2Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "max_wal_size": "1GB", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "2Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "100MB", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "2Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{}, }, }, } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) }) It("produces one complaint when min_wal_size is bigger than max_wal_size", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "1500", "max_wal_size": "1GB", }, }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "2Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "2G", "max_wal_size": "1GB", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "2Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "4Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) }) It("produces one complaint when max_wal_size is bigger than WAL storage", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + 
PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "max_wal_size": "2GB", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "1G", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "4Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "80MB", "max_wal_size": "1500", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "1G", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "4Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) }) It("produces two complaints when min_wal_size is bigger than WAL storage and max_wal_size", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "3GB", "max_wal_size": "1GB", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "2Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(2)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(2)) }) It("complains about invalid value for min_wal_size and max_wal_size", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "xxx", "max_wal_size": "1GB", }, }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "80", "max_wal_size": "1Gb", }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "2Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) }) It("doesn't compare default values for min_wal_size and max_wal_size with WalStorage", func() { - clusterNew := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{}, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "100Mi", }, }, } - Expect(clusterNew.validateConfiguration()).To(BeEmpty()) + 
Expect(v.validateConfiguration(clusterNew)).To(BeEmpty()) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "min_wal_size": "1.5GB", // default for max_wal_size is 1GB }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "2Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) - clusterNew = Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + clusterNew = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "max_wal_size": "70M", // default for min_wal_size is 80M }, }, - WalStorage: &StorageConfiguration{ + WalStorage: &apiv1.StorageConfiguration{ Size: "2Gi", }, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "4Gi", }, }, } - Expect(clusterNew.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(clusterNew)).To(HaveLen(1)) }) It("should detect an invalid `shared_buffers` value", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "shared_buffers": "invalid", }, @@ -964,20 +841,20 @@ var _ = Describe("configuration change validation", func() { }, } - Expect(cluster.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) }) It("should reject minimal wal_level when backup is configured", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{ - BarmanObjectStore: &BarmanObjectStoreConfiguration{ - BarmanCredentials: BarmanCredentials{ - AWS: &S3Credentials{}, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ + BarmanCredentials: apiv1.BarmanCredentials{ + AWS: &apiv1.S3Credentials{}, }, }, }, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "minimal", "max_wal_senders": "0", @@ -986,20 +863,20 @@ var _ = Describe("configuration change validation", func() { }, } Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) - Expect(cluster.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) }) It("should allow replica wal_level when backup is configured", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{ - BarmanObjectStore: &BarmanObjectStoreConfiguration{ - BarmanCredentials: BarmanCredentials{ - AWS: &S3Credentials{}, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ + BarmanCredentials: apiv1.BarmanCredentials{ + AWS: &apiv1.S3Credentials{}, }, }, }, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "replica", }, @@ -1007,20 +884,20 @@ var _ = Describe("configuration 
change validation", func() { }, } Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) - Expect(cluster.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) }) It("should allow logical wal_level when backup is configured", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Backup: &BackupConfiguration{ - BarmanObjectStore: &BarmanObjectStoreConfiguration{ - BarmanCredentials: BarmanCredentials{ - AWS: &S3Credentials{}, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ + BarmanCredentials: apiv1.BarmanCredentials{ + AWS: &apiv1.S3Credentials{}, }, }, }, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "logical", }, @@ -1028,14 +905,14 @@ var _ = Describe("configuration change validation", func() { }, } Expect(cluster.Spec.Backup.IsBarmanBackupConfigured()).To(BeTrue()) - Expect(cluster.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) }) It("should reject minimal wal_level when instances is greater than one", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 2, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "minimal", "max_wal_senders": "0", @@ -1044,42 +921,42 @@ var _ = Describe("configuration change validation", func() { }, } - Expect(cluster.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) }) It("should allow replica wal_level when instances is greater than one", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 2, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "replica", }, }, }, } - Expect(cluster.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) }) It("should allow logical wal_level when instances is greater than one", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 2, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "logical", }, }, }, } - Expect(cluster.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) }) It("should reject an unknown wal_level value", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "test", }, @@ -1087,19 +964,19 @@ var _ = Describe("configuration change validation", func() { }, } - errs := cluster.validateConfiguration() + errs := v.validateConfiguration(cluster) Expect(errs).To(HaveLen(1)) Expect(errs[0].Detail).To(ContainSubstring("unrecognized `wal_level` value - allowed values")) }) It("should reject minimal if it is a replica cluster", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 1, - ReplicaCluster: &ReplicaClusterConfiguration{ + 
ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), }, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "minimal", "max_wal_senders": "0", @@ -1108,19 +985,19 @@ var _ = Describe("configuration change validation", func() { }, } Expect(cluster.IsReplica()).To(BeTrue()) - Expect(cluster.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) }) It("should allow minimal wal_level with one instance and without archive mode", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "minimal", "max_wal_senders": "0", @@ -1128,55 +1005,55 @@ var _ = Describe("configuration change validation", func() { }, }, } - Expect(cluster.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) }) It("should disallow minimal wal_level with one instance, without max_wal_senders being specified", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "minimal", }, }, }, } - Expect(cluster.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) }) It("should disallow changing wal_level to minimal for existing clusters", func() { - oldCluster := Cluster{ + oldCluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "max_wal_senders": "0", }, }, }, } - oldCluster.setDefaults(true) + oldCluster.Default() - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "minimal", "max_wal_senders": "0", @@ -1184,19 +1061,19 @@ var _ = Describe("configuration change validation", func() { }, }, } - Expect(cluster.validateWALLevelChange(&oldCluster)).To(HaveLen(1)) + Expect(v.validateWALLevelChange(cluster, oldCluster)).To(HaveLen(1)) }) It("should allow retaining wal_level to minimal for existing clusters", func() { - oldCluster := Cluster{ + oldCluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "minimal", "max_wal_senders": "0", @@ -1204,17 +1081,17 @@ var _ = Describe("configuration change validation", func() { }, }, } 
- oldCluster.setDefaults(true) + oldCluster.Default() - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_level": "minimal", "max_wal_senders": "0", @@ -1223,147 +1100,152 @@ var _ = Describe("configuration change validation", func() { }, }, } - Expect(cluster.validateWALLevelChange(&oldCluster)).To(BeEmpty()) + Expect(v.validateWALLevelChange(cluster, oldCluster)).To(BeEmpty()) }) Describe("wal_log_hints", func() { It("should reject wal_log_hints set to an invalid value", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_log_hints": "foo", }, }, }, } - Expect(cluster.validateConfiguration()).To(HaveLen(1)) + Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) }) It("should allow wal_log_hints set to off for clusters having just one instance", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_log_hints": "off", }, }, }, } - Expect(cluster.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) }) It("should not allow wal_log_hints set to off for clusters having more than one instance", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_log_hints": "off", }, }, }, } - Expect(cluster.validateConfiguration()).ToNot(BeEmpty()) + Expect(v.validateConfiguration(cluster)).ToNot(BeEmpty()) }) It("should allow wal_log_hints set to on for clusters having just one instance", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 1, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_log_hints": "on", }, }, }, } - Expect(cluster.validateConfiguration()).To(BeEmpty()) + Expect(v.validateConfiguration(cluster)).To(BeEmpty()) }) It("should not allow wal_log_hints set to on for clusters having more than one instance", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.SkipWalArchiving: "enabled", }, }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "wal_log_hints": "true", }, }, }, } - Expect(cluster.validateConfiguration()).To(BeEmpty()) + 
Expect(v.validateConfiguration(cluster)).To(BeEmpty()) }) }) }) var _ = Describe("validate image name change", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + Context("using image name", func() { It("doesn't complain with no changes", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{}, + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - clusterNew := Cluster{ - Spec: ClusterSpec{}, + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) }) It("complains if it can't upgrade between major versions", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:17.0", }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:16.0", }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) }) It("doesn't complain if image change is valid", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:17.1", }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:17.0", }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) }) }) Context("using image catalog", func() { It("complains on major upgrades", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1372,9 +1254,9 @@ var _ = Describe("validate image name change", func() { }, }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1383,19 +1265,19 @@ var _ = Describe("validate image name change", func() { }, }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) }) }) Context("changing from imageName to imageCatalogRef", func() { It("doesn't complain when the major is the same", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:16.1", }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1404,17 +1286,17 @@ var _ = Describe("validate image name change", func() { }, }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) }) It("complains on major upgrades", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:16.1", }, } - 
clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1423,15 +1305,15 @@ var _ = Describe("validate image name change", func() { }, }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) }) It("complains going from default imageName to different major imageCatalogRef", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{}, + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1440,18 +1322,18 @@ var _ = Describe("validate image name change", func() { }, }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) }) It("doesn't complain going from default imageName to same major imageCatalogRef", func() { defaultImageRef := reference.New(versions.DefaultImageName) version, err := pgversion.FromTag(defaultImageRef.Tag) Expect(err).ToNot(HaveOccurred()) - clusterOld := Cluster{ - Spec: ClusterSpec{}, + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - clusterNew := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1460,15 +1342,15 @@ var _ = Describe("validate image name change", func() { }, }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) }) }) Context("changing from imageCatalogRef to imageName", func() { It("doesn't complain when the major is the same", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1477,17 +1359,17 @@ var _ = Describe("validate image name change", func() { }, }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:17.1", }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) }) It("complains on major upgrades", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1496,17 +1378,17 @@ var _ = Describe("validate image name change", func() { }, }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: "postgres:17.1", }, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) }) 
It("complains going from imageCatalogRef to different major default imageName", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1515,19 +1397,19 @@ var _ = Describe("validate image name change", func() { }, }, } - clusterNew := Cluster{ - Spec: ClusterSpec{}, + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(HaveLen(1)) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) }) It("doesn't complain going from imageCatalogRef to same major default imageName", func() { imageNameRef := reference.New(versions.DefaultImageName) version, err := pgversion.FromTag(imageNameRef.Tag) Expect(err).ToNot(HaveOccurred()) - clusterOld := Cluster{ - Spec: ClusterSpec{ - ImageCatalogRef: &ImageCatalogRef{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ Name: "test", Kind: "ImageCatalog", @@ -1536,21 +1418,26 @@ var _ = Describe("validate image name change", func() { }, }, } - clusterNew := Cluster{ - Spec: ClusterSpec{}, + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - Expect(clusterNew.validateImageChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) }) }) }) var _ = Describe("recovery target", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("is mutually exclusive", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "", TargetXID: "", TargetName: "", @@ -1564,15 +1451,15 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) }) It("Requires BackupID to perform PITR with TargetName", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ BackupID: "20220616T031500", TargetTLI: "", TargetXID: "", @@ -1587,15 +1474,15 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) }) It("Fails when no BackupID is provided to perform PITR with TargetXID", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ BackupID: "", TargetTLI: "", TargetXID: "1/1", @@ -1610,15 +1497,15 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) + 
Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) }) It("TargetTime's format as `YYYY-MM-DD HH24:MI:SS.FF6TZH` is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "", TargetXID: "", TargetName: "", @@ -1632,15 +1519,15 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) }) It("TargetTime's format as YYYY-MM-DD HH24:MI:SS.FF6TZH:TZM` is valid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "", TargetXID: "", TargetName: "", @@ -1654,15 +1541,15 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) }) It("TargetTime's format as YYYY-MM-DD HH24:MI:SS.FF6 TZH:TZM` is invalid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "", TargetXID: "", TargetName: "", @@ -1676,15 +1563,15 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) }) It("raises errors for invalid LSN", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "", TargetXID: "", TargetName: "", @@ -1698,15 +1585,15 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) }) It("valid LSN", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "", TargetXID: "", TargetName: "", @@ -1720,15 +1607,15 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) }) It("can be specified", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: 
&apiv1.RecoveryTarget{ TargetTime: "2020-01-01 01:01:00", }, }, @@ -1736,230 +1623,245 @@ var _ = Describe("recovery target", func() { }, } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) }) When("recoveryTLI is specified", func() { It("allows 'latest'", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "latest", }, }, }, }, } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) }) It("allows a positive integer", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "23", }, }, }, }, } - Expect(cluster.validateRecoveryTarget()).To(BeEmpty()) + Expect(v.validateRecoveryTarget(cluster)).To(BeEmpty()) }) It("prevents 0 value", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "0", }, }, }, }, } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) }) It("prevents negative values", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "-5", }, }, }, }, } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) }) It("prevents everything else beside the empty string", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ TargetTLI: "I don't remember", }, }, }, }, } - Expect(cluster.validateRecoveryTarget()).To(HaveLen(1)) + Expect(v.validateRecoveryTarget(cluster)).To(HaveLen(1)) }) }) }) var _ = Describe("primary update strategy", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("allows 'unsupervised'", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PrimaryUpdateStrategy: PrimaryUpdateStrategyUnsupervised, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategyUnsupervised, Instances: 3, }, } - Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty()) + Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty()) }) It("allows 'supervised'", func() { - cluster := Cluster{ - Spec: ClusterSpec{ 
- PrimaryUpdateStrategy: PrimaryUpdateStrategySupervised, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategySupervised, Instances: 3, }, } - Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty()) + Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty()) }) It("prevents 'supervised' for single-instance clusters", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PrimaryUpdateStrategy: PrimaryUpdateStrategySupervised, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategySupervised, Instances: 1, }, } - Expect(cluster.validatePrimaryUpdateStrategy()).ToNot(BeEmpty()) + Expect(v.validatePrimaryUpdateStrategy(cluster)).ToNot(BeEmpty()) }) It("allows 'unsupervised' for single-instance clusters", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - PrimaryUpdateStrategy: PrimaryUpdateStrategyUnsupervised, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PrimaryUpdateStrategy: apiv1.PrimaryUpdateStrategyUnsupervised, Instances: 1, }, } - Expect(cluster.validatePrimaryUpdateStrategy()).To(BeEmpty()) + Expect(v.validatePrimaryUpdateStrategy(cluster)).To(BeEmpty()) }) It("prevents everything else", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ PrimaryUpdateStrategy: "maybe", Instances: 3, }, } - Expect(cluster.validatePrimaryUpdateStrategy()).ToNot(BeEmpty()) + Expect(v.validatePrimaryUpdateStrategy(cluster)).ToNot(BeEmpty()) }) }) var _ = Describe("Number of synchronous replicas", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + Context("new-style configuration", func() { It("can't have both new-style configuration and legacy one", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 3, MinSyncReplicas: 1, MaxSyncReplicas: 2, - PostgresConfiguration: PostgresConfiguration{ - Synchronous: &SynchronousReplicaConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ Number: 2, }, }, }, } - Expect(cluster.validateConfiguration()).ToNot(BeEmpty()) + Expect(v.validateConfiguration(cluster)).ToNot(BeEmpty()) }) }) Context("legacy configuration", func() { It("should be a positive integer", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 3, MaxSyncReplicas: -3, }, } - Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty()) + Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty()) }) It("should not be equal to the number of replicas", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 3, MaxSyncReplicas: 3, }, } - Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty()) + Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty()) }) It("should not be greater than the number of replicas", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 3, MaxSyncReplicas: 5, }, } - Expect(cluster.validateMaxSyncReplicas()).ToNot(BeEmpty()) + Expect(v.validateMaxSyncReplicas(cluster)).ToNot(BeEmpty()) }) It("can be zero", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 3, MaxSyncReplicas: 0, }, } - Expect(cluster.validateMaxSyncReplicas()).To(BeEmpty()) + 
Expect(v.validateMaxSyncReplicas(cluster)).To(BeEmpty()) }) It("can be lower than the number of replicas", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 3, MaxSyncReplicas: 2, }, } - Expect(cluster.validateMaxSyncReplicas()).To(BeEmpty()) + Expect(v.validateMaxSyncReplicas(cluster)).To(BeEmpty()) }) }) }) var _ = Describe("validateSynchronousReplicaConfiguration", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("returns no error when synchronous configuration is nil", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Synchronous: nil, }, }, } - errors := cluster.validateSynchronousReplicaConfiguration() + errors := v.validateSynchronousReplicaConfiguration(cluster) Expect(errors).To(BeEmpty()) }) It("returns an error when number of synchronous replicas is greater than the total instances and standbys", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 2, - PostgresConfiguration: PostgresConfiguration{ - Synchronous: &SynchronousReplicaConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ Number: 5, StandbyNamesPost: []string{"standby1"}, StandbyNamesPre: []string{"standby2"}, @@ -1967,7 +1869,7 @@ var _ = Describe("validateSynchronousReplicaConfiguration", func() { }, }, } - errors := cluster.validateSynchronousReplicaConfiguration() + errors := v.validateSynchronousReplicaConfiguration(cluster) Expect(errors).To(HaveLen(1)) Expect(errors[0].Detail).To( Equal("Invalid synchronous configuration: the number of synchronous replicas must be less than the " + @@ -1975,11 +1877,11 @@ var _ = Describe("validateSynchronousReplicaConfiguration", func() { }) It("returns an error when number of synchronous replicas is equal to total instances and standbys", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 3, - PostgresConfiguration: PostgresConfiguration{ - Synchronous: &SynchronousReplicaConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ Number: 5, StandbyNamesPost: []string{"standby1"}, StandbyNamesPre: []string{"standby2"}, @@ -1987,18 +1889,18 @@ var _ = Describe("validateSynchronousReplicaConfiguration", func() { }, }, } - errors := cluster.validateSynchronousReplicaConfiguration() + errors := v.validateSynchronousReplicaConfiguration(cluster) Expect(errors).To(HaveLen(1)) Expect(errors[0].Detail).To(Equal("Invalid synchronous configuration: the number of synchronous replicas " + "must be less than the total number of instances and the provided standby names.")) }) It("returns no error when number of synchronous replicas is less than total instances and standbys", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Instances: 2, - PostgresConfiguration: PostgresConfiguration{ - Synchronous: &SynchronousReplicaConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ Number: 2, StandbyNamesPost: []string{"standby1"}, StandbyNamesPre: []string{"standby2"}, @@ -2006,37 +1908,42 @@ var _ = 
Describe("validateSynchronousReplicaConfiguration", func() { }, }, } - errors := cluster.validateSynchronousReplicaConfiguration() + errors := v.validateSynchronousReplicaConfiguration(cluster) Expect(errors).To(BeEmpty()) }) }) var _ = Describe("storage configuration validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("complains if the size is being reduced", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "1G", }, }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "512M", }, }, } - Expect(clusterNew.validateStorageChange(&clusterOld)).ToNot(BeEmpty()) + Expect(v.validateStorageChange(clusterNew, clusterOld)).ToNot(BeEmpty()) }) It("does not complain if nothing has been changed", func() { one := "one" - clusterOld := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "1G", StorageClass: &one, }, @@ -2045,42 +1952,47 @@ var _ = Describe("storage configuration validation", func() { clusterNew := clusterOld.DeepCopy() - Expect(clusterNew.validateStorageChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateStorageChange(clusterNew, clusterOld)).To(BeEmpty()) }) It("works fine is the size is being enlarged", func() { - clusterOld := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "8G", }, }, } - clusterNew := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10G", }, }, } - Expect(clusterNew.validateStorageChange(&clusterOld)).To(BeEmpty()) + Expect(v.validateStorageChange(clusterNew, clusterOld)).To(BeEmpty()) }) }) var _ = Describe("Cluster name validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("should be a valid DNS label", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test.one", }, } - Expect(cluster.validateName()).ToNot(BeEmpty()) + Expect(v.validateName(cluster)).ToNot(BeEmpty()) }) It("should not be too long", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "abcdefghi" + "abcdefghi" + @@ -2093,11 +2005,11 @@ var _ = Describe("Cluster name validation", func() { "abcdefghi", }, } - Expect(cluster.validateName()).ToNot(BeEmpty()) + Expect(v.validateName(cluster)).ToNot(BeEmpty()) }) It("should not raise errors when the name is ok", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "abcdefghi" + "abcdefghi" + @@ -2105,47 +2017,52 @@ var _ = Describe("Cluster name validation", func() { "abcdefghi", }, } - Expect(cluster.validateName()).To(BeEmpty()) + Expect(v.validateName(cluster)).To(BeEmpty()) }) It("should return errors when the name is not DNS-1035 compliant", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: 
"4b96d026-a956-47eb-bae8-a99b840805c3", }, } - Expect(cluster.validateName()).NotTo(BeEmpty()) + Expect(v.validateName(cluster)).NotTo(BeEmpty()) }) It("should return errors when the name length is greater than 50", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: strings.Repeat("toomuchlong", 4) + "-" + "after4times", }, } - Expect(cluster.validateName()).NotTo(BeEmpty()) + Expect(v.validateName(cluster)).NotTo(BeEmpty()) }) It("should return errors when having a name with dots", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "wrong.name", }, } - Expect(cluster.validateName()).NotTo(BeEmpty()) + Expect(v.validateName(cluster)).NotTo(BeEmpty()) }) }) var _ = Describe("validation of the list of external clusters", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("is correct when it's empty", func() { - cluster := Cluster{} - Expect(cluster.validateExternalClusters()).To(BeEmpty()) + cluster := &apiv1.Cluster{} + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) }) It("complains when the list of clusters contains duplicates", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ExternalClusters: []ExternalCluster{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "one", ConnectionParameters: map[string]string{ @@ -2161,13 +2078,13 @@ var _ = Describe("validation of the list of external clusters", func() { }, }, } - Expect(cluster.validateExternalClusters()).ToNot(BeEmpty()) + Expect(v.validateExternalClusters(cluster)).ToNot(BeEmpty()) }) It("should not raise errors is the cluster name is unique", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ExternalClusters: []ExternalCluster{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "one", ConnectionParameters: map[string]string{ @@ -2183,69 +2100,79 @@ var _ = Describe("validation of the list of external clusters", func() { }, }, } - Expect(cluster.validateExternalClusters()).To(BeEmpty()) + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) }) }) var _ = Describe("validation of an external cluster", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("ensure that one of connectionParameters and barmanObjectStore is set", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - ExternalClusters: []ExternalCluster{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ {}, }, }, } - Expect(cluster.validateExternalClusters()).To(Not(BeEmpty())) + Expect(v.validateExternalClusters(cluster)).To(Not(BeEmpty())) cluster.Spec.ExternalClusters[0].ConnectionParameters = map[string]string{ "dbname": "postgres", } cluster.Spec.ExternalClusters[0].BarmanObjectStore = nil - Expect(cluster.validateExternalClusters()).To(BeEmpty()) + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) cluster.Spec.ExternalClusters[0].ConnectionParameters = nil - cluster.Spec.ExternalClusters[0].BarmanObjectStore = &BarmanObjectStoreConfiguration{} - Expect(cluster.validateExternalClusters()).To(BeEmpty()) + cluster.Spec.ExternalClusters[0].BarmanObjectStore = &apiv1.BarmanObjectStoreConfiguration{} + Expect(v.validateExternalClusters(cluster)).To(BeEmpty()) }) }) var _ = Describe("bootstrap base backup validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = 
&ClusterCustomValidator{}
+ })
+
 It("complains if you specify the database name but not the owner for pg_basebackup", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
 Database: "app",
 },
 },
 },
 }

- result := cluster.validatePgBaseBackupApplicationDatabase()
+ result := v.validatePgBaseBackupApplicationDatabase(cluster)
 Expect(result).To(HaveLen(1))
 })

 It("complains if you specify the owner but not the database name for pg_basebackup", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
 Owner: "app",
 },
 },
 },
 }

- result := cluster.validatePgBaseBackupApplicationDatabase()
+ result := v.validatePgBaseBackupApplicationDatabase(cluster)
 Expect(result).To(HaveLen(1))
 })

 It("doesn't complain if you specify both database name and owner user for pg_basebackup", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
 Database: "app",
 Owner: "app",
 },
@@ -2253,71 +2180,76 @@ var _ = Describe("bootstrap base backup validation", func() {
 },
 }

- result := cluster.validatePgBaseBackupApplicationDatabase()
+ result := v.validatePgBaseBackupApplicationDatabase(cluster)
 Expect(result).To(BeEmpty())
 })

 It("doesn't complain if we are not bootstrapping using pg_basebackup", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{},
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{},
 },
 }
- result := recoveryCluster.validateBootstrapPgBaseBackupSource()
+ result := v.validateBootstrapPgBaseBackupSource(recoveryCluster)
 Expect(result).To(BeEmpty())
 })

 It("complains when the source cluster doesn't exist", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
 Source: "test",
 },
 },
 },
 }
- result := recoveryCluster.validateBootstrapPgBaseBackupSource()
+ result := v.validateBootstrapPgBaseBackupSource(recoveryCluster)
 Expect(result).ToNot(BeEmpty())
 })
})

var _ = Describe("bootstrap recovery validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("complains if you specify the database name but not the owner for recovery", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
 Database: "app",
 },
 },
 },
 }

- result := cluster.validateRecoveryApplicationDatabase()
+ result := v.validateRecoveryApplicationDatabase(cluster)
 Expect(result).To(HaveLen(1))
 })

 It("complains if you specify the owner but not the database name for recovery", func() {
- cluster := Cluster{
-
Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
 Owner: "app",
 },
 },
 },
 }

- result := cluster.validateRecoveryApplicationDatabase()
+ result := v.validateRecoveryApplicationDatabase(cluster)
 Expect(result).To(HaveLen(1))
 })

 It("doesn't complain if you specify both database name and owner user for recovery", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
 Database: "app",
 Owner: "app",
 },
@@ -2325,20 +2257,20 @@ var _ = Describe("bootstrap recovery validation", func() {
 },
 }

- result := cluster.validateRecoveryApplicationDatabase()
+ result := v.validateRecoveryApplicationDatabase(cluster)
 Expect(result).To(BeEmpty())
 })

 Context("does not complain when bootstrap recovery source matches one of the names of external clusters", func() {
 When("using a barman object store configuration", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
 Source: "test",
 },
 },
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
 {
 Name: "test",
 BarmanObjectStore: &api.BarmanObjectStoreConfiguration{},
@@ -2346,75 +2278,80 @@ var _ = Describe("bootstrap recovery validation", func() {
 },
 },
 }
- errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ errorsList := v.validateBootstrapRecoverySource(recoveryCluster)
 Expect(errorsList).To(BeEmpty())
 })

 When("using a plugin configuration", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
 Source: "test",
 },
 },
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
 {
 Name: "test",
- PluginConfiguration: &PluginConfiguration{},
+ PluginConfiguration: &apiv1.PluginConfiguration{},
 },
 },
 },
 }
- errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ errorsList := v.validateBootstrapRecoverySource(recoveryCluster)
 Expect(errorsList).To(BeEmpty())
 })
 })

 It("complains when bootstrap recovery source does not match one of the names of external clusters", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
 Source: "test",
 },
 },
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
 {
 Name: "another-test",
 },
 },
 },
 }
- errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ errorsList := v.validateBootstrapRecoverySource(recoveryCluster)
 Expect(errorsList).ToNot(BeEmpty())
 })

 It("complains when bootstrap recovery source has no BarmanObjectStore or plugin configuration", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- Recovery: &BootstrapRecovery{
+
recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
 Source: "test",
 },
 },
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
 {
 Name: "test",
 },
 },
 },
 }
- errorsList := recoveryCluster.validateBootstrapRecoverySource()
+ errorsList := v.validateBootstrapRecoverySource(recoveryCluster)
 Expect(errorsList).To(HaveLen(1))
 })
})

var _ = Describe("toleration validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("doesn't complain if we provide a proper toleration", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
 Tolerations: []corev1.Toleration{
 {
 Key: "test",
@@ -2425,14 +2362,14 @@ var _ = Describe("toleration validation", func() {
 },
 },
 }
- result := recoveryCluster.validateTolerations()
+ result := v.validateTolerations(recoveryCluster)
 Expect(result).To(BeEmpty())
 })

 It("complains when the toleration is not valid", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
 Tolerations: []corev1.Toleration{
 {
 Key: "",
@@ -2443,111 +2380,119 @@ var _ = Describe("toleration validation", func() {
 },
 },
 }
- result := recoveryCluster.validateTolerations()
+ result := v.validateTolerations(recoveryCluster)
 Expect(result).ToNot(BeEmpty())
 })
})

var _ = Describe("validate anti-affinity", func() {
- t := true
- f := false
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("doesn't complain if we provide an empty affinity section", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{},
 },
 }
- result := cluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(cluster)
 Expect(result).To(BeEmpty())
 })

 It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity enabled", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &t,
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(true),
 PodAntiAffinityType: "required",
 },
 },
 }
- result := cluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(cluster)
 Expect(result).To(BeEmpty())
 })

 It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity disabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &f,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(false),
 PodAntiAffinityType: "required",
 },
 },
 }
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
 Expect(result).To(BeEmpty())
 })

 It("doesn't complain if we provide a proper PodAntiAffinity with anti-affinity enabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &t,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity:
ptr.To(true),
 PodAntiAffinityType: "preferred",
 },
 },
 }
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
 Expect(result).To(BeEmpty())
 })

 It("doesn't complain if we provide a proper PodAntiAffinity default with anti-affinity enabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &t,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(true),
 PodAntiAffinityType: "",
 },
 },
 }
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
 Expect(result).To(BeEmpty())
 })

 It("complains if we provide a wrong PodAntiAffinity with anti-affinity disabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &f,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(false),
 PodAntiAffinityType: "error",
 },
 },
 }
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
 Expect(result).NotTo(BeEmpty())
 })

 It("complains if we provide a wrong PodAntiAffinity with anti-affinity enabled", func() {
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Affinity: AffinityConfiguration{
- EnablePodAntiAffinity: &t,
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Affinity: apiv1.AffinityConfiguration{
+ EnablePodAntiAffinity: ptr.To(true),
 PodAntiAffinityType: "error",
 },
 },
 }
- result := recoveryCluster.validateAntiAffinity()
+ result := v.validateAntiAffinity(recoveryCluster)
 Expect(result).NotTo(BeEmpty())
 })
})

var _ = Describe("validation of the list of external clusters", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("is correct when it's empty", func() {
- cluster := Cluster{}
- Expect(cluster.validateExternalClusters()).To(BeEmpty())
+ cluster := &apiv1.Cluster{}
+ Expect(v.validateExternalClusters(cluster)).To(BeEmpty())
 })

 It("complains when the list of servers contains duplicates", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- ExternalClusters: []ExternalCluster{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ExternalClusters: []apiv1.ExternalCluster{
 {
 Name: "one",
 ConnectionParameters: map[string]string{},
@@ -2559,13 +2504,13 @@ var _ = Describe("validation of the list of external clusters", func() {
 },
 },
 }
- Expect(cluster.validateExternalClusters()).ToNot(BeEmpty())
+ Expect(v.validateExternalClusters(cluster)).ToNot(BeEmpty())
 })

 It("should not raise errors if the server name is unique", func() {
- cluster := Cluster{
- Spec: ClusterSpec{
- ExternalClusters: []ExternalCluster{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ExternalClusters: []apiv1.ExternalCluster{
 {
 Name: "one",
 ConnectionParameters: map[string]string{},
@@ -2577,88 +2522,103 @@ var _ = Describe("validation of the list of external clusters", func() {
 },
 },
 }
- Expect(cluster.validateExternalClusters()).To(BeEmpty())
+ Expect(v.validateExternalClusters(cluster)).To(BeEmpty())
 })
})

var _ = Describe("bootstrap base backup validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("complains when the source cluster doesn't exist", func() {
- bootstrap := BootstrapConfiguration{}
- bpb := BootstrapPgBaseBackup{Source: "test"}
+
bootstrap := apiv1.BootstrapConfiguration{}
+ bpb := apiv1.BootstrapPgBaseBackup{Source: "test"}
 bootstrap.PgBaseBackup = &bpb
- recoveryCluster := &Cluster{
- Spec: ClusterSpec{
- Bootstrap: &BootstrapConfiguration{
- PgBaseBackup: &BootstrapPgBaseBackup{
+ recoveryCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ PgBaseBackup: &apiv1.BootstrapPgBaseBackup{
 Source: "test",
 },
 },
 },
 }
- result := recoveryCluster.validateBootstrapPgBaseBackupSource()
+ result := v.validateBootstrapPgBaseBackupSource(recoveryCluster)
 Expect(result).ToNot(BeEmpty())
 })
})

var _ = Describe("unix permissions identifiers change validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("complains if the PostgresGID is changed", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
- PostgresGID: defaultPostgresGID,
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresGID: apiv1.DefaultPostgresGID,
 },
 }
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
 PostgresGID: 53,
 },
 }
- Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).NotTo(BeEmpty())
+ Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).NotTo(BeEmpty())
 })

 It("complains if the PostgresUID is changed", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
- PostgresUID: defaultPostgresUID,
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ PostgresUID: apiv1.DefaultPostgresUID,
 },
 }
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
 PostgresGID: 74,
 },
 }
- Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).NotTo(BeEmpty())
+ Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).NotTo(BeEmpty())
 })

 It("should not complain if the values haven't been changed", func() {
- oldCluster := &Cluster{
- Spec: ClusterSpec{
+ oldCluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
 PostgresUID: 74,
 PostgresGID: 76,
 },
 }
- cluster := &Cluster{
- Spec: ClusterSpec{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
 PostgresUID: 74,
 PostgresGID: 76,
 },
 }
- Expect(cluster.validateUnixPermissionIdentifierChange(oldCluster)).To(BeEmpty())
+ Expect(v.validateUnixPermissionIdentifierChange(cluster, oldCluster)).To(BeEmpty())
 })
})

var _ = Describe("promotion token validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("complains if the replica token is not formatted in base64", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- ReplicaCluster: &ReplicaClusterConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ ReplicaCluster: &apiv1.ReplicaClusterConfiguration{
 Enabled: ptr.To(false),
 Source: "test",
 PromotionToken: "this-is-a-wrong-token",
 },
- Bootstrap: &BootstrapConfiguration{
- InitDB: &BootstrapInitDB{},
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ InitDB: &apiv1.BootstrapInitDB{},
 },
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
 {
 Name: "test",
 },
@@ -2666,22 +2626,22 @@ var _ = Describe("promotion token validation", func() {
 },
 }

- result := cluster.validatePromotionToken()
+ result := v.validatePromotionToken(cluster)
 Expect(result).ToNot(BeEmpty())
 })

 It("complains if the replica token is not valid", func() {
- cluster := &Cluster{
- Spec:
apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(false), Source: "test", PromotionToken: base64.StdEncoding.EncodeToString([]byte("{}")), }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, @@ -2689,7 +2649,7 @@ var _ = Describe("promotion token validation", func() { }, } - result := cluster.validatePromotionToken() + result := v.validatePromotionToken(cluster) Expect(result).ToNot(BeEmpty()) }) @@ -2705,17 +2665,17 @@ var _ = Describe("promotion token validation", func() { jsonToken, err := json.Marshal(tokenContent) Expect(err).ToNot(HaveOccurred()) - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(false), Source: "test", PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, @@ -2723,7 +2683,7 @@ var _ = Describe("promotion token validation", func() { }, } - result := cluster.validatePromotionToken() + result := v.validatePromotionToken(cluster) Expect(result).To(BeEmpty()) }) @@ -2739,9 +2699,9 @@ var _ = Describe("promotion token validation", func() { jsonToken, err := json.Marshal(tokenContent) Expect(err).ToNot(HaveOccurred()) - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Source: "test", PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), @@ -2749,7 +2709,7 @@ var _ = Describe("promotion token validation", func() { }, } - result := cluster.validatePromotionToken() + result := v.validatePromotionToken(cluster) Expect(result).NotTo(BeEmpty()) }) @@ -2765,12 +2725,12 @@ var _ = Describe("promotion token validation", func() { jsonToken, err := json.Marshal(tokenContent) Expect(err).ToNot(HaveOccurred()) - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test2", }, - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Primary: "test", Source: "test", PromotionToken: base64.StdEncoding.EncodeToString(jsonToken), @@ -2778,7 +2738,7 @@ var _ = Describe("promotion token validation", func() { }, } - result := cluster.validatePromotionToken() + result := v.validatePromotionToken(cluster) Expect(result).NotTo(BeEmpty()) }) @@ -2794,9 +2754,9 @@ var _ = Describe("promotion token validation", func() { jsonToken, err := json.Marshal(tokenContent) Expect(err).ToNot(HaveOccurred()) - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Primary: "test", Self: "test2", Source: "test", @@ -2805,7 +2765,7 @@ var _ = Describe("promotion token validation", func() { }, } - result := cluster.validatePromotionToken() + result := v.validatePromotionToken(cluster) 
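// A minimal sketch of how the promotion-token fixtures in these cases are
// built, assuming a placeholder tokenContent (the real token schema is
// elided by the hunks above): the content is marshalled to JSON and then
// base64-encoded, matching the base64.StdEncoding.EncodeToString(jsonToken)
// calls exercised here.
//
//	tokenContent := map[string]string{"latestCheckpointTimelineID": "1"} // placeholder keys
//	jsonToken, err := json.Marshal(tokenContent)
//	Expect(err).ToNot(HaveOccurred())
//	token := base64.StdEncoding.EncodeToString(jsonToken)
//
// This is also why the two failure modes differ: "this-is-a-wrong-token"
// fails base64 decoding outright, while the encoded "{}" decodes cleanly but
// carries no usable content, so both produce a non-empty error list.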
Expect(result).NotTo(BeEmpty()) }) @@ -2821,9 +2781,9 @@ var _ = Describe("promotion token validation", func() { jsonToken, err := json.Marshal(tokenContent) Expect(err).ToNot(HaveOccurred()) - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Primary: "test", Self: "test", Source: "test", @@ -2835,270 +2795,280 @@ var _ = Describe("promotion token validation", func() { }, } - result := cluster.validatePromotionToken() + result := v.validatePromotionToken(cluster) Expect(result).NotTo(BeEmpty()) }) }) var _ = Describe("replica mode validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("complains if the bootstrap method is not specified", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Source: "test", }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, } - Expect(cluster.validateReplicaMode()).ToNot(BeEmpty()) + Expect(v.validateReplicaMode(cluster)).ToNot(BeEmpty()) }) It("complains if the initdb bootstrap method is used", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, } - Expect(cluster.validateReplicaMode()).ToNot(BeEmpty()) + Expect(v.validateReplicaMode(cluster)).ToNot(BeEmpty()) }) It("doesn't complain about initdb if we enable the external cluster on an existing cluster", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: "existing", }, - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, } - result := cluster.validateReplicaMode() + result := v.validateReplicaMode(cluster) Expect(result).To(BeEmpty()) }) It("should complain if enabled is set to off during a transition", func() { - old := &Cluster{ + old := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: "existing", }, - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, - Status: ClusterStatus{ - SwitchReplicaClusterStatus: SwitchReplicaClusterStatus{ + Status: 
apiv1.ClusterStatus{ + SwitchReplicaClusterStatus: apiv1.SwitchReplicaClusterStatus{ InProgress: true, }, }, } - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ ResourceVersion: "existing", }, - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(false), Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, - Status: ClusterStatus{ - SwitchReplicaClusterStatus: SwitchReplicaClusterStatus{ + Status: apiv1.ClusterStatus{ + SwitchReplicaClusterStatus: apiv1.SwitchReplicaClusterStatus{ InProgress: true, }, }, } - result := cluster.validateReplicaClusterChange(old) + result := v.validateReplicaClusterChange(cluster, old) Expect(result).To(HaveLen(1)) Expect(result[0].Type).To(Equal(field.ErrorTypeForbidden)) Expect(result[0].Field).To(Equal("spec.replica.enabled")) }) It("is valid when the pg_basebackup bootstrap option is used", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, } - result := cluster.validateReplicaMode() + result := v.validateReplicaMode(cluster) Expect(result).To(BeEmpty()) }) It("is valid when the restore bootstrap option is used", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, } - result := cluster.validateReplicaMode() + result := v.validateReplicaMode(cluster) Expect(result).To(BeEmpty()) }) It("complains when the primary field is used with the enabled field", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Primary: "toast", Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, }, - ExternalClusters: []ExternalCluster{}, + ExternalClusters: []apiv1.ExternalCluster{}, }, } - result := cluster.validateReplicaMode() + result := v.validateReplicaMode(cluster) Expect(result).ToNot(BeEmpty()) }) It("doesn't complain when the enabled field is not specified", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-2", }, - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + Spec: 
apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Primary: "test", Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, } - result := cluster.validateReplicaMode() + result := v.validateReplicaMode(cluster) Expect(result).To(BeEmpty()) }) It("doesn't complain when creating a new primary cluster with the replication stanza set", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", }, - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Primary: "test", Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, }, }, } - result := cluster.validateReplicaMode() + result := v.validateReplicaMode(cluster) Expect(result).To(BeEmpty()) }) }) var _ = Describe("validate the replica cluster external clusters", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("complains when the external cluster doesn't exist (source)", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Enabled: ptr.To(true), Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, }, - ExternalClusters: []ExternalCluster{}, + ExternalClusters: []apiv1.ExternalCluster{}, }, } cluster.Spec.Bootstrap.PgBaseBackup = nil - result := cluster.validateReplicaClusterExternalClusters() + result := v.validateReplicaClusterExternalClusters(cluster) Expect(result).ToNot(BeEmpty()) }) It("complains when the external cluster doesn't exist (primary)", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Primary: "test2", Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: &apiv1.BootstrapPgBaseBackup{}, }, - ExternalClusters: []ExternalCluster{ + ExternalClusters: []apiv1.ExternalCluster{ { Name: "test", }, @@ -3106,22 +3076,22 @@ var _ = Describe("validate the replica cluster external clusters", func() { }, } - result := cluster.validateReplicaClusterExternalClusters() + result := v.validateReplicaClusterExternalClusters(cluster) Expect(result).ToNot(BeEmpty()) }) It("complains when the external cluster doesn't exist (self)", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicaCluster: &ReplicaClusterConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicaCluster: &apiv1.ReplicaClusterConfiguration{ Self: "test2", Primary: "test", Source: "test", }, - Bootstrap: &BootstrapConfiguration{ - PgBaseBackup: &BootstrapPgBaseBackup{}, + Bootstrap: &apiv1.BootstrapConfiguration{ + PgBaseBackup: 
&apiv1.BootstrapPgBaseBackup{},
 },
- ExternalClusters: []ExternalCluster{
+ ExternalClusters: []apiv1.ExternalCluster{
 {
 Name: "test",
 },
@@ -3129,155 +3099,98 @@ var _ = Describe("validate the replica cluster external clusters", func() {
 },
 }

- result := cluster.validateReplicaClusterExternalClusters()
+ result := v.validateReplicaClusterExternalClusters(cluster)
 Expect(result).ToNot(BeEmpty())
 })
})

var _ = Describe("Validation changes", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("doesn't complain if given old cluster is nil", func() {
- newCluster := &Cluster{}
- err := newCluster.ValidateChanges(nil)
+ newCluster := &apiv1.Cluster{}
+ err := v.validateClusterChanges(newCluster, nil)
 Expect(err).To(BeNil())
 })
})

var _ = Describe("Backup validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("complains if there are no credentials", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
- BarmanObjectStore: &BarmanObjectStoreConfiguration{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
+ BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{},
 },
 },
 }
- err := cluster.validateBackupConfiguration()
+ err := v.validateBackupConfiguration(cluster)
 Expect(err).To(HaveLen(1))
 })
})

var _ = Describe("Backup retention policy validation", func() {
+ var v *ClusterCustomValidator
+ BeforeEach(func() {
+ v = &ClusterCustomValidator{}
+ })
+
 It("doesn't complain if given policy is not provided", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{},
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{},
 },
 }
- err := cluster.validateRetentionPolicy()
+ err := v.validateRetentionPolicy(cluster)
 Expect(err).To(BeEmpty())
 })

 It("doesn't complain if given policy is valid", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
 RetentionPolicy: "90d",
 },
 },
 }
- err := cluster.validateRetentionPolicy()
+ err := v.validateRetentionPolicy(cluster)
 Expect(err).To(BeEmpty())
 })

 It("complains if a given policy is not valid", func() {
- cluster := &Cluster{
- Spec: ClusterSpec{
- Backup: &BackupConfiguration{
+ cluster := &apiv1.Cluster{
+ Spec: apiv1.ClusterSpec{
+ Backup: &apiv1.BackupConfiguration{
 RetentionPolicy: "09",
 },
 },
 }
- err := cluster.validateRetentionPolicy()
+ err := v.validateRetentionPolicy(cluster)
 Expect(err).To(HaveLen(1))
 })
})

-var _ = Describe("Default monitoring queries", func() {
- It("correctly set the default monitoring queries configmap and secret when none is already specified", func() {
- cluster := &Cluster{}
- cluster.defaultMonitoringQueries(&configuration.Data{
- MonitoringQueriesSecret: "test-secret",
- MonitoringQueriesConfigmap: "test-configmap",
- })
- Expect(cluster.Spec.Monitoring).NotTo(BeNil())
- Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty())
- Expect(cluster.Spec.Monitoring.CustomQueriesConfigMap).
- To(ContainElement(ConfigMapKeySelector{
- LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName},
- Key: DefaultMonitoringKey,
- }))
- Expect(cluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty())
- Expect(cluster.Spec.Monitoring.CustomQueriesSecret).
- To(ContainElement(SecretKeySelector{ - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, - Key: DefaultMonitoringKey, - })) - }) - testCluster := &Cluster{Spec: ClusterSpec{Monitoring: &MonitoringConfiguration{ - CustomQueriesConfigMap: []ConfigMapKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: "test2", - }, - }, - CustomQueriesSecret: []SecretKeySelector{ - { - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: "test3", - }, - }, - }}} - It("correctly set the default monitoring queries configmap when other metrics are already specified", func() { - modifiedCluster := testCluster.DeepCopy() - modifiedCluster.defaultMonitoringQueries(&configuration.Data{ - MonitoringQueriesConfigmap: "test-configmap", - }) - - Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). - To(ContainElement(ConfigMapKeySelector{ - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringConfigMapName}, - Key: "test2", - })) - - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). - To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesSecret)) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). - To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesConfigMap)) - }) - It("correctly set the default monitoring queries secret when other metrics are already specified", func() { - modifiedCluster := testCluster.DeepCopy() - modifiedCluster.defaultMonitoringQueries(&configuration.Data{ - MonitoringQueriesSecret: "test-secret", - }) - - Expect(modifiedCluster.Spec.Monitoring).NotTo(BeNil()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret).NotTo(BeEmpty()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap).NotTo(BeEmpty()) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). - To(ContainElement(SecretKeySelector{ - LocalObjectReference: LocalObjectReference{Name: DefaultMonitoringSecretName}, - Key: "test3", - })) - - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesConfigMap). - To(BeEquivalentTo(testCluster.Spec.Monitoring.CustomQueriesConfigMap)) - Expect(modifiedCluster.Spec.Monitoring.CustomQueriesSecret). 
- To(ContainElements(testCluster.Spec.Monitoring.CustomQueriesSecret)) +var _ = Describe("validation of imports", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} }) -}) -var _ = Describe("validation of imports", func() { It("rejects unrecognized import type", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ + Import: &apiv1.Import{ Type: "fooBar", }, }, @@ -3285,19 +3198,19 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(HaveLen(1)) }) It("rejects microservice import with roles", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MicroserviceSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MicroserviceSnapshotType, Databases: []string{"foo"}, Roles: []string{"bar"}, }, @@ -3306,19 +3219,19 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(HaveLen(1)) }) It("rejects microservice import without exactly one database", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MicroserviceSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MicroserviceSnapshotType, Databases: []string{"foo", "bar"}, }, }, @@ -3326,19 +3239,19 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(HaveLen(1)) }) It("rejects microservice import with a wildcard on the database name", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MicroserviceSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MicroserviceSnapshotType, Databases: []string{"*foo"}, }, }, @@ -3346,19 +3259,19 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(HaveLen(1)) }) It("accepts microservice import when well specified", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MicroserviceSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MicroserviceSnapshotType, Databases: []string{"foo"}, }, }, @@ -3366,19 +3279,19 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := 
v.validateImport(cluster) Expect(result).To(BeEmpty()) }) It("rejects monolith import with no databases", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MonolithSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, Databases: []string{}, }, }, @@ -3386,19 +3299,19 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(HaveLen(1)) }) It("rejects monolith import with PostImport Application SQL", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MonolithSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, Databases: []string{"foo"}, PostImportApplicationSQL: []string{"select * from bar"}, }, @@ -3407,19 +3320,19 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(HaveLen(1)) }) It("rejects monolith import with wildcards alongside specific values", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MonolithSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, Databases: []string{"bar", "*"}, }, }, @@ -3427,17 +3340,17 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(HaveLen(1)) - cluster = &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MonolithSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, Databases: []string{"foo"}, Roles: []string{"baz", "*"}, }, @@ -3446,19 +3359,19 @@ var _ = Describe("validation of imports", func() { }, } - result = cluster.validateImport() + result = v.validateImport(cluster) Expect(result).To(HaveLen(1)) }) It("accepts monolith import with proper values", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MonolithSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, Databases: []string{"foo"}, }, }, @@ -3466,19 +3379,19 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(BeEmpty()) }) It("accepts monolith import with wildcards", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Bootstrap: 
&BootstrapConfiguration{ - InitDB: &BootstrapInitDB{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{ Database: "app", Owner: "app", - Import: &Import{ - Type: MonolithSnapshotType, + Import: &apiv1.Import{ + Type: apiv1.MonolithSnapshotType, Databases: []string{"*"}, Roles: []string{"*"}, }, @@ -3487,18 +3400,23 @@ var _ = Describe("validation of imports", func() { }, } - result := cluster.validateImport() + result := v.validateImport(cluster) Expect(result).To(BeEmpty()) }) }) var _ = Describe("validation of replication slots configuration", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("can be enabled on the default PostgreSQL image", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), }, UpdateInterval: 0, @@ -3507,13 +3425,13 @@ var _ = Describe("validation of replication slots configuration", func() { } cluster.Default() - result := cluster.validateReplicationSlots() + result := v.validateReplicationSlots(cluster) Expect(result).To(BeEmpty()) }) It("set replicationSlots by default", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, }, } @@ -3522,15 +3440,15 @@ var _ = Describe("validation of replication slots configuration", func() { Expect(cluster.Spec.ReplicationSlots.HighAvailability).ToNot(BeNil()) Expect(cluster.Spec.ReplicationSlots.HighAvailability.Enabled).To(HaveValue(BeTrue())) - result := cluster.validateReplicationSlots() + result := v.validateReplicationSlots(cluster) Expect(result).To(BeEmpty()) }) It("set replicationSlots.highAvailability by default", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ UpdateInterval: 30, }, }, @@ -3539,16 +3457,16 @@ var _ = Describe("validation of replication slots configuration", func() { Expect(cluster.Spec.ReplicationSlots.HighAvailability).ToNot(BeNil()) Expect(cluster.Spec.ReplicationSlots.HighAvailability.Enabled).To(HaveValue(BeTrue())) - result := cluster.validateReplicationSlots() + result := v.validateReplicationSlots(cluster) Expect(result).To(BeEmpty()) }) It("allows enabling replication slots on the fly", func() { - oldCluster := &Cluster{ - Spec: ClusterSpec{ + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(false), }, }, @@ -3557,22 +3475,22 @@ var _ = Describe("validation of replication slots configuration", func() { oldCluster.Default() newCluster := oldCluster.DeepCopy() - newCluster.Spec.ReplicationSlots = &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + newCluster.Spec.ReplicationSlots = 
&apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), SlotPrefix: "_test_", }, } - Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(BeEmpty()) + Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(BeEmpty()) }) It("prevents changing the slot prefix while replication slots are enabled", func() { - oldCluster := &Cluster{ - Spec: ClusterSpec{ + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), SlotPrefix: "_test_", }, @@ -3583,15 +3501,15 @@ var _ = Describe("validation of replication slots configuration", func() { newCluster := oldCluster.DeepCopy() newCluster.Spec.ReplicationSlots.HighAvailability.SlotPrefix = "_toast_" - Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(HaveLen(1)) + Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(HaveLen(1)) }) It("prevents removing the replication slot section when replication slots are enabled", func() { - oldCluster := &Cluster{ - Spec: ClusterSpec{ + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), SlotPrefix: "_test_", }, @@ -3602,15 +3520,15 @@ var _ = Describe("validation of replication slots configuration", func() { newCluster := oldCluster.DeepCopy() newCluster.Spec.ReplicationSlots = nil - Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(HaveLen(1)) + Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(HaveLen(1)) }) It("allows disabling the replication slots", func() { - oldCluster := &Cluster{ - Spec: ClusterSpec{ + oldCluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), SlotPrefix: "_test_", }, @@ -3621,55 +3539,60 @@ var _ = Describe("validation of replication slots configuration", func() { newCluster := oldCluster.DeepCopy() newCluster.Spec.ReplicationSlots.HighAvailability.Enabled = ptr.To(false) - Expect(newCluster.validateReplicationSlotsChange(oldCluster)).To(BeEmpty()) + Expect(v.validateReplicationSlotsChange(newCluster, oldCluster)).To(BeEmpty()) }) It("should return an error when SynchronizeReplicasConfiguration is not nil and has invalid regex", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ - SynchronizeReplicas: &SynchronizeReplicasConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{ ExcludePatterns: []string{"([a-zA-Z]+"}, }, }, }, } - errors := cluster.validateReplicationSlots() + errors := v.validateReplicationSlots(cluster) Expect(errors).To(HaveLen(1)) 
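+		// The single error's Detail is asserted next: invalid exclude patterns
+		// are reported with a fixed message rather than the raw regexp error.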
Expect(errors[0].Detail).To(Equal("Cannot configure synchronizeReplicas. Invalid regexes were found")) }) It("should not return an error when SynchronizeReplicasConfiguration is not nil and regex is valid", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ - SynchronizeReplicas: &SynchronizeReplicasConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + SynchronizeReplicas: &apiv1.SynchronizeReplicasConfiguration{ ExcludePatterns: []string{"validpattern"}, }, }, }, } - errors := cluster.validateReplicationSlots() + errors := v.validateReplicationSlots(cluster) Expect(errors).To(BeEmpty()) }) It("should not return an error when SynchronizeReplicasConfiguration is nil", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ ImageName: versions.DefaultImageName, - ReplicationSlots: &ReplicationSlotsConfiguration{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ SynchronizeReplicas: nil, }, }, } - errors := cluster.validateReplicationSlots() + errors := v.validateReplicationSlots(cluster) Expect(errors).To(BeEmpty()) }) }) var _ = Describe("Environment variables validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + When("an environment variable is given", func() { It("detects if it is valid", func() { Expect(isReservedEnvironmentVariable("PGDATA")).To(BeTrue()) @@ -3682,8 +3605,8 @@ var _ = Describe("Environment variables validation", func() { When("a ClusterSpec is given", func() { It("detects if the environment variable list is correct", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Env: []corev1.EnvVar{ { Name: "TZ", @@ -3693,12 +3616,12 @@ var _ = Describe("Environment variables validation", func() { }, } - Expect(cluster.validateEnv()).To(BeEmpty()) + Expect(v.validateEnv(cluster)).To(BeEmpty()) }) It("detects if the environment variable list contains a reserved variable", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ Env: []corev1.EnvVar{ { Name: "TZ", @@ -3712,37 +3635,42 @@ var _ = Describe("Environment variables validation", func() { }, } - Expect(cluster.validateEnv()).To(HaveLen(1)) + Expect(v.validateEnv(cluster)).To(HaveLen(1)) }) }) }) var _ = Describe("Storage configuration validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + When("a ClusterSpec is given", func() { It("produces one error if storage is not set at all", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{}, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{}, }, } - Expect(cluster.validateStorageSize()).To(HaveLen(1)) + Expect(v.validateStorageSize(cluster)).To(HaveLen(1)) }) It("succeeds if storage size is set", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "1G", }, }, } - Expect(cluster.validateStorageSize()).To(BeEmpty()) + Expect(v.validateStorageSize(cluster)).To(BeEmpty()) }) It("succeeds if storage is not set but a pvc template specifies storage", func() { - cluster := 
Cluster{ - Spec: ClusterSpec{ - StorageConfiguration: StorageConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{"storage": resource.MustParse("1Gi")}, @@ -3751,80 +3679,90 @@ var _ = Describe("Storage configuration validation", func() { }, }, } - Expect(cluster.validateStorageSize()).To(BeEmpty()) + Expect(v.validateStorageSize(cluster)).To(BeEmpty()) }) }) }) var _ = Describe("Ephemeral volume configuration validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("succeeds if no ephemeral configuration is present", func() { - cluster := Cluster{ - Spec: ClusterSpec{}, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty()) + Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty()) }) It("succeeds if ephemeralVolumeSource is set", func() { - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ EphemeralVolumeSource: &corev1.EphemeralVolumeSource{}, }, } - Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty()) + Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty()) }) It("succeeds if ephemeralVolumesSizeLimit.temporaryData is set", func() { onegi := resource.MustParse("1Gi") - cluster := Cluster{ - Spec: ClusterSpec{ - EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{ TemporaryData: &onegi, }, }, } - Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty()) + Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty()) }) It("succeeds if ephemeralVolumeSource and ephemeralVolumesSizeLimit.shm are set", func() { onegi := resource.MustParse("1Gi") - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ EphemeralVolumeSource: &corev1.EphemeralVolumeSource{}, - EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{ + EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{ Shm: &onegi, }, }, } - Expect(cluster.validateEphemeralVolumeSource()).To(BeEmpty()) + Expect(v.validateEphemeralVolumeSource(cluster)).To(BeEmpty()) }) It("produces one error if conflicting ephemeral storage options are set", func() { onegi := resource.MustParse("1Gi") - cluster := Cluster{ - Spec: ClusterSpec{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ EphemeralVolumeSource: &corev1.EphemeralVolumeSource{}, - EphemeralVolumesSizeLimit: &EphemeralVolumesSizeLimitConfiguration{ + EphemeralVolumesSizeLimit: &apiv1.EphemeralVolumesSizeLimitConfiguration{ TemporaryData: &onegi, }, }, } - Expect(cluster.validateEphemeralVolumeSource()).To(HaveLen(1)) + Expect(v.validateEphemeralVolumeSource(cluster)).To(HaveLen(1)) }) }) var _ = Describe("Role management validation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("should succeed if there is no management stanza", func() { - cluster := Cluster{ - Spec: ClusterSpec{}, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - Expect(cluster.validateManagedRoles()).To(BeEmpty()) + Expect(v.validateManagedRoles(cluster)).To(BeEmpty()) }) It("should succeed if the role defined is not 
reserved", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Managed: &ManagedConfiguration{ - Roles: []RoleConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Managed: &apiv1.ManagedConfiguration{ + Roles: []apiv1.RoleConfiguration{ { Name: "non-conflicting", }, @@ -3832,14 +3770,14 @@ var _ = Describe("Role management validation", func() { }, }, } - Expect(cluster.validateManagedRoles()).To(BeEmpty()) + Expect(v.validateManagedRoles(cluster)).To(BeEmpty()) }) It("should produce an error on invalid connection limit", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Managed: &ManagedConfiguration{ - Roles: []RoleConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Managed: &apiv1.ManagedConfiguration{ + Roles: []apiv1.RoleConfiguration{ { Name: "non-conflicting", ConnectionLimit: -3, @@ -3848,14 +3786,14 @@ var _ = Describe("Role management validation", func() { }, }, } - Expect(cluster.validateManagedRoles()).To(HaveLen(1)) + Expect(v.validateManagedRoles(cluster)).To(HaveLen(1)) }) It("should produce an error if the role is reserved", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Managed: &ManagedConfiguration{ - Roles: []RoleConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Managed: &apiv1.ManagedConfiguration{ + Roles: []apiv1.RoleConfiguration{ { Name: "postgres", }, @@ -3863,14 +3801,14 @@ var _ = Describe("Role management validation", func() { }, }, } - Expect(cluster.validateManagedRoles()).To(HaveLen(1)) + Expect(v.validateManagedRoles(cluster)).To(HaveLen(1)) }) It("should produce two errors if the role is reserved and the connection limit is invalid", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Managed: &ManagedConfiguration{ - Roles: []RoleConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Managed: &apiv1.ManagedConfiguration{ + Roles: []apiv1.RoleConfiguration{ { Name: "postgres", ConnectionLimit: -3, @@ -3879,14 +3817,14 @@ var _ = Describe("Role management validation", func() { }, }, } - Expect(cluster.validateManagedRoles()).To(HaveLen(2)) + Expect(v.validateManagedRoles(cluster)).To(HaveLen(2)) }) It("should produce an error if we define two roles with the same name", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Managed: &ManagedConfiguration{ - Roles: []RoleConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Managed: &apiv1.ManagedConfiguration{ + Roles: []apiv1.RoleConfiguration{ { Name: "my_test", ConnectionLimit: -1, @@ -3901,19 +3839,19 @@ var _ = Describe("Role management validation", func() { }, }, } - Expect(cluster.validateManagedRoles()).To(HaveLen(1)) + Expect(v.validateManagedRoles(cluster)).To(HaveLen(1)) }) It("should produce an error if we have a password secret AND DisablePassword in a role", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Managed: &ManagedConfiguration{ - Roles: []RoleConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Managed: &apiv1.ManagedConfiguration{ + Roles: []apiv1.RoleConfiguration{ { Name: "my_test", Superuser: true, BypassRLS: true, DisablePassword: true, - PasswordSecret: &LocalObjectReference{ + PasswordSecret: &apiv1.LocalObjectReference{ Name: "myPassword", }, ConnectionLimit: -1, @@ -3922,27 +3860,32 @@ var _ = Describe("Role management validation", func() { }, }, } - Expect(cluster.validateManagedRoles()).To(HaveLen(1)) + Expect(v.validateManagedRoles(cluster)).To(HaveLen(1)) }) }) var _ = Describe("Managed Extensions validation", func() { + var v 
*ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("should succeed if no extension is enabled", func() { - cluster := Cluster{ - Spec: ClusterSpec{}, + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{}, } - Expect(cluster.validateManagedExtensions()).To(BeEmpty()) + Expect(v.validateManagedExtensions(cluster)).To(BeEmpty()) }) It("should fail if hot_standby_feedback is set to an invalid value", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), }, }, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "hot_standby_feedback": "foo", "pg_failover_slots.synchronize_slot_names": "my_slot", @@ -3950,18 +3893,18 @@ var _ = Describe("Managed Extensions validation", func() { }, }, } - Expect(cluster.validatePgFailoverSlots()).To(HaveLen(2)) + Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(2)) }) It("should succeed if pg_failover_slots and its prerequisites are enabled", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), }, }, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "hot_standby_feedback": "on", "pg_failover_slots.synchronize_slot_names": "my_slot", @@ -3969,26 +3912,26 @@ var _ = Describe("Managed Extensions validation", func() { }, }, } - Expect(cluster.validatePgFailoverSlots()).To(BeEmpty()) + Expect(v.validatePgFailoverSlots(cluster)).To(BeEmpty()) }) It("should produce two errors if pg_failover_slots is enabled and its prerequisites are disabled", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "pg_failover_slots.synchronize_slot_names": "my_slot", }, }, }, } - Expect(cluster.validatePgFailoverSlots()).To(HaveLen(2)) + Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(2)) }) It("should produce an error if pg_failover_slots is enabled and HA slots are disabled", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "hot_standby_feedback": "yes", "pg_failover_slots.synchronize_slot_names": "my_slot", @@ -3996,64 +3939,69 @@ var _ = Describe("Managed Extensions validation", func() { }, }, } - Expect(cluster.validatePgFailoverSlots()).To(HaveLen(1)) + Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(1)) }) It("should produce an error if pg_failover_slots is enabled and hot_standby_feedback is disabled", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - ReplicationSlots: &ReplicationSlotsConfiguration{ - HighAvailability: &ReplicationSlotsHAConfiguration{ + cluster := &apiv1.Cluster{ + Spec: 
apiv1.ClusterSpec{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ Enabled: ptr.To(true), }, }, - PostgresConfiguration: PostgresConfiguration{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{ "pg_failover_slots.synchronize_slot_names": "my_slot", }, }, }, } - Expect(cluster.validatePgFailoverSlots()).To(HaveLen(1)) + Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(1)) }) }) var _ = Describe("Recovery from volume snapshot validation", func() { - clusterFromRecovery := func(recovery *BootstrapRecovery) *Cluster { - return &Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + clusterFromRecovery := func(recovery *apiv1.BootstrapRecovery) *apiv1.Cluster { + return &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ Recovery: recovery, }, - WalStorage: &StorageConfiguration{}, + WalStorage: &apiv1.StorageConfiguration{}, }, } } It("should produce an error when defining two recovery sources at the same time", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ Source: "sourceName", - Backup: &BackupSource{}, - VolumeSnapshots: &DataSource{}, + Backup: &apiv1.BackupSource{}, + VolumeSnapshots: &apiv1.DataSource{}, }, }, }, } - Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1)) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1)) }) It("should produce an error when defining a backupID while recovering using a DataSource", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - RecoveryTarget: &RecoveryTarget{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + RecoveryTarget: &apiv1.RecoveryTarget{ BackupID: "20220616T031500", }, - VolumeSnapshots: &DataSource{ + VolumeSnapshots: &apiv1.DataSource{ Storage: corev1.TypedLocalObjectReference{ APIGroup: ptr.To(""), Kind: "PersistentVolumeClaim", @@ -4064,15 +4012,15 @@ var _ = Describe("Recovery from volume snapshot validation", func() { }, }, } - Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1)) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1)) }) It("should produce an error when asking to recovery WALs from a snapshot without having storage for it", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: &BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - VolumeSnapshots: &DataSource{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ Storage: corev1.TypedLocalObjectReference{ APIGroup: ptr.To(storagesnapshotv1.GroupName), Kind: "VolumeSnapshot", @@ -4088,15 +4036,15 @@ var _ = Describe("Recovery from volume snapshot validation", func() { }, }, } - Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(1)) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(1)) }) It("should not produce an error when the configuration is sound", func() { - cluster := Cluster{ - Spec: ClusterSpec{ - Bootstrap: 
&BootstrapConfiguration{ - Recovery: &BootstrapRecovery{ - VolumeSnapshots: &DataSource{ + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Bootstrap: &apiv1.BootstrapConfiguration{ + Recovery: &apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ Storage: corev1.TypedLocalObjectReference{ APIGroup: ptr.To(storagesnapshotv1.GroupName), Kind: "VolumeSnapshot", @@ -4110,49 +4058,49 @@ var _ = Describe("Recovery from volume snapshot validation", func() { }, }, }, - WalStorage: &StorageConfiguration{}, + WalStorage: &apiv1.StorageConfiguration{}, }, } - Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty()) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) }) It("accepts recovery from a VolumeSnapshot", func() { - cluster := clusterFromRecovery(&BootstrapRecovery{ - VolumeSnapshots: &DataSource{ + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ Storage: corev1.TypedLocalObjectReference{ APIGroup: ptr.To(storagesnapshotv1.GroupName), - Kind: VolumeSnapshotKind, + Kind: apiv1.VolumeSnapshotKind, Name: "pgdata", }, WalStorage: &corev1.TypedLocalObjectReference{ APIGroup: ptr.To(storagesnapshotv1.GroupName), - Kind: VolumeSnapshotKind, + Kind: apiv1.VolumeSnapshotKind, Name: "pgwal", }, }, }) - Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty()) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) }) It("accepts recovery from a VolumeSnapshot, while restoring WALs from an object store", func() { - cluster := clusterFromRecovery(&BootstrapRecovery{ - VolumeSnapshots: &DataSource{ + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ Storage: corev1.TypedLocalObjectReference{ APIGroup: ptr.To(storagesnapshotv1.GroupName), - Kind: VolumeSnapshotKind, + Kind: apiv1.VolumeSnapshotKind, Name: "pgdata", }, }, Source: "pg-cluster", }) - Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty()) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) }) When("using an nil apiGroup", func() { It("accepts recovery from a PersistentVolumeClaim", func() { - cluster := clusterFromRecovery(&BootstrapRecovery{ - VolumeSnapshots: &DataSource{ + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ Storage: corev1.TypedLocalObjectReference{ APIGroup: ptr.To(storagesnapshotv1.GroupName), Kind: "VolumeSnapshot", @@ -4165,14 +4113,14 @@ var _ = Describe("Recovery from volume snapshot validation", func() { }, }, }) - Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty()) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) }) }) When("using an empty apiGroup", func() { It("accepts recovery from a PersistentVolumeClaim", func() { - cluster := clusterFromRecovery(&BootstrapRecovery{ - VolumeSnapshots: &DataSource{ + cluster := clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ Storage: corev1.TypedLocalObjectReference{ APIGroup: ptr.To(storagesnapshotv1.GroupName), Kind: "VolumeSnapshot", @@ -4185,13 +4133,13 @@ var _ = Describe("Recovery from volume snapshot validation", func() { }, }, }) - Expect(cluster.validateBootstrapRecoveryDataSource()).To(BeEmpty()) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(BeEmpty()) }) }) It("prevent recovery from other Objects", func() { - cluster := clusterFromRecovery(&BootstrapRecovery{ - VolumeSnapshots: &DataSource{ + cluster := 
clusterFromRecovery(&apiv1.BootstrapRecovery{ + VolumeSnapshots: &apiv1.DataSource{ Storage: corev1.TypedLocalObjectReference{ APIGroup: ptr.To(""), Kind: "Secret", @@ -4204,17 +4152,18 @@ var _ = Describe("Recovery from volume snapshot validation", func() { }, }, }) - Expect(cluster.validateBootstrapRecoveryDataSource()).To(HaveLen(2)) + Expect(v.validateBootstrapRecoveryDataSource(cluster)).To(HaveLen(2)) }) }) var _ = Describe("validateResources", func() { - var cluster *Cluster + var cluster *apiv1.Cluster + var v *ClusterCustomValidator BeforeEach(func() { - cluster = &Cluster{ - Spec: ClusterSpec{ - PostgresConfiguration: PostgresConfiguration{ + cluster = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ Parameters: map[string]string{}, }, Resources: corev1.ResourceRequirements{ @@ -4223,13 +4172,14 @@ var _ = Describe("validateResources", func() { }, }, } + v = &ClusterCustomValidator{} }) It("returns an error when the CPU request is greater than CPU limit", func() { cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("2") cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(HaveLen(1)) Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit")) }) @@ -4238,7 +4188,7 @@ var _ = Describe("validateResources", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi") cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(HaveLen(1)) Expect(errors[0].Detail).To(Equal("Memory request is greater than the limit")) }) @@ -4247,7 +4197,7 @@ var _ = Describe("validateResources", func() { cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("1") cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(BeEmpty()) }) @@ -4255,7 +4205,7 @@ var _ = Describe("validateResources", func() { cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("2") cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(HaveLen(1)) Expect(errors[0].Detail).To(Equal("Ephemeral storage request is greater than the limit")) }) @@ -4268,7 +4218,7 @@ var _ = Describe("validateResources", func() { cluster.Spec.Resources.Requests["ephemeral-storage"] = resource.MustParse("2") cluster.Spec.Resources.Limits["ephemeral-storage"] = resource.MustParse("1") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(HaveLen(3)) Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit")) Expect(errors[1].Detail).To(Equal("Memory request is greater than the limit")) @@ -4281,7 +4231,7 @@ var _ = Describe("validateResources", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi") cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(HaveLen(2)) Expect(errors[0].Detail).To(Equal("CPU request is greater than the limit")) Expect(errors[1].Detail).To(Equal("Memory request is greater than the limit")) @@ -4293,38 +4243,38 @@ var _ = Describe("validateResources", func() 
{ cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi") cluster.Spec.Resources.Limits["memory"] = resource.MustParse("2Gi") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(BeEmpty()) }) It("returns no errors when CPU request is set but limit is nil", func() { cluster.Spec.Resources.Requests["cpu"] = resource.MustParse("1") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(BeEmpty()) }) It("returns no errors when CPU limit is set but request is nil", func() { cluster.Spec.Resources.Limits["cpu"] = resource.MustParse("1") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(BeEmpty()) }) It("returns no errors when Memory request is set but limit is nil", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(BeEmpty()) }) It("returns no errors when Memory limit is set but request is nil", func() { cluster.Spec.Resources.Limits["memory"] = resource.MustParse("1Gi") - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(BeEmpty()) }) It("returns an error when memoryRequest is less than shared_buffers in kB", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi") cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000000kB" - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(HaveLen(1)) Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value")) }) @@ -4332,7 +4282,7 @@ var _ = Describe("validateResources", func() { It("returns an error when memoryRequest is less than shared_buffers in MB", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1000Mi") cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000MB" - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(HaveLen(1)) Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value")) }) @@ -4340,176 +4290,181 @@ var _ = Describe("validateResources", func() { It("returns no errors when memoryRequest is greater than or equal to shared_buffers in GB", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi") cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB" - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(BeEmpty()) }) It("returns no errors when shared_buffers is in a format that can't be parsed", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi") cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "invalid_value" - errors := cluster.validateResources() + errors := v.validateResources(cluster) Expect(errors).To(BeEmpty()) }) }) var _ = Describe("Tablespaces validation", func() { - createFakeTemporaryTbsConf := func(name string) TablespaceConfiguration { - return TablespaceConfiguration{ + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + createFakeTemporaryTbsConf := func(name string) apiv1.TablespaceConfiguration { + return apiv1.TablespaceConfiguration{ Name: name, - Storage: StorageConfiguration{ + Storage: apiv1.StorageConfiguration{ Size: "10Gi", }, } } It("should succeed if there is no tablespaces 
section", func() { - cluster := Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, }, } - Expect(cluster.Validate()).To(BeEmpty()) + Expect(v.validate(cluster)).To(BeEmpty()) }) It("should succeed if the tablespaces are ok", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ createFakeTemporaryTbsConf("my_tablespace"), }, }, } - Expect(cluster.Validate()).To(BeEmpty()) + Expect(v.validate(cluster)).To(BeEmpty()) }) It("should produce an error if the tablespace name is too long", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ // each repetition is 14 char long, so 5x14 = 70 char > postgres limit createFakeTemporaryTbsConf("my_tablespace1my_tablespace2my_tablespace3my_tablespace4my_tablespace5"), }, }, } - Expect(cluster.Validate()).To(HaveLen(1)) + Expect(v.validate(cluster)).To(HaveLen(1)) }) It("should produce an error if the tablespace name is reserved by Postgres", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ createFakeTemporaryTbsConf("pg_foo"), }, }, } - Expect(cluster.Validate()).To(HaveLen(1)) + Expect(v.validate(cluster)).To(HaveLen(1)) }) It("should produce an error if the tablespace name is not valid", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ // each repetition is 14 char long, so 5x14 = 70 char > postgres limit createFakeTemporaryTbsConf("my-^&sdf;"), }, }, } - Expect(cluster.Validate()).To(HaveLen(1)) + Expect(v.validate(cluster)).To(HaveLen(1)) }) It("should produce an error if there are duplicate tablespaces", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ createFakeTemporaryTbsConf("my_tablespace"), createFakeTemporaryTbsConf("my_TAblespace"), createFakeTemporaryTbsConf("another"), }, }, } - 
Expect(cluster.Validate()).To(HaveLen(1)) + Expect(v.validate(cluster)).To(HaveLen(1)) }) It("should produce an error if the storage configured for the tablespace is invalid", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ // each repetition is 14 char long, so 5x14 = 70 char > postgres limit { Name: "my_tablespace1", - Storage: StorageConfiguration{ + Storage: apiv1.StorageConfiguration{ Size: "10Gibberish", }, }, }, }, } - Expect(cluster.Validate()).To(HaveLen(1)) + Expect(v.validate(cluster)).To(HaveLen(1)) }) It("should produce two errors if two tablespaces have errors", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ // each repetition is 14 char long, so 5x14 = 70 char > postgres limit { Name: "my_tablespace1", - Storage: StorageConfiguration{ + Storage: apiv1.StorageConfiguration{ Size: "10Gibberish", }, }, @@ -4518,125 +4473,125 @@ var _ = Describe("Tablespaces validation", func() { }, }, } - Expect(cluster.Validate()).To(HaveLen(2)) + Expect(v.validate(cluster)).To(HaveLen(2)) }) It("should produce an error if the tablespaces section is deleted", func() { - oldCluster := &Cluster{ + oldCluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ createFakeTemporaryTbsConf("my-tablespace1"), }, }, } - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, }, } - Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1)) + Expect(v.validateClusterChanges(cluster, oldCluster)).To(HaveLen(1)) }) It("should produce an error if a tablespace is deleted", func() { - oldCluster := &Cluster{ + oldCluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ createFakeTemporaryTbsConf("my-tablespace1"), createFakeTemporaryTbsConf("my-tablespace2"), }, }, } - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ createFakeTemporaryTbsConf("my-tablespace1"), }, }, } - Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1)) + Expect(v.validateClusterChanges(cluster, 
oldCluster)).To(HaveLen(1)) }) It("should produce an error if a tablespace is reduced in size", func() { - oldCluster := &Cluster{ + oldCluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ createFakeTemporaryTbsConf("my-tablespace1"), }, }, } - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ { Name: "my-tablespace1", - Storage: StorageConfiguration{ + Storage: apiv1.StorageConfiguration{ Size: "9Gi", }, }, }, }, } - Expect(cluster.ValidateChanges(oldCluster)).To(HaveLen(1)) + Expect(v.validateClusterChanges(cluster, oldCluster)).To(HaveLen(1)) }) It("should not complain when the backup section refers to a tbs that is defined", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ { Name: "my-tablespace1", - Storage: StorageConfiguration{ + Storage: apiv1.StorageConfiguration{ Size: "9Gi", }, }, }, - Backup: &BackupConfiguration{ - VolumeSnapshot: &VolumeSnapshotConfiguration{ + Backup: &apiv1.BackupConfiguration{ + VolumeSnapshot: &apiv1.VolumeSnapshotConfiguration{ TablespaceClassName: map[string]string{ "my-tablespace1": "random", }, @@ -4644,29 +4599,29 @@ var _ = Describe("Tablespaces validation", func() { }, }, } - Expect(cluster.validateTablespaceBackupSnapshot()).To(BeEmpty()) + Expect(v.validateTablespaceBackupSnapshot(cluster)).To(BeEmpty()) }) It("should complain when the backup section refers to a tbs that is not defined", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", }, - Spec: ClusterSpec{ + Spec: apiv1.ClusterSpec{ Instances: 3, - StorageConfiguration: StorageConfiguration{ + StorageConfiguration: apiv1.StorageConfiguration{ Size: "10Gi", }, - Tablespaces: []TablespaceConfiguration{ + Tablespaces: []apiv1.TablespaceConfiguration{ { Name: "my-tablespace1", - Storage: StorageConfiguration{ + Storage: apiv1.StorageConfiguration{ Size: "9Gi", }, }, }, - Backup: &BackupConfiguration{ - VolumeSnapshot: &VolumeSnapshotConfiguration{ + Backup: &apiv1.BackupConfiguration{ + VolumeSnapshot: &apiv1.VolumeSnapshotConfiguration{ TablespaceClassName: map[string]string{ "not-present": "random", }, @@ -4674,107 +4629,114 @@ var _ = Describe("Tablespaces validation", func() { }, }, } - Expect(cluster.validateTablespaceBackupSnapshot()).To(HaveLen(1)) + Expect(v.validateTablespaceBackupSnapshot(cluster)).To(HaveLen(1)) }) }) var _ = Describe("Validate hibernation", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + It("should succeed if hibernation is set to 'on'", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ 
Annotations: map[string]string{ utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOn), }, }, } - Expect(cluster.validateHibernationAnnotation()).To(BeEmpty()) + Expect(v.validateHibernationAnnotation(cluster)).To(BeEmpty()) }) It("should succeed if hibernation is set to 'off'", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOff), }, }, } - Expect(cluster.validateHibernationAnnotation()).To(BeEmpty()) + Expect(v.validateHibernationAnnotation(cluster)).To(BeEmpty()) }) It("should fail if hibernation is set to an invalid value", func() { - cluster := &Cluster{ + cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ utils.HibernationAnnotationName: "", }, }, } - Expect(cluster.validateHibernationAnnotation()).To(HaveLen(1)) + Expect(v.validateHibernationAnnotation(cluster)).To(HaveLen(1)) }) }) var _ = Describe("validateManagedServices", func() { - var cluster *Cluster + var cluster *apiv1.Cluster + var v *ClusterCustomValidator BeforeEach(func() { - cluster = &Cluster{ + cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test", }, - Spec: ClusterSpec{ - Managed: &ManagedConfiguration{ - Services: &ManagedServices{ - Additional: []ManagedService{}, + Spec: apiv1.ClusterSpec{ + Managed: &apiv1.ManagedConfiguration{ + Services: &apiv1.ManagedServices{ + Additional: []apiv1.ManagedService{}, }, }, }, } + v = &ClusterCustomValidator{} }) Context("when Managed or Services is nil", func() { It("should return no errors", func() { cluster.Spec.Managed = nil - Expect(cluster.validateManagedServices()).To(BeNil()) + Expect(v.validateManagedServices(cluster)).To(BeNil()) - cluster.Spec.Managed = &ManagedConfiguration{} + cluster.Spec.Managed = &apiv1.ManagedConfiguration{} cluster.Spec.Managed.Services = nil - Expect(cluster.validateManagedServices()).To(BeNil()) + Expect(v.validateManagedServices(cluster)).To(BeNil()) }) }) Context("when there are no duplicate names", func() { It("should return no errors", func() { - cluster.Spec.Managed.Services.Additional = []ManagedService{ + cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{ { - ServiceTemplate: ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: "service1"}, + ServiceTemplate: apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: "service1"}, }, }, { - ServiceTemplate: ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: "service2"}, + ServiceTemplate: apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: "service2"}, }, }, } - Expect(cluster.validateManagedServices()).To(BeNil()) + Expect(v.validateManagedServices(cluster)).To(BeNil()) }) }) Context("when there are duplicate names", func() { It("should return an error", func() { - cluster.Spec.Managed.Services.Additional = []ManagedService{ + cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{ { - ServiceTemplate: ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: "service1"}, + ServiceTemplate: apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: "service1"}, }, }, { - ServiceTemplate: ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: "service1"}, + ServiceTemplate: apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: "service1"}, }, }, } - errs := cluster.validateManagedServices() + errs := v.validateManagedServices(cluster) Expect(errs).To(HaveLen(1)) Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid)) 
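+			// Both duplicate entries collapse into one invalid-field error,
+			// asserted below against the spec.managed.services.additional path.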
Expect(errs[0].Field).To(Equal("spec.managed.services.additional")) @@ -4784,14 +4746,14 @@ var _ = Describe("validateManagedServices", func() { Context("when service template validation fails", func() { It("should return an error", func() { - cluster.Spec.Managed.Services.Additional = []ManagedService{ + cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{ { - ServiceTemplate: ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: ""}, + ServiceTemplate: apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: ""}, }, }, } - errs := cluster.validateManagedServices() + errs := v.validateManagedServices(cluster) Expect(errs).To(HaveLen(1)) Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid)) Expect(errs[0].Field).To(Equal("spec.managed.services.additional[0]")) @@ -4804,13 +4766,13 @@ var _ = Describe("validateManagedServices", func() { Expect(err.Field).To(Equal(fmt.Sprintf("spec.managed.services.additional[%d]", index))) Expect(err.Detail).To(Equal(expectedDetail)) } - cluster.Spec.Managed.Services.Additional = []ManagedService{ - {ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadWriteName()}}}, - {ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadName()}}}, - {ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceReadOnlyName()}}}, - {ServiceTemplate: ServiceTemplateSpec{ObjectMeta: Metadata{Name: cluster.GetServiceAnyName()}}}, + cluster.Spec.Managed.Services.Additional = []apiv1.ManagedService{ + {ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadWriteName()}}}, + {ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadName()}}}, + {ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceReadOnlyName()}}}, + {ServiceTemplate: apiv1.ServiceTemplateSpec{ObjectMeta: apiv1.Metadata{Name: cluster.GetServiceAnyName()}}}, } - errs := cluster.validateManagedServices() + errs := v.validateManagedServices(cluster) Expect(errs).To(HaveLen(4)) assertError("test-rw", 0, errs[0]) assertError("test-r", 1, errs[1]) @@ -4821,19 +4783,19 @@ var _ = Describe("validateManagedServices", func() { Context("disabledDefault service validation", func() { It("should allow the disablement of ro and r service", func() { - cluster.Spec.Managed.Services.DisabledDefaultServices = []ServiceSelectorType{ - ServiceSelectorTypeR, - ServiceSelectorTypeRO, + cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{ + apiv1.ServiceSelectorTypeR, + apiv1.ServiceSelectorTypeRO, } - errs := cluster.validateManagedServices() + errs := v.validateManagedServices(cluster) Expect(errs).To(BeEmpty()) }) It("should not allow the disablement of rw service", func() { - cluster.Spec.Managed.Services.DisabledDefaultServices = []ServiceSelectorType{ - ServiceSelectorTypeRW, + cluster.Spec.Managed.Services.DisabledDefaultServices = []apiv1.ServiceSelectorType{ + apiv1.ServiceSelectorTypeRW, } - errs := cluster.validateManagedServices() + errs := v.validateManagedServices(cluster) Expect(errs).To(HaveLen(1)) Expect(errs[0].Type).To(Equal(field.ErrorTypeInvalid)) Expect(errs[0].Field).To(Equal("spec.managed.services.disabledDefaultServices")) @@ -4844,7 +4806,7 @@ var _ = Describe("validateManagedServices", func() { var _ = Describe("ServiceTemplate Validation", func() { var ( path *field.Path - serviceSpecs ServiceTemplateSpec + serviceSpecs apiv1.ServiceTemplateSpec ) 
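+	// path and serviceSpecs are shared fixtures; the BeforeEach below
+	// re-initializes them so every spec starts from a clean template.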
BeforeEach(func() { @@ -4854,8 +4816,8 @@ var _ = Describe("ServiceTemplate Validation", func() { Describe("validateServiceTemplate", func() { Context("when name is required", func() { It("should return an error if the name is empty", func() { - serviceSpecs = ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: ""}, + serviceSpecs = apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: ""}, } errs := validateServiceTemplate(path, true, serviceSpecs) @@ -4864,8 +4826,8 @@ var _ = Describe("ServiceTemplate Validation", func() { }) It("should not return an error if the name is present", func() { - serviceSpecs = ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: "valid-name"}, + serviceSpecs = apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: "valid-name"}, } errs := validateServiceTemplate(path, true, serviceSpecs) @@ -4875,8 +4837,8 @@ var _ = Describe("ServiceTemplate Validation", func() { Context("when name is not allowed", func() { It("should return an error if the name is present", func() { - serviceSpecs = ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: "invalid-name"}, + serviceSpecs = apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: "invalid-name"}, } errs := validateServiceTemplate(path, false, serviceSpecs) @@ -4885,8 +4847,8 @@ var _ = Describe("ServiceTemplate Validation", func() { }) It("should not return an error if the name is empty", func() { - serviceSpecs = ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: ""}, + serviceSpecs = apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: ""}, } errs := validateServiceTemplate(path, false, serviceSpecs) @@ -4896,8 +4858,8 @@ var _ = Describe("ServiceTemplate Validation", func() { Context("when selector is present", func() { It("should return an error if the selector is present", func() { - serviceSpecs = ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: "valid-name"}, + serviceSpecs = apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: "valid-name"}, Spec: corev1.ServiceSpec{ Selector: map[string]string{"app": "test"}, }, @@ -4909,8 +4871,8 @@ var _ = Describe("ServiceTemplate Validation", func() { }) It("should not return an error if the selector is absent", func() { - serviceSpecs = ServiceTemplateSpec{ - ObjectMeta: Metadata{Name: "valid-name"}, + serviceSpecs = apiv1.ServiceTemplateSpec{ + ObjectMeta: apiv1.Metadata{Name: "valid-name"}, Spec: corev1.ServiceSpec{ Selector: map[string]string{}, }, @@ -4922,61 +4884,3 @@ var _ = Describe("ServiceTemplate Validation", func() { }) }) }) - -var _ = Describe("setDefaultPlugins", func() { - It("adds pre-defined plugins if not already present", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Plugins: []PluginConfiguration{ - {Name: "existing-plugin", Enabled: ptr.To(true)}, - }, - }, - } - config := &configuration.Data{ - IncludePlugins: "predefined-plugin1,predefined-plugin2", - } - - cluster.setDefaultPlugins(config) - - Expect(cluster.Spec.Plugins).To( - ContainElement(PluginConfiguration{Name: "existing-plugin", Enabled: ptr.To(true)})) - Expect(cluster.Spec.Plugins).To( - ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)})) - Expect(cluster.Spec.Plugins).To( - ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)})) - }) - - It("does not add pre-defined plugins if already present", func() { - cluster := &Cluster{ - Spec: ClusterSpec{ - Plugins: []PluginConfiguration{ - {Name: "predefined-plugin1", Enabled: ptr.To(false)}, - }, - }, - } - config := 
&configuration.Data{ - IncludePlugins: "predefined-plugin1,predefined-plugin2", - } - - cluster.setDefaultPlugins(config) - - Expect(cluster.Spec.Plugins).To(HaveLen(2)) - Expect(cluster.Spec.Plugins).To( - ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(false)})) - Expect(cluster.Spec.Plugins).To( - ContainElement(PluginConfiguration{Name: "predefined-plugin2", Enabled: ptr.To(true)})) - }) - - It("handles empty plugin list gracefully", func() { - cluster := &Cluster{} - config := &configuration.Data{ - IncludePlugins: "predefined-plugin1", - } - - cluster.setDefaultPlugins(config) - - Expect(cluster.Spec.Plugins).To(HaveLen(1)) - Expect(cluster.Spec.Plugins).To( - ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)})) - }) -}) diff --git a/internal/webhook/v1/doc.go b/internal/webhook/v1/doc.go new file mode 100644 index 0000000000..8298d1e71a --- /dev/null +++ b/internal/webhook/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains the webhooks for the postgresql v1 API group +package v1 diff --git a/internal/webhook/v1/pooler_webhook.go b/internal/webhook/v1/pooler_webhook.go new file mode 100644 index 0000000000..5526955a89 --- /dev/null +++ b/internal/webhook/v1/pooler_webhook.go @@ -0,0 +1,254 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// AllowedPgbouncerGenericConfigurationParameters is the list of allowed parameters for PgBouncer +var AllowedPgbouncerGenericConfigurationParameters = stringset.From([]string{ + "application_name_add_host", + "autodb_idle_timeout", + "cancel_wait_timeout", + "client_idle_timeout", + "client_login_timeout", + "default_pool_size", + "disable_pqexec", + "dns_max_ttl", + "dns_nxdomain_ttl", + "idle_transaction_timeout", + "ignore_startup_parameters", + "listen_backlog", + "log_connections", + "log_disconnections", + "log_pooler_errors", + "log_stats", + "max_client_conn", + "max_db_connections", + "max_packet_size", + "max_prepared_statements", + "max_user_connections", + "min_pool_size", + "pkt_buf", + "query_timeout", + "query_wait_timeout", + "reserve_pool_size", + "reserve_pool_timeout", + "sbuf_loopcnt", + "server_check_delay", + "server_check_query", + "server_connect_timeout", + "server_fast_close", + "server_idle_timeout", + "server_lifetime", + "server_login_retry", + "server_reset_query", + "server_reset_query_always", + "server_round_robin", + "server_tls_ciphers", + "server_tls_protocols", + "stats_period", + "suspend_timeout", + "tcp_defer_accept", + "tcp_socket_buffer", + "tcp_keepalive", + "tcp_keepcnt", + "tcp_keepidle", + "tcp_keepintvl", + "tcp_user_timeout", + "track_extra_parameters", + "verbose", +}) + +// poolerLog is for logging in this package. +var poolerLog = log.WithName("pooler-resource").WithValues("version", "v1") + +// SetupPoolerWebhookWithManager registers the webhook for Pooler in the manager. +func SetupPoolerWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Pooler{}). + WithValidator(&PoolerCustomValidator{}). + Complete() +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. +// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-pooler,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=poolers,versions=v1,name=vpooler.cnpg.io,sideEffects=None + +// PoolerCustomValidator struct is responsible for validating the Pooler resource +// when it is created, updated, or deleted. +type PoolerCustomValidator struct{} + +var _ webhook.CustomValidator = &PoolerCustomValidator{} + +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Pooler. 
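+// It emits a warning (rather than an error) when the Pooler is not an
+// automated integration, and wraps any field errors in apierrors.NewInvalid.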
+func (v *PoolerCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + pooler, ok := obj.(*apiv1.Pooler) + if !ok { + return nil, fmt.Errorf("expected a Pooler object but got %T", obj) + } + poolerLog.Info("Validation for Pooler upon creation", "name", pooler.GetName(), "namespace", pooler.GetNamespace()) + + var warns admission.Warnings + if !pooler.IsAutomatedIntegration() { + poolerLog.Info("Pooler not automatically configured, manual configuration required", + "name", pooler.Name, "namespace", pooler.Namespace, "cluster", pooler.Spec.Cluster.Name) + warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+ + "Manually configure it as described in the docs.", pooler.Name, pooler.Spec.Cluster.Name, pooler.Namespace)) + } + + allErrs := v.validate(pooler) + + if len(allErrs) == 0 { + return warns, nil + } + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"}, + pooler.Name, allErrs) +} + +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Pooler. +func (v *PoolerCustomValidator) ValidateUpdate( + _ context.Context, + oldObj, newObj runtime.Object, +) (admission.Warnings, error) { + pooler, ok := newObj.(*apiv1.Pooler) + if !ok { + return nil, fmt.Errorf("expected a Pooler object for the newObj but got %T", newObj) + } + + oldPooler, ok := oldObj.(*apiv1.Pooler) + if !ok { + return nil, fmt.Errorf("expected a Pooler object for the oldObj but got %T", oldObj) + } + + poolerLog.Info("Validation for Pooler upon update", "name", pooler.GetName(), "namespace", pooler.GetNamespace()) + + var warns admission.Warnings + if oldPooler.IsAutomatedIntegration() && !pooler.IsAutomatedIntegration() { + poolerLog.Info("Pooler not automatically configured, manual configuration required", + "name", pooler.Name, "namespace", pooler.Namespace, "cluster", pooler.Spec.Cluster.Name) + warns = append(warns, fmt.Sprintf("The operator won't handle the Pooler %q integration with the Cluster %q (%q). "+ + "Manually configure it as described in the docs.", pooler.Name, pooler.Spec.Cluster.Name, pooler.Namespace)) + } + + allErrs := v.validate(pooler) + if len(allErrs) == 0 { + return warns, nil + } + + return warns, apierrors.NewInvalid( + schema.GroupKind{Group: "pooler.cnpg.io", Kind: "Pooler"}, + pooler.Name, allErrs) +} + +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Pooler. +func (v *PoolerCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + pooler, ok := obj.(*apiv1.Pooler) + if !ok { + return nil, fmt.Errorf("expected a Pooler object but got %T", obj) + } + poolerLog.Info("Validation for Pooler upon deletion", "name", pooler.GetName(), "namespace", pooler.GetNamespace()) + + // TODO(user): fill in your validation logic upon object deletion. 
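+	// A hedged sketch of what such logic could look like; enabling it would
+	// also require switching the kubebuilder marker above to
+	// "verbs=create;update;delete". The status field consulted here is
+	// illustrative, not a committed contract:
+	//
+	//	if pooler.Status.Instances > 0 {
+	//		return admission.Warnings{"deleting a Pooler with running instances"}, nil
+	//	}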
+ + return nil, nil +} + +func (v *PoolerCustomValidator) validatePgBouncer(r *apiv1.Pooler) field.ErrorList { + var result field.ErrorList + switch { + case r.Spec.PgBouncer == nil: + result = append(result, + field.Invalid( + field.NewPath("spec", "pgbouncer"), + "", "required pgbouncer configuration")) + case r.Spec.PgBouncer.AuthQuerySecret != nil && r.Spec.PgBouncer.AuthQuerySecret.Name != "" && + r.Spec.PgBouncer.AuthQuery == "": + result = append(result, + field.Invalid( + field.NewPath("spec", "pgbouncer", "authQuery"), + "", "must specify an auth query when providing an auth query secret")) + case (r.Spec.PgBouncer.AuthQuerySecret == nil || r.Spec.PgBouncer.AuthQuerySecret.Name == "") && + r.Spec.PgBouncer.AuthQuery != "": + result = append(result, + field.Invalid( + field.NewPath("spec", "pgbouncer", "authQuerySecret", "name"), + "", "must specify an existing auth query secret when providing an auth query secret")) + } + + if r.Spec.PgBouncer != nil && len(r.Spec.PgBouncer.Parameters) > 0 { + result = append(result, v.validatePgbouncerGenericParameters(r)...) + } + + return result +} + +func (v *PoolerCustomValidator) validateCluster(r *apiv1.Pooler) field.ErrorList { + var result field.ErrorList + if r.Spec.Cluster.Name == "" { + result = append(result, + field.Invalid( + field.NewPath("spec", "cluster", "name"), + "", "must specify a cluster name")) + } + if r.Spec.Cluster.Name == r.Name { + result = append(result, + field.Invalid( + field.NewPath("metadata", "name"), + r.Name, "the pooler resource cannot have the same name of a cluster")) + } + return result +} + +// validate validates the configuration of a Pooler, returning +// a list of errors +func (v *PoolerCustomValidator) validate(r *apiv1.Pooler) (allErrs field.ErrorList) { + allErrs = append(allErrs, v.validatePgBouncer(r)...) + allErrs = append(allErrs, v.validateCluster(r)...) + return allErrs +} + +// validatePgbouncerGenericParameters validates pgbouncer parameters +func (v *PoolerCustomValidator) validatePgbouncerGenericParameters(r *apiv1.Pooler) field.ErrorList { + var result field.ErrorList + + for param := range r.Spec.PgBouncer.Parameters { + if !AllowedPgbouncerGenericConfigurationParameters.Has(param) { + result = append(result, + field.Invalid( + field.NewPath("spec", "cluster", "parameters"), + param, "Invalid or reserved parameter")) + } + } + return result +} diff --git a/api/v1/pooler_webhook_test.go b/internal/webhook/v1/pooler_webhook_test.go similarity index 52% rename from api/v1/pooler_webhook_test.go rename to internal/webhook/v1/pooler_webhook_test.go index a1791248c9..c49da31e18 100644 --- a/api/v1/pooler_webhook_test.go +++ b/internal/webhook/v1/pooler_webhook_test.go @@ -19,113 +19,120 @@ package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) var _ = Describe("Pooler validation", func() { + var v *PoolerCustomValidator + BeforeEach(func() { + v = &PoolerCustomValidator{} + }) + It("doesn't allow specifying authQuerySecret without any authQuery", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ - AuthQuerySecret: &LocalObjectReference{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{ + AuthQuerySecret: &apiv1.LocalObjectReference{ Name: "test", }, }, }, } - Expect(pooler.validatePgBouncer()).NotTo(BeEmpty()) + Expect(v.validatePgBouncer(pooler)).NotTo(BeEmpty()) }) It("doesn't allow specifying authQuery without any authQuerySecret", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{ AuthQuery: "test", }, }, } - Expect(pooler.validatePgBouncer()).NotTo(BeEmpty()) + Expect(v.validatePgBouncer(pooler)).NotTo(BeEmpty()) }) It("allows having both authQuery and authQuerySecret", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{ AuthQuery: "test", - AuthQuerySecret: &LocalObjectReference{ + AuthQuerySecret: &apiv1.LocalObjectReference{ Name: "test", }, }, }, } - Expect(pooler.validatePgBouncer()).To(BeEmpty()) + Expect(v.validatePgBouncer(pooler)).To(BeEmpty()) }) It("allows the autoconfiguration mode", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{}, + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{}, }, } - Expect(pooler.validatePgBouncer()).To(BeEmpty()) + Expect(v.validatePgBouncer(pooler)).To(BeEmpty()) }) It("doesn't allow not specifying a cluster name", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - Cluster: LocalObjectReference{Name: ""}, + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + Cluster: apiv1.LocalObjectReference{Name: ""}, }, } - Expect(pooler.validateCluster()).NotTo(BeEmpty()) + Expect(v.validateCluster(pooler)).NotTo(BeEmpty()) }) It("doesn't allow to have a pooler with the same name of the cluster", func() { - pooler := Pooler{ + pooler := &apiv1.Pooler{ ObjectMeta: metav1.ObjectMeta{ Name: "test", }, - Spec: PoolerSpec{ - Cluster: LocalObjectReference{ + Spec: apiv1.PoolerSpec{ + Cluster: apiv1.LocalObjectReference{ Name: "test", }, }, } - Expect(pooler.validateCluster()).NotTo(BeEmpty()) + Expect(v.validateCluster(pooler)).NotTo(BeEmpty()) }) It("doesn't complain when specifying a cluster name", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - Cluster: LocalObjectReference{Name: "cluster-example"}, + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + Cluster: apiv1.LocalObjectReference{Name: "cluster-example"}, }, } - Expect(pooler.validateCluster()).To(BeEmpty()) + Expect(v.validateCluster(pooler)).To(BeEmpty()) }) It("does complain when given a fixed parameter", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + PgBouncer: &apiv1.PgBouncerSpec{ Parameters: map[string]string{"pool_mode": "test"}, }, }, } - Expect(pooler.validatePgbouncerGenericParameters()).NotTo(BeEmpty()) + Expect(v.validatePgbouncerGenericParameters(pooler)).NotTo(BeEmpty()) }) It("does not complain when given a valid parameter", func() { - pooler := Pooler{ - Spec: PoolerSpec{ - PgBouncer: &PgBouncerSpec{ + pooler := &apiv1.Pooler{ + Spec: apiv1.PoolerSpec{ + 
PgBouncer: &apiv1.PgBouncerSpec{ Parameters: map[string]string{"verbose": "10"}, }, }, } - Expect(pooler.validatePgbouncerGenericParameters()).To(BeEmpty()) + Expect(v.validatePgbouncerGenericParameters(pooler)).To(BeEmpty()) }) }) diff --git a/internal/webhook/v1/scheduledbackup_webhook.go b/internal/webhook/v1/scheduledbackup_webhook.go new file mode 100644 index 0000000000..fdf6ccdbf3 --- /dev/null +++ b/internal/webhook/v1/scheduledbackup_webhook.go @@ -0,0 +1,190 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "strings" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/robfig/cron" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// scheduledBackupLog is for logging in this package. +var scheduledBackupLog = log.WithName("scheduledbackup-resource").WithValues("version", "v1") + +// SetupScheduledBackupWebhookWithManager registers the webhook for ScheduledBackup in the manager. +func SetupScheduledBackupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.ScheduledBackup{}). + WithValidator(&ScheduledBackupCustomValidator{}). + WithDefaulter(&ScheduledBackupCustomDefaulter{}). + Complete() +} + +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-scheduledbackup,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,verbs=create;update,versions=v1,name=mscheduledbackup.cnpg.io,sideEffects=None + +// ScheduledBackupCustomDefaulter struct is responsible for setting default values on the custom resource of the +// Kind ScheduledBackup when those are created or updated. +type ScheduledBackupCustomDefaulter struct{} + +var _ webhook.CustomDefaulter = &ScheduledBackupCustomDefaulter{} + +// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind ScheduledBackup. +func (d *ScheduledBackupCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { + scheduledBackup, ok := obj.(*apiv1.ScheduledBackup) + if !ok { + return fmt.Errorf("expected an ScheduledBackup object but got %T", obj) + } + scheduledBackupLog.Info("Defaulting for ScheduledBackup", + "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace()) + + // TODO(user): fill in your defaulting logic. + + return nil +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. 
+// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-scheduledbackup,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=scheduledbackups,versions=v1,name=vscheduledbackup.cnpg.io,sideEffects=None + +// ScheduledBackupCustomValidator struct is responsible for validating the ScheduledBackup resource +// when it is created, updated, or deleted. +type ScheduledBackupCustomValidator struct { + // TODO(user): Add more fields as needed for validation +} + +var _ webhook.CustomValidator = &ScheduledBackupCustomValidator{} + +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup. +func (v *ScheduledBackupCustomValidator) ValidateCreate( + _ context.Context, + obj runtime.Object, +) (admission.Warnings, error) { + scheduledBackup, ok := obj.(*apiv1.ScheduledBackup) + if !ok { + return nil, fmt.Errorf("expected a ScheduledBackup object but got %T", obj) + } + scheduledBackupLog.Info("Validation for ScheduledBackup upon creation", + "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace()) + + warnings, allErrs := v.validate(scheduledBackup) + if len(allErrs) == 0 { + return warnings, nil + } + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "ScheduledBackup"}, + scheduledBackup.Name, allErrs) +} + +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup. +func (v *ScheduledBackupCustomValidator) ValidateUpdate( + _ context.Context, + _, newObj runtime.Object, +) (admission.Warnings, error) { + scheduledBackup, ok := newObj.(*apiv1.ScheduledBackup) + if !ok { + return nil, fmt.Errorf("expected a ScheduledBackup object for the newObj but got %T", newObj) + } + scheduledBackupLog.Info("Validation for ScheduledBackup upon update", + "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace()) + + warnings, allErrs := v.validate(scheduledBackup) + if len(allErrs) == 0 { + return warnings, nil + } + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "scheduledBackup.cnpg.io", Kind: "ScheduledBackup"}, + scheduledBackup.Name, allErrs) +} + +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type ScheduledBackup. +func (v *ScheduledBackupCustomValidator) ValidateDelete( + _ context.Context, + obj runtime.Object, +) (admission.Warnings, error) { + scheduledBackup, ok := obj.(*apiv1.ScheduledBackup) + if !ok { + return nil, fmt.Errorf("expected a ScheduledBackup object but got %T", obj) + } + scheduledBackupLog.Info("Validation for ScheduledBackup upon deletion", + "name", scheduledBackup.GetName(), "namespace", scheduledBackup.GetNamespace()) + + // TODO(user): fill in your validation logic upon object deletion. 
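+	// Deletion requests are currently always admitted. If deletion
+	// validation is enabled later ("verbs=create;update;delete" in the
+	// marker above), a guard along these lines could surface a warning;
+	// the status field used below is an assumption, shown only as a sketch:
+	//
+	//	if scheduledBackup.Status.LastScheduleTime != nil {
+	//		return admission.Warnings{"this ScheduledBackup has already produced backups"}, nil
+	//	}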
+ + return nil, nil +} + +func (v *ScheduledBackupCustomValidator) validate(r *apiv1.ScheduledBackup) (admission.Warnings, field.ErrorList) { + var result field.ErrorList + var warnings admission.Warnings + + if _, err := cron.Parse(r.GetSchedule()); err != nil { + result = append(result, + field.Invalid( + field.NewPath("spec", "schedule"), + r.Spec.Schedule, err.Error())) + } else if len(strings.Fields(r.Spec.Schedule)) != 6 { + warnings = append( + warnings, + "Schedule parameter may not have the right number of arguments "+ + "(usually six arguments are needed)", + ) + } + + if r.Spec.Method == apiv1.BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { + result = append(result, field.Invalid( + field.NewPath("spec", "method"), + r.Spec.Method, + "Cannot use volumeSnapshot backup method due to missing "+ + "VolumeSnapshot CRD. If you installed the CRD after having "+ + "started the operator, please restart it to enable "+ + "VolumeSnapshot support", + )) + } + + if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.Online != nil { + result = append(result, field.Invalid( + field.NewPath("spec", "online"), + r.Spec.Online, + "Online parameter can be specified only if the method is volumeSnapshot", + )) + } + + if r.Spec.Method == apiv1.BackupMethodBarmanObjectStore && r.Spec.OnlineConfiguration != nil { + result = append(result, field.Invalid( + field.NewPath("spec", "onlineConfiguration"), + r.Spec.OnlineConfiguration, + "OnlineConfiguration parameter can be specified only if the method is volumeSnapshot", + )) + } + + return warnings, result +} diff --git a/internal/webhook/v1/scheduledbackup_webhook_test.go b/internal/webhook/v1/scheduledbackup_webhook_test.go new file mode 100644 index 0000000000..173df06ebc --- /dev/null +++ b/internal/webhook/v1/scheduledbackup_webhook_test.go @@ -0,0 +1,126 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/utils/ptr" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Validate schedule", func() { + var v *ScheduledBackupCustomValidator + BeforeEach(func() { + v = &ScheduledBackupCustomValidator{} + }) + + It("doesn't complain if there's a schedule", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "0 0 0 * * *", + }, + } + + warnings, result := v.validate(schedule) + Expect(warnings).To(BeEmpty()) + Expect(result).To(BeEmpty()) + }) + + It("warn the user if the schedule has a wrong number of arguments", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "1 2 3 4 5", + }, + } + + warnings, result := v.validate(schedule) + Expect(warnings).To(HaveLen(1)) + Expect(result).To(BeEmpty()) + }) + + It("complain with a wrong time", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "0 0 0 * * * 1996", + }, + } + + warnings, result := v.validate(schedule) + Expect(warnings).To(BeEmpty()) + Expect(result).To(HaveLen(1)) + }) + + It("doesn't complain if VolumeSnapshot CRD is present", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "0 0 0 * * *", + Method: apiv1.BackupMethodVolumeSnapshot, + }, + } + utils.SetVolumeSnapshot(true) + + warnings, result := v.validate(schedule) + Expect(warnings).To(BeEmpty()) + Expect(result).To(BeEmpty()) + }) + + It("complains if VolumeSnapshot CRD is not present", func() { + schedule := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Schedule: "0 0 0 * * *", + Method: apiv1.BackupMethodVolumeSnapshot, + }, + } + utils.SetVolumeSnapshot(false) + warnings, result := v.validate(schedule) + Expect(warnings).To(BeEmpty()) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.method")) + }) + + It("complains if online is set on a barman backup", func() { + scheduledBackup := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Method: apiv1.BackupMethodBarmanObjectStore, + Online: ptr.To(true), + Schedule: "* * * * * *", + }, + } + warnings, result := v.validate(scheduledBackup) + Expect(warnings).To(BeEmpty()) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.online")) + }) + + It("complains if onlineConfiguration is set on a barman backup", func() { + scheduledBackup := &apiv1.ScheduledBackup{ + Spec: apiv1.ScheduledBackupSpec{ + Method: apiv1.BackupMethodBarmanObjectStore, + OnlineConfiguration: &apiv1.OnlineConfiguration{}, + Schedule: "* * * * * *", + }, + } + warnings, result := v.validate(scheduledBackup) + Expect(warnings).To(BeEmpty()) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) + }) +}) diff --git a/internal/webhook/v1/suite_test.go b/internal/webhook/v1/suite_test.go new file mode 100644 index 0000000000..5bd0c55f58 --- /dev/null +++ b/internal/webhook/v1/suite_test.go @@ -0,0 +1,30 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 2c4806bc06..200bc57248 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -41,6 +41,7 @@ import ( ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + webhookv1 "github.com/cloudnative-pg/cloudnative-pg/internal/webhook/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -320,7 +321,6 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env func AssertClusterDefault( namespace string, clusterName string, - isExpectedToDefault bool, env *environment.TestingEnvironment, ) { By("having a Cluster object populated with default values", func() { @@ -335,12 +335,10 @@ func AssertClusterDefault( g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) - validationErr := cluster.Validate() - if isExpectedToDefault { - Expect(validationErr).Should(BeEmpty(), validationErr) - } else { - Expect(validationErr).ShouldNot(BeEmpty(), validationErr) - } + validator := webhookv1.ClusterCustomValidator{} + validationWarn, validationErr := validator.ValidateCreate(env.Ctx, cluster) + Expect(validationWarn).To(BeEmpty()) + Expect(validationErr).ToNot(HaveOccurred()) }) } diff --git a/tests/e2e/webhook_test.go b/tests/e2e/webhook_test.go index 2171638e14..e0d496f0b2 100644 --- a/tests/e2e/webhook_test.go +++ b/tests/e2e/webhook_test.go @@ -48,7 +48,6 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper ) var webhookNamespace, clusterName string - var clusterIsDefaulted bool var err error BeforeEach(func() { @@ -64,7 +63,6 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper It("checks if webhook works as expected", func() { webhookNamespacePrefix := "webhook-test" - clusterIsDefaulted = true By("having a deployment for the operator in state ready", func() { // Make sure that we have at least one operator already working err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) @@ -80,12 +78,11 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env) // Check if cluster is ready and the default values are populated - AssertClusterDefault(webhookNamespace, clusterName, clusterIsDefaulted, env) + AssertClusterDefault(webhookNamespace, clusterName, env) }) It("Does not crash the operator when disabled", func() { webhookNamespacePrefix := "no-webhook-test" - clusterIsDefaulted = true mWebhook, admissionNumber, err := operator.GetMutatingWebhookByName(env.Ctx, env.Client, mutatingWebhook) Expect(err).ToNot(HaveOccurred()) @@ -120,7 +117,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper Expect(err).ToNot(HaveOccurred()) AssertCreateCluster(webhookNamespace, clusterName, sampleFile, env) // Check if cluster is ready and has no default value in the object - AssertClusterDefault(webhookNamespace, clusterName, clusterIsDefaulted, env) + AssertClusterDefault(webhookNamespace, clusterName, env) // Make sure the operator is intact and not crashing By("having a deployment for the operator in state ready", 
func() { From e5ad1e0f41689cfd2f8437acb27aeff8d09eea3a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:39:31 +0100 Subject: [PATCH 300/836] chore(deps): update spellcheck to v0.46.0 (main) (#6539) --- .github/workflows/spellcheck.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 1fd12f3085..27de6b2c8e 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -28,4 +28,4 @@ jobs: uses: actions/checkout@v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@0.45.0 + uses: rojopolis/spellcheck-github-actions@0.46.0 diff --git a/Makefile b/Makefile index d08b303300..6f7e280063 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.5 GORELEASER_VERSION ?= v2.5.1 -SPELLCHECK_VERSION ?= 0.45.0 +SPELLCHECK_VERSION ?= 0.46.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.38.0 OPM_VERSION ?= v1.49.0 From a0800ca858b4ad260c8a9fc965b482f445090a6a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 11:08:37 +0100 Subject: [PATCH 301/836] chore(deps): update agilepathway/pull-request-label-checker docker tag to v1.6.61 (main) (#6553) --- .github/workflows/require-labels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index 1bbbfb1d23..1bb64dfd17 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Require labels - uses: docker://agilepathway/pull-request-label-checker:v1.6.60 + uses: docker://agilepathway/pull-request-label-checker:v1.6.61 with: any_of: "ok to merge :ok_hand:" none_of: "do not merge" From c764308547f834df1fc2f05c32b462b05af9d4be Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 11 Jan 2025 10:43:19 +0100 Subject: [PATCH 302/836] chore(deps): update dependency kubernetes-csi/external-resizer to v1.13.0 (main) (#6558) --- hack/setup-cluster.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 7bd5e66072..3d4ee152dc 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -29,7 +29,7 @@ K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 EXTERNAL_PROVISIONER_VERSION=v5.1.0 -EXTERNAL_RESIZER_VERSION=v1.12.0 +EXTERNAL_RESIZER_VERSION=v1.13.0 EXTERNAL_ATTACHER_VERSION=v4.8.0 K8S_VERSION=${K8S_VERSION-} KUBECTL_VERSION=${KUBECTL_VERSION-} From 3d52abbc0c16762dd5716fcb52cc3fa02fc629fc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 11 Jan 2025 13:18:36 +0100 Subject: [PATCH 303/836] fix(deps): update module sigs.k8s.io/controller-runtime to v0.19.4 (main) (#6538) --- go.mod | 3 ++- go.sum | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5058c8e429..508e56fbe8 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( k8s.io/cli-runtime v0.32.0 k8s.io/client-go v0.32.0 k8s.io/utils v0.0.0-20241210054802-24370beab758 - sigs.k8s.io/controller-runtime v0.19.3 + 
sigs.k8s.io/controller-runtime v0.19.4 sigs.k8s.io/yaml v1.4.0 ) @@ -57,6 +57,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/fatih/color v1.17.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect diff --git a/go.sum b/go.sum index ff99792126..1c0c76ec97 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,8 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= @@ -295,8 +297,8 @@ k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJ k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= -sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= +sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= +sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= From 4a150f42a7ffd6f09c099017a7058f54019a2364 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 11 Jan 2025 21:29:42 +0100 Subject: [PATCH 304/836] chore(deps): update dependency kubernetes-csi/external-resizer to v1.13.1 (main) (#6573) --- hack/setup-cluster.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 3d4ee152dc..a6c9c29f9e 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -29,7 +29,7 @@ K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 EXTERNAL_PROVISIONER_VERSION=v5.1.0 -EXTERNAL_RESIZER_VERSION=v1.13.0 +EXTERNAL_RESIZER_VERSION=v1.13.1 EXTERNAL_ATTACHER_VERSION=v4.8.0 K8S_VERSION=${K8S_VERSION-} KUBECTL_VERSION=${KUBECTL_VERSION-} From 25e061312c472b7406c901d1adae33771eb4d515 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 09:38:57 +0100 Subject: [PATCH 305/836] chore(deps): update operator framework to v1.39.0 (main) (#6581) --- Makefile | 2 +- 
config/olm-scorecard/patches/basic.config.yaml | 2 +- config/olm-scorecard/patches/olm.config.yaml | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 6f7e280063..c71f2e7f95 100644 --- a/Makefile +++ b/Makefile @@ -46,7 +46,7 @@ CONTROLLER_TOOLS_VERSION ?= v0.16.5 GORELEASER_VERSION ?= v2.5.1 SPELLCHECK_VERSION ?= 0.46.0 WOKE_VERSION ?= 0.19.0 -OPERATOR_SDK_VERSION ?= v1.38.0 +OPERATOR_SDK_VERSION ?= v1.39.0 OPM_VERSION ?= v1.49.0 PREFLIGHT_VERSION ?= 1.11.1 OPENSHIFT_VERSIONS ?= v4.12-v4.18 diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml index 84683cf8d7..b89ce3bf90 100644 --- a/config/olm-scorecard/patches/basic.config.yaml +++ b/config/olm-scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.38.0 + image: quay.io/operator-framework/scorecard-test:v1.39.0 labels: suite: basic test: basic-check-spec-test diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml index 43f40a8b3f..7eff5c9099 100644 --- a/config/olm-scorecard/patches/olm.config.yaml +++ b/config/olm-scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.38.0 + image: quay.io/operator-framework/scorecard-test:v1.39.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.38.0 + image: quay.io/operator-framework/scorecard-test:v1.39.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.38.0 + image: quay.io/operator-framework/scorecard-test:v1.39.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.38.0 + image: quay.io/operator-framework/scorecard-test:v1.39.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.38.0 + image: quay.io/operator-framework/scorecard-test:v1.39.0 labels: suite: olm test: olm-status-descriptors-test From 347558ee3dffb8a69f136dd6fafa1090554b21d7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:49:45 +0100 Subject: [PATCH 306/836] chore(deps): update dependency vmware-tanzu/velero to v1.15.1 (main) (#6580) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 6ccdc1b64b..96260e749b 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1343,7 +1343,7 @@ jobs: name: Setup Velero uses: nick-fields/retry@v3 env: - VELERO_VERSION: "v1.15.0" + VELERO_VERSION: "v1.15.1" VELERO_AWS_PLUGIN_VERSION: "v1.11.0" with: timeout_minutes: 10 From 6f780d35c3c094f0bf57482568dd4377c41c3e15 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 20:51:55 +0100 Subject: [PATCH 307/836] fix(deps): update module google.golang.org/grpc to 
v1.69.4 (main) (#6587) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 508e56fbe8..7ec79071cc 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/term v0.28.0 - google.golang.org/grpc v1.69.2 + google.golang.org/grpc v1.69.4 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.0 k8s.io/apiextensions-apiserver v0.32.0 diff --git a/go.sum b/go.sum index 1c0c76ec97..9f88f10806 100644 --- a/go.sum +++ b/go.sum @@ -266,8 +266,8 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= -google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 93d40efde1473c5e07522703ca6ba38639098a3c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 10:21:20 +0100 Subject: [PATCH 308/836] chore(deps): update dependency vmware-tanzu/velero-plugin-for-aws to v1.11.1 (main) (#6610) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 96260e749b..7eb762fd35 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1344,7 +1344,7 @@ jobs: uses: nick-fields/retry@v3 env: VELERO_VERSION: "v1.15.1" - VELERO_AWS_PLUGIN_VERSION: "v1.11.0" + VELERO_AWS_PLUGIN_VERSION: "v1.11.1" with: timeout_minutes: 10 max_attempts: 3 From 404f0acb2087485408339a0a6041ebcc1c99240e Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 21 Jan 2025 13:35:08 +0100 Subject: [PATCH 309/836] docs: add OpenSSF best practices badge (#6627) Closes #6626 Signed-off-by: Jonathan Gonzalez V. 
Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 8a8e8df0d6..e9e3eda545 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ -[![CNCF Landscape](https://img.shields.io/badge/CNCF%20Landscape-5699C6)](https://landscape.cncf.io/?item=app-definition-and-development--database--cloudnativepg) +[![CNCF Landscape](https://img.shields.io/badge/CNCF%20Landscape-5699C6)][cncf-landscape] [![Latest Release](https://img.shields.io/github/v/release/cloudnative-pg/cloudnative-pg.svg)][latest-release] [![GitHub License](https://img.shields.io/github/license/cloudnative-pg/cloudnative-pg)][license] +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9933/badge)][openssf] [![Documentation][documentation-badge]][documentation] [![Stack Overflow](https://img.shields.io/badge/stackoverflow-cloudnative--pg-blue?logo=stackoverflow&logoColor=%23F48024&link=https%3A%2F%2Fstackoverflow.com%2Fquestions%2Ftagged%2Fcloudnative-pg)][stackoverflow] @@ -160,8 +161,10 @@ organization to this list! are trademarks or registered trademarks of the PostgreSQL Community Association of Canada, and used with their permission.* +[cncf-landscape]: https://landscape.cncf.io/?item=app-definition-and-development--database--cloudnativepg [stackoverflow]: https://stackoverflow.com/questions/tagged/cloudnative-pg [latest-release]: https://github.com/cloudnative-pg/cloudnative-pg/releases/latest [documentation]: https://cloudnative-pg.io/documentation/current/ [license]: https://github.com/cloudnative-pg/cloudnative-pg?tab=Apache-2.0-1-ov-file#readme +[openssf]: https://www.bestpractices.dev/projects/9933 [documentation-badge]: https://img.shields.io/badge/Documentation-white?logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAGN0lEQVR4nJRXXWwcVxU%2B8%2F%2BzP%2BPZtR2v7dqy07jUJUALNaiK6lZyUVVKWgGKaIv8QCMekBAVQlQICcEzVZFQVYFKQhASEBHlISJPCRJEshTFChgrIYHEiYMh69jetffHM7Mzc%2B9Bs7vjnTs7yZpZWbt37s%2F5zne%2Bc861CD0eXRkbHc3NfjeffvxNAGEAgULD2756v35%2B3qe1Nc4fnQVEXlA2LnOcXlCF8S%2B6vvVgq%2FL3M65X3e51PvfQCU4WJgZe%2B8GQ8fS7AKgjBB8KEHwjDXZSjkf0CREAaXM2eI9c65siqWxWl360Xl74ANHz%2Fy8AitxnTBfmz%2BhyYS4wGhwObQCIHSA0AigOMBzvOsXzd4pnjyL6NMmWEH8hi2b28Og3%2FqRJA0ewfQy0v1vGO2NovwPo%2FEU%2FwVgSU1PI%2BSu79v3lJAB8HM%2BTI%2FO%2FUUXzM4xHIe0xI4DdRqOAwnF%2F38ePPyzaDIDh%2FMxcWh462m08aojuGY97C0nrAEHg9BlF0fmeAPr0J15vbaKsp0BZQzEDEAlP9B209UIIVXUta%2FQEQHwxgxFjTc%2BRskAwrgVWmHtg22vMPJwLDqGUNJIAMHVAkGu3WdpZz6NAkgSXpINSycluV28er1a3rJ4M3F2%2F9AtCvXKycRrTQttrjINjxxxIL9jevxdaDHU%2FTBr6pL5ruzuLZubgUQBOY2hPij3GBUe7tBCMBRE2KrXVSz0BBI%2FtPVgtV%2F%2FxkZ5WSjI%2F%2BFIXC3sHJwgT4yFqrZFFTSlVrp3sGYLwcfxSmXCbS00j2Ms4K7qkOsFx6qdTuiHtG4AimfmM8NyvOvR2G48qXtZ2fsfrN7%2BqpcRyUp0glKiimDm4TwAcHBp%2B9WeA4ki0GMWNR9OVF8BZvn7xtI%2FF09H8jzLEgz6yLwCDuelnFXHkTZZOytCOEdqDOtGwsm%2BNj00fXt%2B6%2Bj4vcA7bwNrZwENmXwAKuZnvsNRThs5ozMPfPiHyoDF7xiduHcXb70A8dRFheHjiySQATBZk0nl9MHPkBEWUoEtYjyrPFNwGzfdlD37Zdu98KCv%2BMmD2BYpUCvcST39e0%2BS1Wr249FAAg7mPzWrS5NstEbE0xrsiA6QN1PfRFLnhr%2BspxVJTlY8Mw1DqNXeyCQFREEXz9cHB0QOev73QaNhOF4B%2B45PHFHFgDhJTqjuubJFqX1KQco7NTTuW8kq95k2G4eLEGzM7lfItnjNeTKcOfV%2FT8hOuV77A9IK0XjgMpCO0ZiuV3L%2F6njCFAOmucGB3OII5XgCXEJTDdZLElVbu3Vz0fWexvL30k0B6ggBACOmIUBAEUKX0dDTvW7RCYcdZPq6n%2FSsQnUO2RuyBRgQ9Rc5mMvJ6CNIj1nXfd9qWAsCkaZzJAk1L8UjVqY737dSjfCGrPHWqXL32Q0mB%2F2BXnke00WaEYv2aTzAbnuV5pcWkDGAAGJmhSafh6hjr%2BW2SVYHrP7bb%2BOdPW%2FUgflGlTM2gaK%2Ft7tp6%2BN6yixdN89DcIwGktIFPABfNbwoQqQWEUnDJzg1g0jDeK5p7Kp7nensXFI7uyAr%2FLyM7fYLnpa6LYS
cE8vDnot5hrKlslm%2BfE3nVxJgO4o3KcYu%2FF8XM8yFQ27n%2F65Te%2FzKl3Jhpjj6TCIDneRD5%2FItxr1vdkALw7p1qfeWPpjHxMtsXaPxu6FLc%2BrnbSB1r7fcrlr36nqwMzQfnplJDryQCGOh%2FbLjhcM%2FEvQ4Pdund9xRV5m1LfTXaF%2BK9gsLGB9nsgddcz8thM%2FarPzYM8%2FFazf9sMFaU%2Fi%2FwvNANwEhPvUGR8ozn7d%2BiDKXixtKpbHp81nV9E7puRy31ixKUbOe%2Fv3Ud891ghhDrL5Z975eaOvV%2BCNRp0Gfz%2BcJjDABdTwlpdfKbId0t5XYAcHz5D5ZVtWUp9%2Flog2L7PgVJqZx0HOE5Cqghemv1%2Bt%2FeGBmZ%2BdB2yNN72UEpnzXG32YADA186i3bIpPxMhuKrFK%2Fd77JUnbkKbYvRJlC8DzKSZK76Lq1he2dKy%2BZuSfesSz5a2xHDbLJ%2BJaqdv5H4EUY%2BzbG2m9HgN7mg81bfw4W1uu7AjvHaqDhqF%2FZ3Fq5XFy%2FcESSDsx5fvZ7wLEsNfXk%2BjlVHfpSCOB%2FAQAA%2F%2F8zd8orZc2N9AAAAABJRU5ErkJggg%3D%3D From 721233c1fb976ca3d6b12c20409eb1fe9f3a6848 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 21 Jan 2025 13:40:02 +0100 Subject: [PATCH 310/836] docs: clarify readiness probe for replicas (#6629) Closes #6628 Signed-off-by: Gabriele Bartolini Signed-off-by: Francesco Canovai Co-authored-by: Jaime Silvela Co-authored-by: Francesco Canovai --- docs/src/instance_manager.md | 43 ++++++++++++++++++++++++++---------- docs/src/replication.md | 10 +++++++-- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index 53d13c4e4d..c1335e67cc 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -15,17 +15,27 @@ main container, which in turn runs the PostgreSQL instance. During the lifetime of the Pod, the instance manager acts as a backend to handle the [startup, liveness and readiness probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). -## Startup, liveness and readiness probes +## Startup, Liveness, and Readiness Probes -The startup and liveness probes rely on `pg_isready`, while the readiness -probe checks if the database is up and able to accept connections. +CloudNativePG leverages [PostgreSQL's `pg_isready`](https://www.postgresql.org/docs/current/app-pg-isready.html) +to implement Kubernetes startup, liveness, and readiness probes. ### Startup Probe -The `.spec.startDelay` parameter specifies the delay (in seconds) before the -liveness probe activates after a PostgreSQL Pod starts. By default, this is set -to `3600` seconds. You should adjust this value based on the time PostgreSQL -requires to fully initialize in your environment. +The startup probe ensures that a PostgreSQL instance, whether a primary or +standby, has fully started according to `pg_isready`. +While the startup probe is running, the liveness and readiness probes remain +disabled. Following Kubernetes standards, if the startup probe fails, the +kubelet will terminate the container, which will then be restarted. + +The startup probe provided by CloudNativePG is configurable via the +parameter `.spec.startDelay`, which specifies the maximum time, in seconds, +allowed for the startup probe to succeed. At a minimum, the probe requires +`pg_isready` to return `0` or `1`. + +By default, the `startDelay` is set to `3600` seconds. It is recommended to +adjust this setting based on the time PostgreSQL needs to fully initialize in +your specific environment. !!! Warning Setting `.spec.startDelay` too low can cause the liveness probe to activate @@ -71,9 +81,14 @@ spec: ### Liveness Probe -The liveness probe begins after the startup probe succeeds and is responsible -for detecting if the PostgreSQL instance has entered a broken state that -requires a restart of the pod. +The liveness probe begins after the startup probe successfully completes. 
Its +primary role is to ensure the PostgreSQL instance—whether primary or standby—is +operating correctly. This is achieved using the `pg_isready` utility. Both exit +codes `0` (indicating the server is accepting connections) and `1` (indicating +the server is rejecting connections, such as during startup or a smart +shutdown) are treated as valid outcomes. +Following Kubernetes standards, if the liveness probe fails, the +kubelet will terminate the container, which will then be restarted. The amount of time before a Pod is classified as not alive is configurable via the `.spec.livenessProbeTimeout` parameter. @@ -123,8 +138,12 @@ spec: ### Readiness Probe -The readiness probe determines when a pod running a PostgreSQL instance is -prepared to accept traffic and serve requests. +The readiness probe begins once the startup probe has successfully completed. +Its purpose is to check whether the PostgreSQL instance is ready to accept +traffic and serve requests. +For streaming replicas, it also requires that they have connected to the source +at least once. Following Kubernetes standards, if the readiness probe fails, +the pod will be marked unready and will not receive traffic from any services. CloudNativePG uses the following default configuration for the readiness probe: diff --git a/docs/src/replication.md b/docs/src/replication.md index fbc37595bb..4c10899d1d 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -375,7 +375,7 @@ spec: ``` ANY 1 ("foo-2","foo-3","foo-1") ``` - + At this point no write operations will be allowed until at least one of the standbys is available again. @@ -390,6 +390,12 @@ attempt to replicate WAL records to the designated number of synchronous standbys, but write operations will continue even if fewer than the requested number of standbys are available. +!!! Important + Make sure you have a clear understanding of what *ready/available* means + for a replica and set your expectations accordingly. By default, a replica is + considered ready when it has successfully connected to the source at least + once. + This setting balances data safety with availability, enabling applications to continue writing during temporary standby unavailability—hence, it’s also known as *self-healing mode*. @@ -485,7 +491,7 @@ ANY q (pod1, pod2, ...) Where: -- `q` is an integer automatically calculated by the operator to be: +- `q` is an integer automatically calculated by the operator to be: `1 <= minSyncReplicas <= q <= maxSyncReplicas <= readyReplicas` - `pod1, pod2, ...` is the list of all PostgreSQL pods in the cluster From 0a4729b17610da8b12a5c4a15a6b79e2b376fd52 Mon Sep 17 00:00:00 2001 From: Jonathan Battiato Date: Fri, 24 Jan 2025 16:01:31 +0100 Subject: [PATCH 311/836] chore(test): add preflight check for operator in CD workflow (#5690) This patch adds the preflight check for operator in the `continuous-delivery.yaml` workflow. The operator check is a requirement to run properly on an OLM environment. Closes #5642 Signed-off-by: Jonathan Battiato Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. 
--- .github/workflows/continuous-delivery.yml | 39 +++++++++++++++++++++++ Makefile | 3 ++ 2 files changed, 42 insertions(+) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 7eb762fd35..f86f5172c1 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -258,6 +258,7 @@ jobs: author_email: ${{ steps.build-meta.outputs.author_email }} controller_img: ${{ env.CONTROLLER_IMG }} controller_img_ubi8: ${{ env.CONTROLLER_IMG_UBI8 }} + index_img: ${{ env.INDEX_IMG }} bundle_img: ${{ env.BUNDLE_IMG }} catalog_img: ${{ env.CATALOG_IMG }} steps: @@ -433,6 +434,7 @@ jobs: echo "CONTROLLER_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG}" >> $GITHUB_ENV echo "CONTROLLER_IMG_UBI8=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV echo "BUNDLE_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:bundle-${TAG}" >> $GITHUB_ENV + echo "INDEX_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:index-${TAG}" >> $GITHUB_ENV echo "CATALOG_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:catalog-${TAG}" >> $GITHUB_ENV - name: Generate manifest for operator deployment @@ -1970,6 +1972,7 @@ jobs: env: CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi8 }} BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} + INDEX_IMG: ${{ needs.buildx.outputs.index_img }} CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }} run: | make olm-catalog @@ -1985,6 +1988,42 @@ jobs: run: | envsubst < hack/install-config.yaml.template > hack/install-config.yaml openshift-install create cluster --dir hack/ --log-level warn + - + name: Install operator-sdk + run: | + make operator-sdk + - + name: Install preflight + run: | + make preflight + - + name: Create Secret + run: | + export KUBECONFIG=$(pwd)/hack/auth/kubeconfig + oc create ns cloudnative-pg + oc -n cloudnative-pg create secret generic cnpg-pull-secret \ + --from-file=.dockerconfigjson=$HOME/.docker/config.json \ + --type=kubernetes.io/dockerconfigjson + - + name: Run preflight operator test + env: + BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} + PFLT_INDEXIMAGE: ${{ needs.buildx.outputs.index_img }} + PFLT_SCORECARD_WAIT_TIME: "1200" + PFLT_ARTIFACTS: "preflight_operator_results" + run: | + PATH=$(pwd)/bin/:${PATH} \ + KUBECONFIG=$(pwd)/hack/auth/kubeconfig \ + bin/preflight check operator ${BUNDLE_IMG} \ + --docker-config $HOME/.docker/config.json --loglevel trace + - + name: Check preflight operator results + run: | + PASS=`jq -r .passed preflight_operator_results/results.json` + if [[ "$PASS" == "false" ]] + then + exit 1 + fi - name: Run E2E tests if: (always() && !cancelled()) diff --git a/Makefile b/Makefile index c71f2e7f95..de3d0d7ab2 100644 --- a/Makefile +++ b/Makefile @@ -27,6 +27,7 @@ endif endif CATALOG_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:catalog-/') BUNDLE_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:bundle-/') +INDEX_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:index-/') COMMIT := $(shell git rev-parse --short HEAD || echo unknown) DATE := $(shell git log -1 --pretty=format:'%ad' --date short) @@ -170,6 +171,8 @@ olm-catalog: olm-bundle opm ## Build and push the index image for OLM Catalog - Image: ${BUNDLE_IMG}" | envsubst > cloudnative-pg-operator-template.yaml $(OPM) alpha render-template semver -o yaml < cloudnative-pg-operator-template.yaml > catalog/catalog.yaml ;\ $(OPM) validate catalog/ ;\ + $(OPM) index add --mode semver --container-tool docker --bundles "${BUNDLE_IMG}" --tag "${INDEX_IMG}" ;\ + docker push ${INDEX_IMG} ;\ 
DOCKER_BUILDKIT=1 docker build --push -f catalog.Dockerfile -t ${CATALOG_IMG} . ;\ echo -e "apiVersion: operators.coreos.com/v1alpha1\n\ kind: CatalogSource\n\ From 53b2647da9e44c1c44d48a38e5cd1f1e0a289abe Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 24 Jan 2025 17:50:31 +0100 Subject: [PATCH 312/836] fix: consistent threshold calculation for probes (#6656) In version 1.25.0, we introduced an inconsistent behavior in determining the default value of the standard probe knobs when a stanza under `.spec.probes` is defined (#6266). This patch rectifies that behavior by allowing users to override any of the settings, including `failureThreshold`. When `failureThreshold` is not specified in the startup probe, its value is calculated by dividing `.spec.startupDelay` by `periodSeconds` (which defaults to 10 and is now overridable). The same principle applies to the liveness probe with the `.spec.livenessProbeTimeout` option. Closes: #6655 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Gabriele Bartolini --- api/v1/cluster_funcs.go | 24 +++++++++++++----- api/v1/cluster_funcs_test.go | 10 +++++++- docs/src/instance_manager.md | 48 +++++++++++++++++++----------------- pkg/specs/pods.go | 47 +++++++++++++++-------------------- pkg/specs/pods_test.go | 16 +++--------- 5 files changed, 77 insertions(+), 68 deletions(-) diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 132656c4db..6fa6eba800 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -1457,10 +1457,22 @@ func (p *Probe) ApplyInto(k8sProbe *corev1.Probe) { return } - k8sProbe.InitialDelaySeconds = p.InitialDelaySeconds - k8sProbe.TimeoutSeconds = p.TimeoutSeconds - k8sProbe.PeriodSeconds = p.PeriodSeconds - k8sProbe.SuccessThreshold = p.SuccessThreshold - k8sProbe.FailureThreshold = p.FailureThreshold - k8sProbe.TerminationGracePeriodSeconds = p.TerminationGracePeriodSeconds + if p.InitialDelaySeconds != 0 { + k8sProbe.InitialDelaySeconds = p.InitialDelaySeconds + } + if p.TimeoutSeconds != 0 { + k8sProbe.TimeoutSeconds = p.TimeoutSeconds + } + if p.PeriodSeconds != 0 { + k8sProbe.PeriodSeconds = p.PeriodSeconds + } + if p.SuccessThreshold != 0 { + k8sProbe.SuccessThreshold = p.SuccessThreshold + } + if p.FailureThreshold != 0 { + k8sProbe.FailureThreshold = p.FailureThreshold + } + if p.TerminationGracePeriodSeconds != nil { + k8sProbe.TerminationGracePeriodSeconds = p.TerminationGracePeriodSeconds + } } diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index d0126362f5..fd2d93e5c1 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -1723,6 +1723,14 @@ var _ = Describe("Probes configuration", func() { Expect(configuredProbe.PeriodSeconds).To(Equal(config.PeriodSeconds)) Expect(configuredProbe.SuccessThreshold).To(Equal(config.SuccessThreshold)) Expect(configuredProbe.FailureThreshold).To(Equal(config.FailureThreshold)) - Expect(configuredProbe.TerminationGracePeriodSeconds).To(BeNil()) + Expect(*configuredProbe.TerminationGracePeriodSeconds).To(BeEquivalentTo(23)) + }) + + It("should not overwrite any field", func() { + config := &Probe{} + configuredProbe := originalProbe.DeepCopy() + config.ApplyInto(configuredProbe) + Expect(originalProbe).To(BeEquivalentTo(*configuredProbe), + "configured probe should not be modified with zero values") }) }) diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index c1335e67cc..df01927359 100644 
--- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -51,23 +51,25 @@ successThreshold: 1 timeoutSeconds: 5 ``` -Here, `FAILURE_THRESHOLD` is calculated as `startDelay` divided by -`periodSeconds`. +The `failureThreshold` value is automatically calculated by dividing +`startDelay` by `periodSeconds`. -If the default behavior based on `startDelay` is not suitable for your use -case, you can take full control of the startup probe by specifying custom -parameters in the `.spec.probes.startup` stanza. Note that defining this stanza -will override the default behavior, including the use of `startDelay`. +You can customize any of the probe settings in the `.spec.probes.startup` +section of your configuration. !!! Warning - Ensure that any custom probe settings are aligned with your cluster’s - operational requirements to prevent unintended disruptions. + Be sure that any custom probe settings are tailored to your cluster's + operational requirements to avoid unintended disruptions. !!! Info - For detailed information about probe configuration, refer to the - [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). + For more details on probe configuration, refer to the + [probe API documentation](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). + +If you manually specify `.spec.probes.startup.failureThreshold`, it will +override the default behavior and disable the automatic use of `startDelay`. -For example, the following configuration bypasses `startDelay` entirely: +For example, the following configuration explicitly sets custom probe +parameters, bypassing `startDelay`: ```yaml # ... snip @@ -103,28 +105,30 @@ successThreshold: 1 timeoutSeconds: 5 ``` -Here, `FAILURE_THRESHOLD` is calculated as `livenessProbeTimeout` divided by -`periodSeconds`. +The `failureThreshold` value is automatically calculated by dividing +`livenessProbeTimeout` by `periodSeconds`. By default, `.spec.livenessProbeTimeout` is set to `30` seconds. This means the liveness probe will report a failure if it detects three consecutive probe failures, with a 10-second interval between each check. -If the default behavior using `livenessProbeTimeout` does not meet your needs, -you can fully customize the liveness probe by defining parameters in the -`.spec.probes.liveness` stanza. Keep in mind that specifying this stanza will -override the default behavior, including the use of `livenessProbeTimeout`. +You can customize any of the probe settings in the `.spec.probes.liveness` +section of your configuration. !!! Warning - Ensure that any custom probe settings are aligned with your cluster’s - operational requirements to prevent unintended disruptions. + Be sure that any custom probe settings are tailored to your cluster's + operational requirements to avoid unintended disruptions. !!! Info For more details on probe configuration, refer to the - [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). + [probe API documentation](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). + +If you manually specify `.spec.probes.liveness.failureThreshold`, it will +override the default behavior and disable the automatic use of +`livenessProbeTimeout`. -For example, the following configuration overrides the default behavior and -bypasses `livenessProbeTimeout`: +For example, the following configuration explicitly sets custom probe +parameters, bypassing `livenessProbeTimeout`: ```yaml # ... 
snip diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index 3fe970313b..b20b704f32 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -201,9 +201,8 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable // This is the default startup probe, and can be overridden // the user configuration in cluster.spec.probes.startup StartupProbe: &corev1.Probe{ - FailureThreshold: getStartupProbeFailureThreshold(cluster.GetMaxStartDelay()), - PeriodSeconds: StartupProbePeriod, - TimeoutSeconds: 5, + PeriodSeconds: StartupProbePeriod, + TimeoutSeconds: 5, ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ Path: url.PathHealth, @@ -275,22 +274,25 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable addManagerLoggingOptions(cluster, &containers[0]) - // if user customizes the liveness probe timeout, we need to adjust the failure threshold - addLivenessProbeFailureThreshold(cluster, &containers[0]) - // use the custom probe configuration if provided ensureCustomProbesConfiguration(&cluster, &containers[0]) - return containers -} + // ensure a proper threshold is set + if containers[0].StartupProbe.FailureThreshold == 0 { + containers[0].StartupProbe.FailureThreshold = getFailureThreshold( + cluster.GetMaxStartDelay(), + containers[0].StartupProbe.PeriodSeconds, + ) + } -// addLivenessProbeFailureThreshold adjusts the liveness probe failure threshold -// based on the `spec.livenessProbeTimeout` value -func addLivenessProbeFailureThreshold(cluster apiv1.Cluster, container *corev1.Container) { - if cluster.Spec.LivenessProbeTimeout != nil { - timeout := *cluster.Spec.LivenessProbeTimeout - container.LivenessProbe.FailureThreshold = getLivenessProbeFailureThreshold(timeout) + if cluster.Spec.LivenessProbeTimeout != nil && containers[0].LivenessProbe.FailureThreshold == 0 { + containers[0].LivenessProbe.FailureThreshold = getFailureThreshold( + *cluster.Spec.LivenessProbeTimeout, + containers[0].LivenessProbe.PeriodSeconds, + ) } + + return containers } // ensureCustomProbesConfiguration applies the custom probe configuration @@ -308,22 +310,13 @@ func ensureCustomProbesConfiguration(cluster *apiv1.Cluster, container *corev1.C cluster.Spec.Probes.Startup.ApplyInto(container.StartupProbe) } -// getStartupProbeFailureThreshold get the startup probe failure threshold +// getFailureThreshold get the startup probe failure threshold // FAILURE_THRESHOLD = ceil(startDelay / periodSeconds) and minimum value is 1 -func getStartupProbeFailureThreshold(startupDelay int32) int32 { - if startupDelay <= StartupProbePeriod { - return 1 - } - return int32(math.Ceil(float64(startupDelay) / float64(StartupProbePeriod))) -} - -// getLivenessProbeFailureThreshold get the liveness probe failure threshold -// FAILURE_THRESHOLD = ceil(livenessTimeout / periodSeconds) and minimum value is 1 -func getLivenessProbeFailureThreshold(livenessTimeout int32) int32 { - if livenessTimeout <= LivenessProbePeriod { +func getFailureThreshold(startupDelay, period int32) int32 { + if startupDelay <= period { return 1 } - return int32(math.Ceil(float64(livenessTimeout) / float64(LivenessProbePeriod))) + return int32(math.Ceil(float64(startupDelay) / float64(period))) } // CreateAffinitySection creates the affinity sections for Pods, given the configuration diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go index 17f9494d15..ff4a9c48f8 100644 --- a/pkg/specs/pods_test.go +++ b/pkg/specs/pods_test.go @@ -917,20 +917,12 @@ var _ = Describe("PodSpec drift 
detection", func() { var _ = Describe("Compute startup probe failure threshold", func() { It("should take the minimum value 1", func() { - Expect(getStartupProbeFailureThreshold(5)).To(BeNumerically("==", 1)) + Expect(getFailureThreshold(5, StartupProbePeriod)).To(BeNumerically("==", 1)) + Expect(getFailureThreshold(5, LivenessProbePeriod)).To(BeNumerically("==", 1)) }) It("should take the value from 'startDelay / periodSeconds'", func() { - Expect(getStartupProbeFailureThreshold(109)).To(BeNumerically("==", 11)) - }) -}) - -var _ = Describe("Compute liveness probe failure threshold", func() { - It("should take the minimum value 1", func() { - Expect(getLivenessProbeFailureThreshold(5)).To(BeNumerically("==", 1)) - }) - - It("should take the value from 'startDelay / periodSeconds'", func() { - Expect(getLivenessProbeFailureThreshold(31)).To(BeNumerically("==", 4)) + Expect(getFailureThreshold(109, StartupProbePeriod)).To(BeNumerically("==", 11)) + Expect(getFailureThreshold(31, LivenessProbePeriod)).To(BeNumerically("==", 4)) }) }) From 1fb7202cfeb089e0572e22e83b289c089d8dc758 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Sat, 25 Jan 2025 23:33:28 +0100 Subject: [PATCH 313/836] fix(ci): make sure to run only on created comments (#6666) The action we use doesn't trigger and error when users delete a comment, now we make sure that the event comes with the action created until the GitHub action fix the issue. Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/continuous-delivery.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f86f5172c1..3f8299f62b 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -81,6 +81,7 @@ jobs: check_commenter: if: | github.event_name == 'issue_comment' && + github.event.action == 'created' && github.event.issue.pull_request && startsWith(github.event.comment.body, '/test') name: Retrieve command From a06f18096420624b40856910cf8f7326812b1aa2 Mon Sep 17 00:00:00 2001 From: Yurii Vlasov Date: Mon, 27 Jan 2025 11:08:00 +0200 Subject: [PATCH 314/836] fix(import): skip role import if no roles are specified (#6646) Closes #6639 Signed-off-by: Yurii Vlasov --- pkg/management/postgres/logicalimport/monolith.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/management/postgres/logicalimport/monolith.go b/pkg/management/postgres/logicalimport/monolith.go index c63d787e91..5d95d7561c 100644 --- a/pkg/management/postgres/logicalimport/monolith.go +++ b/pkg/management/postgres/logicalimport/monolith.go @@ -35,12 +35,13 @@ func Monolith( contextLogger := log.FromContext(ctx) contextLogger.Info("starting monolith clone process") - if err := cloneRoles(ctx, cluster, destination, origin); err != nil { - return err - } - - if err := cloneRoleInheritance(ctx, destination, origin); err != nil { - return err + if len(cluster.Spec.Bootstrap.InitDB.Import.Roles) > 0 { + if err := cloneRoles(ctx, cluster, destination, origin); err != nil { + return err + } + if err := cloneRoleInheritance(ctx, destination, origin); err != nil { + return err + } } ds := databaseSnapshotter{cluster: cluster} From 7fe4331db8d526eca386c1fdbd443f806634cc4e Mon Sep 17 00:00:00 2001 From: Aelxander Date: Mon, 27 Jan 2025 14:38:02 +0100 Subject: [PATCH 315/836] docs(fix): correct `dataDurability` setting in preferred section example (#6635) Update the documentation to use the correct `dataDurability` setting 
in the preferred section example. Signed-off-by: Aelxander --- docs/src/replication.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/replication.md b/docs/src/replication.md index 4c10899d1d..fbe01b2d58 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -422,7 +422,7 @@ spec: synchronous: method: any number: 2 - dataDurability: required + dataDurability: preferred ``` 1. Initial state. The content of `synchronous_standby_names` is: From 617de79db9642dfc1961ef02f591b951e0df8a85 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Tue, 28 Jan 2025 10:34:37 +0100 Subject: [PATCH 316/836] docs: describe `tcp_syn_retries` behavior (#6673) Add documentation for the replication connection behavior in relation to the `tcp_syn_retries` setting. Signed-off-by: Francesco Canovai Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 1 + docs/src/troubleshooting.md | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index bb773940fd..b80faf7d54 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1131,6 +1131,7 @@ readthedocs readyInstances reconciler reconciliationLoop +reconnection recoverability recoveredCluster recoveryTarget diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index aa67c8be17..6003f2ac96 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -794,3 +794,21 @@ API. Please check your networking. Another possible cause is when you have sidecar injection configured. Sidecars such as Istio may make the network temporarily unavailable during startup. If you have sidecar injection enabled, retry with injection disabled. + +### Replicas take over two minutes to reconnect after a failover + +When the primary instance fails, the operator promotes the most advanced +standby to the primary role. Other standby instances then attempt to reconnect +to the `-rw` service for replication. However, during this reconnection +process, `kube-proxy` may not yet have updated its routing information. +As a result, the initial `SYN` packet sent by the standby instances can fail +to reach the intended destination. + +On Linux systems, the default value for the `tcp_syn_retries` kernel parameter +is set to 6. This configuration means the system will retry a failed connection +for approximately 127 seconds before giving up. This extended retry period can +significantly delay the reconnection process. For more details, consult the +[tcp_syn_retries documentation](https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt). + +Altering this behavior will require changing the `tcp_syn_retries` +parameter on the host node. From cf64c5826af7b3335cdf25b162c8ecfb249efba6 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 29 Jan 2025 12:15:18 +0100 Subject: [PATCH 317/836] test(e2e): enable race detector in the E2e tests (#6634) This patch enables the Go built-in race condition detector when running the E2e tests. Closes #6633 Signed-off-by: Jonathan Gonzalez V. 
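For illustration, a data race of the kind this detector reports looks like the following. This is a minimal standalone sketch, not code from this patch:

```go
package main

import "fmt"

func main() {
	counter := 0
	done := make(chan struct{})

	go func() {
		counter++ // unsynchronized write from a second goroutine
		close(done)
	}()

	counter++ // concurrent write from the main goroutine: a data race
	<-done

	fmt.Println(counter)
}
```

Built or run with `-race` (as the new `manager-race` goreleaser id and `build-race` Makefile targets below do for the operator binaries), such a program prints a `WARNING: DATA RACE` report at runtime instead of failing silently.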
--- .github/workflows/continuous-integration.yml | 2 +- .goreleaser.yml | 19 +++++++++++++++++++ Makefile | 9 +++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index d5c39b2e61..bdd2c33d3c 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -30,7 +30,7 @@ env: BUILD_PUSH_CACHE_FROM: "" BUILD_PUSH_CACHE_TO: "" BUILD_PLUGIN_RELEASE_ARGS: "build --skip=validate --clean --id kubectl-cnpg --timeout 60m" - BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager" + BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager-race" REPOSITORY_OWNER: "cloudnative-pg" REGISTRY: "ghcr.io" REGISTRY_USER: ${{ github.actor }} diff --git a/.goreleaser.yml b/.goreleaser.yml index bfa792b814..55a38dcd61 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -33,6 +33,25 @@ builds: - amd64 - arm64 +- id: manager-race + binary: manager/manager_{{ .Arch }} + main: cmd/manager/main.go + no_unique_dist_dir: true + gcflags: + - all=-trimpath={{.Env.GOPATH}};{{.Env.PWD}} + ldflags: + - -race + - -s + - -w + - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion={{.Env.VERSION}} + - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit={{.Env.COMMIT}} + - -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate={{.Env.DATE}} + goos: + - linux + goarch: + - amd64 + - arm64 + - id: kubectl-cnpg binary: kubectl-cnpg main: cmd/kubectl-cnpg/main.go diff --git a/Makefile b/Makefile index de3d0d7ab2..607ea89f00 100644 --- a/Makefile +++ b/Makefile @@ -128,6 +128,15 @@ build-manager: generate fmt vet ## Build manager binary. build-plugin: generate fmt vet ## Build plugin binary. go build -o bin/kubectl-cnpg -ldflags ${LDFLAGS} ./cmd/kubectl-cnpg +build-race: generate fmt vet build-manager-race build-plugin-race ## Build the binaries adding the -race option. + +build-manager-race: generate fmt vet ## Build manager binary with -race option. + go build -race -o bin/manager -ldflags ${LDFLAGS} ./cmd/manager + +build-plugin-race: generate fmt vet ## Build plugin binary. + go build -race -o bin/kubectl-cnpg -ldflags ${LDFLAGS} ./cmd/kubectl-cnpg + + run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config. go run ./cmd/manager From 762c282c47493abb521f2e38ba21061090ca7303 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 29 Jan 2025 15:12:16 +0100 Subject: [PATCH 318/836] fix: add support for PgBouncer 1.24 (#6630) PgBouncer 1.24 introduces new metrics as per the release notes at https://github.com/pgbouncer/pgbouncer/releases/tag/pgbouncer_1_24_0 This commit adds support for the following metrics: - `total_bind_count` - `total_client_parse_count` - `total_server_parse_count` - `avg_bind_count` (corrected typo) - `avg_client_parse_count` - `avg_server_parse_count` Closes #6566 Signed-off-by: Jonathan Gonzalez V. 
Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- docs/src/connection_pooling.md | 90 +++++----- .../pgbouncer/metricsserver/pools.go | 43 ++++- .../pgbouncer/metricsserver/stats.go | 159 +++++++++++++++--- pkg/specs/pgbouncer/deployments.go | 2 +- 4 files changed, 222 insertions(+), 72 deletions(-) diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md index 0ac9d50bfe..3073f3b3f9 100644 --- a/docs/src/connection_pooling.md +++ b/docs/src/connection_pooling.md @@ -421,165 +421,165 @@ This example shows the output for `cnpg_pgbouncer` metrics: ```text # HELP cnpg_pgbouncer_collection_duration_seconds Collection time duration in seconds # TYPE cnpg_pgbouncer_collection_duration_seconds gauge -cnpg_pgbouncer_collection_duration_seconds{collector="Collect.up"} 0.002443168 - +cnpg_pgbouncer_collection_duration_seconds{collector="Collect.up"} 0.002338805 +# HELP cnpg_pgbouncer_collection_errors_total Total errors occurred accessing PostgreSQL for metrics. +# TYPE cnpg_pgbouncer_collection_errors_total counter +cnpg_pgbouncer_collection_errors_total{collector="sql: Scan error on column index 16, name \"load_balance_hosts\": converting NULL to int is unsupported"} 5 # HELP cnpg_pgbouncer_collections_total Total number of times PostgreSQL was accessed for metrics. # TYPE cnpg_pgbouncer_collections_total counter -cnpg_pgbouncer_collections_total 1 - +cnpg_pgbouncer_collections_total 5 # HELP cnpg_pgbouncer_last_collection_error 1 if the last collection ended with error, 0 otherwise. # TYPE cnpg_pgbouncer_last_collection_error gauge cnpg_pgbouncer_last_collection_error 0 - # HELP cnpg_pgbouncer_lists_databases Count of databases. # TYPE cnpg_pgbouncer_lists_databases gauge cnpg_pgbouncer_lists_databases 1 - # HELP cnpg_pgbouncer_lists_dns_names Count of DNS names in the cache. # TYPE cnpg_pgbouncer_lists_dns_names gauge cnpg_pgbouncer_lists_dns_names 0 - # HELP cnpg_pgbouncer_lists_dns_pending Not used. # TYPE cnpg_pgbouncer_lists_dns_pending gauge cnpg_pgbouncer_lists_dns_pending 0 - # HELP cnpg_pgbouncer_lists_dns_queries Count of in-flight DNS queries. # TYPE cnpg_pgbouncer_lists_dns_queries gauge cnpg_pgbouncer_lists_dns_queries 0 - # HELP cnpg_pgbouncer_lists_dns_zones Count of DNS zones in the cache. # TYPE cnpg_pgbouncer_lists_dns_zones gauge cnpg_pgbouncer_lists_dns_zones 0 - # HELP cnpg_pgbouncer_lists_free_clients Count of free clients. # TYPE cnpg_pgbouncer_lists_free_clients gauge cnpg_pgbouncer_lists_free_clients 49 - # HELP cnpg_pgbouncer_lists_free_servers Count of free servers. # TYPE cnpg_pgbouncer_lists_free_servers gauge cnpg_pgbouncer_lists_free_servers 0 - # HELP cnpg_pgbouncer_lists_login_clients Count of clients in login state. # TYPE cnpg_pgbouncer_lists_login_clients gauge cnpg_pgbouncer_lists_login_clients 0 - # HELP cnpg_pgbouncer_lists_pools Count of pools. # TYPE cnpg_pgbouncer_lists_pools gauge cnpg_pgbouncer_lists_pools 1 - # HELP cnpg_pgbouncer_lists_used_clients Count of used clients. # TYPE cnpg_pgbouncer_lists_used_clients gauge cnpg_pgbouncer_lists_used_clients 1 - # HELP cnpg_pgbouncer_lists_used_servers Count of used servers. # TYPE cnpg_pgbouncer_lists_used_servers gauge cnpg_pgbouncer_lists_used_servers 0 - # HELP cnpg_pgbouncer_lists_users Count of users. # TYPE cnpg_pgbouncer_lists_users gauge cnpg_pgbouncer_lists_users 2 - # HELP cnpg_pgbouncer_pools_cl_active Client connections that are linked to server connection and can process queries. 
# TYPE cnpg_pgbouncer_pools_cl_active gauge cnpg_pgbouncer_pools_cl_active{database="pgbouncer",user="pgbouncer"} 1 - +# HELP cnpg_pgbouncer_pools_cl_active_cancel_req Client connections that have forwarded query cancellations to the server and are waiting for the server response. +# TYPE cnpg_pgbouncer_pools_cl_active_cancel_req gauge +cnpg_pgbouncer_pools_cl_active_cancel_req{database="pgbouncer",user="pgbouncer"} 0 # HELP cnpg_pgbouncer_pools_cl_cancel_req Client connections that have not forwarded query cancellations to the server yet. # TYPE cnpg_pgbouncer_pools_cl_cancel_req gauge cnpg_pgbouncer_pools_cl_cancel_req{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_cl_waiting Client connections that have sent queries but have not yet got a server connection. # TYPE cnpg_pgbouncer_pools_cl_waiting gauge cnpg_pgbouncer_pools_cl_waiting{database="pgbouncer",user="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_pools_cl_waiting_cancel_req Client connections that have not forwarded query cancellations to the server yet. +# TYPE cnpg_pgbouncer_pools_cl_waiting_cancel_req gauge +cnpg_pgbouncer_pools_cl_waiting_cancel_req{database="pgbouncer",user="pgbouncer"} 0 +# HELP cnpg_pgbouncer_pools_load_balance_hosts Number of hosts not load balancing between hosts +# TYPE cnpg_pgbouncer_pools_load_balance_hosts gauge +cnpg_pgbouncer_pools_load_balance_hosts{database="pgbouncer",user="pgbouncer"} 0 # HELP cnpg_pgbouncer_pools_maxwait How long the first (oldest) client in the queue has waited, in seconds. If this starts increasing, then the current pool of servers does not handle requests quickly enough. The reason may be either an overloaded server or just too small of a pool_size setting. # TYPE cnpg_pgbouncer_pools_maxwait gauge cnpg_pgbouncer_pools_maxwait{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_maxwait_us Microsecond part of the maximum waiting time. # TYPE cnpg_pgbouncer_pools_maxwait_us gauge cnpg_pgbouncer_pools_maxwait_us{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_pool_mode The pooling mode in use. 1 for session, 2 for transaction, 3 for statement, -1 if unknown # TYPE cnpg_pgbouncer_pools_pool_mode gauge cnpg_pgbouncer_pools_pool_mode{database="pgbouncer",user="pgbouncer"} 3 - # HELP cnpg_pgbouncer_pools_sv_active Server connections that are linked to a client. # TYPE cnpg_pgbouncer_pools_sv_active gauge cnpg_pgbouncer_pools_sv_active{database="pgbouncer",user="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_pools_sv_active_cancel Server connections that are currently forwarding a cancel request +# TYPE cnpg_pgbouncer_pools_sv_active_cancel gauge +cnpg_pgbouncer_pools_sv_active_cancel{database="pgbouncer",user="pgbouncer"} 0 # HELP cnpg_pgbouncer_pools_sv_idle Server connections that are unused and immediately usable for client queries. # TYPE cnpg_pgbouncer_pools_sv_idle gauge cnpg_pgbouncer_pools_sv_idle{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_sv_login Server connections currently in the process of logging in. # TYPE cnpg_pgbouncer_pools_sv_login gauge cnpg_pgbouncer_pools_sv_login{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_sv_tested Server connections that are currently running either server_reset_query or server_check_query. 
# TYPE cnpg_pgbouncer_pools_sv_tested gauge cnpg_pgbouncer_pools_sv_tested{database="pgbouncer",user="pgbouncer"} 0 - # HELP cnpg_pgbouncer_pools_sv_used Server connections that have been idle for more than server_check_delay, so they need server_check_query to run on them before they can be used again. # TYPE cnpg_pgbouncer_pools_sv_used gauge cnpg_pgbouncer_pools_sv_used{database="pgbouncer",user="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_pools_sv_wait_cancels Servers that normally could become idle, but are waiting to do so until all in-flight cancel requests have completed that were sent to cancel a query on this server. +# TYPE cnpg_pgbouncer_pools_sv_wait_cancels gauge +cnpg_pgbouncer_pools_sv_wait_cancels{database="pgbouncer",user="pgbouncer"} 0 +# HELP cnpg_pgbouncer_stats_avg_bind_count Average number of prepared statements readied for execution by clients and forwarded to PostgreSQL by pgbouncer. +# TYPE cnpg_pgbouncer_stats_avg_bind_count gauge +cnpg_pgbouncer_stats_avg_bind_count{database="pgbouncer"} 0 +# HELP cnpg_pgbouncer_stats_avg_client_parse_count Average number of prepared statements created by clients. +# TYPE cnpg_pgbouncer_stats_avg_client_parse_count gauge +cnpg_pgbouncer_stats_avg_client_parse_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_avg_query_count Average queries per second in last stat period. # TYPE cnpg_pgbouncer_stats_avg_query_count gauge -cnpg_pgbouncer_stats_avg_query_count{database="pgbouncer"} 1 - +cnpg_pgbouncer_stats_avg_query_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_avg_query_time Average query duration, in microseconds. # TYPE cnpg_pgbouncer_stats_avg_query_time gauge cnpg_pgbouncer_stats_avg_query_time{database="pgbouncer"} 0 - # HELP cnpg_pgbouncer_stats_avg_recv Average received (from clients) bytes per second. # TYPE cnpg_pgbouncer_stats_avg_recv gauge cnpg_pgbouncer_stats_avg_recv{database="pgbouncer"} 0 - # HELP cnpg_pgbouncer_stats_avg_sent Average sent (to clients) bytes per second. # TYPE cnpg_pgbouncer_stats_avg_sent gauge cnpg_pgbouncer_stats_avg_sent{database="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_stats_avg_server_parse_count Average number of prepared statements created by pgbouncer on a server. +# TYPE cnpg_pgbouncer_stats_avg_server_parse_count gauge +cnpg_pgbouncer_stats_avg_server_parse_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_avg_wait_time Time spent by clients waiting for a server, in microseconds (average per second). # TYPE cnpg_pgbouncer_stats_avg_wait_time gauge cnpg_pgbouncer_stats_avg_wait_time{database="pgbouncer"} 0 - # HELP cnpg_pgbouncer_stats_avg_xact_count Average transactions per second in last stat period. # TYPE cnpg_pgbouncer_stats_avg_xact_count gauge -cnpg_pgbouncer_stats_avg_xact_count{database="pgbouncer"} 1 - +cnpg_pgbouncer_stats_avg_xact_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_avg_xact_time Average transaction duration, in microseconds. # TYPE cnpg_pgbouncer_stats_avg_xact_time gauge cnpg_pgbouncer_stats_avg_xact_time{database="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_stats_total_bind_count Total number of prepared statements readied for execution by clients and forwarded to PostgreSQL by pgbouncer +# TYPE cnpg_pgbouncer_stats_total_bind_count gauge +cnpg_pgbouncer_stats_total_bind_count{database="pgbouncer"} 0 +# HELP cnpg_pgbouncer_stats_total_client_parse_count Total number of prepared statements created by clients. 
+# TYPE cnpg_pgbouncer_stats_total_client_parse_count gauge +cnpg_pgbouncer_stats_total_client_parse_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_total_query_count Total number of SQL queries pooled by pgbouncer. # TYPE cnpg_pgbouncer_stats_total_query_count gauge -cnpg_pgbouncer_stats_total_query_count{database="pgbouncer"} 3 - +cnpg_pgbouncer_stats_total_query_count{database="pgbouncer"} 15 # HELP cnpg_pgbouncer_stats_total_query_time Total number of microseconds spent by pgbouncer when actively connected to PostgreSQL, executing queries. # TYPE cnpg_pgbouncer_stats_total_query_time gauge cnpg_pgbouncer_stats_total_query_time{database="pgbouncer"} 0 - # HELP cnpg_pgbouncer_stats_total_received Total volume in bytes of network traffic received by pgbouncer. # TYPE cnpg_pgbouncer_stats_total_received gauge cnpg_pgbouncer_stats_total_received{database="pgbouncer"} 0 - # HELP cnpg_pgbouncer_stats_total_sent Total volume in bytes of network traffic sent by pgbouncer. # TYPE cnpg_pgbouncer_stats_total_sent gauge cnpg_pgbouncer_stats_total_sent{database="pgbouncer"} 0 - +# HELP cnpg_pgbouncer_stats_total_server_parse_count Total number of prepared statements created by pgbouncer on a server. +# TYPE cnpg_pgbouncer_stats_total_server_parse_count gauge +cnpg_pgbouncer_stats_total_server_parse_count{database="pgbouncer"} 0 # HELP cnpg_pgbouncer_stats_total_wait_time Time spent by clients waiting for a server, in microseconds. # TYPE cnpg_pgbouncer_stats_total_wait_time gauge cnpg_pgbouncer_stats_total_wait_time{database="pgbouncer"} 0 - # HELP cnpg_pgbouncer_stats_total_xact_count Total number of SQL transactions pooled by pgbouncer. # TYPE cnpg_pgbouncer_stats_total_xact_count gauge -cnpg_pgbouncer_stats_total_xact_count{database="pgbouncer"} 3 - +cnpg_pgbouncer_stats_total_xact_count{database="pgbouncer"} 15 # HELP cnpg_pgbouncer_stats_total_xact_time Total number of microseconds spent by pgbouncer when connected to PostgreSQL in a transaction, either idle in transaction or executing queries. # TYPE cnpg_pgbouncer_stats_total_xact_time gauge cnpg_pgbouncer_stats_total_xact_time{database="pgbouncer"} 0 ``` +!!! Info + For a better understanding of the metrics please refer to the PgBouncer documentation. + As for clusters, a specific pooler can be monitored using the [Prometheus operator's](https://github.com/prometheus-operator/prometheus-operator) resource [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.47.1/Documentation/api.md#podmonitor). diff --git a/pkg/management/pgbouncer/metricsserver/pools.go b/pkg/management/pgbouncer/metricsserver/pools.go index b6d95b799d..0c7ee4ae41 100644 --- a/pkg/management/pgbouncer/metricsserver/pools.go +++ b/pkg/management/pgbouncer/metricsserver/pools.go @@ -39,7 +39,8 @@ type ShowPoolsMetrics struct { SvLogin, MaxWait, MaxWaitUs, - PoolMode *prometheus.GaugeVec + PoolMode, + LoadBalanceHosts *prometheus.GaugeVec } // Describe produces the description for all the contained Metrics @@ -180,6 +181,12 @@ func NewShowPoolsMetrics(subsystem string) *ShowPoolsMetrics { Name: "pool_mode", Help: "The pooling mode in use. 
1 for session, 2 for transaction, 3 for statement, -1 if unknown", }, []string{"database", "user"}), + LoadBalanceHosts: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "load_balance_hosts", + Help: "Number of hosts not load balancing between hosts", + }, []string{"database", "user"}), } } @@ -233,6 +240,10 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { svActiveCancel int svBeingCanceled int ) + // PGBouncer 1.24.0 or above + var ( + loadBalanceHosts int + ) cols, err := rows.Columns() if err != nil { @@ -242,7 +253,11 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { return } for rows.Next() { - const poolsColumnsPgBouncer1180 = 16 + const ( + poolsColumnsPgBouncer1180 = 16 + poolsColumnsPgBouncer1240 = 17 + ) + switch len(cols) { case poolsColumnsPgBouncer1180: if err = rows.Scan(&database, &user, @@ -265,6 +280,28 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() } + case poolsColumnsPgBouncer1240: + if err = rows.Scan(&database, &user, + &clActive, + &clWaiting, + &clActiveCancelReq, + &clWaitingCancelReq, + &svActive, + &svActiveCancel, + &svBeingCanceled, + &svIdle, + &svUsed, + &svTested, + &svLogin, + &maxWait, + &maxWaitUs, + &poolMode, + &loadBalanceHosts, + ); err != nil { + contextLogger.Error(err, "Error while executing SHOW POOLS") + e.Metrics.Error.Set(1) + e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() + } default: if err = rows.Scan(&database, &user, &clActive, @@ -299,6 +336,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.ShowPools.MaxWait.WithLabelValues(database, user).Set(float64(maxWait)) e.Metrics.ShowPools.MaxWaitUs.WithLabelValues(database, user).Set(float64(maxWaitUs)) e.Metrics.ShowPools.PoolMode.WithLabelValues(database, user).Set(float64(poolModeToInt(poolMode))) + e.Metrics.ShowPools.LoadBalanceHosts.WithLabelValues(database, user).Set(float64(loadBalanceHosts)) } e.Metrics.ShowPools.ClActive.Collect(ch) @@ -316,6 +354,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.ShowPools.MaxWait.Collect(ch) e.Metrics.ShowPools.MaxWaitUs.Collect(ch) e.Metrics.ShowPools.PoolMode.Collect(ch) + e.Metrics.ShowPools.LoadBalanceHosts.Collect(ch) if err = rows.Err(); err != nil { e.Metrics.Error.Set(1) diff --git a/pkg/management/pgbouncer/metricsserver/stats.go b/pkg/management/pgbouncer/metricsserver/stats.go index 4bad1c4075..1001699f79 100644 --- a/pkg/management/pgbouncer/metricsserver/stats.go +++ b/pkg/management/pgbouncer/metricsserver/stats.go @@ -25,7 +25,10 @@ import ( // ShowStatsMetrics contains all the SHOW STATS Metrics type ShowStatsMetrics struct { - TotalServerAssigCount, + TotalBindCount, + TotalClientParseCount, + TotalServerAssignCount, + TotalServerParseCount, TotalXactCount, TotalQueryCount, TotalReceived, @@ -33,7 +36,10 @@ type ShowStatsMetrics struct { TotalXactTime, TotalQueryTime, TotalWaitTime, - AvgServerAssigCount, + AvgBindCount, + AvgClientParseCount, + AvgServerAssignCount, + AvgServerParseCount, AvgXactCount, AvgQueryCount, AvgRecv, @@ -45,7 +51,10 @@ type ShowStatsMetrics struct { // Describe produces the description for all the contained Metrics func (r *ShowStatsMetrics) Describe(ch chan<- *prometheus.Desc) { - r.TotalServerAssigCount.Describe(ch) + r.TotalBindCount.Describe(ch) + 
r.TotalClientParseCount.Describe(ch) + r.TotalServerAssignCount.Describe(ch) + r.TotalServerParseCount.Describe(ch) r.TotalXactCount.Describe(ch) r.TotalQueryCount.Describe(ch) r.TotalReceived.Describe(ch) @@ -53,7 +62,10 @@ func (r *ShowStatsMetrics) Describe(ch chan<- *prometheus.Desc) { r.TotalXactTime.Describe(ch) r.TotalQueryTime.Describe(ch) r.TotalWaitTime.Describe(ch) - r.AvgServerAssigCount.Describe(ch) + r.AvgBindCount.Describe(ch) + r.AvgClientParseCount.Describe(ch) + r.AvgServerAssignCount.Describe(ch) + r.AvgServerParseCount.Describe(ch) r.AvgXactCount.Describe(ch) r.AvgQueryCount.Describe(ch) r.AvgRecv.Describe(ch) @@ -65,7 +77,10 @@ // Reset resets all the contained Metrics func (r *ShowStatsMetrics) Reset() { - r.AvgServerAssigCount.Reset() + r.TotalBindCount.Reset() + r.TotalClientParseCount.Reset() + r.TotalServerAssignCount.Reset() + r.TotalServerParseCount.Reset() r.TotalXactCount.Reset() r.TotalQueryCount.Reset() r.TotalReceived.Reset() @@ -73,7 +88,10 @@ func (r *ShowStatsMetrics) Reset() { r.TotalXactTime.Reset() r.TotalQueryTime.Reset() r.TotalWaitTime.Reset() - r.AvgServerAssigCount.Reset() + r.AvgBindCount.Reset() + r.AvgClientParseCount.Reset() + r.AvgServerAssignCount.Reset() + r.AvgServerParseCount.Reset() r.AvgXactCount.Reset() r.AvgQueryCount.Reset() r.AvgRecv.Reset() @@ -87,12 +105,31 @@ func (r *ShowStatsMetrics) Reset() { func NewShowStatsMetrics(subsystem string) *ShowStatsMetrics { subsystem += "_stats" return &ShowStatsMetrics{ - TotalServerAssigCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + TotalBindCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "total_bind_count", + Help: "Total number of prepared statements readied for execution by clients and forwarded to " + + "PostgreSQL by pgbouncer", + }, []string{"database"}), + TotalClientParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "total_client_parse_count", + Help: "Total number of prepared statements created by clients.", + }, []string{"database"}), + TotalServerAssignCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "total_server_assignment_count", Help: "Total time a server was assigned to a client.", }, []string{"database"}), + TotalServerParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "total_server_parse_count", + Help: "Total number of prepared statements created by pgbouncer on a server.", + }, []string{"database"}), TotalXactCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, @@ -137,13 +174,32 @@ func NewShowStatsMetrics(subsystem string) *ShowStatsMetrics { Name: "total_wait_time", Help: "Time spent by clients waiting for a server, in microseconds.", }, []string{"database"}), - AvgServerAssigCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + AvgBindCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "avg_bind_count", + Help: "Average number of prepared statements readied for execution by clients and forwarded to " + + "PostgreSQL by pgbouncer.", + }, []string{"database"}), + AvgClientParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: 
"avg_client_parse_count", + Help: "Average number of prepared statements created by clients.", + }, []string{"database"}), + AvgServerAssignCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "avg_server_assignment_count", Help: "Average number of times a server was assigned to a client per second in " + "the last stat period.", }, []string{"database"}), + AvgServerParseCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: PrometheusNamespace, + Subsystem: subsystem, + Name: "avg_server_parse_count", + Help: "Average number of prepared statements created by pgbouncer on a server.", + }, []string{"database"}), AvgXactCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, @@ -230,10 +286,19 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { // PGBouncer >= 1.23.0 var ( - totalServerAssigCount, - avgServerAssigCount int + totalServerAssignCount, + avgServerAssignCount int ) + // PGBouncer >= 1.24.0 + var ( + totalClientParseCount, + totalServerParseCount, + totalBindCount, + avgClientParseCount, + avgServerParseCount, + avgBindCount int + ) statCols, err := rows.Columns() if err != nil { contextLogger.Error(err, "Error while reading SHOW STATS") @@ -244,7 +309,8 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { for rows.Next() { var err error - if statColsCount < 16 { + switch { + case statColsCount < 16: err = rows.Scan(&database, &totalXactCount, &totalQueryCount, @@ -261,9 +327,9 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { &avgQueryTime, &avgWaitTime, ) - } else { + case statColsCount == 17: err = rows.Scan(&database, - &totalServerAssigCount, + &totalServerAssignCount, &totalXactCount, &totalQueryCount, &totalReceived, @@ -271,7 +337,7 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { &totalXactTime, &totalQueryTime, &totalWaitTime, - &avgServerAssigCount, + &avgServerAssignCount, &avgXactCount, &avgQueryCount, &avgRecv, @@ -280,6 +346,31 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { &avgQueryTime, &avgWaitTime, ) + default: + err = rows.Scan(&database, + &totalServerAssignCount, + &totalXactCount, + &totalQueryCount, + &totalReceived, + &totalSent, + &totalXactTime, + &totalQueryTime, + &totalWaitTime, + &totalClientParseCount, + &totalServerParseCount, + &totalBindCount, + &avgServerAssignCount, + &avgXactCount, + &avgQueryCount, + &avgRecv, + &avgSent, + &avgXactTime, + &avgQueryTime, + &avgWaitTime, + &avgClientParseCount, + &avgServerParseCount, + &avgBindCount, + ) } if err != nil { contextLogger.Error(err, "Error while executing SHOW STATS") @@ -302,19 +393,27 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.ShowStats.AvgQueryTime.WithLabelValues(database).Set(float64(avgQueryTime)) e.Metrics.ShowStats.AvgWaitTime.WithLabelValues(database).Set(float64(avgWaitTime)) - if statColsCount >= 16 { - e.Metrics.ShowStats.TotalServerAssigCount.WithLabelValues(database).Set( - float64(totalServerAssigCount)) - e.Metrics.ShowStats.AvgServerAssigCount.WithLabelValues(database).Set( - float64(avgServerAssigCount)) + if statColsCount == 16 { + e.Metrics.ShowStats.TotalServerAssignCount.WithLabelValues(database).Set( + float64(totalServerAssignCount)) + e.Metrics.ShowStats.AvgServerAssignCount.WithLabelValues(database).Set( + float64(avgServerAssignCount)) + } else { + 
e.Metrics.ShowStats.TotalClientParseCount.WithLabelValues(database).Set( + float64(totalClientParseCount)) + e.Metrics.ShowStats.TotalServerParseCount.WithLabelValues(database).Set( + float64(totalServerParseCount)) + e.Metrics.ShowStats.TotalBindCount.WithLabelValues(database).Set( + float64(totalBindCount)) + e.Metrics.ShowStats.AvgClientParseCount.WithLabelValues(database).Set( + float64(avgClientParseCount)) + e.Metrics.ShowStats.AvgServerParseCount.WithLabelValues(database).Set( + float64(avgServerParseCount)) + e.Metrics.ShowStats.AvgBindCount.WithLabelValues(database).Set( + float64(avgBindCount)) } } - if statColsCount >= 16 { - e.Metrics.ShowStats.TotalServerAssigCount.Collect(ch) - e.Metrics.ShowStats.AvgServerAssigCount.Collect(ch) - } - e.Metrics.ShowStats.TotalXactCount.Collect(ch) e.Metrics.ShowStats.TotalQueryCount.Collect(ch) e.Metrics.ShowStats.TotalReceived.Collect(ch) @@ -330,6 +429,18 @@ func (e *Exporter) collectShowStats(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.ShowStats.AvgQueryTime.Collect(ch) e.Metrics.ShowStats.AvgWaitTime.Collect(ch) + if statColsCount == 16 { + e.Metrics.ShowStats.TotalServerAssignCount.Collect(ch) + e.Metrics.ShowStats.AvgServerAssignCount.Collect(ch) + } else { + e.Metrics.ShowStats.TotalClientParseCount.Collect(ch) + e.Metrics.ShowStats.TotalServerParseCount.Collect(ch) + e.Metrics.ShowStats.TotalBindCount.Collect(ch) + e.Metrics.ShowStats.AvgClientParseCount.Collect(ch) + e.Metrics.ShowStats.AvgServerParseCount.Collect(ch) + e.Metrics.ShowStats.AvgBindCount.Collect(ch) + } + if err = rows.Err(); err != nil { e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues(err.Error()).Inc() diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go index 2f5d639502..78553d186a 100644 --- a/pkg/specs/pgbouncer/deployments.go +++ b/pkg/specs/pgbouncer/deployments.go @@ -39,7 +39,7 @@ import ( const ( // DefaultPgbouncerImage is the name of the pgbouncer image used by default - DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.23.0" + DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.24.0" ) // Deployment create the deployment of pgbouncer, given From 7fc99bf8bb2b24926f5e3692f6de29547a9fdbe1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 09:50:13 +0100 Subject: [PATCH 319/836] chore(deps): update dependency go to v1.23.5 (main) (#6679) --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 7ec79071cc..66a1bf84cd 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/cloudnative-pg/cloudnative-pg go 1.23.0 -toolchain go1.23.4 +toolchain go1.23.5 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 From aad17635719942e8344907c99298acecee862b72 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 30 Jan 2025 14:56:16 +0100 Subject: [PATCH 320/836] fix(instance-manager): use pgdata content to discover PostgreSQL version (#6659) Use a precise check of the data directory to determine the PostgreSQL major version during PostgreSQL configuration generation. 
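For context, the `PG_VERSION` file at the top of a data directory contains just the major version number, so the new check amounts to reading one small file. An illustrative session (the `$PGDATA` path and the version shown are assumptions, not taken from this patch):

```console
$ cat "$PGDATA/PG_VERSION"
17
```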
Closes #6658 Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Signed-off-by: Jaime Silvela Co-authored-by: Armando Ruocco Co-authored-by: Jaime Silvela --- .../cmd/manager/instance/run/lifecycle/run.go | 9 ++++--- pkg/management/postgres/configuration.go | 22 +++++++-------- pkg/management/postgres/configuration_test.go | 27 ++++++++++--------- pkg/management/postgres/utils/version.go | 21 +++++++++------ 4 files changed, 44 insertions(+), 35 deletions(-) diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go index 6fde183430..d5d8036d5d 100644 --- a/internal/cmd/manager/instance/run/lifecycle/run.go +++ b/internal/cmd/manager/instance/run/lifecycle/run.go @@ -22,6 +22,7 @@ import ( "fmt" "sync" + "github.com/blang/semver" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/pgx/v5" @@ -149,7 +150,7 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan return nil } - majorVersion, err := postgresutils.GetMajorVersion(instance.PgData) + pgVersion, err := postgresutils.GetPgdataVersion(instance.PgData) if err != nil { return fmt.Errorf("while getting major version: %w", err) } @@ -179,7 +180,7 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan return err } - err = configurePgRewindPrivileges(majorVersion, hasSuperuser, tx) + err = configurePgRewindPrivileges(pgVersion, hasSuperuser, tx) if err != nil { _ = tx.Rollback() return err @@ -227,10 +228,10 @@ func configureStreamingReplicaUser(tx *sql.Tx) (bool, error) { } // configurePgRewindPrivileges ensures that the StreamingReplicationUser has enough rights to execute pg_rewind -func configurePgRewindPrivileges(majorVersion int, hasSuperuser bool, tx *sql.Tx) error { +func configurePgRewindPrivileges(pgVersion semver.Version, hasSuperuser bool, tx *sql.Tx) error { // We need the superuser bit for the streaming-replication user since pg_rewind in PostgreSQL <= 10 // will require it. 
- if majorVersion <= 10 { + if pgVersion.Major <= 10 { if !hasSuperuser { _, err := tx.Exec(fmt.Sprintf( "ALTER USER %v SUPERUSER", diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 1cc414ea92..3c0b29e399 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -27,10 +27,12 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" + postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres/replication" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -65,11 +67,12 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster( cluster *apiv1.Cluster, preserveUserSettings bool, ) (bool, error) { - postgresConfiguration, sha256, err := createPostgresqlConfiguration(cluster, preserveUserSettings) + pgVersion, err := postgresutils.GetPgdataVersion(instance.PgData) if err != nil { return false, err } + postgresConfiguration, sha256 := createPostgresqlConfiguration(cluster, preserveUserSettings, pgVersion.Major) postgresConfigurationChanged, err := InstallPgDataFileContent( ctx, instance.PgData, @@ -376,16 +379,14 @@ func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (bool // createPostgresqlConfiguration creates the PostgreSQL configuration to be // used for this cluster and return it and its sha256 checksum -func createPostgresqlConfiguration(cluster *apiv1.Cluster, preserveUserSettings bool) (string, string, error) { - // Extract the PostgreSQL major version - fromVersion, err := cluster.GetPostgresqlVersion() - if err != nil { - return "", "", err - } - +func createPostgresqlConfiguration( + cluster *apiv1.Cluster, + preserveUserSettings bool, + majorVersion uint64, +) (string, string) { info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - Version: fromVersion, + Version: version.New(majorVersion, 0), UserSettings: cluster.Spec.PostgresConfiguration.Parameters, IncludingSharedPreloadLibraries: true, AdditionalSharedPreloadLibraries: cluster.Spec.PostgresConfiguration.AdditionalLibraries, @@ -417,8 +418,7 @@ func createPostgresqlConfiguration(cluster *apiv1.Cluster, preserveUserSettings info.RecoveryMinApplyDelay = cluster.Spec.ReplicaCluster.MinApplyDelay.Duration } - conf, sha256 := postgres.CreatePostgresqlConfFile(postgres.CreatePostgresqlConfiguration(info)) - return conf, sha256, nil + return postgres.CreatePostgresqlConfFile(postgres.CreatePostgresqlConfiguration(info)) } // configurePostgresForImport configures Postgres to be optimized for the firt import diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go index f4a9d2f20d..2e3b211397 100644 --- a/pkg/management/postgres/configuration_test.go +++ b/pkg/management/postgres/configuration_test.go @@ -21,11 +21,14 @@ import ( "strings" "time" + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" apiv1 
"github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -116,6 +119,9 @@ var _ = Describe("testing the building of the ldap config string", func() { }) var _ = Describe("Test building of the list of temporary tablespaces", func() { + defaultVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(err).ToNot(HaveOccurred()) + clusterWithoutTablespaces := apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "configurationTest", @@ -166,25 +172,25 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() { } It("doesn't set temp_tablespaces if there are no declared tablespaces", func() { - config, _, err := createPostgresqlConfiguration(&clusterWithoutTablespaces, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _ := createPostgresqlConfiguration(&clusterWithoutTablespaces, true, defaultVersion.Major()) Expect(config).ToNot(ContainSubstring("temp_tablespaces")) }) It("doesn't set temp_tablespaces if there are no temporary tablespaces", func() { - config, _, err := createPostgresqlConfiguration(&clusterWithoutTemporaryTablespaces, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _ := createPostgresqlConfiguration(&clusterWithoutTemporaryTablespaces, true, defaultVersion.Major()) Expect(config).ToNot(ContainSubstring("temp_tablespaces")) }) It("sets temp_tablespaces when there are temporary tablespaces", func() { - config, _, err := createPostgresqlConfiguration(&clusterWithTemporaryTablespaces, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _ := createPostgresqlConfiguration(&clusterWithTemporaryTablespaces, true, defaultVersion.Major()) Expect(config).To(ContainSubstring("temp_tablespaces = 'other_temporary_tablespace,temporary_tablespace'")) }) }) var _ = Describe("recovery_min_apply_delay", func() { + defaultVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(err).ToNot(HaveOccurred()) + primaryCluster := apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "configurationTest", @@ -233,24 +239,21 @@ var _ = Describe("recovery_min_apply_delay", func() { It("do not set recovery_min_apply_delay in primary clusters", func() { Expect(primaryCluster.IsReplica()).To(BeFalse()) - config, _, err := createPostgresqlConfiguration(&primaryCluster, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _ := createPostgresqlConfiguration(&primaryCluster, true, defaultVersion.Major()) Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay")) }) It("set recovery_min_apply_delay in replica clusters when set", func() { Expect(replicaCluster.IsReplica()).To(BeTrue()) - config, _, err := createPostgresqlConfiguration(&replicaCluster, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _ := createPostgresqlConfiguration(&replicaCluster, true, defaultVersion.Major()) Expect(config).To(ContainSubstring("recovery_min_apply_delay = '3600s'")) }) It("do not set recovery_min_apply_delay in replica clusters when not set", func() { Expect(replicaClusterWithNoDelay.IsReplica()).To(BeTrue()) - config, _, err := createPostgresqlConfiguration(&replicaClusterWithNoDelay, true) - Expect(err).ShouldNot(HaveOccurred()) + config, _ := createPostgresqlConfiguration(&replicaClusterWithNoDelay, true, defaultVersion.Major()) Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay")) }) }) diff --git a/pkg/management/postgres/utils/version.go b/pkg/management/postgres/utils/version.go index 
23f99846e0..348a132dc3 100644 --- a/pkg/management/postgres/utils/version.go +++ b/pkg/management/postgres/utils/version.go @@ -38,25 +38,30 @@ func GetPgVersion(db *sql.DB) (*semver.Version, error) { } func parseVersionNum(versionNum string) (*semver.Version, error) { - versionInt, err := strconv.Atoi(versionNum) + versionInt, err := strconv.ParseUint(versionNum, 10, 64) if err != nil { return nil, err } return &semver.Version{ - Major: uint64(versionInt / 10000), //nolint:gosec - Minor: uint64((versionInt / 100) % 100), //nolint:gosec - Patch: uint64(versionInt % 100), //nolint:gosec + Major: versionInt / 10000, + Minor: (versionInt / 100) % 100, + Patch: versionInt % 100, }, nil } -// GetMajorVersion read the PG_VERSION file in the data directory +// GetPgdataVersion read the PG_VERSION file in the data directory // returning the major version of the database -func GetMajorVersion(pgData string) (int, error) { +func GetPgdataVersion(pgData string) (semver.Version, error) { content, err := os.ReadFile(path.Join(pgData, "PG_VERSION")) // #nosec if err != nil { - return 0, err + return semver.Version{}, err } - return strconv.Atoi(strings.TrimSpace(string(content))) + major, err := strconv.ParseUint(strings.TrimSpace(string(content)), 10, 64) + if err != nil { + return semver.Version{}, err + } + + return semver.Version{Major: major}, nil } From 0f373b985f1adb3bdeef44d93410b8dc34044650 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 30 Jan 2025 16:40:15 +0100 Subject: [PATCH 321/836] fix: initialize `override.conf` before invoking `pg_rewind` (#6670) This patch ensures `override.conf` is correctly initialized before invoking `pg_rewind`, resolving an issue where configuration misalignment could cause failures during the demotion of a former primary. Closes #6669 Signed-off-by: Marco Nenciarini --- internal/management/controller/instance_startup.go | 9 ++------- pkg/management/postgres/instance.go | 10 +++++++--- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index c5c8ffcf12..a2a593cd39 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -33,6 +33,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) @@ -237,7 +238,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context return err } - pgVersion, err := cluster.GetPostgresqlVersion() + pgVersion, err := utils.GetPgdataVersion(r.instance.PgData) if err != nil { return err } @@ -262,12 +263,6 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context return fmt.Errorf("while ensuring all WAL files are archived: %w", err) } - // pg_rewind could require a clean shutdown of the old primary to - // work. Unfortunately, if the old primary is already clean starting - // it up may make it advance in respect to the new one. - // The only way to check if we really need to start it up before - // invoking pg_rewind is to try using pg_rewind and, on failures, - // retrying after having started up the instance. 
err = r.instance.Rewind(ctx, pgVersion) if err != nil { return fmt.Errorf("while exucuting pg_rewind: %w", err) diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 2a207a63e4..f19fa468c5 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -36,7 +36,6 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/cloudnative-pg/machinery/pkg/postgres/version" "go.uber.org/atomic" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" @@ -999,7 +998,7 @@ func (instance *Instance) removePgControlFileBackup() error { // Rewind uses pg_rewind to align this data directory with the contents of the primary node. // If postgres major version is >= 13, add "--restore-target-wal" option -func (instance *Instance) Rewind(ctx context.Context, postgresVersion version.Data) error { +func (instance *Instance) Rewind(ctx context.Context, postgresVersion semver.Version) error { contextLogger := log.FromContext(ctx) // Signal the liveness probe that we are running pg_rewind before starting postgres @@ -1019,7 +1018,12 @@ func (instance *Instance) Rewind(ctx context.Context, postgresVersion version.Da // As PostgreSQL 13 introduces support of restore from the WAL archive in pg_rewind, // let’s automatically use it, if possible - if postgresVersion.Major() >= 13 { + if postgresVersion.Major >= 13 { + // make sure restore_command is set in override.conf + if _, err := configurePostgresOverrideConfFile(instance.PgData, primaryConnInfo, ""); err != nil { + return err + } + options = append(options, "--restore-target-wal") } From 05087bbfe7ad678d3eb1cd1cc52d6cc7b309665a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 17:29:17 +0100 Subject: [PATCH 322/836] chore(deps): update kindest/node docker tag to v1.32.1 (main) (#6689) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index c28579750c..129bccffff 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.32.0 +KIND_NODE_DEFAULT_VERSION=v1.32.1 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index a6c9c29f9e..dbcfe1f799 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.32.0 +KIND_NODE_DEFAULT_VERSION=v1.32.1 K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 From ff6478a3159d00e132b3dd610717f9e0371301e2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 23:17:34 +0100 Subject: [PATCH 323/836] fix(deps): update kubernetes patches to v0.32.1 (main) (#6711) This PR contains the following updates: https://redirect.github.com/kubernetes/api `v0.32.0` -> `v0.32.1` https://redirect.github.com/kubernetes/apiextensions-apiserver `v0.32.0` -> `v0.32.1` 
https://redirect.github.com/kubernetes/apimachinery `v0.32.0` -> `v0.32.1` https://redirect.github.com/kubernetes/cli-runtime `v0.32.0` -> `v0.32.1` https://redirect.github.com/kubernetes/client-go `v0.32.0` -> `v0.32.1` --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 66a1bf84cd..787645d0ef 100644 --- a/go.mod +++ b/go.mod @@ -40,11 +40,11 @@ require ( golang.org/x/term v0.28.0 google.golang.org/grpc v1.69.4 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.0 - k8s.io/apiextensions-apiserver v0.32.0 - k8s.io/apimachinery v0.32.0 - k8s.io/cli-runtime v0.32.0 - k8s.io/client-go v0.32.0 + k8s.io/api v0.32.1 + k8s.io/apiextensions-apiserver v0.32.1 + k8s.io/apimachinery v0.32.1 + k8s.io/cli-runtime v0.32.1 + k8s.io/client-go v0.32.1 k8s.io/utils v0.0.0-20241210054802-24370beab758 sigs.k8s.io/controller-runtime v0.19.4 sigs.k8s.io/yaml v1.4.0 diff --git a/go.sum b/go.sum index 9f88f10806..dbb1a17380 100644 --- a/go.sum +++ b/go.sum @@ -281,16 +281,16 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= -k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= -k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= -k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= -k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= -k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c= -k8s.io/cli-runtime v0.32.0/go.mod h1:Mai8ht2+esoDRK5hr861KRy6z0zHsSTYttNVJXgP3YQ= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= +k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= +k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/cli-runtime v0.32.1 h1:19nwZPlYGJPUDbhAxDIS2/oydCikvKMHsxroKNGA2mM= +k8s.io/cli-runtime v0.32.1/go.mod h1:NJPbeadVFnV2E7B7vF+FvU09mpwYlZCu8PqjzfuOnkY= +k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= From fcd0b2a05429201e10e5d0059944ade0c2214095 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 10:07:58 +0100 Subject: [PATCH 324/836] fix(deps): update all non-major go dependencies (main) (#6690) This PR contains the following 
updates: https://github.com/evanphx/json-patch `v5.9.0` -> `v5.9.11` https://github.com/goreleaser/goreleaser `v2.5.1` -> `v2.6.1` https://github.com/stern/stern `v1.31.0` -> `v1.32.0` https://github.com/grpc/grpc-go `v1.69.4` -> `v1.70.0` --- Makefile | 2 +- go.mod | 40 ++++++++++++------------ go.sum | 94 +++++++++++++++++++++++++++----------------------------- 3 files changed, 66 insertions(+), 70 deletions(-) diff --git a/Makefile b/Makefile index 607ea89f00..41c6cdf962 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.5.0 CONTROLLER_TOOLS_VERSION ?= v0.16.5 -GORELEASER_VERSION ?= v2.5.1 +GORELEASER_VERSION ?= v2.6.1 SPELLCHECK_VERSION ?= 0.46.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.0 diff --git a/go.mod b/go.mod index 787645d0ef..bd79c41a71 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/cloudnative-pg/cloudnative-pg -go 1.23.0 - -toolchain go1.23.5 +go 1.23.5 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 @@ -14,7 +12,7 @@ require ( github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/evanphx/json-patch/v5 v5.9.0 + github.com/evanphx/json-patch/v5 v5.9.11 github.com/go-logr/logr v1.4.2 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 @@ -32,13 +30,13 @@ require ( github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 github.com/spf13/cobra v1.8.1 - github.com/stern/stern v1.31.0 + github.com/stern/stern v1.32.0 github.com/thoas/go-funk v0.9.3 go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/term v0.28.0 - google.golang.org/grpc v1.69.4 + google.golang.org/grpc v1.70.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.1 k8s.io/apiextensions-apiserver v0.32.1 @@ -51,12 +49,12 @@ require ( ) require ( - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect - github.com/fatih/color v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect @@ -68,7 +66,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect @@ -82,11 +80,11 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // 
indirect github.com/moby/spdystream v0.5.0 // indirect - github.com/moby/term v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect @@ -100,24 +98,24 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - golang.org/x/crypto v0.31.0 // indirect + golang.org/x/crypto v0.32.0 // indirect golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/protobuf v1.36.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/kustomize/api v0.18.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect + sigs.k8s.io/kustomize/api v0.19.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect ) diff --git a/go.sum b/go.sum index dbb1a17380..76ef9bb906 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= @@ -35,10 +35,10 @@ github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtz github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= -github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= 
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -65,8 +65,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -120,19 +120,18 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA= github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= 
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -178,8 +177,8 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stern/stern v1.31.0 h1:kKHVgEmIgqbC6/sFZahUeU9TbxDH+0l3l5/ornLlQLs= -github.com/stern/stern v1.31.0/go.mod h1:BfAeaPQhkMhQPTaFV81pS8YWCBmxg6IBL8fPGalt0qY= +github.com/stern/stern v1.32.0 h1:xNw0CizB7/4CkWpI46cAo8tArDnS14eYKLaaDevEnrM= +github.com/stern/stern v1.32.0/go.mod h1:Nv6yoHcb2E1HvklagJyd4rjoysJM4WxvcGVQtE651Xw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= @@ -196,16 +195,16 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod 
h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -217,8 +216,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -227,10 +226,10 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -240,7 +239,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -250,8 +248,8 @@ golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -264,12 +262,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1:hgh8P4EuoxpsuKMXX/To36nOFD7vixReXgn8lPGnt+o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -293,18 +291,18 @@ k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod 
h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= -sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= -sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= -sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= +sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= +sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= +sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= +sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From c0603fa64ffc01342cb4bad2f518f6c5bfbaa8da Mon Sep 17 00:00:00 2001 From: Itay Grudev Date: Fri, 31 Jan 2025 11:12:06 +0200 Subject: [PATCH 325/836] docs: Typo in Service Management documentation: updateStrategy: replace (#6716) The Service Management documentation incorrectly refers to the `replace` option as `recreate`. Closes #6717 Signed-off-by: Itay Grudev --- docs/src/service_management.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/service_management.md b/docs/src/service_management.md index e39357bd20..b02274e231 100644 --- a/docs/src/service_management.md +++ b/docs/src/service_management.md @@ -82,11 +82,11 @@ field, as it is managed by the operator. The `updateStrategy` field allows you to control how the operator updates a service definition. By default, the operator uses the `patch` strategy, applying changes directly to the service. -Alternatively, the `recreate` strategy deletes the existing service and +Alternatively, the `replace` strategy deletes the existing service and recreates it from the template. !!! Warning - The `recreate` strategy will cause a service disruption with every + The `replace` strategy will cause a service disruption with every change. However, it may be necessary for modifying certain parameters that can only be set during service creation. From 8787a489344c5a8fcfaea386ccc68f95ea9fc4dd Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Fri, 31 Jan 2025 10:27:19 +0100 Subject: [PATCH 326/836] docs: fix formatting of disk-full-failure section (#6676) Fix the rendering issue in the disk-full-failure section of the Instance Manager documentation. The numbered list was incorrectly displayed due to a missing new line. 
Signed-off-by: Pierrick --- docs/src/instance_manager.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index df01927359..30dceab78d 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -264,6 +264,7 @@ That allows a human administrator to address the root cause. In such a case, if supported by the storage class, the quickest course of action is currently to: + 1. Expand the storage size of the full PVC 2. Increase the size in the `Cluster` resource to the same value From 9227612261c7822b444ef1dde70976c86e08d82e Mon Sep 17 00:00:00 2001 From: Josh Earlenbaugh Date: Fri, 31 Jan 2025 04:31:38 -0500 Subject: [PATCH 327/836] docs: fixed typo in Connection Pooling documentation (#6707) This patch fixes an incorrect key reference in the connection pooling documentation by replacing `server_tls_server_tls_protocols` with `server_tls_protocols`. Signed-off-by: Josh Earlenbaugh --- docs/src/connection_pooling.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md index 3073f3b3f9..4b66f0564d 100644 --- a/docs/src/connection_pooling.md +++ b/docs/src/connection_pooling.md @@ -371,7 +371,7 @@ are the ones directly set by PgBouncer. - [`server_reset_query_always`](https://www.pgbouncer.org/config.html#server_reset_query_always) - [`server_round_robin`](https://www.pgbouncer.org/config.html#server_round_robin) - [`server_tls_ciphers`](https://www.pgbouncer.org/config.html#server_tls_ciphers) -- [`server_tls_server_tls_protocols`](https://www.pgbouncer.org/config.html#server_tls_protocols) +- [`server_tls_protocols`](https://www.pgbouncer.org/config.html#server_tls_protocols) - [`stats_period`](https://www.pgbouncer.org/config.html#stats_period) - [`suspend_timeout`](https://www.pgbouncer.org/config.html#suspend_timeout) - [`tcp_defer_accept`](https://www.pgbouncer.org/config.html#tcp_defer_accept) From 52f550f46123375d22b48640c3b2fd43a3d41657 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 31 Jan 2025 10:50:21 +0100 Subject: [PATCH 328/836] docs: fix formatting of reconcilePodSpec annotation (#6706) This patch fixes incorrect formatting of the `reconcilePodSpec` annotation documentation. Signed-off-by: Marco Nenciarini --- docs/src/labels_annotations.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 299758c434..4ab081903d 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -171,17 +171,17 @@ These predefined annotations are managed by CloudNativePG. : Current status of the PVC: `initializing`, `ready`, or `detached`. `cnpg.io/reconcilePodSpec` -: Annotation can be applied to a `Cluster` or `Pooler` to prevent restarts. +: Annotation can be applied to a `Cluster` or `Pooler` to prevent restarts. - When set to `disabled` on a `Cluster`, the operator prevents instances - from restarting due to changes in the PodSpec. This includes changes to: + When set to `disabled` on a `Cluster`, the operator prevents instances + from restarting due to changes in the PodSpec. This includes changes to: - - Topology or affinity - - Scheduler - - Volumes or containers + - Topology or affinity + - Scheduler + - Volumes or containers - When set to `disabled` on a `Pooler`, the operator restricts any modifications - to the deployment specification, except for changes to `spec.instances`. 
+ When set to `disabled` on a `Pooler`, the operator restricts any modifications + to the deployment specification, except for changes to `spec.instances`. `cnpg.io/reconciliationLoop` : When set to `disabled` on a `Cluster`, the operator prevents the From 0a31b921a27eadf156a78d5bce105b3eaec4a619 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:48:50 +0100 Subject: [PATCH 329/836] chore(deps): update operator framework (main) (#6719) --- Makefile | 4 ++-- config/olm-scorecard/patches/basic.config.yaml | 2 +- config/olm-scorecard/patches/olm.config.yaml | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 41c6cdf962..3aad4b4d8d 100644 --- a/Makefile +++ b/Makefile @@ -47,8 +47,8 @@ CONTROLLER_TOOLS_VERSION ?= v0.16.5 GORELEASER_VERSION ?= v2.6.1 SPELLCHECK_VERSION ?= 0.46.0 WOKE_VERSION ?= 0.19.0 -OPERATOR_SDK_VERSION ?= v1.39.0 -OPM_VERSION ?= v1.49.0 +OPERATOR_SDK_VERSION ?= v1.39.1 +OPM_VERSION ?= v1.50.0 PREFLIGHT_VERSION ?= 1.11.1 OPENSHIFT_VERSIONS ?= v4.12-v4.18 ARCH ?= amd64 diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml index b89ce3bf90..b9ec7c6c82 100644 --- a/config/olm-scorecard/patches/basic.config.yaml +++ b/config/olm-scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.39.0 + image: quay.io/operator-framework/scorecard-test:v1.39.1 labels: suite: basic test: basic-check-spec-test diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml index 7eff5c9099..25d83f98f2 100644 --- a/config/olm-scorecard/patches/olm.config.yaml +++ b/config/olm-scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.39.0 + image: quay.io/operator-framework/scorecard-test:v1.39.1 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.39.0 + image: quay.io/operator-framework/scorecard-test:v1.39.1 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.39.0 + image: quay.io/operator-framework/scorecard-test:v1.39.1 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.39.0 + image: quay.io/operator-framework/scorecard-test:v1.39.1 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.39.0 + image: quay.io/operator-framework/scorecard-test:v1.39.1 labels: suite: olm test: olm-status-descriptors-test From 02e4e4dd08f5f5b342e607b6d6ad6cfc36ece4c4 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 31 Jan 2025 12:00:22 +0100 Subject: [PATCH 330/836] feat: support customizable pod patches via annotations (#6323) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch enables precise customization of pod specifications through the `cnpg.io/podPatch` annotation on clusters. 
By applying valid JSON patches, users can fine-tune pod configurations, including areas not directly configurable within the Cluster resource, offering greater flexibility for advanced use cases. **⚠️ WARNING:** This feature may introduce discrepancies between the operator’s expectations and Kubernetes behavior. Use with caution and only as a last resort. Closes #6234 ## Release notes Added support for custom pod patches using the `cnpg.io/podPatch` annotation, enabling user-defined pod adjustments. --------- Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 1 + docs/src/labels_annotations.md | 13 +++ internal/controller/cluster_create.go | 2 +- internal/controller/cluster_upgrade_test.go | 75 ++++++++++----- internal/controller/suite_test.go | 2 +- internal/webhook/v1/cluster_webhook.go | 32 +++++++ internal/webhook/v1/cluster_webhook_test.go | 53 ++++++++++ pkg/specs/pg_pods_test.go | 3 +- pkg/specs/pods.go | 26 ++++- pkg/specs/pods_test.go | 36 +++++++ pkg/utils/labels_annotations.go | 4 + tests/e2e/pod_patch_test.go | 101 ++++++++++++++++++++ 12 files changed, 319 insertions(+), 29 deletions(-) create mode 100644 tests/e2e/pod_patch_test.go diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index b80faf7d54..6ee17eac76 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -661,6 +661,7 @@ configmapkeyselector configmaps configs configurability +congruency conn connectionLimit connectionParameters diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 4ab081903d..9acdbb13a6 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -160,6 +160,19 @@ These predefined annotations are managed by CloudNativePG. `cnpg.io/podEnvHash` : Deprecated, as the `cnpg.io/podSpec` annotation now also contains the pod environment. +`cnpg.io/podPatch` +: Annotation can be applied on a `Cluster` resource. + + When set to JSON-patch formatted patch, the patch will be applied on the instance Pods. + + **⚠️ WARNING:** This feature may introduce discrepancies between the + operator’s expectations and Kubernetes behavior. Use with caution and only as a + last resort. + + **IMPORTANT**: adding or changing this annotation won't trigger a rolling deployment + of the generated Pods. The latter can be triggered manually by the user with + `kubectl cnpg restart`. + `cnpg.io/podSpec` : Snapshot of the `spec` of the pod generated by the operator. This annotation replaces the old, deprecated `cnpg.io/podEnvHash` annotation. 
diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 39029dd44f..2a88deec31 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1443,7 +1443,7 @@ func findInstancePodToCreate( if err != nil { return nil, err } - return specs.PodWithExistingStorage(*cluster, serial), nil + return specs.PodWithExistingStorage(*cluster, serial) } return nil, nil diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index 2037ce1245..a1540166e6 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -57,7 +57,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("will not require a restart for just created Pods", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -71,7 +72,9 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires rollout when running a different image name", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) + pod.Spec.Containers[0].Image = "postgres:13.10" status := postgres.PostgresqlStatus{ Pod: pod, @@ -86,7 +89,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires rollout when a restart annotation has been added to the cluster", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) clusterRestart := cluster clusterRestart.Annotations = make(map[string]string) clusterRestart.Annotations[utils.ClusterRestartAnnotationName] = "now" @@ -110,7 +114,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("should prioritize full rollout over inplace restarts", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -140,7 +145,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires rollout when PostgreSQL needs to be restarted", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -166,7 +172,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires pod rollout if executable does not have a hash", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, PendingRestart: false, @@ -181,7 +188,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("checkPodSpecIsOutdated should not return any error", func() { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) rollout, err := checkPodSpecIsOutdated(pod, &cluster) Expect(rollout.required).To(BeFalse()) Expect(rollout.canBeInPlace).To(BeFalse()) @@ -190,7 +198,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("checks when a rollout is needed for any reason", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, 
err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, PendingRestart: true, @@ -216,7 +225,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { When("the PodSpec annotation is not available", func() { It("should trigger a rollout when the scheduler changes", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" delete(pod.Annotations, utils.PodSpecAnnotationName) @@ -241,7 +251,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" status := postgres.PostgresqlStatus{ @@ -272,7 +283,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { }, } It("should trigger a rollout when the cluster has a Resource changed", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(clusterWithResources, 1) + pod, err := specs.PodWithExistingStorage(clusterWithResources, 1) + Expect(err).ToNot(HaveOccurred()) clusterWithResources.Spec.Resources.Limits["cpu"] = resource.MustParse("3") // was "2" status := postgres.PostgresqlStatus{ @@ -290,7 +302,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should trigger a rollout when the cluster has Resources deleted from spec", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(clusterWithResources, 1) + pod, err := specs.PodWithExistingStorage(clusterWithResources, 1) + Expect(err).ToNot(HaveOccurred()) clusterWithResources.Spec.Resources = corev1.ResourceRequirements{} status := postgres.PostgresqlStatus{ @@ -311,7 +324,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { When("the PodSpec annotation is not available", func() { It("detects when a new custom environment variable is set", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) cluster := cluster.DeepCopy() @@ -341,7 +355,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) status := postgres.PostgresqlStatus{ @@ -365,7 +380,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) status := postgres.PostgresqlStatus{ @@ -388,7 +404,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { When("the podSpec annotation is available", func() { It("detects when a new custom environment variable is set", func(ctx SpecContext) { - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) cluster := cluster.DeepCopy() cluster.Spec.Env = []corev1.EnvVar{ @@ -418,7 +435,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := 
specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -441,7 +459,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -467,7 +486,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { cluster.Spec.ProjectedVolumeTemplate = &corev1.ProjectedVolumeSource{ Sources: []corev1.VolumeProjection{}, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, IsPodReady: true, @@ -484,7 +504,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { cluster.Spec.ProjectedVolumeTemplate = &corev1.ProjectedVolumeSource{ Sources: nil, } - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, IsPodReady: true, @@ -499,7 +520,8 @@ var _ = Describe("Pod upgrade", Ordered, func() { It("should not require rollout if projected volume is nil", func(ctx SpecContext) { cluster.Spec.ProjectedVolumeTemplate = nil - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, IsPodReady: true, @@ -531,7 +553,9 @@ var _ = Describe("Test pod rollout due to topology", func() { TopologySpreadConstraints: []corev1.TopologySpreadConstraint{topology}, }, } - pod = specs.PodWithExistingStorage(*cluster, 1) + var err error + pod, err = specs.PodWithExistingStorage(*cluster, 1) + Expect(err).ToNot(HaveOccurred()) }) When("the original podSpec annotation is available", func() { @@ -596,7 +620,9 @@ var _ = Describe("Test pod rollout due to topology", func() { It("should not require rollout if pod and spec both lack TopologySpreadConstraints", func(ctx SpecContext) { cluster.Spec.TopologySpreadConstraints = nil - pod = specs.PodWithExistingStorage(*cluster, 1) + var err error + pod, err = specs.PodWithExistingStorage(*cluster, 1) + Expect(err).ToNot(HaveOccurred()) Expect(pod.Spec.TopologySpreadConstraints).To(BeNil()) status := postgres.PostgresqlStatus{ @@ -755,7 +781,8 @@ var _ = Describe("Cluster upgrade with podSpec reconciliation disabled", func() It("skips the rollout if the annotation that disables PodSpec reconciliation is set", func(ctx SpecContext) { cluster.ObjectMeta.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" - pod := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" delete(pod.Annotations, utils.PodSpecAnnotationName) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index ae8f8e8638..5f1396d865 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -313,7 +313,7 @@ func generateFakeClusterPods( var pods []corev1.Pod for idx < cluster.Spec.Instances { idx++ - pod := specs.PodWithExistingStorage(*cluster, idx) + pod, _ := specs.PodWithExistingStorage(*cluster, idx) cluster.SetInheritedDataAndOwnership(&pod.ObjectMeta) err := c.Create(context.Background(), pod) diff --git 
a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index fcba1b0621..dc7fd675fb 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -30,6 +30,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/postgres/version" "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/cloudnative-pg/machinery/pkg/types" + jsonpatch "github.com/evanphx/json-patch/v5" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -46,6 +47,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -206,6 +208,7 @@ func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.Error v.validateManagedExtensions, v.validateResources, v.validateHibernationAnnotation, + v.validatePodPatchAnnotation, v.validatePromotionToken, } @@ -2350,3 +2353,32 @@ func (v *ClusterCustomValidator) validateHibernationAnnotation(r *apiv1.Cluster) ), } } + +func (v *ClusterCustomValidator) validatePodPatchAnnotation(r *apiv1.Cluster) field.ErrorList { + jsonPatch, ok := r.Annotations[utils.PodPatchAnnotationName] + if !ok { + return nil + } + + if _, err := jsonpatch.DecodePatch([]byte(jsonPatch)); err != nil { + return field.ErrorList{ + field.Invalid( + field.NewPath("metadata", "annotations", utils.PodPatchAnnotationName), + jsonPatch, + fmt.Sprintf("error decoding JSON patch: %s", err.Error()), + ), + } + } + + if _, err := specs.PodWithExistingStorage(*r, 1); err != nil { + return field.ErrorList{ + field.Invalid( + field.NewPath("metadata", "annotations", utils.PodPatchAnnotationName), + jsonPatch, + fmt.Sprintf("jsonpatch doesn't apply cleanly to the pod: %s", err.Error()), + ), + } + } + + return nil +} diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index faa1f0f0d7..a6828768cc 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -4884,3 +4884,56 @@ var _ = Describe("ServiceTemplate Validation", func() { }) }) }) + +var _ = Describe("validatePodPatchAnnotation", func() { + var v *ClusterCustomValidator + + It("returns nil if the annotation is not present", func() { + cluster := &apiv1.Cluster{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}} + Expect(v.validatePodPatchAnnotation(cluster)).To(BeNil()) + }) + + It("returns an error if decoding the JSON patch fails to decode", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.PodPatchAnnotationName: "invalid-json-patch", + }, + }, + } + + errors := v.validatePodPatchAnnotation(cluster) + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Type).To(Equal(field.ErrorTypeInvalid)) + Expect(errors[0].Field).To(Equal("metadata.annotations." 
+ utils.PodPatchAnnotationName)) + Expect(errors[0].Detail).To(ContainSubstring("error decoding JSON patch")) + }) + + It("returns an error if decoding the JSON patch fails to apply", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/spec/podInvalidSection", "value": "test"}]`, + }, + }, + } + + errors := v.validatePodPatchAnnotation(cluster) + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Type).To(Equal(field.ErrorTypeInvalid)) + Expect(errors[0].Field).To(Equal("metadata.annotations." + utils.PodPatchAnnotationName)) + Expect(errors[0].Detail).To(ContainSubstring("jsonpatch doesn't apply cleanly to the pod")) + }) + + It("returns nil if the JSON patch is decoded successfully", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/metadata/name", "value": "test"}]`, + }, + }, + } + + Expect(v.validatePodPatchAnnotation(cluster)).To(BeNil()) + }) +}) diff --git a/pkg/specs/pg_pods_test.go b/pkg/specs/pg_pods_test.go index f7a5794e1c..4fc732690c 100644 --- a/pkg/specs/pg_pods_test.go +++ b/pkg/specs/pg_pods_test.go @@ -33,7 +33,8 @@ var _ = Describe("Extract the used image name", func() { Namespace: "default", }, } - pod := PodWithExistingStorage(cluster, 1) + pod, err := PodWithExistingStorage(cluster, 1) + Expect(err).ToNot(HaveOccurred()) It("extract the default image name", func() { Expect(GetPostgresImageName(*pod)).To(Equal(configuration.Current.PostgresImageName)) diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index b20b704f32..41482b5dea 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -27,6 +27,7 @@ import ( "slices" "strconv" + jsonpatch "github.com/evanphx/json-patch/v5" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -433,7 +434,7 @@ func CreatePodSecurityContext(seccompProfile *corev1.SeccompProfile, user, group } // PodWithExistingStorage create a new instance with an existing storage -func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod { +func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) (*corev1.Pod, error) { podName := GetInstanceName(cluster.Name, nodeSerial) gracePeriod := int64(cluster.GetMaxStopDelay()) @@ -474,7 +475,28 @@ func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) *corev1.Pod { if utils.IsAnnotationAppArmorPresent(&pod.Spec, cluster.Annotations) { utils.AnnotateAppArmor(&pod.ObjectMeta, &pod.Spec, cluster.Annotations) } - return pod + + if jsonPatch := cluster.Annotations[utils.PodPatchAnnotationName]; jsonPatch != "" { + serializedObject, err := json.Marshal(pod) + if err != nil { + return nil, fmt.Errorf("while serializing pod to JSON: %w", err) + } + patch, err := jsonpatch.DecodePatch([]byte(jsonPatch)) + if err != nil { + return nil, fmt.Errorf("while decoding JSON patch from annotation: %w", err) + } + + serializedObject, err = patch.Apply(serializedObject) + if err != nil { + return nil, fmt.Errorf("while applying JSON patch from annotation: %w", err) + } + + if err = json.Unmarshal(serializedObject, pod); err != nil { + return nil, fmt.Errorf("while deserializing pod to JSON: %w", err) + } + } + + return pod, nil } // GetInstanceName returns a string indicating the instance name diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go index ff4a9c48f8..de99a9a026 100644 --- 
a/pkg/specs/pods_test.go +++ b/pkg/specs/pods_test.go @@ -26,6 +26,7 @@ import ( v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -926,3 +927,38 @@ var _ = Describe("Compute startup probe failure threshold", func() { Expect(getFailureThreshold(31, LivenessProbePeriod)).To(BeNumerically("==", 4)) }) }) + +var _ = Describe("PodWithExistingStorage", func() { + It("applies JSON patch from annotation", func() { + cluster := v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + Annotations: map[string]string{ + utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/spec/containers/0/image", "value": "new-image:latest"}]`, // nolint: lll + }, + }, + } + + pod, err := PodWithExistingStorage(cluster, 1) + Expect(err).NotTo(HaveOccurred()) + Expect(pod).NotTo(BeNil()) + Expect(pod.Spec.Containers[0].Image).To(Equal("new-image:latest")) + }) + + It("returns error if JSON patch is invalid", func() { + cluster := v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + Annotations: map[string]string{ + utils.PodPatchAnnotationName: `invalid-json-patch`, + }, + }, + } + + _, err := PodWithExistingStorage(cluster, 1) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("while decoding JSON patch from annotation")) + }) +}) diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index 187325013f..028ab14c3d 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -233,6 +233,10 @@ const ( // PluginPortAnnotationName is the name of the annotation containing the // port the plugin is listening to PluginPortAnnotationName = MetadataNamespace + "/pluginPort" + + // PodPatchAnnotationName is the name of the annotation containing the + // patch to apply to the pod + PodPatchAnnotationName = MetadataNamespace + "/podPatch" ) type annotationStatus string diff --git a/tests/e2e/pod_patch_test.go b/tests/e2e/pod_patch_test.go new file mode 100644 index 0000000000..3ad5a84eb9 --- /dev/null +++ b/tests/e2e/pod_patch_test.go @@ -0,0 +1,101 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Pod patch", Label(tests.LabelSmoke, tests.LabelBasic), func() { + const ( + sampleFile = fixturesDir + "/base/cluster-storage-class.yaml.template" + clusterName = "postgresql-storage-class" + level = tests.Lowest + ) + + var namespace string + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + }) + + It("use the podPatch annotation to generate Pods", func(_ SpecContext) { + const namespacePrefix = "cluster-patch-e2e" + var err error + + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + AssertCreateCluster(namespace, clusterName, sampleFile, env) + + By("adding the podPatch annotation", func() { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + patchedCluster := cluster.DeepCopy() + + patchedCluster.SetAnnotations(map[string]string{ + utils.PodPatchAnnotationName: ` + [ + { + "op": "add", + "path": "/metadata/annotations/e2e.cnpg.io", + "value": "this-test" + } + ] + `, + }) + err = env.Client.Patch(env.Ctx, patchedCluster, client.MergeFrom(cluster)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("deleting all the Pods", func() { + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + for i := range podList.Items { + err := env.Client.Delete(env.Ctx, &podList.Items[i]) + Expect(err).ToNot(HaveOccurred()) + } + }) + + By("waiting for the new annotation to be applied to the new Pods", func() { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + timeout := 120 + Eventually(func(g Gomega) { + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(podList.Items).To(HaveLen(cluster.Spec.Instances)) + + for _, pod := range podList.Items { + g.Expect(pod.Annotations).To(HaveKeyWithValue("e2e.cnpg.io", "this-test")) + } + }, timeout).Should(Succeed()) + }) + }) +}) From f982fcb719b7f56dddf5163680c76da10fb1c486 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 31 Jan 2025 12:11:06 +0100 Subject: [PATCH 331/836] fix(webhook): return warnings alongside errors in webhook responses (#6579) This patch fixes an issue where webhook responses only included errors, omitting warnings when both were present. 
Closes #6578 Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Quaresima Co-authored-by: Gabriele Quaresima --- internal/webhook/v1/cluster_webhook.go | 9 +++++---- internal/webhook/v1/scheduledbackup_webhook.go | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index dc7fd675fb..4be9f552b5 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -78,7 +78,7 @@ var _ webhook.CustomDefaulter = &ClusterCustomDefaulter{} func (d *ClusterCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { cluster, ok := obj.(*apiv1.Cluster) if !ok { - return fmt.Errorf("expected an Cluster object but got %T", obj) + return fmt.Errorf("expected a Cluster object but got %T", obj) } clusterLog.Info("Defaulting for Cluster", "name", cluster.GetName(), "namespace", cluster.GetNamespace()) @@ -113,7 +113,7 @@ func (v *ClusterCustomValidator) ValidateCreate(_ context.Context, obj runtime.O return allWarnings, nil } - return nil, apierrors.NewInvalid( + return allWarnings, apierrors.NewInvalid( schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Cluster"}, cluster.Name, allErrs) } @@ -142,12 +142,13 @@ func (v *ClusterCustomValidator) ValidateUpdate( v.validate(cluster), v.validateClusterChanges(cluster, oldCluster)..., ) + allWarnings := v.getAdmissionWarnings(cluster) if len(allErrs) == 0 { - return v.getAdmissionWarnings(cluster), nil + return allWarnings, nil } - return nil, apierrors.NewInvalid( + return allWarnings, apierrors.NewInvalid( schema.GroupKind{Group: "cluster.cnpg.io", Kind: "Cluster"}, cluster.Name, allErrs) } diff --git a/internal/webhook/v1/scheduledbackup_webhook.go b/internal/webhook/v1/scheduledbackup_webhook.go index fdf6ccdbf3..4cfeb98cc7 100644 --- a/internal/webhook/v1/scheduledbackup_webhook.go +++ b/internal/webhook/v1/scheduledbackup_webhook.go @@ -98,7 +98,7 @@ func (v *ScheduledBackupCustomValidator) ValidateCreate( return warnings, nil } - return nil, apierrors.NewInvalid( + return warnings, apierrors.NewInvalid( schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "ScheduledBackup"}, scheduledBackup.Name, allErrs) } @@ -120,7 +120,7 @@ func (v *ScheduledBackupCustomValidator) ValidateUpdate( return warnings, nil } - return nil, apierrors.NewInvalid( + return warnings, apierrors.NewInvalid( schema.GroupKind{Group: "scheduledBackup.cnpg.io", Kind: "ScheduledBackup"}, scheduledBackup.Name, allErrs) } From ccf40a135387031bea7d84eb9e32b85810f5d889 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 13:42:14 +0100 Subject: [PATCH 332/836] chore(deps): update module sigs.k8s.io/kustomize/kustomize/v5 to v5.6.0 (main) (#6718) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3aad4b4d8d..82a2d24a20 100644 --- a/Makefile +++ b/Makefile @@ -42,7 +42,7 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") -KUSTOMIZE_VERSION ?= v5.5.0 +KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.16.5 GORELEASER_VERSION ?= v2.6.1 SPELLCHECK_VERSION ?= 0.46.0 From 83aef66ceb4f2b9bee5acd6c3b60308268b32370 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 31 Jan 2025 13:50:24 +0100 Subject: [PATCH 333/836] chore: remove unused code (#6598) This code should have been removed in 
28923eb80b190524c93334e4531259cd5f67e857 Signed-off-by: Marco Nenciarini --- api/v1/cluster_types.go | 11 ----------- api/v1/zz_generated.deepcopy.go | 16 ---------------- 2 files changed, 27 deletions(-) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index cc14768848..a32b66d91e 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1952,17 +1952,6 @@ type AffinityConfiguration struct { AdditionalPodAffinity *corev1.PodAffinity `json:"additionalPodAffinity,omitempty"` } -// RollingUpdateStatus contains the information about an instance which is -// being updated -type RollingUpdateStatus struct { - // The image which we put into the Pod - ImageName string `json:"imageName"` - - // When the update has been started - // +optional - StartedAt metav1.Time `json:"startedAt,omitempty"` -} - // BackupTarget describes the preferred targets for a backup type BackupTarget string diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index b4b9d5b295..c8396cfd7f 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -2523,22 +2523,6 @@ func (in *RoleConfiguration) DeepCopy() *RoleConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateStatus) DeepCopyInto(out *RollingUpdateStatus) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatus. -func (in *RollingUpdateStatus) DeepCopy() *RollingUpdateStatus { - if in == nil { - return nil - } - out := new(RollingUpdateStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SQLRefs) DeepCopyInto(out *SQLRefs) { *out = *in From 130037a3d0ecc57df60997e50a70497626688b33 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 15:05:24 +0100 Subject: [PATCH 334/836] chore(deps): update kubernetes csi (main) (#6697) This PR contains the following updates: https://github.com/kubernetes-csi/external-provisioner `v5.1.0` -> `v5.2.0` https://github.com/rook/rook `v1.16.1` -> `v1.16.2` --- .github/workflows/continuous-delivery.yml | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 3f8299f62b..5ae69682f1 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ env: GOLANG_VERSION: "1.23.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.26.0" - ROOK_VERSION: "v1.16.1" + ROOK_VERSION: "v1.16.2" EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index dbcfe1f799..884f1e3e09 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -28,7 +28,7 @@ KIND_NODE_DEFAULT_VERSION=v1.32.1 K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 -EXTERNAL_PROVISIONER_VERSION=v5.1.0 +EXTERNAL_PROVISIONER_VERSION=v5.2.0 EXTERNAL_RESIZER_VERSION=v1.13.1 EXTERNAL_ATTACHER_VERSION=v4.8.0 K8S_VERSION=${K8S_VERSION-} From 62d48282bdd4c640d1af104b9cf637087148075e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:39:31 +0100 Subject: [PATCH 335/836] fix(deps): update module sigs.k8s.io/controller-runtime to v0.20.1 (main) (#6730) --- go.mod | 3 +-- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index bd79c41a71..86cb8cdfd2 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( k8s.io/cli-runtime v0.32.1 k8s.io/client-go v0.32.1 k8s.io/utils v0.0.0-20241210054802-24370beab758 - sigs.k8s.io/controller-runtime v0.19.4 + sigs.k8s.io/controller-runtime v0.20.1 sigs.k8s.io/yaml v1.4.0 ) @@ -99,7 +99,6 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect golang.org/x/crypto v0.32.0 // indirect - golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.34.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect golang.org/x/sync v0.10.0 // indirect diff --git a/go.sum b/go.sum index 76ef9bb906..3dc9308dce 100644 --- a/go.sum +++ b/go.sum @@ -218,8 +218,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -295,8 +293,8 @@ k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8X k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= -sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= +sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= From 98e27966ddb337afe3769fcb7347f0d7deae889b Mon Sep 17 00:00:00 2001 From: thomasboussekey Date: Tue, 4 Feb 2025 09:34:01 +0100 Subject: [PATCH 336/836] docs: add Mirakl to `ADOPTERS.md` (#6751) Signed-off-by: thomasboussekey Signed-off-by: thomasboussekey Co-authored-by: Gabriele Bartolini --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 4a4601f02f..2936cc88b3 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -60,3 +60,4 @@ This list is sorted in chronological order, based on the submission date. | [Novo Nordisk](https://www.novonordisk.com/) | [scni@novonordisk.com](mailto:scni@novonordisk.com) ([@CasperGN](https://github.com/CasperGN)) | 2024-11-20 | Backing of Grafana UI states for central Observability platform and datastore for our Developer Portal based off Backstage. | | [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. | | [Obmondo](https://obmondo.com) | @Obmondo | 2024-11-25 | At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called [KubeAid](https://kubeaid.io/) to easily manage all PostgreSQL databases across clusters from a centralized interface. | +| [Mirakl](https://www.mirakl.com/) | @ThomasBoussekey | 2025-02-03 | CloudNativePG is our default hosting solution for marketplace instances. With over 300 CloudNativePG clusters managing 8 TB of data, we have developed highly customizable Helm charts that support connection pooling, logical replication, and many other advanced features. | From a725d1f2cc57b09bd3df4c838341ab29b3f63c9b Mon Sep 17 00:00:00 2001 From: solidDoWant Date: Tue, 4 Feb 2025 05:41:34 -0600 Subject: [PATCH 337/836] feat: add kubernetes `client-gen` tool support (#6695) This patch introduces support for the Kubernetes `client-gen` tool, enabling the automated generation of Go clients for all custom resources defined by the operator's CRDs. 
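For illustration only, clientsets generated by client-gen follow a standard
layout, so a consumer could do roughly the following (the import path,
constructor, and group accessor below are assumptions based on client-gen
conventions, not something defined by this patch):

    // Hypothetical usage of the generated typed client.
    cs, err := versioned.NewForConfig(restConfig)
    if err != nil {
        return err
    }
    clusters, err := cs.PostgresqlV1().Clusters("default").
        List(ctx, metav1.ListOptions{})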
Closes #6585 Signed-off-by: Fred Heinecke Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Co-authored-by: Marco Nenciarini Co-authored-by: Armando Ruocco --- api/v1/backup_funcs.go | 4 ++-- api/v1/cluster_funcs.go | 4 ++-- api/v1/clusterimagecatalog_types.go | 1 + api/v1/groupversion_info.go | 9 +++------ internal/cmd/plugin/report/operator_utils.go | 6 +++--- internal/controller/cluster_controller.go | 4 ++-- internal/controller/cluster_create_test.go | 4 ++-- internal/controller/cluster_image.go | 2 +- internal/controller/cluster_restore_test.go | 2 +- internal/controller/pooler_controller.go | 2 +- internal/controller/scheduledbackup_controller.go | 2 +- internal/controller/suite_test.go | 6 +++--- pkg/management/client.go | 2 +- tests/e2e/rolling_update_test.go | 2 +- tests/utils/operator/upgrade.go | 4 ++-- 15 files changed, 26 insertions(+), 28 deletions(-) diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go index c41e09ee12..e7ec0411ba 100644 --- a/api/v1/backup_funcs.go +++ b/api/v1/backup_funcs.go @@ -236,8 +236,8 @@ func (backup *Backup) GetVolumeSnapshotConfiguration( // By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object. func (backup *Backup) EnsureGVKIsPresent() { backup.SetGroupVersionKind(schema.GroupVersionKind{ - Group: GroupVersion.Group, - Version: GroupVersion.Version, + Group: SchemeGroupVersion.Group, + Version: SchemeGroupVersion.Version, Kind: BackupKind, }) } diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 6fa6eba800..432e46ee89 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -1397,8 +1397,8 @@ func (cluster *Cluster) GetRecoverySourcePlugin() *PluginConfiguration { // By setting the GVK, we ensure that components such as the plugins have enough metadata to typecheck the object. func (cluster *Cluster) EnsureGVKIsPresent() { cluster.SetGroupVersionKind(schema.GroupVersionKind{ - Group: GroupVersion.Group, - Version: GroupVersion.Version, + Group: SchemeGroupVersion.Group, + Version: SchemeGroupVersion.Version, Kind: ClusterKind, }) } diff --git a/api/v1/clusterimagecatalog_types.go b/api/v1/clusterimagecatalog_types.go index 850822fbec..7f0a7dc970 100644 --- a/api/v1/clusterimagecatalog_types.go +++ b/api/v1/clusterimagecatalog_types.go @@ -19,6 +19,7 @@ package v1 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient +// +genclient:nonNamespaced // +kubebuilder:object:root=true // +kubebuilder:resource:scope=Cluster // +kubebuilder:storageversion diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go index bb665cb83b..ded686668d 100644 --- a/api/v1/groupversion_info.go +++ b/api/v1/groupversion_info.go @@ -14,9 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package v1 contains API Schema definitions for the postgresql v1 API group -// +kubebuilder:object:generate=true -// +groupName=postgresql.cnpg.io package v1 import ( @@ -51,11 +48,11 @@ const ( ) var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"} + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "postgresql.cnpg.io", Version: "v1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme diff --git a/internal/cmd/plugin/report/operator_utils.go b/internal/cmd/plugin/report/operator_utils.go index e51aea6491..a0876ed4f1 100644 --- a/internal/cmd/plugin/report/operator_utils.go +++ b/internal/cmd/plugin/report/operator_utils.go @@ -44,7 +44,7 @@ func getWebhooks(ctx context.Context, stopRedact bool) ( for _, item := range mutatingWebhookConfigList.Items { for _, webhook := range item.Webhooks { - if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.GroupVersion.Group { + if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.SchemeGroupVersion.Group { mWebhookConfig.Items = append(mWebhookConfig.Items, item) } } @@ -63,7 +63,7 @@ func getWebhooks(ctx context.Context, stopRedact bool) ( for _, item := range validatingWebhookConfigList.Items { for _, webhook := range item.Webhooks { - if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.GroupVersion.Group { + if len(webhook.Rules) > 0 && webhook.Rules[0].APIGroups[0] == apiv1.SchemeGroupVersion.Group { vWebhookConfig.Items = append(vWebhookConfig.Items, item) } } @@ -79,7 +79,7 @@ func getWebhooks(ctx context.Context, stopRedact bool) ( if len(mWebhookConfig.Items) == 0 || len(vWebhookConfig.Items) == 0 { return nil, nil, fmt.Errorf( "can't find the webhooks that targeting resources within the group %s", - apiv1.GroupVersion.Group, + apiv1.SchemeGroupVersion.Group, ) } diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index bc46d91bae..1c3f0c4d68 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -69,7 +69,7 @@ const ( imageCatalogKey = ".spec.imageCatalog.name" ) -var apiGVString = apiv1.GroupVersion.String() +var apiSGVString = apiv1.SchemeGroupVersion.String() // errOldPrimaryDetected occurs when a primary Pod loses connectivity with the // API server and, upon reconnection, attempts to retain its previous primary @@ -1205,7 +1205,7 @@ func IsOwnedByCluster(obj client.Object) (string, bool) { return "", false } - if owner.APIVersion != apiGVString { + if owner.APIVersion != apiSGVString { return "", false } diff --git a/internal/controller/cluster_create_test.go b/internal/controller/cluster_create_test.go index d6aa79bcbf..9d40db537a 100644 --- a/internal/controller/cluster_create_test.go +++ b/internal/controller/cluster_create_test.go @@ -868,7 +868,7 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { cluster := apiv1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: apiv1.ClusterKind, - APIVersion: apiGVString, + APIVersion: apiSGVString, }, ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: namespace}, } @@ -1159,7 +1159,7 @@ var _ = 
Describe("Service Reconciling", func() { cluster = apiv1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: apiv1.ClusterKind, - APIVersion: apiv1.GroupVersion.String(), + APIVersion: apiv1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index 547b610cc9..46f106bd53 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -76,7 +76,7 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C } apiGroup := cluster.Spec.ImageCatalogRef.APIGroup - if apiGroup == nil || *apiGroup != apiv1.GroupVersion.Group { + if apiGroup == nil || *apiGroup != apiv1.SchemeGroupVersion.Group { contextLogger.Info("Unknown catalog group") return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, "Invalid image catalog group") diff --git a/internal/controller/cluster_restore_test.go b/internal/controller/cluster_restore_test.go index cb68fc565b..364f7aa821 100644 --- a/internal/controller/cluster_restore_test.go +++ b/internal/controller/cluster_restore_test.go @@ -483,7 +483,7 @@ var _ = Describe("ensureOrphanServicesAreNotPresent", func() { Namespace: cluster.Namespace, }, } - cluster.TypeMeta = metav1.TypeMeta{Kind: apiv1.ClusterKind, APIVersion: apiv1.GroupVersion.String()} + cluster.TypeMeta = metav1.TypeMeta{Kind: apiv1.ClusterKind, APIVersion: apiv1.SchemeGroupVersion.String()} cluster.SetInheritedDataAndOwnership(&svc.ObjectMeta) mockCli = fake.NewClientBuilder(). WithScheme(k8scheme.BuildWithAllKnownScheme()). diff --git a/internal/controller/pooler_controller.go b/internal/controller/pooler_controller.go index e1d343f35b..63a1651175 100644 --- a/internal/controller/pooler_controller.go +++ b/internal/controller/pooler_controller.go @@ -157,7 +157,7 @@ func isOwnedByPoolerKind(obj client.Object) (string, bool) { return "", false } - if owner.APIVersion != apiGVString { + if owner.APIVersion != apiSGVString { return "", false } diff --git a/internal/controller/scheduledbackup_controller.go b/internal/controller/scheduledbackup_controller.go index 8d4d3fa248..4fd0138e64 100644 --- a/internal/controller/scheduledbackup_controller.go +++ b/internal/controller/scheduledbackup_controller.go @@ -348,7 +348,7 @@ func (r *ScheduledBackupReconciler) SetupWithManager( return nil } - if owner.APIVersion != apiGVString { + if owner.APIVersion != apiSGVString { return nil } diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 5f1396d865..2a753bfb52 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -151,7 +151,7 @@ func newFakePooler(k8sClient client.Client, cluster *apiv1.Cluster) *apiv1.Poole // upstream issue, go client cleans typemeta: https://github.com/kubernetes/client-go/issues/308 pooler.TypeMeta = metav1.TypeMeta{ Kind: apiv1.PoolerKind, - APIVersion: apiv1.GroupVersion.String(), + APIVersion: apiv1.SchemeGroupVersion.String(), } return pooler @@ -219,7 +219,7 @@ func newFakeCNPGCluster( // upstream issue, go client cleans typemeta: https://github.com/kubernetes/client-go/issues/308 cluster.TypeMeta = metav1.TypeMeta{ Kind: apiv1.ClusterKind, - APIVersion: apiv1.GroupVersion.String(), + APIVersion: apiv1.SchemeGroupVersion.String(), } return cluster @@ -270,7 +270,7 @@ func newFakeCNPGClusterWithPGWal(k8sClient client.Client, namespace string) *api // upstream issue, go client cleans typemeta: 
https://github.com/kubernetes/client-go/issues/308 cluster.TypeMeta = metav1.TypeMeta{ Kind: apiv1.ClusterKind, - APIVersion: apiv1.GroupVersion.String(), + APIVersion: apiv1.SchemeGroupVersion.String(), } return cluster diff --git a/pkg/management/client.go b/pkg/management/client.go index 95105530f6..51f1fc02a7 100644 --- a/pkg/management/client.go +++ b/pkg/management/client.go @@ -73,7 +73,7 @@ func NewControllerRuntimeClient() (client.WithWatch, error) { return nil, err } - mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{apiv1.GroupVersion}) + mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{apiv1.SchemeGroupVersion}) // add here any resource that need to be registered. objectsToRegister := []runtime.Object{ // custom resources diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go index 2440e0e299..aa4e6c9528 100644 --- a/tests/e2e/rolling_update_test.go +++ b/tests/e2e/rolling_update_test.go @@ -352,7 +352,7 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Instances: instances, ImageCatalogRef: &apiv1.ImageCatalogRef{ TypedLocalObjectReference: corev1.TypedLocalObjectReference{ - APIGroup: &apiv1.GroupVersion.Group, + APIGroup: &apiv1.SchemeGroupVersion.Group, Name: name, Kind: "ImageCatalog", }, diff --git a/tests/utils/operator/upgrade.go b/tests/utils/operator/upgrade.go index e8a2e7af21..1959c2d557 100644 --- a/tests/utils/operator/upgrade.go +++ b/tests/utils/operator/upgrade.go @@ -104,8 +104,8 @@ func InstallLatest( Eventually(func() error { mapping, err := crudClient.RESTMapper().RESTMapping( - schema.GroupKind{Group: apiv1.GroupVersion.Group, Kind: apiv1.ClusterKind}, - apiv1.GroupVersion.Version) + schema.GroupKind{Group: apiv1.SchemeGroupVersion.Group, Kind: apiv1.ClusterKind}, + apiv1.SchemeGroupVersion.Version) if err != nil { return err } From 0b1d1405743da8dffe875e5717ef93a6a73afcbc Mon Sep 17 00:00:00 2001 From: Peggie Date: Tue, 4 Feb 2025 14:46:15 +0100 Subject: [PATCH 338/836] feat: Public Cloud K8S versions update (#6595) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 4 ++-- .github/eks_versions.json | 4 ++-- .github/kind_versions.json | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index b5a3eed520..c1825b7a7e 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,6 +1,6 @@ [ - "1.31.2", - "1.30.6", + "1.31.3", + "1.30.7", "1.29.9", "1.28.9" ] diff --git a/.github/eks_versions.json b/.github/eks_versions.json index 3121122733..49228d19da 100644 --- a/.github/eks_versions.json +++ b/.github/eks_versions.json @@ -1,6 +1,6 @@ [ + "1.32", "1.31", "1.30", - "1.29", - "1.28" + "1.29" ] diff --git a/.github/kind_versions.json b/.github/kind_versions.json index b39d642e5d..10e6039591 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,5 +1,5 @@ [ - "v1.32.0", + "v1.32.1", "v1.31.4", "v1.30.8", "v1.29.12", From 69a65e7eaf800b9d157dd43e29150f0acc50b3fb Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." 
Date: Wed, 5 Feb 2025 15:14:08 +0100
Subject: [PATCH 339/836] fix: avoid loading helper images when running test on kind (#6770)

Avoid loading the images due to a known issue that makes kind v0.26.0
with kindest/node v1.32.1 fail when loading the images.

This issue was reported here
https://github.com/kubernetes-sigs/kind/issues/3853

Signed-off-by: Jonathan Gonzalez V.
---
 hack/e2e/run-e2e-kind.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh
index 129bccffff..a7e6f07763 100755
--- a/hack/e2e/run-e2e-kind.sh
+++ b/hack/e2e/run-e2e-kind.sh
@@ -84,7 +84,8 @@ main() {
     "${HACK_DIR}/setup-cluster.sh" load
   fi

-  "${HACK_DIR}/setup-cluster.sh" load-helper-images
+  # Uncomment the line below once a kindest/node release newer than v1.32.1 is available
+  # "${HACK_DIR}/setup-cluster.sh" load-helper-images

   RC=0

From 80ae3c3efd6b19e0d108b47a8e7afc917c5b3fb8 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 6 Feb 2025 09:21:08 +0100
Subject: [PATCH 340/836] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.80.0 (main) (#6763)

---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 86cb8cdfd2..a1fdf634ae 100644
--- a/go.mod
+++ b/go.mod
@@ -25,7 +25,7 @@ require (
 	github.com/mitchellh/go-ps v1.0.0
 	github.com/onsi/ginkgo/v2 v2.22.2
 	github.com/onsi/gomega v1.36.2
-	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2
+	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0
 	github.com/prometheus/client_golang v1.20.5
 	github.com/robfig/cron v1.2.0
 	github.com/sethvargo/go-password v0.3.1
diff --git a/go.sum b/go.sum
index 3dc9308dce..4353561378 100644
--- a/go.sum
+++ b/go.sum
@@ -154,8 +154,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2 h1:DGv150w4UyxnjNHlkCw85R3+lspOxegtdnbpP2vKRrk=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2/go.mod h1:AVMP4QEW8xuGWnxaWSpI3kKjP9fDA31nO68zsyREJZA=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0 h1:ckSycH7xHtpcvXsmEY/qEziRhDQKqKqbsHi9kX/BO7A=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M=
 github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
 github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=

From 93c26736b4d63de718f8296fc66970e5d83efab0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 6 Feb 2025 14:57:48 +0100
Subject: [PATCH 341/836] fix(deps): update module golang.org/x/term to v0.29.0 (main) (#6765)

---
 go.mod | 4 ++--
 go.sum | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/go.mod b/go.mod
index a1fdf634ae..a8d260f438 100644
--- a/go.mod
+++ b/go.mod
@@ -35,7 +35,7 @@ require (
 	go.uber.org/atomic v1.11.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.0
-	golang.org/x/term v0.28.0
+	golang.org/x/term v0.29.0
 	google.golang.org/grpc v1.70.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.32.1
@@ -102,7 +102,7 @@ require (
 	golang.org/x/net v0.34.0 // indirect
 	golang.org/x/oauth2 v0.25.0 // indirect
 	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.29.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
 	golang.org/x/tools v0.28.0 // indirect
diff --git a/go.sum b/go.sum
index 4353561378..6d5c36b461 100644
--- a/go.sum
+++ b/go.sum
@@ -238,10 +238,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
-golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
+golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=

From ec911af1ca1275384cb4bceef894eba5da329526 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 6 Feb 2025 16:08:35 +0100
Subject: [PATCH 342/836] chore(deps): update dependency rook/rook to v1.16.3 (main) (#6778)

---
 .github/workflows/continuous-delivery.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index 5ae69682f1..bbe6bf737a 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -37,7 +37,7 @@ env:
   GOLANG_VERSION: "1.23.x"
   KUBEBUILDER_VERSION: "2.3.1"
   KIND_VERSION: "v0.26.0"
-  ROOK_VERSION: "v1.16.2"
+  ROOK_VERSION: "v1.16.3"
   EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0"
   OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
   BUILD_PUSH_PROVENANCE: ""

From 7aaa52d557c036e59a582c7e41b611c4e4f04d5c Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 6 Feb 2025 17:49:25 +0100
Subject: [PATCH 343/836] chore: remove unused yaml files (#6436)

Some YAML files had been unused for years and were no longer required;
they also included and referenced kube-rbac-proxy, which we don't use.

Closes #6223

Signed-off-by: Jonathan Gonzalez V.
--- config/default/kustomization.yaml | 7 +----- config/default/manager_auth_proxy_patch.yaml | 25 ------------------- .../rbac/auth_proxy_client_clusterrole.yaml | 7 ------ config/rbac/auth_proxy_role.yaml | 13 ---------- config/rbac/auth_proxy_role_binding.yaml | 12 --------- config/rbac/auth_proxy_service.yaml | 14 ----------- config/rbac/kustomization.yaml | 9 +------ 7 files changed, 2 insertions(+), 85 deletions(-) delete mode 100644 config/default/manager_auth_proxy_patch.yaml delete mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml delete mode 100644 config/rbac/auth_proxy_role.yaml delete mode 100644 config/rbac/auth_proxy_role_binding.yaml delete mode 100644 config/rbac/auth_proxy_service.yaml diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 9a299052ba..079ffd5c70 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -22,7 +22,7 @@ resources: - ../webhook # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. #- ../certmanager -# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. #- ../prometheus patches: @@ -40,11 +40,6 @@ patches: name: controller-manager version: v1 -# Protect the /metrics endpoint by putting it behind auth. -# If you want your controller-manager to expose the /metrics -# endpoint w/o any authn/z, please comment the following line. -#- manager_auth_proxy_patch.yaml - # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. # 'CERTMANAGER' needs to be enabled to use ca injection diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml deleted file mode 100644 index 43095c5fb2..0000000000 --- a/config/default/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the controller manager, -# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - args: - - "--metrics-bind-address=127.0.0.1:8080" - - "--leader-elect" - - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=10" - ports: - - containerPort: 8443 - name: https diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml deleted file mode 100644 index bd4af137a9..0000000000 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metrics-reader -rules: -- nonResourceURLs: ["/metrics"] - verbs: ["get"] diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 618f5e4177..0000000000 --- a/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: ["create"] -- apiGroups: ["authorization.k8s.io"] - resources: - - subjectaccessreviews - verbs: ["create"] diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml deleted file mode 100644 index 46f50c4d66..0000000000 --- a/config/rbac/auth_proxy_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: cnpg-manager - namespace: cnpg-system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml deleted file mode 100644 index 1a0b3a02e0..0000000000 --- a/config/rbac/auth_proxy_service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/name: cloudnative-pg - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - targetPort: https - selector: - app.kubernetes.io/name: cloudnative-pg diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index a561c73dc9..3d9a82e989 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -3,13 +3,7 @@ kind: Kustomization resources: - role.yaml - role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -#- auth_proxy_service.yaml -#- auth_proxy_role.yaml -#- auth_proxy_role_binding.yaml -#- auth_proxy_client_clusterrole.yaml + # For each CRD, "Editor" and "Viewer" roles are scaffolded by # default, aiding admins in cluster management. Those roles are # not used by the Project itself. 
You can comment the following lines @@ -20,4 +14,3 @@ resources: - publication_viewer_role.yaml - database_editor_role.yaml - database_viewer_role.yaml - From f44d4e829f6dc191f9642fd80ed3723d3a3c9307 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 11:58:33 +0100 Subject: [PATCH 344/836] chore(deps): update dependency vmware-tanzu/velero to v1.15.2 (main) (#6811) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index bbe6bf737a..6db7e8e51a 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1346,7 +1346,7 @@ jobs: name: Setup Velero uses: nick-fields/retry@v3 env: - VELERO_VERSION: "v1.15.1" + VELERO_VERSION: "v1.15.2" VELERO_AWS_PLUGIN_VERSION: "v1.11.1" with: timeout_minutes: 10 From 14c1a0382747c5e22f1b3d77353c9f9a12fd6ae0 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 11 Feb 2025 15:59:55 +0100 Subject: [PATCH 345/836] chore: introduce the `no-stale` label to issues (#6817) Use the `no-stale` action to control which issues should be exempted from being considered inactive. Signed-off-by: Gabriele Bartolini --- .github/workflows/close-inactive-issues.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml index 3ee8af2c20..f24654a23f 100644 --- a/.github/workflows/close-inactive-issues.yml +++ b/.github/workflows/close-inactive-issues.yml @@ -21,3 +21,4 @@ jobs: days-before-pr-stale: -1 days-before-pr-close: -1 ascending: true + exempt-issue-labels: "no-stale" From 1ec07e0e5d38e9c972082a5856079e3125b8eb32 Mon Sep 17 00:00:00 2001 From: Tao Li Date: Tue, 11 Feb 2025 23:28:06 +0800 Subject: [PATCH 346/836] fix(cnpg-plugin): collect logs from all containers, including sidecars, in instance pods (#6636) Fixes an issue in `kubectl cnpg report --logs ` where sidecar container logs were not being collected. The update ensures all containers in instance pods are included in the log collection process. 
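Reduced to a sketch, the new collection loop writes one zip entry per
container rather than one per pod (the real code below also handles
previous logs and error paths):

    // Take a per-iteration copy of the pod so &pod can safely be handed
    // to the log streamer, then walk every container in its spec.
    for idx := range podList.Items {
        pod := podList.Items[idx]
        for _, container := range pod.Spec.Containers {
            streamPodLogs.Options.Container = container.Name
            streamPodLogs.Pod = &pod
            // ...create a <pod>-<container>.jsonl zip entry and stream into it
        }
    }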
Closes #6632 Signed-off-by: Tao Li Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- internal/cmd/plugin/report/logs.go | 65 ++++++++++++++++-------------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/internal/cmd/plugin/report/logs.go b/internal/cmd/plugin/report/logs.go index 220c6bf3b9..e6858696d9 100644 --- a/internal/cmd/plugin/report/logs.go +++ b/internal/cmd/plugin/report/logs.go @@ -113,28 +113,31 @@ func streamClusterLogsToZip( Previous: true, } - for _, pod := range podList.Items { - writer, err := zipper.Create(filepath.Join(logsdir, pod.Name) + ".jsonl") - if err != nil { - return fmt.Errorf("could not add '%s' to zip: %w", - filepath.Join(logsdir, pod.Name), err) - } - podPointer := pod - streamPodLogs.Pod = &podPointer + for idx := range podList.Items { + pod := podList.Items[idx] + for _, container := range pod.Spec.Containers { + path := filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, container.Name)) + writer, err := zipper.Create(path) + if err != nil { + return fmt.Errorf("could not add '%s' to zip: %w", path, err) + } + streamPodLogs.Options.Container = container.Name + streamPodLogs.Pod = &pod - if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil { - return err - } - // We ignore the error because it will error if there are no previous logs - _ = streamPodLogs.Stream(ctx, writer) - if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil { - return err - } + if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil { + return err + } + // We ignore the error because it will error if there are no previous logs + _ = streamPodLogs.Stream(ctx, writer) + if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil { + return err + } - streamPodLogs.Previous = false + streamPodLogs.Previous = false - if err := streamPodLogs.Stream(ctx, writer); err != nil { - return err + if err := streamPodLogs.Stream(ctx, writer); err != nil { + return err + } } } @@ -180,17 +183,19 @@ func streamClusterJobLogsToZip(ctx context.Context, clusterName, namespace strin Options: podLogOptions, Previous: false, } - for _, pod := range podList.Items { - writer, err := zipper.Create(filepath.Join(logsdir, pod.Name) + ".jsonl") - if err != nil { - return fmt.Errorf("could not add '%s' to zip: %w", - filepath.Join(logsdir, pod.Name), err) - } - podPointer := pod - streamPodLogs.Pod = &podPointer - err = streamPodLogs.Stream(ctx, writer) - if err != nil { - return err + for idx := range podList.Items { + pod := podList.Items[idx] + for _, container := range pod.Spec.Containers { + path := filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, container.Name)) + writer, err := zipper.Create(path) + if err != nil { + return fmt.Errorf("could not add '%s' to zip: %w", path, err) + } + streamPodLogs.Options.Container = container.Name + streamPodLogs.Pod = &pod + if err = streamPodLogs.Stream(ctx, writer); err != nil { + return err + } } } } From dfb09582c37df9ee91aead7ab5ad41b46180ed15 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 19:29:06 +0100 Subject: [PATCH 347/836] chore(deps): update module sigs.k8s.io/controller-tools to v0.17.2 (main) (#6591) --- Makefile | 2 +- config/crd/bases/postgresql.cnpg.io_backups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusters.yaml | 2 +- 
config/crd/bases/postgresql.cnpg.io_databases.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_poolers.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_publications.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_subscriptions.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 82a2d24a20..9da0c2ac46 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.6.0 -CONTROLLER_TOOLS_VERSION ?= v0.16.5 +CONTROLLER_TOOLS_VERSION ?= v0.17.2 GORELEASER_VERSION ?= v2.6.1 SPELLCHECK_VERSION ?= 0.46.0 WOKE_VERSION ?= 0.19.0 diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index d4d5b3bc97..2d352fbf40 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: backups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml index 06d1592286..83d116861f 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: clusterimagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 1057f16c99..c7de1196c6 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: clusters.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index b9db5db349..d8ae251677 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: databases.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml index cf90a01fa3..a28ad6132e 100644 --- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: 
imagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index 6039e1e5ea..162ba3b2da 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: poolers.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_publications.yaml b/config/crd/bases/postgresql.cnpg.io_publications.yaml index 2e0fdaf0e9..bbeb13ee9e 100644 --- a/config/crd/bases/postgresql.cnpg.io_publications.yaml +++ b/config/crd/bases/postgresql.cnpg.io_publications.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: publications.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml index 534a4e423c..6c2406a879 100644 --- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: scheduledbackups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml index 24a9ff12a1..e93bf37d10 100644 --- a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml +++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.5 + controller-gen.kubebuilder.io/version: v0.17.2 name: subscriptions.postgresql.cnpg.io spec: group: postgresql.cnpg.io From 3a0ec8613e6226d9f7b311547318e9e6ce0dbaa4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 20:19:30 +0100 Subject: [PATCH 348/836] chore(deps): update module github.com/goreleaser/goreleaser to v2.7.0 (main) (#6818) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 9da0c2ac46..95f7cea662 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.17.2 -GORELEASER_VERSION ?= v2.6.1 +GORELEASER_VERSION ?= v2.7.0 SPELLCHECK_VERSION ?= 0.46.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.1 From 4286c0f2ab954092d3af30906115cffb91440304 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 12 Feb 2025 06:53:10 +0100 Subject: [PATCH 349/836] chore(deps): update spellcheck to v0.47.0 (main) (#6832) --- .github/workflows/spellcheck.yml | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 
27de6b2c8e..27e4d031f8 100644
--- a/.github/workflows/spellcheck.yml
+++ b/.github/workflows/spellcheck.yml
@@ -28,4 +28,4 @@ jobs:
         uses: actions/checkout@v4

       - name: Spellcheck
-        uses: rojopolis/spellcheck-github-actions@0.46.0
+        uses: rojopolis/spellcheck-github-actions@0.47.0
diff --git a/Makefile b/Makefile
index 95f7cea662..12313c2d47 100644
--- a/Makefile
+++ b/Makefile
@@ -45,7 +45,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions
 KUSTOMIZE_VERSION ?= v5.6.0
 CONTROLLER_TOOLS_VERSION ?= v0.17.2
 GORELEASER_VERSION ?= v2.7.0
-SPELLCHECK_VERSION ?= 0.46.0
+SPELLCHECK_VERSION ?= 0.47.0
 WOKE_VERSION ?= 0.19.0
 OPERATOR_SDK_VERSION ?= v1.39.1
 OPM_VERSION ?= v1.50.0

From 957082b0b367195e9ce3706f58e039e89d56cd88 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 12 Feb 2025 09:23:36 +0100
Subject: [PATCH 350/836] chore(deps): update dependency redhat-openshift-ecosystem/openshift-preflight to v1.12.0 (main) (#6831)

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 12313c2d47..5a52830ccd 100644
--- a/Makefile
+++ b/Makefile
@@ -49,7 +49,7 @@ SPELLCHECK_VERSION ?= 0.47.0
 WOKE_VERSION ?= 0.19.0
 OPERATOR_SDK_VERSION ?= v1.39.1
 OPM_VERSION ?= v1.50.0
-PREFLIGHT_VERSION ?= 1.11.1
+PREFLIGHT_VERSION ?= 1.12.0
 OPENSHIFT_VERSIONS ?= v4.12-v4.18
 ARCH ?= amd64

From d0e8f5f60ef72b0ea7201fd9f28ac487ef016eac Mon Sep 17 00:00:00 2001
From: Daniil Zakhlystov <47750602+usernamedt@users.noreply.github.com>
Date: Wed, 12 Feb 2025 18:39:52 +0100
Subject: [PATCH 351/836] fix(pgbouncer): handle `load_balance_hosts` null value (#6810)

When using the `SHOW POOLS` command to collect metrics for pgBouncer,
expect NULL as a possible result and ensure it is handled properly.

This issue was introduced in PR #6630.
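The fix leans on standard database/sql null handling; as a minimal
sketch (`gauge` is a stand-in for the exported metric):

    // sql.NullInt32 scans both integers and SQL NULL without error: when
    // the column is NULL, Valid is false and Int32 holds the zero value,
    // so the exported metric simply falls back to 0.
    var loadBalanceHosts sql.NullInt32
    if err := rows.Scan(&loadBalanceHosts); err != nil {
        return err
    }
    gauge.Set(float64(loadBalanceHosts.Int32))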
Relates #6566 Signed-off-by: Daniil Zakhlystov Co-authored-by: Daniil Zakhlystov --- pkg/management/pgbouncer/metricsserver/pools.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/management/pgbouncer/metricsserver/pools.go b/pkg/management/pgbouncer/metricsserver/pools.go index 0c7ee4ae41..d6edd77fd9 100644 --- a/pkg/management/pgbouncer/metricsserver/pools.go +++ b/pkg/management/pgbouncer/metricsserver/pools.go @@ -242,7 +242,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { ) // PGBouncer 1.24.0 or above var ( - loadBalanceHosts int + loadBalanceHosts sql.NullInt32 ) cols, err := rows.Columns() @@ -336,7 +336,7 @@ func (e *Exporter) collectShowPools(ch chan<- prometheus.Metric, db *sql.DB) { e.Metrics.ShowPools.MaxWait.WithLabelValues(database, user).Set(float64(maxWait)) e.Metrics.ShowPools.MaxWaitUs.WithLabelValues(database, user).Set(float64(maxWaitUs)) e.Metrics.ShowPools.PoolMode.WithLabelValues(database, user).Set(float64(poolModeToInt(poolMode))) - e.Metrics.ShowPools.LoadBalanceHosts.WithLabelValues(database, user).Set(float64(loadBalanceHosts)) + e.Metrics.ShowPools.LoadBalanceHosts.WithLabelValues(database, user).Set(float64(loadBalanceHosts.Int32)) } e.Metrics.ShowPools.ClActive.Collect(ch) From 17706712785a40447ad7300a86816eb2a2c134f1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 12 Feb 2025 22:07:32 +0100 Subject: [PATCH 352/836] chore(deps): update dependency golangci/golangci-lint to v1.64.3 (main) (#6839) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index bdd2c33d3c..65f145d0eb 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -17,7 +17,7 @@ on: # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.23.x" - GOLANGCI_LINT_VERSION: "v1.63.4" + GOLANGCI_LINT_VERSION: "v1.64.3" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.26.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" From 6604e846f4ebb830d459038adb9400be4e9a8a7b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 14:12:26 +0100 Subject: [PATCH 353/836] chore(deps): update dependency golangci/golangci-lint to v1.64.4 (main) (#6851) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 65f145d0eb..b18a19dd7c 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -17,7 +17,7 @@ on: # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.23.x" - GOLANGCI_LINT_VERSION: "v1.64.3" + GOLANGCI_LINT_VERSION: "v1.64.4" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.26.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" From 902724703b876d196c155f2da1e9c5275ea07ff2 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 17 Feb 2025 10:44:59 +0100 Subject: [PATCH 354/836] chore: fix generated files controller-tools 1.17.2 (#6862) The commit dfb09582c37df9ee91aead7ab5ad41b46180ed15 upgraded the controller-tools dependency from version 1.16.5 to 1.17.2. 
While it includes updates to the generated manifests, it omits the corresponding changes to the generated DeepCopy functions. Signed-off-by: Marco Nenciarini --- api/v1/zz_generated.deepcopy.go | 40 ++++++++++++++++----------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index c8396cfd7f..bde8690abd 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -21,8 +21,6 @@ limitations under the License. package v1 import ( - pkgapi "github.com/cloudnative-pg/barman-cloud/pkg/api" - "github.com/cloudnative-pg/machinery/pkg/api" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -131,7 +129,7 @@ func (in *BackupConfiguration) DeepCopyInto(out *BackupConfiguration) { } if in.BarmanObjectStore != nil { in, out := &in.BarmanObjectStore, &out.BarmanObjectStore - *out = new(pkgapi.BarmanObjectStoreConfiguration) + *out = new(BarmanObjectStoreConfiguration) (*in).DeepCopyInto(*out) } } @@ -238,10 +236,10 @@ func (in *BackupSnapshotStatus) DeepCopy() *BackupSnapshotStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupSource) DeepCopyInto(out *BackupSource) { *out = *in - out.LocalObjectReference = in.LocalObjectReference + in.LocalObjectReference.DeepCopyInto(&out.LocalObjectReference) if in.EndpointCA != nil { in, out := &in.EndpointCA, &out.EndpointCA - *out = new(api.SecretKeySelector) + *out = new(SecretKeySelector) **out = **in } } @@ -259,7 +257,7 @@ func (in *BackupSource) DeepCopy() *BackupSource { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { *out = *in - out.Cluster = in.Cluster + in.Cluster.DeepCopyInto(&out.Cluster) if in.PluginConfiguration != nil { in, out := &in.PluginConfiguration, &out.PluginConfiguration *out = new(BackupPluginConfiguration) @@ -293,7 +291,7 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { in.BarmanCredentials.DeepCopyInto(&out.BarmanCredentials) if in.EndpointCA != nil { in, out := &in.EndpointCA, &out.EndpointCA - *out = new(api.SecretKeySelector) + *out = new(SecretKeySelector) **out = **in } if in.StartedAt != nil { @@ -379,7 +377,7 @@ func (in *BootstrapInitDB) DeepCopyInto(out *BootstrapInitDB) { *out = *in if in.Secret != nil { in, out := &in.Secret, &out.Secret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } if in.Options != nil { @@ -444,7 +442,7 @@ func (in *BootstrapPgBaseBackup) DeepCopyInto(out *BootstrapPgBaseBackup) { *out = *in if in.Secret != nil { in, out := &in.Secret, &out.Secret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } } @@ -479,7 +477,7 @@ func (in *BootstrapRecovery) DeepCopyInto(out *BootstrapRecovery) { } if in.Secret != nil { in, out := &in.Secret, &out.Secret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } } @@ -715,7 +713,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { } if in.SuperuserSecret != nil { in, out := &in.SuperuserSecret, &out.SuperuserSecret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } if in.EnableSuperuserAccess != nil { @@ -730,7 +728,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]api.LocalObjectReference, len(*in)) + *out = make([]LocalObjectReference, len(*in)) copy(*out, *in) } in.StorageConfiguration.DeepCopyInto(&out.StorageConfiguration) @@ -1238,7 +1236,7 @@ func (in *ExternalCluster) DeepCopyInto(out *ExternalCluster) { } if in.BarmanObjectStore != nil { in, out := &in.BarmanObjectStore, &out.BarmanObjectStore - *out = new(pkgapi.BarmanObjectStoreConfiguration) + *out = new(BarmanObjectStoreConfiguration) (*in).DeepCopyInto(*out) } if in.PluginConfiguration != nil { @@ -1661,12 +1659,12 @@ func (in *MonitoringConfiguration) DeepCopyInto(out *MonitoringConfiguration) { } if in.CustomQueriesConfigMap != nil { in, out := &in.CustomQueriesConfigMap, &out.CustomQueriesConfigMap - *out = make([]api.ConfigMapKeySelector, len(*in)) + *out = make([]ConfigMapKeySelector, len(*in)) copy(*out, *in) } if in.CustomQueriesSecret != nil { in, out := &in.CustomQueriesSecret, &out.CustomQueriesSecret - *out = make([]api.SecretKeySelector, len(*in)) + *out = make([]SecretKeySelector, len(*in)) copy(*out, *in) } if in.TLSConfig != nil { @@ -1801,7 +1799,7 @@ func (in *PgBouncerSpec) DeepCopyInto(out *PgBouncerSpec) { *out = *in if in.AuthQuerySecret != nil { in, out := &in.AuthQuerySecret, &out.AuthQuerySecret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } if in.Parameters != nil { @@ -2068,7 +2066,7 @@ func (in *PoolerSecrets) DeepCopy() *PoolerSecrets { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PoolerSpec) DeepCopyInto(out *PoolerSpec) { *out = *in - out.Cluster = in.Cluster + in.Cluster.DeepCopyInto(&out.Cluster) if in.Instances != nil { in, out := &in.Instances, &out.Instances *out = new(int32) @@ -2494,7 +2492,7 @@ func (in *RoleConfiguration) DeepCopyInto(out *RoleConfiguration) { *out = *in if in.PasswordSecret != nil { in, out := &in.PasswordSecret, &out.PasswordSecret - *out = new(api.LocalObjectReference) + *out = new(LocalObjectReference) **out = **in } if in.ValidUntil != nil { @@ -2528,12 +2526,12 @@ func (in *SQLRefs) DeepCopyInto(out *SQLRefs) { *out = *in if in.SecretRefs != nil { in, out := &in.SecretRefs, &out.SecretRefs - *out = make([]api.SecretKeySelector, len(*in)) + *out = make([]SecretKeySelector, len(*in)) copy(*out, *in) } if in.ConfigMapRefs != nil { in, out := &in.ConfigMapRefs, &out.ConfigMapRefs - *out = make([]api.ConfigMapKeySelector, len(*in)) + *out = make([]ConfigMapKeySelector, len(*in)) copy(*out, *in) } } @@ -2620,7 +2618,7 @@ func (in *ScheduledBackupSpec) DeepCopyInto(out *ScheduledBackupSpec) { *out = new(bool) **out = **in } - out.Cluster = in.Cluster + in.Cluster.DeepCopyInto(&out.Cluster) if in.PluginConfiguration != nil { in, out := &in.PluginConfiguration, &out.PluginConfiguration *out = new(BackupPluginConfiguration) From 5d826575479eca77567f9ae02e39d83b635909bf Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 17 Feb 2025 11:10:15 +0100 Subject: [PATCH 355/836] chore: stop building UBI8 operator images (#6802) We were building both UBI8 and UBI9 images, but only one is required. This change stops building UBI8 images: from now on, we provide only UBI9 images. Closes #6801 Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/continuous-delivery.yml | 22 ++++---- .github/workflows/continuous-integration.yml | 57 +++----------------- .github/workflows/release-publish.yml | 24 +--------- Dockerfile-ubi8 | 29 ---------- docs/src/index.md | 1 - 5 files changed, 18 insertions(+), 115 deletions(-) delete mode 100644 Dockerfile-ubi8 diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 6db7e8e51a..3d19469a94 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -258,7 +258,7 @@ jobs: author_name: ${{ steps.build-meta.outputs.author_name }} author_email: ${{ steps.build-meta.outputs.author_email }} controller_img: ${{ env.CONTROLLER_IMG }} - controller_img_ubi8: ${{ env.CONTROLLER_IMG_UBI8 }} + controller_img_ubi9: ${{ env.CONTROLLER_IMG_UBI9 }} index_img: ${{ env.INDEX_IMG }} bundle_img: ${{ env.BUNDLE_IMG }} catalog_img: ${{ env.CATALOG_IMG }} @@ -359,13 +359,13 @@ jobs: tags: | type=raw,value=${{ steps.build-meta.outputs.tag_name }} - - name: Docker meta UBI8 - id: docker-meta-ubi8 + name: Docker meta UBI9 + id: docker-meta-ubi9 uses: docker/metadata-action@v5 with: images: ${{ env.IMAGES }} flavor: | - suffix=-ubi8 + suffix=-ubi9 tags: | type=raw,value=${{ steps.build-meta.outputs.tag_name }} - @@ -399,16 +399,16 @@ jobs: cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} - - name: Build and push UBI8 + name: Build and push UBI9 uses: docker/build-push-action@v6 with: platforms: ${{ env.PLATFORMS }} context: .
- file: Dockerfile-ubi8 + file: Dockerfile-ubi9 push: true build-args: | VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi8.outputs.tags }} + tags: ${{ steps.docker-meta-ubi9.outputs.tags }} labels: ${{ env.LABELS }} provenance: ${{ env.BUILD_PUSH_PROVENANCE }} cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} @@ -427,13 +427,13 @@ jobs: name: Output images env: TAGS: ${{ steps.docker-meta.outputs.tags }} - TAGS_UBI8: ${{ steps.docker-meta-ubi8.outputs.tags }} + TAGS_UBI9: ${{ steps.docker-meta-ubi9.outputs.tags }} run: | LOWERCASE_OPERATOR_IMAGE_NAME=${OPERATOR_IMAGE_NAME,,} TAG=${TAGS#*:} - TAG_UBI=${TAGS_UBI8#*:} + TAG_UBI=${TAGS_UBI9#*:} echo "CONTROLLER_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG}" >> $GITHUB_ENV - echo "CONTROLLER_IMG_UBI8=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV + echo "CONTROLLER_IMG_UBI9=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV echo "BUNDLE_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:bundle-${TAG}" >> $GITHUB_ENV echo "INDEX_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:index-${TAG}" >> $GITHUB_ENV echo "CATALOG_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:catalog-${TAG}" >> $GITHUB_ENV @@ -1971,7 +1971,7 @@ jobs: - name: Build and push the operator and catalog env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi8 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi9 }} BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} INDEX_IMG: ${{ needs.buildx.outputs.index_img }} CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }} diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index b18a19dd7c..8a233dfef5 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -120,7 +120,6 @@ jobs: - '.github/workflows/continuous-integration.yml' - '.goreleaser*.yml' - 'Dockerfile' - - 'Dockerfile-ubi8' - 'Dockerfile-ubi9' - 'Makefile' - 'go.mod' @@ -413,7 +412,7 @@ jobs: commit_version: ${{ env.VERSION }} commit: ${{ env.COMMIT_SHA }} controller_img: ${{ env.CONTROLLER_IMG }} - controller_img_ubi8: ${{ env.CONTROLLER_IMG_UBI8 }} + controller_img_ubi9: ${{ env.CONTROLLER_IMG_UBI9 }} bundle_img: ${{ env.BUNDLE_IMG }} catalog_img: ${{ env.CATALOG_IMG }} push: ${{ env.PUSH }} @@ -529,17 +528,6 @@ jobs: type=ref,event=branch type=ref,event=pr - - name: Docker meta UBI8 - id: docker-meta-ubi8 - uses: docker/metadata-action@v5 - with: - images: ${{ env.OPERATOR_IMAGE_NAME }} - flavor: | - suffix=-ubi8 - tags: | - type=ref,event=branch - type=ref,event=pr - - name: Docker meta UBI9 id: docker-meta-ubi9 uses: docker/metadata-action@v5 @@ -586,28 +574,6 @@ jobs: failure-threshold: WARN accept-keywords: key - - name: Build for scan UBI8 image - uses: docker/build-push-action@v6 - with: - platforms: "linux/amd64" - context: . - file: Dockerfile-ubi8 - push: false - load: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi8.outputs.tags }} - - - name: Dockle scan UBI8 image - uses: erzz/dockle-action@v1 - env: - DOCKLE_IGNORES: CIS-DI-0009 - with: - image: ${{ steps.docker-meta-ubi8.outputs.tags }} - exit-code: '1' - failure-threshold: WARN - accept-keywords: key - - name: Build for scan UBI9 image uses: docker/build-push-action@v6 with: @@ -665,17 +631,6 @@ jobs: cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} - - name: Build and push UBI8 - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . 
- file: Dockerfile-ubi8 - push: ${{ env.PUSH }} - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi8.outputs.tags }} - - name: Build and push UBI9 uses: docker/build-push-action@v6 with: @@ -690,13 +645,13 @@ jobs: - name: Output images env: TAGS: ${{ steps.docker-meta.outputs.tags }} - TAGS_UBI8: ${{ steps.docker-meta-ubi8.outputs.tags }} + TAGS_UBI9: ${{ steps.docker-meta-ubi9.outputs.tags }} run: | LOWERCASE_OPERATOR_IMAGE_NAME=${OPERATOR_IMAGE_NAME,,} TAG=${TAGS#*:} - TAG_UBI=${TAGS_UBI8#*:} + TAG_UBI=${TAGS_UBI9#*:} echo "CONTROLLER_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG}" >> $GITHUB_ENV - echo "CONTROLLER_IMG_UBI8=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV + echo "CONTROLLER_IMG_UBI9=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV echo "BUNDLE_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:bundle-${TAG}" >> $GITHUB_ENV echo "CATALOG_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:catalog-${TAG}" >> $GITHUB_ENV @@ -742,7 +697,7 @@ jobs: - name: Create bundle env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi8 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi9 }} BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }} run: | @@ -790,7 +745,7 @@ jobs: - name: Run preflight container test env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi8 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi9 }} PFLT_ARTIFACTS: "preflight_results" run: | bin/preflight check container ${CONTROLLER_IMG} \ diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index d1b429c154..bb7764fa16 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -180,17 +180,6 @@ jobs: latest=${{ env.IS_LATEST }} tags: | type=semver,pattern={{version}} - - - name: Docker meta UBI8 - id: docker-meta-ubi8 - uses: docker/metadata-action@v5 - with: - images: ${{ env.IMAGES }} - flavor: | - latest=false - suffix=-ubi8 - tags: | - type=semver,pattern={{version}} - name: Docker meta UBI9 id: docker-meta-ubi9 @@ -235,17 +224,6 @@ jobs: build-args: | VERSION=${{ steps.build-meta.outputs.version }} tags: ${{ steps.docker-meta.outputs.tags }} - - - name: Build and push UBI8 - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile-ubi8 - push: true - build-args: | - VERSION=${{ steps.build-meta.outputs.version }} - tags: ${{ steps.docker-meta-ubi8.outputs.tags }} - name: Build and push UBI9 uses: docker/build-push-action@v6 @@ -302,7 +280,7 @@ jobs: version="${tag#v}" LOWERCASE_CNPG_IMAGE_NAME=${CNPG_IMAGE_NAME,,} echo "IMAGE_NAME=${LOWERCASE_CNPG_IMAGE_NAME}" >> $GITHUB_ENV - echo "CONTROLLER_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:${version}-ubi8" >> $GITHUB_ENV + echo "CONTROLLER_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:${version}-ubi9" >> $GITHUB_ENV echo "BUNDLE_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:bundle-${version}" >> $GITHUB_ENV echo "CATALOG_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:catalog-${version}" >> $GITHUB_ENV diff --git a/Dockerfile-ubi8 b/Dockerfile-ubi8 deleted file mode 100644 index 1aea9e40ac..0000000000 --- a/Dockerfile-ubi8 +++ /dev/null @@ -1,29 +0,0 @@ -FROM registry.access.redhat.com/ubi8/ubi-micro -ARG VERSION="dev" -ARG TARGETARCH - -ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." \ - MAINTAINER="CloudNativePG Contributors." 
- -LABEL summary="$SUMMARY" \ - description="$DESCRIPTION" \ - io.k8s.display-name="$SUMMARY" \ - io.k8s.description="$DESCRIPTION" \ - name="CloudNativePG Operator" \ - vendor="$MAINTAINER" \ - maintainer="$MAINTAINER" \ - url="https://cloudnative-pg.io/" \ - version="$VERSION" \ - release="1" - -COPY licenses /licenses -COPY LICENSE /licenses - -WORKDIR / - -COPY dist/manager/* /bin/ -RUN ln -sf /bin/manager_${TARGETARCH} manager -USER 65532:65532 - -ENTRYPOINT ["/manager"] diff --git a/docs/src/index.md b/docs/src/index.md index 06dec9712e..02afa896d7 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -55,7 +55,6 @@ in three different flavors: - Debian 12 distroless - Red Hat UBI 9 micro (suffix `-ubi9`) -- Red Hat UBI 8 micro (suffix `-ubi8`) Red Hat UBI images are primarily intended for OLM consumption. From 167b2e03abb6f40060c1610d85e4b626bcbcb6eb Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 17 Feb 2025 11:46:09 +0100 Subject: [PATCH 356/836] fix(bootstrap): ensure `override.conf` is included in replica setup (#6808) This patch ensures that `override.conf` is always included when bootstrapping a replica cluster. Previously, if the `postgresql.conf` from the primary lacked an explicit `include 'override.conf'` directive, the replica would fail to start due to missing critical configuration, such as `primary_conninfo`. This change modifies the bootstrap process to guarantee that `override.conf` is always included, ensuring proper replication setup. Closes #5747 #1338 Signed-off-by: Armando Ruocco --- pkg/management/postgres/configuration.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 3c0b29e399..76748351fe 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -320,21 +320,29 @@ var cleanupAutoConfOptions = []string{ // migratePostgresAutoConfFile migrates options managed by the operator from `postgresql.auto.conf` file, // to `override.conf` file for an upgrade case. // Returns a boolean indicating if any changes were done and any errors encountered -func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (bool, error) { +func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (changed bool, err error) { contextLogger := log.FromContext(ctx).WithName("migratePostgresAutoConfFile") + // this is an idempotent operation. Ensures that we always include the override import. + // See: #5747 + if changed, err = configfile.EnsureIncludes(path.Join(instance.PgData, "postgresql.conf"), + constants.PostgresqlOverrideConfigurationFile); err != nil { + return false, fmt.Errorf("migrating replication settings: %w", + err) + } + overrideConfPath := filepath.Join(instance.PgData, constants.PostgresqlOverrideConfigurationFile) autoConfFile := filepath.Join(instance.PgData, "postgresql.auto.conf") autoConfContent, readLinesErr := fileutils.ReadFileLines(autoConfFile) if readLinesErr != nil { - return false, fmt.Errorf("error while reading postgresql.auto.conf file: %w", readLinesErr) + return changed, fmt.Errorf("error while reading postgresql.auto.conf file: %w", readLinesErr) } overrideConfExists, _ := fileutils.FileExists(overrideConfPath) options := configfile.ReadLinesFromConfigurationContents(autoConfContent, migrateAutoConfOptions...) 
if len(options) == 0 && overrideConfExists { contextLogger.Trace("no action taken, options slice is empty") - return false, nil + return changed, nil } contextLogger.Info("Start to migrate replication settings", @@ -348,15 +356,7 @@ func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (bool // later during the configuration update. We create it here just as a precaution. if !overrideConfExists { if _, err := fileutils.WriteLinesToFile(overrideConfPath, options); err != nil { - return false, fmt.Errorf("migrating replication settings: %w", - err) - } - - if _, err := configfile.EnsureIncludes( - path.Join(instance.PgData, "postgresql.conf"), - constants.PostgresqlOverrideConfigurationFile, - ); err != nil { - return false, fmt.Errorf("migrating replication settings: %w", + return changed, fmt.Errorf("migrating replication settings: %w", err) } } From 9c7febca2d0d81993d6bccd6dec6a2b956a1217f Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 19 Feb 2025 09:23:31 +0100 Subject: [PATCH 357/836] chore(deps): pin genref version (#6871) A recent commit in https://github.com/kubernetes-sigs/reference-docs/genref introduced changes to the generated documentation, causing the "Verify API doc is up to date" checks to fail across all PRs. This commit pins the `genref` dependency to the previous known working commit. This will allow us to later integrate it with Renovate for proper version management and controlled upgrades. Signed-off-by: Marco Nenciarini Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. --- .github/renovate.json5 | 11 +++++++++++ Makefile | 3 ++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index af007aa499..4906f5adaf 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -50,6 +50,17 @@ datasourceTemplate: 'go', depNameTemplate: 'sigs.k8s.io/controller-tools', }, +{ + customType: 'regex', + fileMatch: [ + '^Makefile$', + ], + matchStrings: [ + 'GENREF_VERSION \\?= (?.*?)\\n', + ], + datasourceTemplate: 'go', + depNameTemplate: 'github.com/kubernetes-sigs/reference-docs/genref', + }, { customType: 'regex', fileMatch: [ diff --git a/Makefile b/Makefile index 5a52830ccd..ed76622910 100644 --- a/Makefile +++ b/Makefile @@ -44,6 +44,7 @@ BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.17.2 +GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca GORELEASER_VERSION ?= v2.7.0 SPELLCHECK_VERSION ?= 0.47.0 WOKE_VERSION ?= 0.19.0 @@ -324,7 +325,7 @@ $(ENVTEST): $(LOCALBIN) GENREF = $(LOCALBIN)/genref genref: ## Download kubernetes-sigs/reference-docs/genref locally if necessary. - $(call go-install-tool,$(GENREF),github.com/kubernetes-sigs/reference-docs/genref@master) # wokeignore:rule=master + $(call go-install-tool,$(GENREF),github.com/kubernetes-sigs/reference-docs/genref@$(GENREF_VERSION)) GO_LICENSES = $(LOCALBIN)/go-licenses go-licenses: ## Download go-licenses locally if necessary. From 7401668527896556e1042e4e6066e3970f02a7bd Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Wed, 19 Feb 2025 09:36:59 +0100 Subject: [PATCH 358/836] fix: Use targetPod.Name in WithValues() instead of targetPod (#6547) Use the name of the `targetPod` rather than the whole object, to avoid spamming the logs with the full pod definition. To make this explicit, the field is now called `targetPodName` instead of `targetPod`.
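The effect is easy to reproduce outside the operator. Below is a minimal standalone sketch, using the go-logr `funcr` backend rather than the operator's own `log` package, with a toy `pod` struct standing in for `corev1.Pod`; only the key naming and the name-versus-object choice mirror the actual change:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

// pod stands in for corev1.Pod: a short name next to a bulky definition.
type pod struct {
	Name string
	Spec map[string]string
}

func main() {
	// funcr builds a logr.Logger that hands every rendered entry to a function.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	target := pod{
		Name: "cluster-example-1",
		Spec: map[string]string{"container": "postgres", "storage": "1Gi"},
	}

	// Before the fix: the whole object is serialized into every log entry.
	logger.WithValues("targetPod", target).Info("starting backup")

	// After the fix: only the pod name is attached.
	logger.WithValues("targetPodName", target.Name).Info("starting backup")
}
```

Because `WithValues` pins the key-value pair to every subsequent entry made through that logger, the cost of a verbose value is paid on each log line, not just once.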
Closes #6400 Signed-off-by: Pierrick --- internal/controller/backup_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 5b5906bccd..5f267d02b2 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -381,7 +381,7 @@ func (r *BackupReconciler) reconcileSnapshotBackup( return &ctrl.Result{}, nil } - ctx = log.IntoContext(ctx, contextLogger.WithValues("targetPod", targetPod)) + ctx = log.IntoContext(ctx, contextLogger.WithValues("targetPodName", targetPod.Name)) // Validate we don't have other running backups var clusterBackups apiv1.BackupList From 932aafc834b2b656bd1bbfd7e01f11ca2f521c6a Mon Sep 17 00:00:00 2001 From: hoka <187822495+hokaxbt@users.noreply.github.com> Date: Wed, 19 Feb 2025 16:06:31 +0700 Subject: [PATCH 359/836] docs: fix broken URL of Image Catalog (#6784) The preferred ImageCatalog now carries the `-bookworm` suffix, which was missing from the documentation links pointing to the catalogs. Signed-off-by: hoka <187822495+hokaxbt@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. --- docs/src/image_catalog.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index 6078124fa6..09e148209d 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -85,13 +85,13 @@ specified major release. ### PostgreSQL Container Images You can install the -[latest version of the cluster catalog for the PostgreSQL Container Images](https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog.yaml) +[latest version of the cluster catalog for the PostgreSQL Container Images](https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog-bookworm.yaml) ([cloudnative-pg/postgres-containers](https://github.com/cloudnative-pg/postgres-containers) repository) with: ```shell kubectl apply \ - -f https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog.yaml + -f https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/main/Debian/ClusterImageCatalog-bookworm.yaml ``` ### PostGIS Container Images From 012d1fc3789ce7cdc07385a08aa544d57109e6a0 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 19 Feb 2025 10:32:13 +0100 Subject: [PATCH 360/836] fix: ensure operator version is reconciled (#6496) In the current code, the `operatorVersion` annotation is set manually when creating resources. This has led to several consistency issues, since that code path is not always invoked. This patch proposes a new approach that integrates the `operatorVersion` reconciliation within the `GetFixedInheritedAnnotations` function. This method offers several advantages: - The function is already called throughout the code, making it easy to implement the change consistently. - It simplifies code maintenance since the `operatorVersion` no longer requires a separate handling approach from the developer's perspective.
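A rough sketch of the resulting merge semantics follows. The annotation key and the `MergeMap` behavior (source entries copied over the destination) are assumptions based on this patch, not verified against the exact helpers:

```go
package main

import "fmt"

// operatorVersionAnnotation is assumed to match the operator's
// utils.OperatorVersionAnnotationName constant; the key is illustrative.
const operatorVersionAnnotation = "cnpg.io/operatorVersion"

// mergeMap copies src entries into dst, with src winning on conflicts,
// mirroring the assumed behavior of utils.MergeMap.
func mergeMap(dst, src map[string]string) {
	for k, v := range src {
		dst[k] = v
	}
}

// fixedInheritedAnnotations always carries the operator version and then
// layers any user-defined inherited annotations on top.
func fixedInheritedAnnotations(inherited map[string]string, version string) map[string]string {
	out := map[string]string{operatorVersionAnnotation: version}
	mergeMap(out, inherited)
	return out
}

func main() {
	fmt.Println(fixedInheritedAnnotations(map[string]string{"team": "db"}, "1.25.0"))
	// Even with no inherited metadata, the version annotation is present:
	fmt.Println(fixedInheritedAnnotations(nil, "1.25.0"))
}
```

The point of the refactor is visible in the second call: callers that previously had to remember to set the version separately now get it for free.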
Closes #6457 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- api/v1/cluster_funcs.go | 13 +++++--- internal/controller/cluster_create.go | 3 -- pkg/reconciler/instance/metadata_test.go | 23 ++++++++++---- .../persistentvolumeclaim/reconciler_test.go | 30 +++++++++++-------- 4 files changed, 44 insertions(+), 25 deletions(-) diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 432e46ee89..f8315bd7f0 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -601,12 +601,18 @@ func (cluster *Cluster) GetClientCASecretName() string { } // GetFixedInheritedAnnotations gets the annotations that should be -// inherited by all resources according the cluster spec +// inherited by all resources according to the cluster spec and the operator version func (cluster *Cluster) GetFixedInheritedAnnotations() map[string]string { + var meta metav1.ObjectMeta + utils.SetOperatorVersion(&meta, versions.Version) + if cluster.Spec.InheritedMetadata == nil || cluster.Spec.InheritedMetadata.Annotations == nil { - return nil + return meta.Annotations } - return cluster.Spec.InheritedMetadata.Annotations + + utils.MergeMap(meta.Annotations, cluster.Spec.InheritedMetadata.Annotations) + + return meta.Annotations } // GetFixedInheritedLabels gets the labels that should be @@ -1210,7 +1216,6 @@ func (cluster *Cluster) SetInheritedData(obj *metav1.ObjectMeta) { utils.InheritAnnotations(obj, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) utils.InheritLabels(obj, cluster.Labels, cluster.GetFixedInheritedLabels(), configuration.Current) utils.LabelClusterName(obj, cluster.GetName()) - utils.SetOperatorVersion(obj, versions.Version) } // ShouldForceLegacyBackup if present takes a backup without passing the name argument even on barman version 3.3.0+. 
diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 2a88deec31..0eda7e8002 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1169,7 +1169,6 @@ func (r *ClusterReconciler) createPrimaryInstance( "jobName", job.Name, "primary", true) - utils.SetOperatorVersion(&job.ObjectMeta, versions.Version) utils.InheritAnnotations(&job.ObjectMeta, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) utils.InheritAnnotations(&job.Spec.Template.ObjectMeta, cluster.Annotations, @@ -1266,7 +1265,6 @@ func (r *ClusterReconciler) joinReplicaInstance( return ctrl.Result{}, err } - utils.SetOperatorVersion(&job.ObjectMeta, versions.Version) utils.InheritAnnotations(&job.ObjectMeta, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) utils.InheritAnnotations(&job.Spec.Template.ObjectMeta, cluster.Annotations, @@ -1375,7 +1373,6 @@ func (r *ClusterReconciler) ensureInstancesAreCreated( return ctrl.Result{}, fmt.Errorf("unable to set the owner reference for the Pod: %w", err) } - utils.SetOperatorVersion(&instanceToCreate.ObjectMeta, versions.Version) utils.InheritAnnotations(&instanceToCreate.ObjectMeta, cluster.Annotations, cluster.GetFixedInheritedAnnotations(), configuration.Current) utils.InheritLabels(&instanceToCreate.ObjectMeta, cluster.Labels, diff --git a/pkg/reconciler/instance/metadata_test.go b/pkg/reconciler/instance/metadata_test.go index 8b99f3753e..5bbcd571d1 100644 --- a/pkg/reconciler/instance/metadata_test.go +++ b/pkg/reconciler/instance/metadata_test.go @@ -27,6 +27,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -395,8 +396,11 @@ var _ = Describe("object metadata test", func() { It("Should not change annotations if they already match the cluster's", func() { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Annotations: map[string]string{key: value}, + Name: "pod1", + Annotations: map[string]string{ + key: value, + utils.OperatorVersionAnnotationName: versions.Version, + }, }, } @@ -409,12 +413,16 @@ var _ = Describe("object metadata test", func() { }, } - Expect(cluster.Spec.InheritedMetadata.Annotations).To(Equal(cluster.GetFixedInheritedAnnotations())) + expectedAnnotations := cluster.GetFixedInheritedAnnotations() + expectedAnnotations[utils.OperatorVersionAnnotationName] = versions.Version + + Expect(expectedAnnotations).To(Equal(cluster.GetFixedInheritedAnnotations())) updated := updateClusterAnnotations(context.Background(), cluster, pod) Expect(updated).To(BeFalse()) - Expect(pod.Annotations).To(HaveLen(1)) + Expect(pod.Annotations).To(HaveLen(2)) Expect(pod.Annotations[key]).To(Equal(value)) + Expect(pod.Annotations[utils.OperatorVersionAnnotationName]).To(Equal(versions.Version)) }) It("Should correctly add AppArmor annotations if present in the cluster's annotations", func() { @@ -445,13 +453,16 @@ var _ = Describe("object metadata test", func() { pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", + Annotations: map[string]string{ + utils.OperatorVersionAnnotationName: versions.Version, + }, }, } cluster := &apiv1.Cluster{} updated := updateClusterAnnotations(context.Background(), cluster, pod) Expect(updated).To(BeFalse()) - Expect(pod.Annotations).To(BeEmpty()) + Expect(pod.Annotations).To(HaveLen(1)) }) }) }) @@ -564,7 +575,7 @@ var _ = Describe("metadata update functions", func() { It("Should updateClusterAnnotations correctly", func() { modified := updateClusterAnnotations(ctx, cluster, instance) Expect(modified).To(BeTrue()) - Expect(instance.Annotations).To(Equal(cluster.Spec.InheritedMetadata.Annotations)) + Expect(instance.Annotations).To(Equal(cluster.GetFixedInheritedAnnotations())) }) }) }) diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go index 05937ce383..78e5a44e42 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go @@ -33,6 +33,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -255,10 +256,11 @@ var _ = Describe("PVC reconciliation", Ordered, func() { ) Expect(err).ToNot(HaveOccurred()) Expect(pvcs.Items[2].Annotations).To(BeEquivalentTo(map[string]string{ - utils.PVCStatusAnnotationName: "ready", - utils.ClusterSerialAnnotationName: "3", - "annotation1": "value", - "annotation2": "value", + utils.PVCStatusAnnotationName: "ready", + utils.ClusterSerialAnnotationName: "3", + "annotation1": "value", + "annotation2": "value", + utils.OperatorVersionAnnotationName: versions.Version, })) }) @@ -387,8 +389,9 @@ var _ = Describe("PVC reconciliation", Ordered, func() { utils.ClusterInstanceRoleLabelName: "primary", })) Expect(patchedPvc.Annotations).To(Equal(map[string]string{ - utils.ClusterSerialAnnotationName: "1", - utils.PVCStatusAnnotationName: "ready", + utils.ClusterSerialAnnotationName: "1", + utils.PVCStatusAnnotationName: "ready", + utils.OperatorVersionAnnotationName: versions.Version, })) patchedPvc2 := fetchPVC(cl, pvc2) @@ -399,8 +402,9 @@ var _ = Describe("PVC reconciliation", Ordered, func() { utils.ClusterInstanceRoleLabelName: "replica", })) Expect(patchedPvc2.Annotations).To(Equal(map[string]string{ - utils.ClusterSerialAnnotationName: "2", - utils.PVCStatusAnnotationName: "ready", + utils.OperatorVersionAnnotationName: versions.Version, + utils.ClusterSerialAnnotationName: "2", + utils.PVCStatusAnnotationName: "ready", })) patchedPvc3Wal := fetchPVC(cl, pvc3Wal) @@ -411,8 +415,9 @@ var _ = Describe("PVC reconciliation", Ordered, func() { utils.ClusterInstanceRoleLabelName: "replica", })) Expect(patchedPvc3Wal.Annotations).To(Equal(map[string]string{ - utils.ClusterSerialAnnotationName: "3", - utils.PVCStatusAnnotationName: "ready", + utils.OperatorVersionAnnotationName: versions.Version, + utils.ClusterSerialAnnotationName: "3", + utils.PVCStatusAnnotationName: "ready", })) patchedPvc3Data := fetchPVC(cl, pvc3Data) @@ -423,8 +428,9 @@ var _ = Describe("PVC reconciliation", Ordered, func() { utils.ClusterInstanceRoleLabelName: "replica", })) Expect(patchedPvc3Data.Annotations).To(Equal(map[string]string{ - utils.ClusterSerialAnnotationName: "3", - utils.PVCStatusAnnotationName: "ready", + utils.OperatorVersionAnnotationName: versions.Version, + utils.ClusterSerialAnnotationName: "3", + utils.PVCStatusAnnotationName: "ready", })) }) }) From c49afe40f54f6d424d3a8fba55ef185ca1d582d8 Mon Sep 17 00:00:00 2001 From: Gabriele Fedi <91485518+GabriFedi97@users.noreply.github.com> Date: Wed, 19 Feb 2025 12:34:39 +0100 Subject: [PATCH 361/836] fix(metrics): ensure WAL metrics load after instance restart (#6816) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch resolves an issue where WAL metrics were not available after a PostgreSQL instance restart until a configuration change occurred. The fix ensures that WAL metrics are correctly loaded upon startup. 
Closes #6815 Signed-off-by: Gabriele Fedi Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- pkg/management/postgres/configuration.go | 5 +---- pkg/management/postgres/webserver/metricserver/wal.go | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 76748351fe..3a9cfa557f 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -83,10 +83,7 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster( "installing postgresql configuration: %w", err) } - - if sha256 != "" && postgresConfigurationChanged { - instance.ConfigSha256 = sha256 - } + instance.ConfigSha256 = sha256 return postgresConfigurationChanged, nil } diff --git a/pkg/management/postgres/webserver/metricserver/wal.go b/pkg/management/postgres/webserver/metricserver/wal.go index 47a502a6d8..543bce5f61 100644 --- a/pkg/management/postgres/webserver/metricserver/wal.go +++ b/pkg/management/postgres/webserver/metricserver/wal.go @@ -73,7 +73,7 @@ type walSettings struct { } func (s *walSettings) synchronize(db *sql.DB, configSha256 string) error { - if s.configSha256 == configSha256 { + if s.configSha256 != "" && s.configSha256 == configSha256 { return nil } From 06cc1e7c46b68c4ae00080e943acac2c2b104988 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Wed, 19 Feb 2025 13:28:52 +0100 Subject: [PATCH 362/836] chore: add `pg_catalog` schema where needed (#6622) Add the `pg_catalog` schema to fully qualify the usage of system functions and views provided by PostgreSQL. Signed-off-by: Gabriele Bartolini Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Quaresima Signed-off-by: Marco Nenciarini Signed-off-by: Pierrick Co-authored-by: Armando Ruocco Co-authored-by: Gabriele Quaresima Co-authored-by: Marco Nenciarini Co-authored-by: Pierrick --- .../cmd/manager/instance/run/lifecycle/run.go | 14 +++--- .../subscription/syncsequences/update.go | 2 +- .../controller/database_controller_sql.go | 2 +- .../database_controller_sql_test.go | 4 +- .../controller/database_controller_test.go | 2 +- .../controller/instance_controller.go | 8 ++-- .../controller/publication_controller_sql.go | 2 +- .../controller/publication_controller_test.go | 2 +- .../management/controller/roles/postgres.go | 8 ++-- .../management/controller/roles/suite_test.go | 8 ++-- .../slots/infrastructure/postgresmanager.go | 8 ++-- .../infrastructure/postgresmanager_test.go | 8 ++-- .../slots/reconciler/replicationslot_test.go | 18 ++++---- .../controller/slots/runner/runner_test.go | 12 ++--- .../controller/subscription_controller_sql.go | 2 +- .../subscription_controller_test.go | 2 +- .../controller/tablespaces/controller_test.go | 10 ++--- .../tablespaces/infrastructure/postgres.go | 4 +- .../infrastructure/postgres_test.go | 4 +- pkg/management/postgres/initdb.go | 4 +- pkg/management/postgres/instance.go | 6 +-- .../postgres/logicalimport/database.go | 4 +- .../postgres/logicalimport/database_test.go | 4 +- pkg/management/postgres/logicalimport/role.go | 8 ++-- .../postgres/logicalimport/role_test.go | 6 +-- .../postgres/logicalimport/roleinheritance.go | 8 ++-- .../logicalimport/roleinheritance_test.go | 6 +-- pkg/management/postgres/probes.go | 44 +++++++++---------- .../postgres/readiness/readiness.go | 6 +-- pkg/management/postgres/restore.go | 2 +- pkg/management/postgres/utils/utils.go | 2 +- pkg/management/postgres/wal.go | 4 +- pkg/management/postgres/wal_test.go | 4 +- 
.../postgres/webserver/backup_connection.go | 11 ++--- .../webserver/metricserver/pg_collector.go | 4 +- .../postgres/webserver/metricserver/wal.go | 2 +- .../webserver/metricserver/wal_test.go | 2 +- tests/e2e/asserts_test.go | 20 ++++----- tests/e2e/cluster_microservice_test.go | 2 +- tests/e2e/cluster_monolithic_test.go | 6 ++- tests/e2e/configuration_update_test.go | 4 +- .../declarative_database_management_test.go | 2 +- tests/e2e/disk_space_test.go | 4 +- tests/e2e/failover_test.go | 8 ++-- tests/e2e/fencing_test.go | 2 +- tests/e2e/initdb_test.go | 4 +- tests/e2e/managed_roles_test.go | 16 +++---- tests/e2e/publication_subscription_test.go | 4 +- tests/e2e/replica_mode_cluster_test.go | 8 ++-- tests/e2e/replication_slot_test.go | 2 +- tests/e2e/syncreplicas_test.go | 4 +- tests/e2e/tablespaces_test.go | 9 ++-- tests/e2e/update_user_test.go | 2 +- tests/e2e/upgrade_test.go | 2 +- tests/utils/postgres/postgres.go | 2 +- .../replicationslot/replication_slots.go | 6 +-- 56 files changed, 182 insertions(+), 172 deletions(-) diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go index d5d8036d5d..58127a192b 100644 --- a/internal/cmd/manager/instance/run/lifecycle/run.go +++ b/internal/cmd/manager/instance/run/lifecycle/run.go @@ -193,7 +193,7 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan // and has the required rights func configureStreamingReplicaUser(tx *sql.Tx) (bool, error) { var hasLoginRight, hasReplicationRight, hasSuperuser bool - row := tx.QueryRow("SELECT rolcanlogin, rolreplication, rolsuper FROM pg_roles WHERE rolname = $1", + row := tx.QueryRow("SELECT rolcanlogin, rolreplication, rolsuper FROM pg_catalog.pg_roles WHERE rolname = $1", apiv1.StreamingReplicationUser) err := row.Scan(&hasLoginRight, &hasReplicationRight, &hasSuperuser) if err != nil { @@ -247,10 +247,14 @@ func configurePgRewindPrivileges(pgVersion semver.Version, hasSuperuser bool, tx var hasPgRewindPrivileges bool row := tx.QueryRow( ` - SELECT has_function_privilege($1, 'pg_ls_dir(text, boolean, boolean)', 'execute') AND - has_function_privilege($2, 'pg_stat_file(text, boolean)', 'execute') AND - has_function_privilege($3, 'pg_read_binary_file(text)', 'execute') AND - has_function_privilege($4, 'pg_read_binary_file(text, bigint, bigint, boolean)', 'execute')`, + SELECT pg_catalog.has_function_privilege($1, + 'pg_catalog.pg_ls_dir(text, boolean, boolean)', 'execute') AND + pg_catalog.has_function_privilege($2, + 'pg_catalog.pg_stat_file(text, boolean)', 'execute') AND + pg_catalog.has_function_privilege($3, + 'pg_catalog.pg_read_binary_file(text)', 'execute') AND + pg_catalog.has_function_privilege($4, + 'pg_catalog.pg_read_binary_file(text, bigint, bigint, boolean)', 'execute')`, apiv1.StreamingReplicationUser, apiv1.StreamingReplicationUser, apiv1.StreamingReplicationUser, diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/update.go b/internal/cmd/plugin/logical/subscription/syncsequences/update.go index da6a7315d0..82d4170770 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/update.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/update.go @@ -44,7 +44,7 @@ func CreateSyncScript(source, destination SequenceMap, offset int) string { } script += fmt.Sprintf( - "SELECT setval(%s, %v);\n", + "SELECT pg_catalog.setval(%s, %v);\n", pq.QuoteLiteral(name), sqlTargetValue) } diff --git a/internal/management/controller/database_controller_sql.go 
b/internal/management/controller/database_controller_sql.go index 1a5f527918..fc6ccce66a 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -37,7 +37,7 @@ func detectDatabase( ctx, ` SELECT count(*) - FROM pg_database + FROM pg_catalog.pg_database WHERE datname = $1 `, obj.Spec.Name) diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go index 25a697db47..cc5fe74ced 100644 --- a/internal/management/controller/database_controller_sql_test.go +++ b/internal/management/controller/database_controller_sql_test.go @@ -66,7 +66,7 @@ var _ = Describe("Managed Database SQL", func() { It("returns true when it detects an existing Database", func(ctx SpecContext) { expectedValue := sqlmock.NewRows([]string{""}).AddRow("1") dbMock.ExpectQuery(`SELECT count(*) - FROM pg_database + FROM pg_catalog.pg_database WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) dbExists, err := detectDatabase(ctx, db, database) @@ -77,7 +77,7 @@ var _ = Describe("Managed Database SQL", func() { It("returns false when a Database is missing", func(ctx SpecContext) { expectedValue := sqlmock.NewRows([]string{""}).AddRow("0") dbMock.ExpectQuery(`SELECT count(*) - FROM pg_database + FROM pg_catalog.pg_database WHERE datname = $1`).WithArgs(database.Spec.Name).WillReturnRows(expectedValue) dbExists, err := detectDatabase(ctx, db, database) diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go index be9c8487cf..f872322e5c 100644 --- a/internal/management/controller/database_controller_test.go +++ b/internal/management/controller/database_controller_test.go @@ -42,7 +42,7 @@ import ( ) const databaseDetectionQuery = `SELECT count(*) - FROM pg_database + FROM pg_catalog.pg_database WHERE datname = $1` var _ = Describe("Managed Database status", func() { diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index d6900697c1..19af5b93c4 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -642,7 +642,7 @@ func (r *InstanceReconciler) reconcileExtensions( for _, extension := range postgres.ManagedExtensions { extensionIsUsed := extension.IsUsed(userSettings) - row := tx.QueryRow("SELECT COUNT(*) > 0 FROM pg_extension WHERE extname = $1", extension.Name) + row := tx.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_extension WHERE extname = $1", extension.Name) err = row.Err() if err != nil { break @@ -710,7 +710,7 @@ func (r *InstanceReconciler) reconcilePoolers( } var existsFunction bool - row = tx.QueryRow(fmt.Sprintf("SELECT COUNT(*) > 0 FROM pg_proc WHERE proname='%s' and prosrc='%s'", + row = tx.QueryRow(fmt.Sprintf("SELECT COUNT(*) > 0 FROM pg_catalog.pg_proc WHERE proname='%s' and prosrc='%s'", userSearchFunctionName, userSearchFunction)) err = row.Scan(&existsFunction) @@ -1507,8 +1507,8 @@ func (r *InstanceReconciler) dropStaleReplicationConnections( result, err := conn.ExecContext( ctx, - `SELECT pg_terminate_backend(pid) - FROM pg_stat_replication + `SELECT pg_catalog.pg_terminate_backend(pid) + FROM pg_catalog.pg_stat_replication WHERE application_name LIKE $1`, fmt.Sprintf("%v-%%", cluster.Name), ) diff --git a/internal/management/controller/publication_controller_sql.go 
b/internal/management/controller/publication_controller_sql.go index 0938111885..269b5ab04a 100644 --- a/internal/management/controller/publication_controller_sql.go +++ b/internal/management/controller/publication_controller_sql.go @@ -37,7 +37,7 @@ func (r *PublicationReconciler) alignPublication(ctx context.Context, obj *apiv1 ctx, ` SELECT count(*) - FROM pg_publication + FROM pg_catalog.pg_publication WHERE pubname = $1 `, obj.Spec.Name) diff --git a/internal/management/controller/publication_controller_test.go b/internal/management/controller/publication_controller_test.go index ea77ba6002..cd4a051ea5 100644 --- a/internal/management/controller/publication_controller_test.go +++ b/internal/management/controller/publication_controller_test.go @@ -42,7 +42,7 @@ import ( ) const publicationDetectionQuery = `SELECT count(*) - FROM pg_publication + FROM pg_catalog.pg_publication WHERE pubname = $1` var _ = Describe("Managed publication controller tests", func() { diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go index 26c909d4b2..b93089a381 100644 --- a/internal/management/controller/roles/postgres.go +++ b/internal/management/controller/roles/postgres.go @@ -44,8 +44,8 @@ func List(ctx context.Context, db *sql.DB) ([]DatabaseRole, error) { mem.inroles FROM pg_catalog.pg_authid as auth LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member + SELECT pg_catalog.array_agg(pg_catalog.pg_get_userbyid(roleid)) as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname not like 'pg\_%'`) if err != nil { @@ -282,8 +282,8 @@ func GetParentRoles(ctx context.Context, db *sql.DB, role DatabaseRole) ([]strin query := `SELECT mem.inroles FROM pg_catalog.pg_authid as auth LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member + SELECT pg_catalog.array_agg(pg_catalog.pg_get_userbyid(roleid)) as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname = $1` contextLog.Debug("get parent role", "query", query) diff --git a/internal/management/controller/roles/suite_test.go b/internal/management/controller/roles/suite_test.go index 82061021e3..460878349a 100644 --- a/internal/management/controller/roles/suite_test.go +++ b/internal/management/controller/roles/suite_test.go @@ -30,16 +30,16 @@ const ( mem.inroles FROM pg_catalog.pg_authid as auth LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member + SELECT pg_catalog.array_agg(pg_catalog.pg_get_userbyid(roleid)) as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname not like 'pg\_%'` expectedMembershipStmt = `SELECT mem.inroles FROM pg_catalog.pg_authid as auth LEFT JOIN ( - SELECT array_agg(pg_get_userbyid(roleid)) as inroles, member - FROM pg_auth_members GROUP BY member + SELECT pg_catalog.array_agg(pg_catalog.pg_get_userbyid(roleid)) as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname = $1` diff --git a/internal/management/controller/slots/infrastructure/postgresmanager.go b/internal/management/controller/slots/infrastructure/postgresmanager.go index 726a33986f..9f192a852a 100644 --- a/internal/management/controller/slots/infrastructure/postgresmanager.go +++ 
b/internal/management/controller/slots/infrastructure/postgresmanager.go @@ -32,7 +32,7 @@ func List(ctx context.Context, db *sql.DB, config *v1.ReplicationSlotsConfigurat ctx, `SELECT slot_name, slot_type, active, coalesce(restart_lsn::TEXT, '') AS restart_lsn, xmin IS NOT NULL OR catalog_xmin IS NOT NULL AS holds_xmin - FROM pg_replication_slots + FROM pg_catalog.pg_replication_slots WHERE NOT temporary AND slot_type = 'physical'`, ) if err != nil { @@ -83,7 +83,7 @@ func Update(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { return nil } - _, err := db.ExecContext(ctx, "SELECT pg_replication_slot_advance($1, $2)", slot.SlotName, slot.RestartLSN) + _, err := db.ExecContext(ctx, "SELECT pg_catalog.pg_replication_slot_advance($1, $2)", slot.SlotName, slot.RestartLSN) return err } @@ -92,7 +92,7 @@ func Create(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { contextLog := log.FromContext(ctx).WithName("createSlot") contextLog.Trace("Invoked", "slot", slot) - _, err := db.ExecContext(ctx, "SELECT pg_create_physical_replication_slot($1, $2)", + _, err := db.ExecContext(ctx, "SELECT pg_catalog.pg_create_physical_replication_slot($1, $2)", slot.SlotName, slot.RestartLSN != "") return err } @@ -105,6 +105,6 @@ func Delete(ctx context.Context, db *sql.DB, slot ReplicationSlot) error { return nil } - _, err := db.ExecContext(ctx, "SELECT pg_drop_replication_slot($1)", slot.SlotName) + _, err := db.ExecContext(ctx, "SELECT pg_catalog.pg_drop_replication_slot($1)", slot.SlotName) return err } diff --git a/internal/management/controller/slots/infrastructure/postgresmanager_test.go b/internal/management/controller/slots/infrastructure/postgresmanager_test.go index 5fdbf41718..79352e29df 100644 --- a/internal/management/controller/slots/infrastructure/postgresmanager_test.go +++ b/internal/management/controller/slots/infrastructure/postgresmanager_test.go @@ -52,7 +52,7 @@ var _ = Describe("PostgresManager", func() { }) Context("Create", func() { - const expectedSQL = "SELECT pg_create_physical_replication_slot" + const expectedSQL = "SELECT pg_catalog.pg_create_physical_replication_slot" It("should successfully create a replication slot", func(ctx SpecContext) { mock.ExpectExec(expectedSQL). WithArgs(slot.SlotName, slot.RestartLSN != ""). @@ -73,7 +73,7 @@ var _ = Describe("PostgresManager", func() { }) Context("List", func() { - const expectedSQL = "^SELECT (.+) FROM pg_replication_slots" + const expectedSQL = "^SELECT (.+) FROM pg_catalog.pg_replication_slots" var config *v1.ReplicationSlotsConfiguration BeforeEach(func() { @@ -123,7 +123,7 @@ var _ = Describe("PostgresManager", func() { }) Context("Update", func() { - const expectedSQL = "SELECT pg_replication_slot_advance" + const expectedSQL = "SELECT pg_catalog.pg_replication_slot_advance" It("should successfully update a replication slot", func(ctx SpecContext) { mock.ExpectExec(expectedSQL). 
@@ -151,7 +151,7 @@ var _ = Describe("PostgresManager", func() { }) Context("Delete", func() { - const expectedSQL = "SELECT pg_drop_replication_slot" + const expectedSQL = "SELECT pg_catalog.pg_drop_replication_slot" It("should successfully delete a replication slot", func(ctx SpecContext) { slot.Active = false diff --git a/internal/management/controller/slots/reconciler/replicationslot_test.go b/internal/management/controller/slots/reconciler/replicationslot_test.go index c124597df7..8179b80b03 100644 --- a/internal/management/controller/slots/reconciler/replicationslot_test.go +++ b/internal/management/controller/slots/reconciler/replicationslot_test.go @@ -78,10 +78,10 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { AddRow(newRepSlot("instance1", true, "lsn1")...). AddRow(newRepSlot("instance2", true, "lsn2")...) - mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + mock.ExpectQuery("^SELECT (.+) FROM pg_catalog.pg_replication_slots"). WillReturnRows(rows) - mock.ExpectExec("SELECT pg_create_physical_replication_slot"). + mock.ExpectExec("SELECT pg_catalog.pg_create_physical_replication_slot"). WithArgs(slotPrefix+"instance3", false). WillReturnResult(sqlmock.NewResult(1, 1)) @@ -97,10 +97,10 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { AddRow(newRepSlot("instance2", true, "lsn2")...). AddRow(newRepSlot("instance3", false, "lsn2")...) - mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + mock.ExpectQuery("^SELECT (.+) FROM pg_catalog.pg_replication_slots"). WillReturnRows(rows) - mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance3"). + mock.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slotPrefix + "instance3"). WillReturnResult(sqlmock.NewResult(1, 1)) cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") @@ -115,7 +115,7 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { AddRow(newRepSlot("instance2", true, "lsn2")...). AddRow(newRepSlot("instance3", true, "lsn2")...) - mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + mock.ExpectQuery("^SELECT (.+) FROM pg_catalog.pg_replication_slots"). WillReturnRows(rows) cluster := makeClusterWithInstanceNames([]string{"instance1", "instance2"}, "instance1") @@ -126,7 +126,7 @@ var _ = Describe("HA Replication Slots reconciliation in Primary", func() { }) var _ = Describe("dropReplicationSlots", func() { - const selectPgRepSlot = "^SELECT (.+) FROM pg_replication_slots" + const selectPgRepSlot = "^SELECT (.+) FROM pg_catalog.pg_replication_slots" var ( db *sql.DB @@ -166,7 +166,7 @@ var _ = Describe("dropReplicationSlots", func() { It("skips the deletion of user defined replication slots on the primary", func(ctx SpecContext) { rows := sqlmock.NewRows(repSlotColumns). AddRow("custom-slot", string(infrastructure.SlotTypePhysical), true, "lsn1", false) - mock.ExpectQuery("^SELECT (.+) FROM pg_replication_slots"). + mock.ExpectQuery("^SELECT (.+) FROM pg_catalog.pg_replication_slots"). WillReturnRows(rows) cluster := makeClusterWithInstanceNames([]string{}, "") @@ -182,7 +182,7 @@ var _ = Describe("dropReplicationSlots", func() { AddRow(newRepSlot("instance1", false, "lsn1")...) mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows) - mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance1"). + mock.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slotPrefix + "instance1"). 
WillReturnError(errors.New("delete error")) cluster := makeClusterWithInstanceNames([]string{}, "") @@ -197,7 +197,7 @@ var _ = Describe("dropReplicationSlots", func() { AddRow(newRepSlot("instance1", false, "lsn1")...) mock.ExpectQuery(selectPgRepSlot).WillReturnRows(rows) - mock.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotPrefix + "instance1"). + mock.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slotPrefix + "instance1"). WillReturnResult(sqlmock.NewResult(1, 1)) cluster := makeClusterWithInstanceNames([]string{}, "") diff --git a/internal/management/controller/slots/runner/runner_test.go b/internal/management/controller/slots/runner/runner_test.go index 87ebe69350..32442d754e 100644 --- a/internal/management/controller/slots/runner/runner_test.go +++ b/internal/management/controller/slots/runner/runner_test.go @@ -31,8 +31,8 @@ import ( var _ = Describe("Slot synchronization", Ordered, func() { const ( - selectPgReplicationSlots = "^SELECT (.+) FROM pg_replication_slots" - selectPgSlotAdvance = "SELECT pg_replication_slot_advance" + selectPgReplicationSlots = "^SELECT (.+) FROM pg_catalog.pg_replication_slots" + selectPgSlotAdvance = "SELECT pg_catalog.pg_replication_slot_advance" localPodName = "cluster-2" localSlotName = "_cnpg_cluster_2" @@ -81,7 +81,7 @@ var _ = Describe("Slot synchronization", Ordered, func() { mockLocal.ExpectQuery(selectPgReplicationSlots). WillReturnRows(sqlmock.NewRows(columns)) - mockLocal.ExpectExec("SELECT pg_create_physical_replication_slot"). + mockLocal.ExpectExec("SELECT pg_catalog.pg_create_physical_replication_slot"). WithArgs(slot3, true). WillReturnResult(sqlmock.NewResult(1, 1)) @@ -89,7 +89,7 @@ var _ = Describe("Slot synchronization", Ordered, func() { WithArgs(slot3, lsnSlot3). WillReturnResult(sqlmock.NewResult(1, 1)) - mockLocal.ExpectExec("SELECT pg_create_physical_replication_slot"). + mockLocal.ExpectExec("SELECT pg_catalog.pg_create_physical_replication_slot"). WithArgs(slot4, true). WillReturnResult(sqlmock.NewResult(1, 1)) @@ -137,7 +137,7 @@ var _ = Describe("Slot synchronization", Ordered, func() { WillReturnRows(sqlmock.NewRows(columns). AddRow(slot4, string(infrastructure.SlotTypePhysical), false, lsnSlot4, false)) - mockLocal.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slot4). + mockLocal.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slot4). WillReturnResult(sqlmock.NewResult(1, 1)) err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config) @@ -157,7 +157,7 @@ var _ = Describe("Slot synchronization", Ordered, func() { mockLocal.ExpectExec(selectPgSlotAdvance).WithArgs(slotWithXmin, "0/301C4D8"). WillReturnResult(sqlmock.NewResult(1, 1)) - mockLocal.ExpectExec("SELECT pg_drop_replication_slot").WithArgs(slotWithXmin). + mockLocal.ExpectExec("SELECT pg_catalog.pg_drop_replication_slot").WithArgs(slotWithXmin). 
WillReturnResult(sqlmock.NewResult(1, 1)) err := synchronizeReplicationSlots(ctx, dbPrimary, dbLocal, localPodName, &config) diff --git a/internal/management/controller/subscription_controller_sql.go b/internal/management/controller/subscription_controller_sql.go index fcb61bc3ab..c9b7e9857d 100644 --- a/internal/management/controller/subscription_controller_sql.go +++ b/internal/management/controller/subscription_controller_sql.go @@ -41,7 +41,7 @@ func (r *SubscriptionReconciler) alignSubscription( ctx, ` SELECT count(*) - FROM pg_subscription + FROM pg_catalog.pg_subscription WHERE subname = $1 `, obj.Spec.Name) diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go index f699324805..49eacfe556 100644 --- a/internal/management/controller/subscription_controller_test.go +++ b/internal/management/controller/subscription_controller_test.go @@ -43,7 +43,7 @@ import ( ) const subscriptionDetectionQuery = `SELECT count(*) - FROM pg_subscription + FROM pg_catalog.pg_subscription WHERE subname = $1` var _ = Describe("Managed subscription controller tests", func() { diff --git a/internal/management/controller/tablespaces/controller_test.go b/internal/management/controller/tablespaces/controller_test.go index 4bb80e5409..5d62d6da72 100644 --- a/internal/management/controller/tablespaces/controller_test.go +++ b/internal/management/controller/tablespaces/controller_test.go @@ -76,8 +76,8 @@ const ( SELECT pg_tablespace.spcname spcname, COALESCE(pg_roles.rolname, '') rolname - FROM pg_tablespace - LEFT JOIN pg_roles ON pg_tablespace.spcowner = pg_roles.oid + FROM pg_catalog.pg_tablespace + LEFT JOIN pg_catalog.pg_roles ON pg_tablespace.spcowner = pg_roles.oid WHERE spcname NOT LIKE $1 ` expectedCreateStmt = "CREATE TABLESPACE \"%s\" OWNER \"%s\" " + @@ -87,9 +87,9 @@ const ( expectedReadinessCheck = ` SELECT - NOT pg_is_in_recovery() - OR (SELECT coalesce(setting, '') = '' FROM pg_settings WHERE name = 'primary_conninfo') - OR pg_last_wal_replay_lsn() IS NOT NULL + NOT pg_catalog.pg_is_in_recovery() + OR (SELECT coalesce(setting, '') = '' FROM pg_catalog.pg_settings WHERE name = 'primary_conninfo') + OR pg_catalog.pg_last_wal_replay_lsn() IS NOT NULL ` ) diff --git a/internal/management/controller/tablespaces/infrastructure/postgres.go b/internal/management/controller/tablespaces/infrastructure/postgres.go index 16f6eb0ae7..b83fac27b4 100644 --- a/internal/management/controller/tablespaces/infrastructure/postgres.go +++ b/internal/management/controller/tablespaces/infrastructure/postgres.go @@ -41,8 +41,8 @@ func List(ctx context.Context, db *sql.DB) ([]Tablespace, error) { SELECT pg_tablespace.spcname spcname, COALESCE(pg_roles.rolname, '') rolname - FROM pg_tablespace - LEFT JOIN pg_roles ON pg_tablespace.spcowner = pg_roles.oid + FROM pg_catalog.pg_tablespace + LEFT JOIN pg_catalog.pg_roles ON pg_tablespace.spcowner = pg_roles.oid WHERE spcname NOT LIKE $1 `, postgres.SystemTablespacesPrefix, diff --git a/internal/management/controller/tablespaces/infrastructure/postgres_test.go b/internal/management/controller/tablespaces/infrastructure/postgres_test.go index 78b6e10f9a..4d9618bb73 100644 --- a/internal/management/controller/tablespaces/infrastructure/postgres_test.go +++ b/internal/management/controller/tablespaces/infrastructure/postgres_test.go @@ -30,8 +30,8 @@ var _ = Describe("Postgres tablespaces functions test", func() { SELECT pg_tablespace.spcname spcname, COALESCE(pg_roles.rolname, '') rolname - 
FROM pg_tablespace - LEFT JOIN pg_roles ON pg_tablespace.spcowner = pg_roles.oid + FROM pg_catalog.pg_tablespace + LEFT JOIN pg_catalog.pg_roles ON pg_tablespace.spcowner = pg_roles.oid WHERE spcname NOT LIKE $1 ` expectedCreateStmt := "CREATE TABLESPACE \"%s\" OWNER \"%s\" " + diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index 4f0b70d364..6fe2d9a37f 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -340,7 +340,9 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error { } var existsDB bool - dbRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_database WHERE datname = $1", info.ApplicationDatabase) + dbRow := dbSuperUser.QueryRow( + "SELECT COUNT(*) > 0 FROM pg_catalog.pg_database WHERE datname = $1", + info.ApplicationDatabase) err = dbRow.Scan(&existsDB) if err != nil { return err diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index f19fa468c5..f7402cc49d 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -1237,7 +1237,7 @@ func (instance *Instance) waitForInstanceRestarted(ctx context.Context, after ti return err } var startTime time.Time - row := db.QueryRowContext(ctx, "SELECT pg_postmaster_start_time()") + row := db.QueryRowContext(ctx, "SELECT pg_catalog.pg_postmaster_start_time()") err = row.Scan(&startTime) if err != nil { return err @@ -1257,8 +1257,8 @@ func (instance *Instance) DropConnections() error { } if _, err := conn.Exec( - `SELECT pg_terminate_backend(pid) - FROM pg_stat_activity + `SELECT pg_catalog.pg_terminate_backend(pid) + FROM pg_catalog.pg_stat_activity WHERE pid <> pg_backend_pid() AND backend_type = 'client backend';`, ); err != nil { diff --git a/pkg/management/postgres/logicalimport/database.go b/pkg/management/postgres/logicalimport/database.go index e09adf3e4a..82885a91c1 100644 --- a/pkg/management/postgres/logicalimport/database.go +++ b/pkg/management/postgres/logicalimport/database.go @@ -51,7 +51,7 @@ func (ds *databaseSnapshotter) getDatabaseList(ctx context.Context, target pool. 
if err != nil { return nil, err } - query := `SELECT datname FROM pg_database d WHERE datallowconn + query := `SELECT datname FROM pg_catalog.pg_database d WHERE datallowconn AND NOT datistemplate AND datallowconn AND datname != 'postgres' @@ -331,7 +331,7 @@ func (ds *databaseSnapshotter) dropExtensionsFromDatabase( // In Postgres, OID 16384 is the first non system ID that can be used in the database // catalog, as defined in the `FirstNormalObjectId` constant (src/include/access/transam.h) - rows, err := db.QueryContext(ctx, "SELECT extname FROM pg_extension WHERE oid >= 16384") + rows, err := db.QueryContext(ctx, "SELECT extname FROM pg_catalog.pg_extension WHERE oid >= 16384") if err != nil { return err } diff --git a/pkg/management/postgres/logicalimport/database_test.go b/pkg/management/postgres/logicalimport/database_test.go index cb3a6eb63a..945d2f78cd 100644 --- a/pkg/management/postgres/logicalimport/database_test.go +++ b/pkg/management/postgres/logicalimport/database_test.go @@ -130,7 +130,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { var expectedQuery *sqlmock.ExpectedQuery BeforeEach(func() { - expectedQuery = mock.ExpectQuery("SELECT extname FROM pg_extension WHERE oid >= 16384") + expectedQuery = mock.ExpectQuery("SELECT extname FROM pg_catalog.pg_extension WHERE oid >= 16384") }) It("should drop the user-defined extensions successfully", func(ctx SpecContext) { @@ -169,7 +169,7 @@ var _ = Describe("databaseSnapshotter methods test", func() { }) Context("getDatabaseList testing", func() { - const query = "SELECT datname FROM pg_database d " + + const query = "SELECT datname FROM pg_catalog.pg_database d " + "WHERE datallowconn AND NOT datistemplate AND datallowconn AND datname != 'postgres' " + "ORDER BY datname" diff --git a/pkg/management/postgres/logicalimport/role.go b/pkg/management/postgres/logicalimport/role.go index d3eae34624..9594d320f8 100644 --- a/pkg/management/postgres/logicalimport/role.go +++ b/pkg/management/postgres/logicalimport/role.go @@ -155,8 +155,8 @@ func (rs *roleManager) getRoles(ctx context.Context) ([]Role, error) { "rolcanlogin, rolconnlimit, rolpassword, " + "rolvaliduntil, rolreplication, rolbypassrls, " + "pg_catalog.shobj_description(oid, 'pg_authid') as rolcomment, " + - "rolname = current_user AS is_current_user " + - "FROM pg_authid " + + "rolname = CURRENT_USER AS is_current_user " + + "FROM pg_catalog.pg_authid " + "WHERE oid >= 16384 " + "ORDER BY 2" } else { @@ -166,8 +166,8 @@ func (rs *roleManager) getRoles(ctx context.Context) ([]Role, error) { "rolvaliduntil, rolreplication, " + "false as rolbypassrls, " + "pg_catalog.shobj_description(oid, 'pg_authid') as rolcomment, " + - "rolname = current_user AS is_current_user " + - "FROM pg_authid " + + "rolname = CURRENT_USER AS is_current_user " + + "FROM pg_catalog.pg_authid " + "WHERE oid >= 16384 " + "ORDER BY 2" } diff --git a/pkg/management/postgres/logicalimport/role_test.go b/pkg/management/postgres/logicalimport/role_test.go index a42fb001fd..006301bd57 100644 --- a/pkg/management/postgres/logicalimport/role_test.go +++ b/pkg/management/postgres/logicalimport/role_test.go @@ -28,9 +28,9 @@ import ( var _ = Describe("", func() { const inhQuery = "SELECT ur.rolname AS roleid, um.rolname AS member, a.admin_option, ug.rolname AS grantor " + - "FROM pg_auth_members a LEFT JOIN pg_authid ur on ur.oid = a.roleid " + - "LEFT JOIN pg_authid um on um.oid = a.member " + - "LEFT JOIN pg_authid ug on ug.oid = a.grantor " + + "FROM pg_catalog.pg_auth_members a LEFT JOIN 
pg_catalog.pg_authid ur on ur.oid = a.roleid " + + "LEFT JOIN pg_catalog.pg_authid um on um.oid = a.member " + + "LEFT JOIN pg_catalog.pg_authid ug on ug.oid = a.grantor " + "WHERE ur.oid >= 16384 AND um.oid >= 16384" var ( diff --git a/pkg/management/postgres/logicalimport/roleinheritance.go b/pkg/management/postgres/logicalimport/roleinheritance.go index 749ba1c33c..9866a0d3a8 100644 --- a/pkg/management/postgres/logicalimport/roleinheritance.go +++ b/pkg/management/postgres/logicalimport/roleinheritance.go @@ -98,10 +98,10 @@ func (rs *roleInheritanceManager) getRoleInheritance(ctx context.Context) ([]Rol "um.rolname AS member, " + "a.admin_option, " + "ug.rolname AS grantor " + - "FROM pg_auth_members a " + - "LEFT JOIN pg_authid ur on ur.oid = a.roleid " + - "LEFT JOIN pg_authid um on um.oid = a.member " + - "LEFT JOIN pg_authid ug on ug.oid = a.grantor " + + "FROM pg_catalog.pg_auth_members a " + + "LEFT JOIN pg_catalog.pg_authid ur on ur.oid = a.roleid " + + "LEFT JOIN pg_catalog.pg_authid um on um.oid = a.member " + + "LEFT JOIN pg_catalog.pg_authid ug on ug.oid = a.grantor " + "WHERE ur.oid >= 16384 AND um.oid >= 16384" rows, err := originDB.Query(query) diff --git a/pkg/management/postgres/logicalimport/roleinheritance_test.go b/pkg/management/postgres/logicalimport/roleinheritance_test.go index c652224755..bcea42216f 100644 --- a/pkg/management/postgres/logicalimport/roleinheritance_test.go +++ b/pkg/management/postgres/logicalimport/roleinheritance_test.go @@ -66,9 +66,9 @@ var _ = Describe("RoleInheritanceManager", func() { AddRow("role2", "member2", false, nil) query := "SELECT ur\\.rolname AS roleid, um\\.rolname AS member, a\\.admin_option, ug\\.rolname AS grantor " + - "FROM pg_auth_members a LEFT JOIN pg_authid ur on ur\\.oid = a\\.roleid " + - "LEFT JOIN pg_authid um on um\\.oid = a\\.member " + - "LEFT JOIN pg_authid ug on ug\\.oid = a\\.grantor " + + "FROM pg_catalog.pg_auth_members a LEFT JOIN pg_catalog.pg_authid ur on ur\\.oid = a\\.roleid " + + "LEFT JOIN pg_catalog.pg_authid um on um\\.oid = a\\.member " + + "LEFT JOIN pg_catalog.pg_authid ug on ug\\.oid = a\\.grantor " + "WHERE ur\\.oid >= 16384 AND um\\.oid >= 16384" mock.ExpectQuery(query).WillReturnRows(rows) diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go index fde50a9f48..e087e1111e 100644 --- a/pkg/management/postgres/probes.go +++ b/pkg/management/postgres/probes.go @@ -88,11 +88,11 @@ func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err er row := superUserDB.QueryRow( `SELECT - (pg_control_system()).system_identifier, + (pg_catalog.pg_control_system()).system_identifier, -- True if this is a primary instance - NOT pg_is_in_recovery() as primary, + NOT pg_catalog.pg_is_in_recovery() as primary, -- True if at least one column requires a restart - EXISTS(SELECT 1 FROM pg_settings WHERE pending_restart)`) + EXISTS(SELECT 1 FROM pg_catalog.pg_settings WHERE pending_restart)`) err = row.Scan(&result.SystemID, &result.IsPrimary, &result.PendingRestart) if err != nil { return result, err @@ -181,7 +181,7 @@ FROM SELECT name, setting as current_setting, boot_val as default_setting - FROM pg_settings + FROM pg_catalog.pg_settings WHERE pending_restart ) pending_settings LEFT OUTER JOIN @@ -192,7 +192,7 @@ LEFT OUTER JOIN setting as new_setting, rank() OVER (PARTITION BY name ORDER BY seqno DESC) as rank, applied - FROM pg_file_settings + FROM pg_catalog.pg_file_settings ) c WHERE rank = 1 AND not applied ) file_settings @@ -275,9 +275,9 @@ func (instance 
*Instance) fillBasebackupStats( var basebackupList []postgres.PgStatBasebackup rows, err := superUserDB.Query(`SELECT - usename, - application_name, - backend_start, + usename, + application_name, + backend_start, phase, COALESCE(backup_total, 0) AS backup_total, COALESCE(backup_streamed, 0) AS backup_streamed, @@ -285,8 +285,8 @@ func (instance *Instance) fillBasebackupStats( COALESCE(pg_size_pretty(backup_streamed), '') AS backup_streamed_pretty, COALESCE(tablespaces_total, 0) AS tablespaces_total, COALESCE(tablespaces_streamed, 0) AS tablespaces_streamed - FROM pg_stat_progress_basebackup b - JOIN pg_stat_activity a USING (pid) + FROM pg_catalog.pg_stat_progress_basebackup b + JOIN pg_catalog.pg_stat_activity a USING (pid) WHERE application_name ~ '-join$' ORDER BY 1, 2`) if err != nil { @@ -335,9 +335,9 @@ func (instance *Instance) fillStatusFromPrimary(result *postgres.PostgresqlStatu ` SELECT (SELECT COALESCE(last_archived_wal, '') FROM pg_catalog.pg_stat_archiver), - pg_walfile_name(pg_current_wal_lsn()) as current_wal, - pg_current_wal_lsn(), - (SELECT timeline_id FROM pg_control_checkpoint()) as timeline_id + pg_catalog.pg_walfile_name(pg_catalog.pg_current_wal_lsn()) as current_wal, + pg_catalog.pg_current_wal_lsn(), + (SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()) as timeline_id `) err = row.Scan(&result.LastArchivedWAL, &result.CurrentWAL, @@ -386,7 +386,7 @@ func (instance *Instance) fillReplicationSlotsStatus(result *postgres.Postgresql } rows, err := superUserDB.Query( - `SELECT + `SELECT slot_name, coalesce(plugin::text, ''), coalesce(slot_type::text, ''), @@ -398,7 +398,7 @@ func (instance *Instance) fillReplicationSlotsStatus(result *postgres.Postgresql coalesce(restart_lsn::text, ''), coalesce(wal_status::text, ''), safe_wal_size - FROM pg_replication_slots`) + FROM pg_catalog.pg_replication_slots`) if err != nil { return err } @@ -525,10 +525,10 @@ func (instance *Instance) fillStatusFromReplica(result *postgres.PostgresqlStatu // replicas row := superUserDB.QueryRow( "SELECT " + - "(SELECT timeline_id FROM pg_control_checkpoint()), " + - "COALESCE(pg_last_wal_receive_lsn()::varchar, ''), " + - "COALESCE(pg_last_wal_replay_lsn()::varchar, ''), " + - "pg_is_wal_replay_paused()") + "(SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()), " + + "COALESCE(pg_catalog.pg_last_wal_receive_lsn()::varchar, ''), " + + "COALESCE(pg_catalog.pg_last_wal_replay_lsn()::varchar, ''), " + + "pg_catalog.pg_is_wal_replay_paused()") if err := row.Scan(&result.TimeLineID, &result.ReceivedLsn, &result.ReplayLsn, &result.ReplayPaused); err != nil { return err } @@ -559,7 +559,7 @@ func (instance *Instance) IsWALReceiverActive() (bool, error) { return false, err } - row := superUserDB.QueryRow("SELECT COUNT(*) FROM pg_stat_wal_receiver") + row := superUserDB.QueryRow("SELECT COUNT(*) FROM pg_catalog.pg_stat_wal_receiver") err = row.Scan(&result) if err != nil { return false, err @@ -596,7 +596,7 @@ func (instance *Instance) TryGetPgStatWAL() (*PgStatWal, error) { var pgWalStat PgStatWal row := superUserDB.QueryRow( `SELECT - wal_records, + wal_records, wal_fpi, wal_bytes, wal_buffers_full, @@ -605,7 +605,7 @@ func (instance *Instance) TryGetPgStatWAL() (*PgStatWal, error) { wal_write_time, wal_sync_time, stats_reset - FROM pg_stat_wal`) + FROM pg_catalog.pg_stat_wal`) if err := row.Scan( &pgWalStat.WalRecords, &pgWalStat.WalFpi, diff --git a/pkg/management/postgres/readiness/readiness.go b/pkg/management/postgres/readiness/readiness.go index ed1edb90dc..c359853801 100644 
--- a/pkg/management/postgres/readiness/readiness.go +++ b/pkg/management/postgres/readiness/readiness.go @@ -78,9 +78,9 @@ func (data *Data) IsServerReady(ctx context.Context) error { ctx, ` SELECT - NOT pg_is_in_recovery() - OR (SELECT coalesce(setting, '') = '' FROM pg_settings WHERE name = 'primary_conninfo') - OR pg_last_wal_replay_lsn() IS NOT NULL + NOT pg_catalog.pg_is_in_recovery() + OR (SELECT coalesce(setting, '') = '' FROM pg_catalog.pg_settings WHERE name = 'primary_conninfo') + OR pg_catalog.pg_last_wal_replay_lsn() IS NOT NULL `, ) if err := row.Err(); err != nil { diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 979ed57f53..1737646db2 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -1035,7 +1035,7 @@ func waitUntilRecoveryFinishes(db *sql.DB) error { } return retry.OnError(RetryUntilRecoveryDone, errorIsRetriable, func() error { - row := db.QueryRow("SELECT pg_is_in_recovery()") + row := db.QueryRow("SELECT pg_catalog.pg_is_in_recovery()") var status bool if err := row.Scan(&status); err != nil { diff --git a/pkg/management/postgres/utils/utils.go b/pkg/management/postgres/utils/utils.go index 9b552050cb..e4e7ab3f39 100644 --- a/pkg/management/postgres/utils/utils.go +++ b/pkg/management/postgres/utils/utils.go @@ -132,7 +132,7 @@ func DBToString(t interface{}) (string, bool) { // GetAllAccessibleDatabases returns the list of all the accessible databases using the superuser func GetAllAccessibleDatabases(tx *sql.Tx, whereClause string) (databases []string, errors []error) { rows, err := tx.Query(strings.Join( - []string{"SELECT datname FROM pg_database", whereClause}, + []string{"SELECT datname FROM pg_catalog.pg_database", whereClause}, " WHERE "), ) if err != nil { diff --git a/pkg/management/postgres/wal.go b/pkg/management/postgres/wal.go index 3338175597..3a8288994e 100644 --- a/pkg/management/postgres/wal.go +++ b/pkg/management/postgres/wal.go @@ -91,7 +91,7 @@ func (w *walArchiveAnalyzer) mustHaveFirstWalArchivedWithBackoff(backoff wait.Ba func (w *walArchiveAnalyzer) mustHaveFirstWalArchived(db *sql.DB) error { row := db.QueryRow("SELECT COALESCE(last_archived_time,'-infinity') > " + "COALESCE(last_failed_time, '-infinity') AS is_archiving, last_failed_time IS NOT NULL " + - "FROM pg_stat_archiver") + "FROM pg_catalog.pg_stat_archiver") var walArchivingWorking, lastFailedTimePresent bool @@ -183,7 +183,7 @@ func (w *walArchiveBootstrapper) shipWalFile(db *sql.DB) error { return fmt.Errorf("error while requiring a checkpoint: %w", err) } - if _, err := db.Exec("SELECT pg_switch_wal()"); err != nil { + if _, err := db.Exec("SELECT pg_catalog.pg_switch_wal()"); err != nil { return fmt.Errorf("error while switching to a new WAL: %w", err) } diff --git a/pkg/management/postgres/wal_test.go b/pkg/management/postgres/wal_test.go index d1dc94a930..83006abbac 100644 --- a/pkg/management/postgres/wal_test.go +++ b/pkg/management/postgres/wal_test.go @@ -27,7 +27,7 @@ import ( ) var _ = Describe("ensure isWalArchiveWorking works correctly", func() { - const flexibleCoalescenceQuery = "SELECT COALESCE.*FROM pg_stat_archiver" + const flexibleCoalescenceQuery = "SELECT COALESCE.*FROM pg_catalog.pg_stat_archiver" var ( db *sql.DB mock sqlmock.Sqlmock @@ -75,7 +75,7 @@ var _ = Describe("ensure isWalArchiveWorking works correctly", func() { rows := sqlmock.NewRows([]string{"is_archiving", "last_failed_time_present"}).AddRow(false, false) 
mock.ExpectQuery(flexibleCoalescenceQuery).WillReturnRows(rows) mock.ExpectExec("CHECKPOINT").WillReturnResult(fakeResult) - mock.ExpectExec("SELECT pg_switch_wal()").WillReturnResult(fakeResult) + mock.ExpectExec("SELECT pg_catalog.pg_switch_wal()").WillReturnResult(fakeResult) // Call the function err := bootstrapper.mustHaveFirstWalArchived(db) diff --git a/pkg/management/postgres/webserver/backup_connection.go b/pkg/management/postgres/webserver/backup_connection.go index aae2fb2c72..bd65173ba5 100644 --- a/pkg/management/postgres/webserver/backup_connection.go +++ b/pkg/management/postgres/webserver/backup_connection.go @@ -156,7 +156,8 @@ func (bc *backupConnection) startBackup(ctx context.Context, backupName string) slotName := replicationSlotInvalidCharacters.ReplaceAllString(bc.data.BackupName, "_") if _, err := bc.conn.ExecContext( ctx, - "SELECT pg_create_physical_replication_slot(slot_name => $1, immediately_reserve => true, temporary => true)", + "SELECT pg_catalog.pg_create_physical_replication_slot("+ + "slot_name => $1, immediately_reserve => true, temporary => true)", slotName, ); err != nil { bc.err = fmt.Errorf("while creating the replication slot: %w", bc.err) @@ -165,10 +166,10 @@ func (bc *backupConnection) startBackup(ctx context.Context, backupName string) var row *sql.Row if bc.postgresMajorVersion < 15 { - row = bc.conn.QueryRowContext(ctx, "SELECT pg_start_backup($1, $2, false);", bc.data.BackupName, + row = bc.conn.QueryRowContext(ctx, "SELECT pg_catalog.pg_start_backup($1, $2, false);", bc.data.BackupName, bc.immediateCheckpoint) } else { - row = bc.conn.QueryRowContext(ctx, "SELECT pg_backup_start(label => $1, fast => $2);", bc.data.BackupName, + row = bc.conn.QueryRowContext(ctx, "SELECT pg_catalog.pg_backup_start(label => $1, fast => $2);", bc.data.BackupName, bc.immediateCheckpoint) } @@ -204,10 +205,10 @@ func (bc *backupConnection) stopBackup(ctx context.Context, backupName string) { var row *sql.Row if bc.postgresMajorVersion < 15 { row = bc.conn.QueryRowContext(ctx, - "SELECT lsn, labelfile, spcmapfile FROM pg_stop_backup(false, $1);", bc.waitForArchive) + "SELECT lsn, labelfile, spcmapfile FROM pg_catalog.pg_stop_backup(false, $1);", bc.waitForArchive) } else { row = bc.conn.QueryRowContext(ctx, - "SELECT lsn, labelfile, spcmapfile FROM pg_backup_stop(wait_for_archive => $1);", bc.waitForArchive) + "SELECT lsn, labelfile, spcmapfile FROM pg_catalog.pg_backup_stop(wait_for_archive => $1);", bc.waitForArchive) } bc.executeWithLock(backupName, func() error { diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index 1ae00b82d1..2fd2cc7ad0 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -583,8 +583,8 @@ func (e *Exporter) SetCustomQueries(queries *m.QueriesCollector) { // DefaultQueries is the set of default queries for postgresql var DefaultQueries = m.UserQueries{ "collector": m.UserQuery{ - Query: "SELECT current_database() as datname, relpages as lo_pages " + - "FROM pg_class c JOIN pg_namespace n ON (n.oid = c.relnamespace) " + + Query: "SELECT pg_catalog.current_database() as datname, relpages as lo_pages " + + "FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON (n.oid = c.relnamespace) " + "WHERE n.nspname = 'pg_catalog' AND c.relname = 'pg_largeobject';", TargetDatabases: []string{"*"}, Metrics: []m.Mapping{ diff --git 
a/pkg/management/postgres/webserver/metricserver/wal.go b/pkg/management/postgres/webserver/metricserver/wal.go index 543bce5f61..6d0aa73245 100644 --- a/pkg/management/postgres/webserver/metricserver/wal.go +++ b/pkg/management/postgres/webserver/metricserver/wal.go @@ -78,7 +78,7 @@ func (s *walSettings) synchronize(db *sql.DB, configSha256 string) error { } rows, err := db.Query(` -SELECT name, setting FROM pg_settings +SELECT name, setting FROM pg_catalog.pg_settings WHERE pg_settings.name IN ('wal_segment_size', 'min_wal_size', 'max_wal_size', 'wal_keep_size', 'wal_keep_segments', 'max_slot_wal_keep_size')`) // nolint: lll if err != nil { diff --git a/pkg/management/postgres/webserver/metricserver/wal_test.go b/pkg/management/postgres/webserver/metricserver/wal_test.go index 5685bfa262..9542711290 100644 --- a/pkg/management/postgres/webserver/metricserver/wal_test.go +++ b/pkg/management/postgres/webserver/metricserver/wal_test.go @@ -38,7 +38,7 @@ var _ = Describe("ensures walSettings works correctly", func() { maxSlotWalKeepSize float64 = -1 walKeepSegments float64 = 25 query = ` -SELECT name, setting FROM pg_settings +SELECT name, setting FROM pg_catalog.pg_settings WHERE pg_settings.name IN ('wal_segment_size', 'min_wal_size', 'max_wal_size', 'wal_keep_size', 'wal_keep_segments', 'max_slot_wal_keep_size')` ) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 200bc57248..32a01e734e 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -307,7 +307,7 @@ func AssertClusterIsReady(namespace string, clusterName string, timeout int, env PodName: primaryPod.Name, }, "postgres", - fmt.Sprintf("SELECT COUNT(*) FROM pg_stat_replication WHERE application_name IN (%s)", + fmt.Sprintf("SELECT COUNT(*) FROM pg_catalog.pg_stat_replication WHERE application_name IN (%s)", replicaNamesString), ) g.Expect(err).ToNot(HaveOccurred(), "cannot extract the list of streaming replicas") @@ -554,11 +554,11 @@ func QueryMatchExpectationPredicate( } func roleExistsQuery(roleName string) string { - return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_roles WHERE rolname='%v')", roleName) + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_roles WHERE rolname='%v')", roleName) } func databaseExistsQuery(dbName string) string { - return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname='%v')", dbName) + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_database WHERE datname='%v')", dbName) } // AssertDataExpectedCount verifies that an expected amount of rows exists on the table @@ -617,7 +617,7 @@ func AssertLargeObjectValue(namespace, clusterName string, oid int, data string) // AssertClusterStandbysAreStreaming verifies that all the standbys of a cluster have a wal-receiver running. 
func AssertClusterStandbysAreStreaming(namespace string, clusterName string, timeout int32) { - query := "SELECT count(*) FROM pg_stat_wal_receiver" + query := "SELECT count(*) FROM pg_catalog.pg_stat_wal_receiver" Eventually(func() error { standbyPods, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, clusterName) if err != nil { @@ -914,7 +914,7 @@ func AssertPgRecoveryMode(pod *corev1.Pod, expectedValue bool) { PodName: pod.Name, }, postgres.PostgresDBName, - "select pg_is_in_recovery();") + "select pg_catalog.pg_is_in_recovery()") if err != nil { GinkgoWriter.Printf("stdout: %v\nstderr: %v\n", stdOut, stdErr) } @@ -1157,7 +1157,7 @@ func AssertWritesToReplicaFails( var rawValue string // Expect to be connected to a replica - row := conn.QueryRow("SELECT pg_is_in_recovery()") + row := conn.QueryRow("SELECT pg_catalog.pg_is_in_recovery()") err = row.Scan(&rawValue) g.Expect(err).ToNot(HaveOccurred()) isReplica := strings.TrimSpace(rawValue) @@ -1186,7 +1186,7 @@ func AssertWritesToPrimarySucceeds(namespace, service, appDBName, appDBUser, app var rawValue string // Expect to be connected to a primary - row := conn.QueryRow("SELECT pg_is_in_recovery()") + row := conn.QueryRow("SELECT pg_catalog.pg_is_in_recovery()") err = row.Scan(&rawValue) g.Expect(err).ToNot(HaveOccurred()) isReplica := strings.TrimSpace(rawValue) @@ -2021,7 +2021,7 @@ func switchWalAndGetLatestArchive(namespace, podName string) string { PodName: podName, }, postgres.PostgresDBName, - "SELECT pg_walfile_name(pg_switch_wal());", + "SELECT pg_catalog.pg_walfile_name(pg_switch_wal())", ) Expect(err).ToNot( HaveOccurred(), @@ -2795,12 +2795,12 @@ func AssertReplicationSlotsOnPod( for _, slot := range expectedSlots { query := fmt.Sprintf( - "SELECT EXISTS (SELECT 1 FROM pg_replication_slots "+ + "SELECT EXISTS (SELECT 1 FROM pg_catalog.pg_replication_slots "+ "WHERE slot_name = '%v' AND active = '%t' "+ "AND temporary = 'f' AND slot_type = 'physical')", slot, isActiveOnReplica) if specs.IsPodPrimary(pod) { query = fmt.Sprintf( - "SELECT EXISTS (SELECT 1 FROM pg_replication_slots "+ + "SELECT EXISTS (SELECT 1 FROM pg_catalog.pg_replication_slots "+ "WHERE slot_name = '%v' AND active = '%t' "+ "AND temporary = 'f' AND slot_type = 'physical')", slot, isActiveOnPrimary) } diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index b48712f541..7db2359986 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -251,7 +251,7 @@ func assertTableAndDataOnImportedCluster( By("Verifying imported table has owner app user", func() { queryImported := fmt.Sprintf( - "select * from pg_tables where tablename = '%v' and tableowner = '%v'", + "select * from pg_catalog.pg_tables where tablename = '%v' and tableowner = '%v'", tableName, postgres.AppUser, ) diff --git a/tests/e2e/cluster_monolithic_test.go b/tests/e2e/cluster_monolithic_test.go index 05099d1ebf..5d52f4fb94 100644 --- a/tests/e2e/cluster_monolithic_test.go +++ b/tests/e2e/cluster_monolithic_test.go @@ -170,7 +170,7 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD }) By("verifying that the specified source databases were imported", func() { - stmt, err := connTarget.Prepare("SELECT datname FROM pg_database WHERE datname IN ($1)") + stmt, err := connTarget.Prepare("SELECT datname FROM pg_catalog.pg_database WHERE datname IN ($1)") Expect(err).ToNot(HaveOccurred()) rows, err := stmt.QueryContext(env.Ctx, pq.Array(sourceDatabases)) 
Expect(err).ToNot(HaveOccurred()) @@ -184,7 +184,9 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD By(fmt.Sprintf("verifying that the source superuser '%s' became a normal user in target", databaseSuperUser), func() { - row := connTarget.QueryRow(fmt.Sprintf("SELECT usesuper FROM pg_user WHERE usename='%s'", databaseSuperUser)) + row := connTarget.QueryRow(fmt.Sprintf( + "SELECT usesuper FROM pg_catalog.pg_user WHERE usename='%s'", + databaseSuperUser)) var superUser bool err := row.Scan(&superUser) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go index 46b7117b41..4690105b1f 100644 --- a/tests/e2e/configuration_update_test.go +++ b/tests/e2e/configuration_update_test.go @@ -199,7 +199,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada By("verify that connections succeed after pg_hba_reload", func() { // The new pg_hba rule should be present in every pod - query := "select count(*) from pg_hba_file_rules where type = 'host' and auth_method = 'trust'" + query := "select count(*) from pg_catalog.pg_hba_file_rules where type = 'host' and auth_method = 'trust'" for _, pod := range podList.Items { Eventually(func() (string, error) { stdout, _, err := exec.QueryInInstancePod( @@ -429,7 +429,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada if env.PostgresVersion > 14 { primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - query := "select count(1) from pg_ident_file_mappings;" + query := "select count(1) from pg_catalog.pg_ident_file_mappings;" By("check that there is only one entry in pg_ident_file_mappings", func() { Eventually(func() (string, error) { diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index 5cf4c6b010..ab6afe1131 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -75,7 +75,7 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test }) assertDatabaseHasExpectedFields := func(namespace, primaryPod string, db apiv1.Database) { - query := fmt.Sprintf("select count(*) from pg_database where datname = '%s' "+ + query := fmt.Sprintf("select count(*) from pg_catalog.pg_database where datname = '%s' "+ "and encoding = pg_char_to_encoding('%s') and datctype = '%s' and datcollate = '%s'", db.Spec.Name, db.Spec.Encoding, db.Spec.LcCtype, db.Spec.LcCollate) Eventually(func(g Gomega) { diff --git a/tests/e2e/disk_space_test.go b/tests/e2e/disk_space_test.go index c616dd8950..3915a66f1f 100644 --- a/tests/e2e/disk_space_test.go +++ b/tests/e2e/disk_space_test.go @@ -88,7 +88,7 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { query) Expect(err).To(HaveOccurred()) - query = "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT" + query = "CHECKPOINT; SELECT pg_catalog.pg_switch_wal(); CHECKPOINT" _, _, err = exec.QueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{ @@ -172,7 +172,7 @@ var _ = Describe("Volume space unavailable", Label(tests.LabelStorage), func() { }).WithTimeout(10 * time.Minute).Should(BeTrue()) }) By("writing some WAL", func() { - query := "CHECKPOINT; SELECT pg_switch_wal(); CHECKPOINT" + query := "CHECKPOINT; SELECT pg_catalog.pg_switch_wal(); CHECKPOINT" _, _, err := exec.QueryInInstancePod( 
env.Ctx, env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{ diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go index 6940c4b9fe..657321c73a 100644 --- a/tests/e2e/failover_test.go +++ b/tests/e2e/failover_test.go @@ -84,7 +84,7 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { Expect(err).ToNot(HaveOccurred()) // Get the walreceiver pid - query := "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walreceiver'" + query := "SELECT pid FROM pg_catalog.pg_stat_activity WHERE backend_type = 'walreceiver'" out, _, err := exec.EventuallyExecQueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{ @@ -105,7 +105,7 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { // Terminate the pausedReplica walsender on the primary. // We don't want to wait for the replication timeout. - query = fmt.Sprintf("SELECT pg_terminate_backend(pid) FROM pg_stat_replication "+ + query = fmt.Sprintf("SELECT pg_catalog.pg_terminate_backend(pid) FROM pg_catalog.pg_stat_replication "+ "WHERE application_name = '%v'", pausedReplica) _, _, err = exec.EventuallyExecQueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, @@ -142,7 +142,7 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, postgres.PostgresDBName, - "SELECT pg_current_wal_lsn()", + "SELECT pg_catalog.pg_current_wal_lsn()", RetryTimeout, PollingTime, ) @@ -161,7 +161,7 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { ) Expect(err).ToNot(HaveOccurred()) - query := fmt.Sprintf("SELECT true FROM pg_stat_replication "+ + query := fmt.Sprintf("SELECT true FROM pg_catalog.pg_stat_replication "+ "WHERE application_name = '%v' AND replay_lsn > '%v'", targetPrimary, strings.Trim(initialLSN, "\n")) // The replay_lsn of the targetPrimary should be ahead diff --git a/tests/e2e/fencing_test.go b/tests/e2e/fencing_test.go index acf8291104..366b805dd1 100644 --- a/tests/e2e/fencing_test.go +++ b/tests/e2e/fencing_test.go @@ -76,7 +76,7 @@ var _ = Describe("Fencing", Label(tests.LabelPlugin), func() { } checkInstanceIsStreaming := func(instanceName, namespace string) { - query := "SELECT count(*) FROM pg_stat_wal_receiver" + query := "SELECT count(*) FROM pg_catalog.pg_stat_wal_receiver" Eventually(func() (int, error) { err := env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: namespace, Name: instanceName}, diff --git a/tests/e2e/initdb_test.go b/tests/e2e/initdb_test.go index a7922a946b..ef9fc8e739 100644 --- a/tests/e2e/initdb_test.go +++ b/tests/e2e/initdb_test.go @@ -138,7 +138,7 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f Namespace: namespace, PodName: primary.Name, }, "postgres", - "select datcollate from pg_database where datname='template0'") + "select datcollate from pg_catalog.pg_database where datname='template0'") Expect(err).ToNot(HaveOccurred()) Expect(stdout, err).To(Equal("C\n")) }) @@ -171,7 +171,7 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f Namespace: namespace, PodName: primary.Name, }, "postgres", - "select datcollate from pg_database where datname='template0'") + "select datcollate from pg_catalog.pg_database where datname='template0'") Expect(err).ToNot(HaveOccurred()) Expect(stdout, err).To(Equal("en_US.utf8\n")) }) diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index 9f431f330d..b1e671fd8f 100644 
--- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -92,8 +92,8 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic query := `SELECT mem.inroles FROM pg_catalog.pg_authid as auth LEFT JOIN ( - SELECT string_agg(pg_get_userbyid(roleid), ',') as inroles, member - FROM pg_auth_members GROUP BY member + SELECT string_agg(pg_catalog.pg_get_userbyid(roleid), ',') as inroles, member + FROM pg_catalog.pg_auth_members GROUP BY member ) mem ON member = oid WHERE rolname =` + pq.QuoteLiteral(roleName) stdout, _, err := exec.QueryInInstancePod( @@ -155,12 +155,12 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(unrealizableUser), "f"), 30).Should(Succeed()) - query := fmt.Sprintf("SELECT true FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ + query := fmt.Sprintf("SELECT true FROM pg_catalog.pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ "and rolcreatedb=%v and rolcreaterole=%v and rolinherit=%v and rolreplication=%v "+ "and rolbypassrls=%v and rolconnlimit=%v", username, rolCanLoginInSpec, rolSuperInSpec, rolCreateDBInSpec, rolCreateRoleInSpec, rolInheritInSpec, rolReplicationInSpec, rolByPassRLSInSpec, rolConnLimitInSpec) - query2 := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_roles WHERE rolname='%s'", + query2 := fmt.Sprintf("SELECT rolvaliduntil is NULL FROM pg_catalog.pg_roles WHERE rolname='%s'", userWithPerpetualPass) for _, q := range []string{query, query2} { @@ -191,7 +191,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic roleExistsQuery(appUsername), "t"), 30).Should(Succeed()) query := fmt.Sprintf("SELECT rolcreatedb and rolvaliduntil='infinity' "+ - "FROM pg_roles WHERE rolname='%s'", appUsername) + "FROM pg_catalog.pg_roles WHERE rolname='%s'", appUsername) assertRoleStatus(namespace, clusterName, query, "t") }) @@ -250,7 +250,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify the role has been updated in the database", func() { - query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ + query := fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v", username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt) assertRoleStatus(namespace, clusterName, query, "1") @@ -283,7 +283,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic By("verifying Login is now enabled", func() { expectedLogin = true - query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ + query := fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_roles WHERE rolname='%s' and rolcanlogin=%v "+ "and rolcreatedb=%v and rolcreaterole=%v and rolconnlimit=%v", username, expectedLogin, expectedCreateDB, expectedCreateRole, expectedConnLmt) assertRoleStatus(namespace, clusterName, query, "1") @@ -321,7 +321,7 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }) By("Verify new_role exists with all attribute default", func() { - query := fmt.Sprintf("SELECT 1 FROM pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ + query := fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_roles WHERE rolname='%s' and rolcanlogin=%v and rolsuper=%v "+ "and rolcreatedb=%v and rolcreaterole=%v and 
rolinherit=%v and rolreplication=%v "+ "and rolbypassrls=%v and rolconnlimit=%v", newUserName, defaultRolCanLogin, defaultRolSuper, defaultRolCreateDB, diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go index ae1910fa29..e2537d5690 100644 --- a/tests/e2e/publication_subscription_test.go +++ b/tests/e2e/publication_subscription_test.go @@ -350,9 +350,9 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub }) func publicationExistsQuery(pubName string) string { - return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_publication WHERE pubname='%s')", pubName) + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_publication WHERE pubname='%s')", pubName) } func subscriptionExistsQuery(subName string) string { - return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_subscription WHERE subname='%s')", subName) + return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_subscription WHERE subname='%s')", subName) } diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index dd38fd09a0..88d606b0df 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -550,7 +550,7 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f env.Ctx, env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{Namespace: namespace, PodName: primary.Name}, "postgres", - "SELECT timeline_id FROM pg_control_checkpoint();", + "SELECT timeline_id FROM pg_catalog.pg_control_checkpoint()", ) g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.TrimSpace(stdout)).To(Equal(fmt.Sprintf("%d", expectedTimeline))) @@ -725,7 +725,7 @@ var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, f PodName: pod.Name, }, postgres.PostgresDBName, - "select pg_is_in_recovery();") + "select pg_catalog.pg_is_in_recovery()") g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.Trim(stdOut, "\n")).To(Equal("t")) }, 60, 10).Should(Succeed()) @@ -799,7 +799,7 @@ func assertReplicaClusterTopology(namespace, clusterName string) { }, &commandTimeout, "psql", "-U", "postgres", "-tAc", - "select string_agg(application_name, ',') from pg_stat_replication;", + "select string_agg(application_name, ',') from pg_catalog.pg_stat_replication;", ) if err != nil { return nil, err @@ -844,7 +844,7 @@ func assertReplicaClusterTopology(namespace, clusterName string) { }, &commandTimeout, "psql", "-U", "postgres", "-tAc", - "select sender_host from pg_stat_wal_receiver limit 1;", + "select sender_host from pg_catalog.pg_stat_wal_receiver limit 1", ) g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.TrimSpace(stdout)).To(BeEquivalentTo(sourceHost)) diff --git a/tests/e2e/replication_slot_test.go b/tests/e2e/replication_slot_test.go index e832a1fada..73c4ba87d3 100644 --- a/tests/e2e/replication_slot_test.go +++ b/tests/e2e/replication_slot_test.go @@ -117,7 +117,7 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - query := fmt.Sprintf("SELECT pg_create_physical_replication_slot('%s');", userPhysicalSlot) + query := fmt.Sprintf("SELECT pg_catalog.pg_create_physical_replication_slot('%s')", userPhysicalSlot) _, _, err = exec.QueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{ diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go 
index c6bc7dc3ef..9731ae3f73 100644 --- a/tests/e2e/syncreplicas_test.go +++ b/tests/e2e/syncreplicas_test.go @@ -57,7 +57,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { PodName: primaryPod.GetName(), }, "postgres", - fmt.Sprintf("SELECT count(*) from pg_stat_replication WHERE sync_state = '%s'", syncState)) + fmt.Sprintf("SELECT count(*) from pg_catalog.pg_stat_replication WHERE sync_state = '%s'", syncState)) Expect(stdErr).To(BeEmpty()) Expect(err).ShouldNot(HaveOccurred()) @@ -78,7 +78,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { PodName: primaryPod.GetName(), }, "postgres", - "select setting from pg_settings where name = 'synchronous_standby_names'") + "select setting from pg_catalog.pg_settings where name = 'synchronous_standby_names'") Expect(stdErr).To(BeEmpty()) Expect(err).ShouldNot(HaveOccurred()) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 53c1dc3e62..d3e6314bc6 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -1057,7 +1057,7 @@ func AssertDatabaseContainsTablespaces(cluster *apiv1.Cluster, timeout int) { Namespace: namespace, PodName: instance.Name, }, postgres.AppDBName, - "SELECT oid, spcname, pg_get_userbyid(spcowner) FROM pg_tablespace;", + "SELECT oid, spcname, pg_catalog.pg_get_userbyid(spcowner) FROM pg_catalog.pg_tablespace", ) g.Expect(stdErr).To(BeEmpty()) g.Expect(err).ShouldNot(HaveOccurred()) @@ -1113,8 +1113,8 @@ func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespace PodName: primary.Name, }, postgres.AppDBName, "CREATE TEMPORARY TABLE cnp_e2e_test_table (i INTEGER); "+ - "SELECT spcname FROM pg_tablespace WHERE OID="+ - "(SELECT reltablespace FROM pg_class WHERE oid = 'cnp_e2e_test_table'::regclass)", + "SELECT spcname FROM pg_catalog.pg_tablespace WHERE OID="+ + "(SELECT reltablespace FROM pg_catalog.pg_class WHERE oid = 'cnp_e2e_test_table'::regclass)", ) Expect(stdErr).To(BeEmpty()) Expect(err).ShouldNot(HaveOccurred()) @@ -1135,7 +1135,8 @@ func AssertTablespaceAndOwnerExist(cluster *apiv1.Cluster, tablespace, owner str Namespace: namespace, PodName: primaryPod.Name, }, postgres.AppDBName, - fmt.Sprintf("SELECT 1 FROM pg_tablespace WHERE spcname = '%s' AND pg_get_userbyid(spcowner) = '%s';", + fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_tablespace WHERE spcname = '%s' "+ + "AND pg_catalog.pg_get_userbyid(spcowner) = '%s'", tablespace, owner), ) diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go index 44d4784f70..424e6adf66 100644 --- a/tests/e2e/update_user_test.go +++ b/tests/e2e/update_user_test.go @@ -164,7 +164,7 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) }, 200).Should(Succeed()) - query := "SELECT rolpassword IS NULL FROM pg_authid WHERE rolname='postgres'" + query := "SELECT rolpassword IS NULL FROM pg_catalog.pg_authid WHERE rolname='postgres'" // We should have the `postgres` user with a null password Eventually(func() string { stdout, _, err := exec.QueryInInstancePod( diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index c96704c38c..aceaaa27de 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -726,7 +726,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O PodName: primary, }, exec.DatabaseName(databaseName), - "SELECT count(*) FROM pg_stat_replication") + "SELECT count(*) FROM 
pg_catalog.pg_stat_replication") return strings.Trim(out, "\n"), err }, 180).Should(BeEquivalentTo("2")) }) diff --git a/tests/utils/postgres/postgres.go b/tests/utils/postgres/postgres.go index 5db3eb088d..e06de1e5b6 100644 --- a/tests/utils/postgres/postgres.go +++ b/tests/utils/postgres/postgres.go @@ -59,7 +59,7 @@ func CountReplicas( pod *corev1.Pod, retryTimeout int, ) (int, error) { - query := "SELECT count(*) FROM pg_stat_replication" + query := "SELECT count(*) FROM pg_catalog.pg_stat_replication" stdOut, _, err := exec.EventuallyExecQueryInInstancePod( ctx, crudClient, kubeInterface, restConfig, exec.PodLocator{ diff --git a/tests/utils/replicationslot/replication_slots.go b/tests/utils/replicationslot/replication_slots.go index 6268e27eb9..f1913ffdb1 100644 --- a/tests/utils/replicationslot/replication_slots.go +++ b/tests/utils/replicationslot/replication_slots.go @@ -66,7 +66,7 @@ func PrintReplicationSlots( } m := make(map[string]string) for _, slot := range slots { - query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", slot) + query := fmt.Sprintf("SELECT restart_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '%v'", slot) restartLsn, _, err := exec.QueryInInstancePod( ctx, crudClient, kubeInterface, restConfig, exec.PodLocator{ @@ -146,7 +146,7 @@ func GetReplicationSlotsOnPod( return nil, err } - query := "SELECT slot_name FROM pg_replication_slots WHERE temporary = 'f' AND slot_type = 'physical'" + query := "SELECT slot_name FROM pg_catalog.pg_replication_slots WHERE temporary = 'f' AND slot_type = 'physical'" stdout, _, err := exec.QueryInInstancePod( ctx, crudClient, kubeInterface, restConfig, exec.PodLocator{ @@ -185,7 +185,7 @@ func GetReplicationSlotLsnsOnPod( lsnList := make([]string, 0, len(slots)) for _, slot := range slots { - query := fmt.Sprintf("SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = '%v'", + query := fmt.Sprintf("SELECT restart_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '%v'", slot) restartLsn, _, err := exec.QueryInInstancePod( ctx, crudClient, kubeInterface, restConfig, From fd7b6527c02c95b5600e2d5e48feec5f843b911a Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 19 Feb 2025 15:52:55 +0100 Subject: [PATCH 363/836] fix: improve efficiency of replication-sensitive parameter updates (#6440) When decreasing values of replication-sensitive parameters, such as `max_connections`, the reconciliation process could experience delays due to inefficient update propagation. In cases where these parameters are reduced using an unsupervised method, the instance manager does not trigger a reconciliation loop after the primary server restarts, leading to potential delays in applying the new configuration. This patch resolves the issue by ensuring that the status is updated with a known phase and reason, allowing the operator to promptly detect and synchronize the changes. 
Closes #6409 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Quaresima Signed-off-by: Jaime Silvela Co-authored-by: Armando Ruocco Co-authored-by: Gabriele Quaresima Co-authored-by: Jaime Silvela --- api/v1/cluster_funcs.go | 4 +-- .../controller/instance_controller.go | 31 ++++++++++++++++--- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index f8315bd7f0..c21acfe3c9 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -682,8 +682,8 @@ func (cluster *Cluster) GetSmartShutdownTimeout() int32 { // GetRestartTimeout is used to have a timeout for operations that involve // a restart of a PostgreSQL instance -func (cluster *Cluster) GetRestartTimeout() int32 { - return cluster.GetMaxStopDelay() + cluster.GetMaxStartDelay() +func (cluster *Cluster) GetRestartTimeout() time.Duration { + return time.Duration(cluster.GetMaxStopDelay()+cluster.GetMaxStartDelay()) * time.Second } // GetMaxSwitchoverDelay get the amount of time PostgreSQL has to stop before switchover diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 19af5b93c4..c1386796c2 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -318,11 +318,10 @@ func (r *InstanceReconciler) restartPrimaryInplaceIfRequested( if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { return false, fmt.Errorf("cannot restart the primary in-place when a switchover is in progress") } - restartTimeout := cluster.GetRestartTimeout() if err := r.instance.RequestAndWaitRestartSmartFast( ctx, - time.Duration(restartTimeout)*time.Second, + cluster.GetRestartTimeout(), ); err != nil { return true, err } @@ -1042,9 +1041,7 @@ func (r *InstanceReconciler) processConfigReloadAndManageRestart(ctx context.Con phaseReason := "PostgreSQL configuration changed" if status.IsPrimary && status.PendingRestartForDecrease { if cluster.GetPrimaryUpdateStrategy() == apiv1.PrimaryUpdateStrategyUnsupervised { - contextLogger.Info("Restarting primary in-place due to hot standby sensible parameters decrease") - restartTimeout := time.Duration(cluster.GetRestartTimeout()) * time.Second - return r.Instance().RequestAndWaitRestartSmartFast(ctx, restartTimeout) + return r.triggerRestartForDecrease(ctx, cluster) } reason := "decrease of hot standby sensitive parameters" contextLogger.Info("Waiting for the user to request a restart of the primary instance or a switchover "+ @@ -1074,6 +1071,30 @@ func (r *InstanceReconciler) processConfigReloadAndManageRestart(ctx context.Con ) } +// triggerRestartForDecrease triggers an in-place restart and then asks +// the operator to continue with the reconciliation. 
This is needed to +// apply a change in replica-sensitive parameters that need to be done +// on the primary node and, after that, to the replicas +func (r *InstanceReconciler) triggerRestartForDecrease(ctx context.Context, cluster *apiv1.Cluster) error { + contextLogger := log.FromContext(ctx) + + contextLogger.Info("Restarting primary in-place due to hot standby sensible parameters decrease") + if err := r.Instance().RequestAndWaitRestartSmartFast(ctx, cluster.GetRestartTimeout()); err != nil { + return err + } + + phase := apiv1.PhaseApplyingConfiguration + phaseReason := "Decrease of hot standby sensitive parameters" + + return clusterstatus.PatchWithOptimisticLock( + ctx, + r.client, + cluster, + clusterstatus.SetPhaseTX(phase, phaseReason), + clusterstatus.SetClusterReadyConditionTX, + ) +} + // refreshCertificateFilesFromSecret receive a secret and rewrite the file // corresponding to the server certificate func (r *InstanceReconciler) refreshInstanceCertificateFromSecret( From 36cb5188264d14509e14c6d583d111606b8bebdc Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 19 Feb 2025 17:39:31 +0100 Subject: [PATCH 364/836] refactor: simplify configparser by removing redundant logic (#6739) This patch refactors the configparser to remove redundant logic and improve code clarity. It eliminates unnecessary indirections. No functional changes are introduced, ensuring compatibility with existing behavior. Closes #6739 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- internal/configuration/configuration.go | 2 +- pkg/configparser/configparser.go | 5 ++- pkg/configparser/configparser_test.go | 53 ++++++++----------------- pkg/configparser/environment.go | 34 ---------------- 4 files changed, 21 insertions(+), 73 deletions(-) delete mode 100644 pkg/configparser/environment.go diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go index a70989aa55..bd0be9355a 100644 --- a/internal/configuration/configuration.go +++ b/internal/configuration/configuration.go @@ -153,7 +153,7 @@ func NewConfiguration() *Data { // ReadConfigMap reads the configuration from the environment and the passed in data map func (config *Data) ReadConfigMap(data map[string]string) { - configparser.ReadConfigMap(config, newDefaultConfig(), data, configparser.OsEnvironment{}) + configparser.ReadConfigMap(config, newDefaultConfig(), data) } // IsAnnotationInherited checks if an annotation with a certain name should diff --git a/pkg/configparser/configparser.go b/pkg/configparser/configparser.go index 40c2a8ed14..e9b9580323 100644 --- a/pkg/configparser/configparser.go +++ b/pkg/configparser/configparser.go @@ -49,6 +49,7 @@ package configparser import ( "fmt" + "os" "reflect" "strconv" "strings" @@ -60,7 +61,7 @@ var configparserLog = log.WithName("configparser") // ReadConfigMap reads the configuration from the environment and the passed in data map. 
// Config and defaults are supposed to be pointers to structs of the same type -func ReadConfigMap(target interface{}, defaults interface{}, data map[string]string, env EnvironmentSource) { +func ReadConfigMap(target interface{}, defaults interface{}, data map[string]string) { ensurePointerToCompatibleStruct("target", target, "default", defaults) count := reflect.TypeOf(defaults).Elem().NumField() @@ -98,7 +99,7 @@ func ReadConfigMap(target interface{}, defaults interface{}, data map[string]str value = valueField.String() } // If the key is present in the environment, use its value - if envValue := env.Getenv(envName); envValue != "" { + if envValue := os.Getenv(envName); envValue != "" { value = envValue } // If the key is present in the passed data, use its value diff --git a/pkg/configparser/configparser_test.go b/pkg/configparser/configparser_test.go index a58b8852c8..7c5193aae7 100644 --- a/pkg/configparser/configparser_test.go +++ b/pkg/configparser/configparser_test.go @@ -52,8 +52,8 @@ var defaultInheritedAnnotations = []string{ const oneNamespace = "one-namespace" // readConfigMap reads the configuration from the environment and the passed in data map -func (config *FakeData) readConfigMap(data map[string]string, env EnvironmentSource) { - ReadConfigMap(config, &FakeData{InheritedAnnotations: defaultInheritedAnnotations}, data, env) +func (config *FakeData) readConfigMap(data map[string]string) { + ReadConfigMap(config, &FakeData{InheritedAnnotations: defaultInheritedAnnotations}, data) } var _ = Describe("Data test suite", func() { @@ -64,11 +64,14 @@ var _ = Describe("Data test suite", func() { It("loads values from a map", func() { config := &FakeData{} + GinkgoT().Setenv("WATCH_NAMESPACE", "") + GinkgoT().Setenv("INHERITED_ANNOTATIONS", "") + GinkgoT().Setenv("INHERITED_LABELS", "") config.readConfigMap(map[string]string{ "WATCH_NAMESPACE": oneNamespace, "INHERITED_ANNOTATIONS": "one, two", "INHERITED_LABELS": "alpha, beta", - }, NewFakeEnvironment(nil)) + }) Expect(config.WatchNamespace).To(Equal(oneNamespace)) Expect(config.InheritedAnnotations).To(Equal([]string{"one", "two"})) Expect(config.InheritedLabels).To(Equal([]string{"alpha", "beta"})) @@ -76,13 +79,11 @@ var _ = Describe("Data test suite", func() { It("loads values from environment", func() { config := &FakeData{} - fakeEnv := NewFakeEnvironment(map[string]string{ - "WATCH_NAMESPACE": oneNamespace, - "INHERITED_ANNOTATIONS": "one, two", - "INHERITED_LABELS": "alpha, beta", - "EXPIRING_CHECK_THRESHOLD": "2", - }) - config.readConfigMap(nil, fakeEnv) + GinkgoT().Setenv("WATCH_NAMESPACE", oneNamespace) + GinkgoT().Setenv("INHERITED_ANNOTATIONS", "one, two") + GinkgoT().Setenv("INHERITED_LABELS", "alpha, beta") + GinkgoT().Setenv("EXPIRING_CHECK_THRESHOLD", "2") + config.readConfigMap(nil) Expect(config.WatchNamespace).To(Equal(oneNamespace)) Expect(config.InheritedAnnotations).To(Equal([]string{"one", "two"})) Expect(config.InheritedLabels).To(Equal([]string{"alpha", "beta"})) @@ -94,43 +95,23 @@ var _ = Describe("Data test suite", func() { CertificateDuration: 90, ExpiringCheckThreshold: 7, } - fakeEnv := NewFakeEnvironment(map[string]string{ - "EXPIRING_CHECK_THRESHOLD": "3600min", - "CERTIFICATE_DURATION": "unknown", - }) + GinkgoT().Setenv("EXPIRING_CHECK_THRESHOLD", "3600min") + GinkgoT().Setenv("CERTIFICATE_DURATION", "unknown") defaultData := &FakeData{ CertificateDuration: 90, ExpiringCheckThreshold: 7, } - ReadConfigMap(config, defaultData, nil, fakeEnv) + ReadConfigMap(config, defaultData, nil) 
Expect(config.ExpiringCheckThreshold).To(Equal(7)) Expect(config.CertificateDuration).To(Equal(90)) }) It("handles correctly default values of slices", func() { + GinkgoT().Setenv("INHERITED_ANNOTATIONS", "") + GinkgoT().Setenv("INHERITED_LABELS", "") config := &FakeData{} - config.readConfigMap(nil, NewFakeEnvironment(nil)) + config.readConfigMap(nil) Expect(config.InheritedAnnotations).To(Equal(defaultInheritedAnnotations)) Expect(config.InheritedLabels).To(BeNil()) }) }) - -// FakeEnvironment is an EnvironmentSource that fetches data from an internal map -type FakeEnvironment struct { - values map[string]string -} - -// NewFakeEnvironment creates a FakeEnvironment with the specified data inside -func NewFakeEnvironment(data map[string]string) FakeEnvironment { - f := FakeEnvironment{} - if data == nil { - data = make(map[string]string) - } - f.values = data - return f -} - -// Getenv retrieves the value of the environment variable named by the key -func (f FakeEnvironment) Getenv(key string) string { - return f.values[key] -} diff --git a/pkg/configparser/environment.go b/pkg/configparser/environment.go deleted file mode 100644 index 4c89740ce7..0000000000 --- a/pkg/configparser/environment.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package configparser - -import ( - "os" -) - -// EnvironmentSource is an interface to identify an environment values source. -type EnvironmentSource interface { - Getenv(key string) string -} - -// OsEnvironment is an EnvironmentSource that fetch data from the OS environment. -type OsEnvironment struct{} - -// Getenv retrieves the value of the environment variable named by the key. -func (OsEnvironment) Getenv(key string) string { - return os.Getenv(key) -} From fee1bc55466a4e052e38fcc6c6c04a2566daabf5 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Wed, 19 Feb 2025 18:15:09 +0100 Subject: [PATCH 365/836] feat(plugin): add `--ttl` option to `pgbench` command (#6701) This patch introduces the `--ttl` option to the `pgbench` plugin command, allowing users to configure automatic cleanup of completed jobs after a specified duration. By default, jobs do not expire, preserving existing behavior. Closes #4374 Signed-off-by: Pierrick Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- internal/cmd/plugin/pgbench/cmd.go | 7 +++++++ internal/cmd/plugin/pgbench/cmd_test.go | 4 +++- internal/cmd/plugin/pgbench/pgbench.go | 28 ++++++++++++++++++------- 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go index 11260d37d1..2e697b04fe 100644 --- a/internal/cmd/plugin/pgbench/cmd.go +++ b/internal/cmd/plugin/pgbench/cmd.go @@ -64,6 +64,13 @@ func NewCmd() *cobra.Command { "The name of the database that will be used by pgbench. Defaults to: app", ) + pgBenchCmd.Flags().Int32Var( + &run.ttlSecondsAfterFinished, + "ttl", + 0, + "Time to live of the pgbench job. 
Defaults to no TTL.", + ) + pgBenchCmd.Flags().BoolVar( &run.dryRun, "dry-run", diff --git a/internal/cmd/plugin/pgbench/cmd_test.go b/internal/cmd/plugin/pgbench/cmd_test.go index b1e539d24f..d75d1e383b 100644 --- a/internal/cmd/plugin/pgbench/cmd_test.go +++ b/internal/cmd/plugin/pgbench/cmd_test.go @@ -62,7 +62,7 @@ var _ = Describe("NewCmd", func() { testRun.dbName, _ = cmd.Flags().GetString("db-name") testRun.dryRun, _ = cmd.Flags().GetBool("dry-run") testRun.nodeSelector, _ = cmd.Flags().GetStringSlice("node-selector") - + testRun.ttlSecondsAfterFinished, _ = cmd.Flags().GetInt32("ttl") testRun.clusterName = args[0] testRun.pgBenchCommandArgs = args[1:] return nil @@ -75,6 +75,7 @@ var _ = Describe("NewCmd", func() { "--db-name=mydb", "--dry-run=true", "--node-selector=label=value", + "--ttl=86400", "arg1", "arg2", } @@ -91,6 +92,7 @@ var _ = Describe("NewCmd", func() { Expect(testRun.dbName).To(Equal("mydb")) Expect(testRun.dryRun).To(BeTrue()) Expect(testRun.nodeSelector).To(Equal([]string{"label=value"})) + Expect(testRun.ttlSecondsAfterFinished).To(Equal(int32(86400))) Expect(testRun.pgBenchCommandArgs).To(Equal([]string{"arg1", "arg2"})) }) }) diff --git a/internal/cmd/plugin/pgbench/pgbench.go b/internal/cmd/plugin/pgbench/pgbench.go index 0887374219..22ccbc7bf3 100644 --- a/internal/cmd/plugin/pgbench/pgbench.go +++ b/internal/cmd/plugin/pgbench/pgbench.go @@ -34,12 +34,13 @@ import ( ) type pgBenchRun struct { - jobName string - clusterName string - dbName string - nodeSelector []string - pgBenchCommandArgs []string - dryRun bool + jobName string + clusterName string + dbName string + nodeSelector []string + pgBenchCommandArgs []string + dryRun bool + ttlSecondsAfterFinished int32 } const ( @@ -59,6 +60,10 @@ var jobExample = ` # Create a job with given values and [cluster] "cluster-example" kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name -- \ + --time 30 --client 1 --jobs 1 + + # Create a job with given values on[cluster] "cluster-example". The job will be cleaned after 10 minutes. + kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name --ttl 600 -- \ --time 30 --client 1 --jobs 1` func (cmd *pgBenchRun) execute(ctx context.Context) error { @@ -124,8 +129,9 @@ func (cmd *pgBenchRun) buildJob(cluster *apiv1.Cluster) *batchv1.Job { labels := map[string]string{ "pgBenchJob": cluster.Name, } - return &batchv1.Job{ - // To ensure we have manifest with Kind and APi in --dry-run + + result := &batchv1.Job{ + // To ensure we have manifest with Kind and API in --dry-run TypeMeta: metav1.TypeMeta{ APIVersion: "batch/v1", Kind: "Job", @@ -158,6 +164,12 @@ func (cmd *pgBenchRun) buildJob(cluster *apiv1.Cluster) *batchv1.Job { }, }, } + + if cmd.ttlSecondsAfterFinished != 0 { + result.Spec.TTLSecondsAfterFinished = &cmd.ttlSecondsAfterFinished + } + + return result } func (cmd *pgBenchRun) buildEnvVariables() []corev1.EnvVar { From 50f0980b3dab83132c081303178e8dee565d9218 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 19 Feb 2025 18:47:18 +0100 Subject: [PATCH 366/836] fix: prevent leaking connection when snapshot backup fail (#6879) The instance manager allocates a PostgreSQL connection every time a volume snapshot backup need to be taken and then runs `pg_backup_start` on it. That connection will stay up until the backup is done. If the snapshotting process fails, the backup will be marked as failed but the connection won't be closed. 
This patch ensures that a subsequent backup is able to proceed correctly by closing the previous connection. A follow-up commit will allow the operator to close that stale connection even when there's no subsequent backup. Partially closes #6761 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- pkg/management/postgres/webserver/backup_connection.go | 7 +++++++ pkg/management/postgres/webserver/remote.go | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/management/postgres/webserver/backup_connection.go b/pkg/management/postgres/webserver/backup_connection.go index bd65173ba5..0da0096bcd 100644 --- a/pkg/management/postgres/webserver/backup_connection.go +++ b/pkg/management/postgres/webserver/backup_connection.go @@ -85,6 +85,13 @@ func (bc *backupConnection) closeConnection(backupName string) error { return bc.conn.Close() } +func (bc *backupConnection) forceCloseConnection() error { + bc.sync.Lock() + defer bc.sync.Unlock() + + return bc.conn.Close() +} + func (bc *backupConnection) executeWithLock(backupName string, cb func() error) { bc.sync.Lock() defer bc.sync.Unlock() diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index 95eab40892..a4b5fb3cf1 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -277,7 +277,10 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ sendUnprocessableEntityJSONResponse(w, "PROCESS_ALREADY_RUNNING", "") return } - if err := ws.currentBackup.closeConnection(p.BackupName); err != nil { + log.Info("trying to close the current backup connection", + "backupName", ws.currentBackup.data.BackupName, + ) + if err := ws.currentBackup.forceCloseConnection(); err != nil { if !errors.Is(err, sql.ErrConnDone) { log.Error(err, "Error while closing backup connection (start)") } From 9f3eddf9bef2580b32285d04ca90b6e84580c589 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 19 Feb 2025 19:06:44 +0100 Subject: [PATCH 367/836] fix(backup): clean unused backup connections (#6882) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An error while taking a snapshot backup could leave an unused connection open until the next backup. This patch ensures that backup connections are explicitly closed when no longer needed, preventing potential connection leaks.
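Reduced to its scheduling skeleton, the routine added below has roughly this shape (the `check` callback stands in for the real logic, which pings the stale session and looks up its `Backup` object):

```go
package sketch

import (
	"context"
	"time"
)

// watchStaleBackups runs check once per minute until the webserver's
// context is cancelled.
func watchStaleBackups(ctx context.Context, check func(context.Context)) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(time.Minute):
			check(ctx)
		}
	}
}
```

The patch uses `time.After` inside the `select`, which arms a fresh timer on each iteration; a `time.Ticker` would avoid that small per-loop allocation, but at a one-minute period the difference is irrelevant.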
Partially closes #6761 Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Signed-off-by: Niccolò Fei Co-authored-by: Leonardo Cecchi Co-authored-by: Niccolò Fei --- pkg/management/postgres/webserver/remote.go | 81 ++++++++++++++++++- .../postgres/webserver/webserver.go | 10 ++- tests/e2e/volume_snapshot_test.go | 76 +++++++++++++++++ 3 files changed, 165 insertions(+), 2 deletions(-) diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index a4b5fb3cf1..dad7df94c9 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -27,10 +27,12 @@ import ( "os" "os/exec" "path" + "time" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + apierrs "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -111,7 +113,84 @@ func NewRemoteWebServer( } } - return NewWebServer(server), nil + srv := NewWebServer(server) + + srv.routines = append(srv.routines, endpoints.cleanupStaleCollections) + + return srv, nil +} + +func (ws *remoteWebserverEndpoints) cleanupStaleCollections(ctx context.Context) { + closeBackupConnection := func(bc *backupConnection) { + log := log.WithValues( + "backupName", bc.data.BackupName, + "phase", bc.data.Phase, + ) + log.Warning("Closing stale PostgreSQL backup connection") + + if err := bc.conn.Close(); err != nil { + log.Error(err, "Error while closing stale PostgreSQL backup connection") + } + bc.data.Phase = Completed + } + + innerRoutine := func() { + if ws == nil { + return + } + bc := ws.currentBackup + if bc == nil || bc.conn == nil { + return + } + + if bc.data.Phase == Completed || bc.data.BackupName == "" { + return + } + + bc.sync.Lock() + defer bc.sync.Unlock() + + if bc.err != nil { + closeBackupConnection(bc) + return + } + + if err := bc.conn.PingContext(ctx); err != nil { + bc.err = fmt.Errorf("error while pinging: %w", err) + closeBackupConnection(bc) + return + } + + var backup apiv1.Backup + + err := ws.typedClient.Get(ctx, client.ObjectKey{ + Namespace: ws.instance.GetNamespaceName(), + Name: bc.data.BackupName, + }, &backup) + if apierrs.IsNotFound(err) { + bc.err = fmt.Errorf("backup %s not found", bc.data.BackupName) + closeBackupConnection(bc) + return + } + if err != nil { + return + } + + if backup.Status.IsDone() { + bc.err = fmt.Errorf("backup %s is done", bc.data.BackupName) + closeBackupConnection(bc) + return + } + } + + for { + select { + case <-ctx.Done(): + return + case <-time.After(1 * time.Minute): + innerRoutine() + } + } } func (ws *remoteWebserverEndpoints) isServerHealthy(w http.ResponseWriter, _ *http.Request) { diff --git a/pkg/management/postgres/webserver/webserver.go b/pkg/management/postgres/webserver/webserver.go index 9c6b0b90e3..a358dc5ae6 100644 --- a/pkg/management/postgres/webserver/webserver.go +++ b/pkg/management/postgres/webserver/webserver.go @@ -67,7 +67,8 @@ func (body Response[T]) EnsureDataIsPresent() error { // Webserver wraps a webserver to make it a kubernetes Runnable type Webserver struct { - server *http.Server + server *http.Server + routines []func(ctx context.Context) } // NewWebServer creates a Webserver as a Kubernetes Runnable, given a http.Server @@ -96,6 +97,13 @@ func (ws *Webserver) Start(ctx context.Context) error { } }() + subCtx, cancel := context.WithCancel(ctx) + defer cancel() + + for _, routine := range 
ws.routines { + routine(subCtx) + } + select { // we exit with error code, potentially we could do a retry logic, but rarely a webserver that doesn't start will run // on subsequent tries diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index fce9351788..2dff661848 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -20,12 +20,14 @@ import ( "encoding/json" "fmt" "os" + "strconv" "strings" "time" volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" k8client "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -33,6 +35,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/minio" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/secrets" @@ -66,6 +69,18 @@ var _ = Describe("Verify Volume Snapshot", return snapshotList, nil } + updateClusterSnapshotClass := func(namespace, clusterName, className string) { + cluster := &apiv1.Cluster{} + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var err error + cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + cluster.Spec.Backup.VolumeSnapshot.ClassName = className + return env.Client.Update(env.Ctx, cluster) + }) + Expect(err).ToNot(HaveOccurred()) + } + var namespace string Context("using the kubectl cnpg plugin", Ordered, func() { @@ -842,5 +857,66 @@ var _ = Describe("Verify Volume Snapshot", AssertDataExpectedCount(env, tableLocator, 6) }) }) + + It("should clean up unused backup connections", func() { + By("setting a non-existing snapshotClass", func() { + updateClusterSnapshotClass(namespace, clusterToSnapshotName, "wrongSnapshotClass") + }) + + By("starting a new backup that will fail", func() { + backupName := fmt.Sprintf("%s-failed", clusterToSnapshotName) + failedBackup, err := backups.Create( + env.Ctx, env.Client, + apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: backupName, + }, + Spec: apiv1.BackupSpec{ + Target: apiv1.BackupTargetPrimary, + Method: apiv1.BackupMethodVolumeSnapshot, + Cluster: apiv1.LocalObjectReference{Name: clusterToSnapshotName}, + }, + }, + ) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func(g Gomega) { + err = env.Client.Get(env.Ctx, types.NamespacedName{ + Namespace: namespace, + Name: backupName, + }, failedBackup) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(failedBackup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseFailed)) + g.Expect(failedBackup.Status.Error).To(ContainSubstring("Failed to get snapshot class")) + }, RetryTimeout).Should(Succeed()) + }) + + By("verifying that the backup connection is cleaned up", func() { + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, + clusterToSnapshotName) + Expect(err).ToNot(HaveOccurred()) + query := "SELECT count(*) FROM pg_stat_activity WHERE query ILIKE '%pg_backup_start%' " + + "AND application_name = 'cnpg-instance-manager'" + + Eventually(func() (int, error, error) { + stdout, _, err := exec.QueryInInstancePod( + env.Ctx, 
env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{ Namespace: primaryPod.Namespace, PodName: primaryPod.Name, }, postgres.PostgresDBName, query) value, atoiErr := strconv.Atoi(strings.TrimSpace(stdout)) return value, err, atoiErr }, RetryTimeout).Should(BeEquivalentTo(0), "Stale backup connection should have been dropped") }) By("resetting the snapshotClass value", func() { updateClusterSnapshotClass(namespace, clusterToSnapshotName, os.Getenv("E2E_CSI_STORAGE_CLASS")) }) }) }) }) From 8881db1ea9ef81aad5b4b158743e0e488e5f0542 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Thu, 20 Feb 2025 07:46:23 +0100 Subject: [PATCH 368/836] feat: allow using '-r' service for Pooler (#6800) Allowing the `-r` service to be added to the Pooler object lets the pooler connect to any of the pods without specifying whether it will be a read-only or read-write pod. Signed-off-by: Leonardo Cecchi --- api/v1/pooler_types.go | 5 ++++- config/crd/bases/postgresql.cnpg.io_poolers.yaml | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/api/v1/pooler_types.go b/api/v1/pooler_types.go index b3f06fcbf2..e81b08a50b 100644 --- a/api/v1/pooler_types.go +++ b/api/v1/pooler_types.go @@ -25,7 +25,7 @@ import ( ) // PoolerType is the type of the connection pool, meaning the service // we are targeting. Allowed values are `rw` and `ro`. -// +kubebuilder:validation:Enum=rw;ro +// +kubebuilder:validation:Enum=rw;ro;r type PoolerType string const ( @@ -35,6 +35,9 @@ const ( // PoolerTypeRO means that the pooler involves only the replicas PoolerTypeRO = PoolerType("ro") + // PoolerTypeR means that the pooler involves every instance + PoolerTypeR = PoolerType("r") + // DefaultPgBouncerPoolerAuthQuery is the default auth_query for PgBouncer DefaultPgBouncerPoolerAuthQuery = "SELECT usename, passwd FROM public.user_search($1)" ) diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index 162ba3b2da..bb2c120f1e 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -8781,6 +8781,7 @@ spec: enum: - rw - ro + - r type: string required: - cluster From 4751eb78d5d7959d46676cce8fca92b6ede66643 Mon Sep 17 00:00:00 2001 From: sharifmshaker <87284853+sharifmshaker@users.noreply.github.com> Date: Thu, 20 Feb 2025 01:27:40 -0600 Subject: [PATCH 369/836] docs: Remove deprecated Pod Security Policies from docs (#6303) The PodSecurityPolicy was removed in Kubernetes 1.25 and that version is not supported anymore. The section covering this was updated and is now replaced with new documentation about the containers and the SecurityContexts that are in use for containers and pods. Closes #5996 Signed-off-by: Sharif Shaker --- docs/src/operator_capability_levels.md | 2 +- docs/src/security.md | 55 ++++++++++++++++++-------- 2 files changed, 39 insertions(+), 18 deletions(-) diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index 3975036803..0aaaf5772a 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -159,7 +159,7 @@ CloudNativePG supports [management of PostgreSQL roles, users, and groups through declarative configuration](declarative_role_management.md) using the `.spec.managed.roles` stanza. -### Pod security policies +### Pod security standards For InfoSec requirements, the operator doesn't require privileged mode for any container.
It enforces a read-only root filesystem to guarantee containers diff --git a/docs/src/security.md b/docs/src/security.md index ec14f35d46..c5058c7f4a 100644 --- a/docs/src/security.md +++ b/docs/src/security.md @@ -293,28 +293,49 @@ CloudNativePG. : The instance manager requires to `update` and `patch` the status of any `Backup` resource in the namespace -### Pod Security Policies +### Pod and Container Security Contexts -!!! Important - Starting from Kubernetes v1.21, the use of `PodSecurityPolicy` has been - deprecated, and as of Kubernetes v1.25, it has been completely removed. Despite - this deprecation, we acknowledge that the operator is currently undergoing - testing in older and unsupported versions of Kubernetes. Therefore, this - section is retained for those specific scenarios. +A [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +defines privilege and access control settings for a pod or container. + +CloudNativePG does not require *privileged* mode for container execution. +The PostgreSQL containers run as the `postgres` system user. No component +whatsoever requires running as `root`. + +Likewise, Volume access does not require *privileged* mode nor `root` +privileges. Proper permissions must be assigned by the Kubernetes platform +and/or administrators. The PostgreSQL containers run with a read-only root +filesystem (i.e. no writable layer). -A [Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) -is the Kubernetes way to define security rules and specifications that a pod needs to meet -to run in a cluster. -For InfoSec reasons, every Kubernetes platform should implement them. +The operator manages the setting of security contexts for all pods and +containers of a PostgreSQL cluster. The [Seccomp Profile](https://kubernetes.io/docs/tutorials/security/seccomp/) +to be used for the PostgreSQL containers can be configured with the +`spec.seccompProfile` section of the `Cluster` resource. If this section is left +blank, the containers will use a seccompProfile `Type` of `RuntimeDefault`, that +is, the container runtime default. -CloudNativePG does not require *privileged* mode for containers execution. -The PostgreSQL containers run as `postgres` system user. No component whatsoever requires running as `root`. +The security context of PostgreSQL containers using the default `seccompProfile` +will look like this: +``` +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault +``` -Likewise, Volumes access does not require *privileges* mode or `root` privileges either. -Proper permissions must be properly assigned by the Kubernetes platform and/or administrators. -The PostgreSQL containers run with a read-only root filesystem (i.e. no writable layer). +#### Security Context Constraints -The operator explicitly sets the required security contexts. +When running in an environment that is utilizing +[Security Context Constraints (SCC)](https://docs.openshift.com/container-platform/4.17/authentication/managing-security-context-constraints.html) +the operator does not explicitly set the security context of the PostgreSQL +cluster pods, but rather allows the pods to inherit the restricted Security +Context Constraints that are already defined. 
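For readers who want the `spec.seccompProfile` reference above spelled out, a minimal illustrative excerpt follows; the cluster name and profile path are placeholders, and the field uses the standard Kubernetes `SeccompProfile` shape:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  # Replaces the default RuntimeDefault type described above
  seccompProfile:
    type: Localhost
    localhostProfile: profiles/postgres-restricted.json
  storage:
    size: 1Gi
```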
### Restricting Pod access using AppArmor From 627e8d31415a363cae0c0dea1b640a8f675ba0c1 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Thu, 20 Feb 2025 08:45:58 +0100 Subject: [PATCH 370/836] docs: add missing string in file names of the report command (#6866) There was a string missing, `(secret)`, in the documentation when referencing the files created by the report command in the plugin. This was added to the output examples and to the `head` commands that show part of the files. Signed-off-by: Pierrick --- docs/src/kubectl-plugin.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 485e1d9aed..648ded33e2 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -553,8 +553,8 @@ Archive: reportRedacted.zip inflating: report_operator_/manifests/validating-webhook-configuration.yaml inflating: report_operator_/manifests/mutating-webhook-configuration.yaml inflating: report_operator_/manifests/webhook-service.yaml - inflating: report_operator_/manifests/cnpg-ca-secret.yaml - inflating: report_operator_/manifests/cnpg-webhook-cert.yaml + inflating: report_operator_/manifests/cnpg-ca-secret(secret).yaml + inflating: report_operator_/manifests/cnpg-webhook-cert(secret).yaml ``` If you activated the `--logs` option, you'd see an extra subdirectory: @@ -590,7 +590,7 @@ You can verify that the confidential information is REDACTED by default: ```sh cd report_operator_/manifests/ -head cnpg-ca-secret.yaml +head cnpg-ca-secret\(secret\).yaml ``` ```yaml @@ -620,7 +620,7 @@ Successfully written report to "reportNonRedacted.zip" (format: "yaml") ```sh unzip reportNonRedacted.zip -head cnpg-ca-secret.yaml +head cnpg-ca-secret\(secret\).yaml ``` ```yaml From e2ee58e8046e1ca09b240d2fb56f355c7e2a4b80 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Thu, 20 Feb 2025 09:26:03 +0100 Subject: [PATCH 371/836] docs: add doc for the pgbench `ttl` option (#6889) Following: #6701 Signed-off-by: Pierrick Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- docs/src/benchmarking.md | 11 +++++++++++ internal/cmd/plugin/pgbench/pgbench.go | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/docs/src/benchmarking.md b/docs/src/benchmarking.md index 57cb7588fe..2526049477 100644 --- a/docs/src/benchmarking.md +++ b/docs/src/benchmarking.md @@ -78,6 +78,17 @@ kubectl cnpg pgbench \ -- --time 30 --client 1 --jobs 1 ``` +By default, jobs do not expire. You can enable automatic deletion with the +`--ttl` flag. The job will be deleted after the specified duration (in seconds). + +```shell +kubectl cnpg pgbench \ + --job-name pgbench-run \ + --ttl 600 \ + cluster-example \ + -- --time 30 --client 1 --jobs 1 +``` + If you want to run a `pgbench` job on a specific worker node, you can use the `--node-selector` option. Suppose you want to run the previous initialization job on a node having the `workload=pgbench` label, you can run: diff --git a/internal/cmd/plugin/pgbench/pgbench.go b/internal/cmd/plugin/pgbench/pgbench.go index 22ccbc7bf3..b6b15d12d9 100644 --- a/internal/cmd/plugin/pgbench/pgbench.go +++ b/internal/cmd/plugin/pgbench/pgbench.go @@ -62,7 +62,7 @@ var jobExample = ` kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name -- \ --time 30 --client 1 --jobs 1 - # Create a job with given values on[cluster] "cluster-example". The job will be cleaned after 10 minutes.
+ # Create a job with given values on [cluster] "cluster-example". The job will be cleaned after 10 minutes. kubectl-cnpg pgbench cluster-example --db-name pgbenchDBName --job-name job-name --ttl 600 -- \ --time 30 --client 1 --jobs 1` From f323a402f96f191653f741bda7642aa4549a3115 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Thu, 20 Feb 2025 09:49:43 +0100 Subject: [PATCH 372/836] fix(cnpg-plugin): pgbench job when a Cluster is using an ImageCatalog (#6868) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #6825 Signed-off-by: Niccolò Fei --- internal/cmd/plugin/pgbench/pgbench.go | 3 +-- internal/controller/cluster_upgrade.go | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/cmd/plugin/pgbench/pgbench.go b/internal/cmd/plugin/pgbench/pgbench.go index b6b15d12d9..6b415ab949 100644 --- a/internal/cmd/plugin/pgbench/pgbench.go +++ b/internal/cmd/plugin/pgbench/pgbench.go @@ -125,7 +125,6 @@ func (cmd *pgBenchRun) buildNodeSelector() map[string]string { } func (cmd *pgBenchRun) buildJob(cluster *apiv1.Cluster) *batchv1.Job { - clusterImageName := cluster.Spec.ImageName labels := map[string]string{ "pgBenchJob": cluster.Name, } @@ -152,7 +151,7 @@ Containers: []corev1.Container{ { Name: "pgbench", - Image: clusterImageName, + Image: cluster.GetImageName(), ImagePullPolicy: corev1.PullAlways, Env: cmd.buildEnvVariables(), Command: []string{pgBenchKeyWord}, diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go index b29dee1929..dfcc54dc8f 100644 --- a/internal/controller/cluster_upgrade.go +++ b/internal/controller/cluster_upgrade.go @@ -666,7 +666,7 @@ func (r *ClusterReconciler) upgradePod( ) error { log.FromContext(ctx).Info("Recreating instance pod", "pod", pod.Name, - "to", cluster.Spec.ImageName, + "to", cluster.GetImageName(), "reason", reason, ) From 91ed60d4c15900e3d3a8ffca99056de6a13cfea5 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 20 Feb 2025 14:59:50 +0100 Subject: [PATCH 373/836] fix: ensure cluster dependent resources reconcile after cluster rehydration (#6607) Closes #6550 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- api/v1/database_funcs.go | 5 +++++ api/v1/publication_funcs.go | 5 +++++ api/v1/subscription_funcs.go | 5 +++++ internal/controller/finalizers_delete.go | 8 ++++++-- 4 files changed, 21 insertions(+), 2 deletions(-) diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go index 2e87eba148..91511c411c 100644 --- a/api/v1/database_funcs.go +++ b/api/v1/database_funcs.go @@ -65,6 +65,11 @@ func (db *Database) HasReconciliations() bool { return db.Status.ObservedGeneration > 0 } +// SetStatusObservedGeneration sets the observed generation of the database +func (db *Database) SetStatusObservedGeneration(obsGeneration int64) { + db.Status.ObservedGeneration = obsGeneration +} + // MustHaveManagedResourceExclusivity detects conflicting databases func (dbList *DatabaseList) MustHaveManagedResourceExclusivity(reference *Database) error { pointers := toSliceWithPointers(dbList.Items) diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go index c32cc0c0cb..f6076abafc 100644 --- a/api/v1/publication_funcs.go +++ b/api/v1/publication_funcs.go @@ -65,6 +65,11 @@ func (pub *Publication) GetName() string { return pub.Name } +// SetStatusObservedGeneration sets the observed generation of
the publication +func (pub *Publication) SetStatusObservedGeneration(obsGeneration int64) { + pub.Status.ObservedGeneration = obsGeneration +} + // MustHaveManagedResourceExclusivity detects conflicting publications func (pub *PublicationList) MustHaveManagedResourceExclusivity(reference *Publication) error { pointers := toSliceWithPointers(pub.Items) diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go index a337bb04a3..e19c7ae24f 100644 --- a/api/v1/subscription_funcs.go +++ b/api/v1/subscription_funcs.go @@ -65,6 +65,11 @@ func (sub *Subscription) HasReconciliations() bool { return sub.Status.ObservedGeneration > 0 } +// SetStatusObservedGeneration sets the observed generation of the subscription +func (sub *Subscription) SetStatusObservedGeneration(obsGeneration int64) { + sub.Status.ObservedGeneration = obsGeneration +} + // MustHaveManagedResourceExclusivity detects conflicting subscriptions func (pub *SubscriptionList) MustHaveManagedResourceExclusivity(reference *Subscription) error { pointers := toSliceWithPointers(pub.Items) diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go index 6af03883b1..e630576ca6 100644 --- a/internal/controller/finalizers_delete.go +++ b/internal/controller/finalizers_delete.go @@ -86,6 +86,7 @@ type clusterOwnedResourceWithStatus interface { GetClusterRef() corev1.LocalObjectReference GetStatusMessage() string SetAsFailed(err error) + SetStatusObservedGeneration(obsGeneration int64) } func toSliceWithPointers[T any](items []T) []*T { @@ -121,8 +122,11 @@ func notifyOwnedResourceDeletion[T clusterOwnedResourceWithStatus]( if obj.GetStatusMessage() != statusMessage { obj.SetAsFailed(errors.New(statusMessage)) - if err := cli.Status().Patch(ctx, obj, client.MergeFrom(origObj)); err != nil { - itemLogger.Error(err, "error while setting failed status for cluster deletion") + obj.SetStatusObservedGeneration(0) + // We need to use an update here because of the observed generation set to 0 + // that would be ignored with the patch method. 
+ if err := cli.Status().Update(ctx, obj); err != nil { + itemLogger.Error(err, "error while updating failed status for cluster deletion") return err } } From 8bb54e096cbdc5ef2f0b0ab55b5783b91faa874b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 20 Feb 2025 17:21:34 +0100 Subject: [PATCH 374/836] chore(deps): update dependency golang to v1.24 (main) (#6827) --- .github/workflows/backport.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/release-publish.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 37b2e1ec97..666ae4db8b 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -9,7 +9,7 @@ on: - main env: - GOLANG_VERSION: "1.23.x" + GOLANG_VERSION: "1.24.x" jobs: # Label the source pull request with 'backport-requested' and all supported releases label, the goal is, by default diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index aa638e4e5a..7e2134c70c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,7 @@ on: # set up environment variables to be used across all the jobs env: - GOLANG_VERSION: "1.23.x" + GOLANG_VERSION: "1.24.x" jobs: duplicate_runs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 3d19469a94..1bde259d93 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -34,7 +34,7 @@ on: # set up environment variables to be used across all the jobs env: - GOLANG_VERSION: "1.23.x" + GOLANG_VERSION: "1.24.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.26.0" ROOK_VERSION: "v1.16.3" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 8a233dfef5..74a7d4dd50 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -16,7 +16,7 @@ on: # set up environment variables to be used across all the jobs env: - GOLANG_VERSION: "1.23.x" + GOLANG_VERSION: "1.24.x" GOLANGCI_LINT_VERSION: "v1.64.4" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.26.0" diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index bb7764fa16..590e81febe 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -8,7 +8,7 @@ on: - v* env: - GOLANG_VERSION: "1.23.x" + GOLANG_VERSION: "1.24.x" CNPG_IMAGE_NAME: "ghcr.io/${{ github.repository }}" permissions: From 3ce228aba329697c98b99ee6a1eee44c49f0e924 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 21 Feb 2025 07:20:35 +0100 Subject: [PATCH 375/836] chore(deps): update dependency golangci/golangci-lint to v1.64.5 (main) (#6907) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 74a7d4dd50..db4e642035 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -17,7 +17,7 @@ on: # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.24.x" - GOLANGCI_LINT_VERSION: "v1.64.4" + 
GOLANGCI_LINT_VERSION: "v1.64.5" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.26.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" From da0f2a50b16a29473f32fd1e5c5e64cfb06ea0f6 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 21 Feb 2025 08:12:12 +0100 Subject: [PATCH 376/836] test(snapshot): dump VolumeSnapshot objects when E2e test fail (#6908) Fixes: #6408 Signed-off-by: Leonardo Cecchi --- tests/utils/namespaces/namespace.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/utils/namespaces/namespace.go b/tests/utils/namespaces/namespace.go index a4e27dc91e..70717fe816 100644 --- a/tests/utils/namespaces/namespace.go +++ b/tests/utils/namespaces/namespace.go @@ -330,6 +330,14 @@ func DumpNamespaceObjects( _, _ = fmt.Fprintln(w, string(out)) } + // dump volumesnapshot info + volumeSnaphostList, _ := storage.GetSnapshotList(ctx, crudClient, namespace) + for _, volumeSnapshot := range volumeSnaphostList.Items { + out, _ := json.MarshalIndent(volumeSnapshot, "", " ") + _, _ = fmt.Fprintf(w, "Dumping %v/%v VolumeSnapshot\n", namespace, volumeSnapshot.Name) + _, _ = fmt.Fprintln(w, string(out)) + } + err = w.Flush() if err != nil { fmt.Println(err) From 209162dbc1433e24372be4092eac3878c29fd5c3 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 21 Feb 2025 08:53:27 +0100 Subject: [PATCH 377/836] chore(backup,snapshot): remove the force parameter (#6898) Signed-off-by: Armando Ruocco --- pkg/management/postgres/webserver/remote.go | 5 ----- pkg/reconciler/backup/volumesnapshot/online.go | 1 - 2 files changed, 6 deletions(-) diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index dad7df94c9..68d5097302 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -58,7 +58,6 @@ type StartBackupRequest struct { ImmediateCheckpoint bool `json:"immediateCheckpoint"` WaitForArchive bool `json:"waitForArchive"` BackupName string `json:"backupName"` - Force bool `json:"force,omitempty"` } // StopBackupRequest the required data to execute the pg_stop_backup @@ -352,10 +351,6 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ } }() if ws.currentBackup != nil { - if !p.Force { - sendUnprocessableEntityJSONResponse(w, "PROCESS_ALREADY_RUNNING", "") - return - } log.Info("trying to close the current backup connection", "backupName", ws.currentBackup.data.BackupName, ) diff --git a/pkg/reconciler/backup/volumesnapshot/online.go b/pkg/reconciler/backup/volumesnapshot/online.go index 62de9f4bb3..6611d1b3bf 100644 --- a/pkg/reconciler/backup/volumesnapshot/online.go +++ b/pkg/reconciler/backup/volumesnapshot/online.go @@ -111,7 +111,6 @@ func (o *onlineExecutor) prepare( ImmediateCheckpoint: volumeSnapshotConfig.OnlineConfiguration.GetImmediateCheckpoint(), WaitForArchive: volumeSnapshotConfig.OnlineConfiguration.GetWaitForArchive(), BackupName: backup.Name, - Force: true, } if err := o.backupClient.Start(ctx, targetPod, req); err != nil { return nil, fmt.Errorf("while trying to start the backup: %w", err) From 318e46487733986838d70f19354e850e0dc7e68e Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 21 Feb 2025 15:36:02 +0100 Subject: [PATCH 378/836] ci: fix regexp to retrieve EKS versions (#6921) The EKS documentation page changed the formatting of the version list, this patch changes our detection code to work with the new format. Signed-off-by: Jonathan Gonzalez V. 
--- .github/workflows/k8s-versions-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index 9db538b271..d966d8e2c1 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -43,7 +43,7 @@ jobs: name: Get updated EKS versions run: | DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/mainline/latest/ug/clusters/kubernetes-versions-standard.adoc" - curl --silent "${DOC_URL}" | sed -e 's/.*`Kubernetes` \([0-9].[0-9][0-9]\).*/\1/;/^[0-9]\./!d' | uniq | \ + curl --silent "${DOC_URL}" | sed -e 's/.*Kubernetes \([0-9].[0-9][0-9]\).*/\1/;/^[0-9]\./!d' | uniq | \ awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \ jq -Rn '[inputs]' | tee .github/eks_versions.json if: github.event.inputs.limit == null || github.event.inputs.limit == 'eks' From cd6613edab888b9807223a73f4898f3fc86b6282 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 21 Feb 2025 16:37:48 +0100 Subject: [PATCH 379/836] chore(deps): update kindest/node docker tag to v1.32.2 (main) (#6912) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index a7e6f07763..ef8e8879da 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.32.1 +KIND_NODE_DEFAULT_VERSION=v1.32.2 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 884f1e3e09..229f28c013 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.32.1 +KIND_NODE_DEFAULT_VERSION=v1.32.2 K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 From 42dbcef1f3340fa7b9429fc57f713e0652bb54d7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 22 Feb 2025 08:43:40 +0100 Subject: [PATCH 380/836] chore(deps): update dependency kubernetes-sigs/kind to v0.27.0 (main) (#6924) --- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 1bde259d93..303f3f1b3f 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -36,7 +36,7 @@ on: env: GOLANG_VERSION: "1.24.x" KUBEBUILDER_VERSION: "2.3.1" - KIND_VERSION: "v0.26.0" + KIND_VERSION: "v0.27.0" ROOK_VERSION: "v1.16.3" EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index db4e642035..66082c715f 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -19,7 +19,7 @@ env: GOLANG_VERSION: "1.24.x" GOLANGCI_LINT_VERSION: "v1.64.5" KUBEBUILDER_VERSION: "2.3.1" - KIND_VERSION: "v0.26.0" + KIND_VERSION: "v0.27.0" 
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" API_DOC_NAME: "cloudnative-pg.v1.md" SLACK_USERNAME: "cnpg-bot" From 6c927a72852f14507b0a1d57039515f4e7c8d448 Mon Sep 17 00:00:00 2001 From: Peggie Date: Sat, 22 Feb 2025 08:59:45 +0100 Subject: [PATCH 381/836] feat: Public Cloud K8S versions update (#6859) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: public-cloud-k8s-versions-check --- .github/gke_versions.json | 4 ++-- .github/kind_versions.json | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/gke_versions.json b/.github/gke_versions.json index 3121122733..49228d19da 100644 --- a/.github/gke_versions.json +++ b/.github/gke_versions.json @@ -1,6 +1,6 @@ [ + "1.32", "1.31", "1.30", - "1.29", - "1.28" + "1.29" ] diff --git a/.github/kind_versions.json b/.github/kind_versions.json index 10e6039591..096cd24228 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,8 +1,8 @@ [ - "v1.32.1", - "v1.31.4", - "v1.30.8", - "v1.29.12", + "v1.32.2", + "v1.31.6", + "v1.30.10", + "v1.29.14", "v1.28.15", "v1.27.16" ] From 1145c7e6cc8399905e178d9b6a34651f93866e8f Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Sat, 22 Feb 2025 13:30:31 +0100 Subject: [PATCH 382/836] chore: enable unit tests on Kubernetes 1.32 (#6930) Also, update the min versions for GKE, AKS and EKS Signed-off-by: Jonathan Gonzalez V. --- .github/k8s_versions_scope.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/k8s_versions_scope.json b/.github/k8s_versions_scope.json index 4842004177..2bd3e8cbee 100644 --- a/.github/k8s_versions_scope.json +++ b/.github/k8s_versions_scope.json @@ -1,10 +1,10 @@ { "e2e_test": { "KIND": {"min": "1.27", "max": ""}, - "AKS": {"min": "1.27", "max": ""}, - "EKS": {"min": "1.27", "max": ""}, - "GKE": {"min": "1.27", "max": ""}, + "AKS": {"min": "1.28", "max": ""}, + "EKS": {"min": "1.29", "max": ""}, + "GKE": {"min": "1.29", "max": ""}, "OPENSHIFT": {"min": "4.12", "max": ""} }, - "unit_test": {"min": "1.27", "max": "1.31"} + "unit_test": {"min": "1.27", "max": "1.32"} } From 76bde75504a7ca67ecd3530178fa224713e30cb7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 23 Feb 2025 17:55:36 +0100 Subject: [PATCH 383/836] fix(deps): update kubernetes patches (main) (#6923) https://github.com/prometheus-operator/prometheus-operator `v0.80.0` -> `v0.80.1` https://github.com/kubernetes/api `v0.32.1` -> `v0.32.2` https://github.com/kubernetes/apiextensions-apiserver `v0.32.1` -> `v0.32.2` https://github.com/kubernetes/apimachinery `v0.32.1` -> `v0.32.2` https://github.com/kubernetes/cli-runtime `v0.32.1` -> `v0.32.2` https://github.com/kubernetes/client-go `v0.32.1` -> `v0.32.2` https://github.com/kubernetes-sigs/controller-runtime `v0.20.1` -> `v0.20.2` --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index a8d260f438..1e5e2c8ca2 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.22.2 github.com/onsi/gomega v1.36.2 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 
github.com/prometheus/client_golang v1.20.5 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 @@ -38,13 +38,13 @@ require ( golang.org/x/term v0.29.0 google.golang.org/grpc v1.70.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.1 - k8s.io/apiextensions-apiserver v0.32.1 - k8s.io/apimachinery v0.32.1 - k8s.io/cli-runtime v0.32.1 - k8s.io/client-go v0.32.1 + k8s.io/api v0.32.2 + k8s.io/apiextensions-apiserver v0.32.2 + k8s.io/apimachinery v0.32.2 + k8s.io/cli-runtime v0.32.2 + k8s.io/client-go v0.32.2 k8s.io/utils v0.0.0-20241210054802-24370beab758 - sigs.k8s.io/controller-runtime v0.20.1 + sigs.k8s.io/controller-runtime v0.20.2 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 6d5c36b461..9f30a06a43 100644 --- a/go.sum +++ b/go.sum @@ -154,8 +154,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0 h1:ckSycH7xHtpcvXsmEY/qEziRhDQKqKqbsHi9kX/BO7A= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.0/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 h1:DP+PUNVOc+Bkft8a4QunLzaZ0RspWuD3tBbcPHr2PeE= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -277,24 +277,24 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= -k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= -k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= -k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/cli-runtime v0.32.1 h1:19nwZPlYGJPUDbhAxDIS2/oydCikvKMHsxroKNGA2mM= -k8s.io/cli-runtime v0.32.1/go.mod h1:NJPbeadVFnV2E7B7vF+FvU09mpwYlZCu8PqjzfuOnkY= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw= +k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y= +k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= +k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= +k8s.io/apimachinery v0.32.2 
h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ= +k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/cli-runtime v0.32.1 h1:19nwZPlYGJPUDbhAxDIS2/oydCikvKMHsxroKNGA2mM= -k8s.io/cli-runtime v0.32.1/go.mod h1:NJPbeadVFnV2E7B7vF+FvU09mpwYlZCu8PqjzfuOnkY= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/cli-runtime v0.32.2 h1:aKQR4foh9qeyckKRkNXUccP9moxzffyndZAvr+IXMks= +k8s.io/cli-runtime v0.32.2/go.mod h1:a/JpeMztz3xDa7GCyyShcwe55p8pbcCVQxvqZnIwXN8= +k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA= +k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= -sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/controller-runtime v0.20.2 h1:/439OZVxoEc02psi1h4QO3bHzTgu49bb347Xp4gW1pc= +sigs.k8s.io/controller-runtime v0.20.2/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= From c4233d6c5dabc27932bbb2dfa552d6b137a30087 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Mon, 24 Feb 2025 10:27:18 +0100 Subject: [PATCH 384/836] test(e2e): fix race condition in replica mode test (#6897) Fix an issue where we could fail to connect to a newly promoted cluster if our connection happens before the password is updated. This could randomly cause suite failures.
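The fix follows the usual poll-until-ready pattern; a trimmed sketch, where `connect` is a stand-in for the test's `RunExecOverForward` call and the timeout value is arbitrary:

```go
package sketch

import (
	"context"
	"time"

	. "github.com/onsi/gomega"
)

// waitForAuth retries until the regenerated application password has
// propagated: a single connection attempt would race the operator's
// update of the application secret after promotion.
func waitForAuth(ctx context.Context, connect func(context.Context) error) {
	Eventually(func() error {
		return connect(ctx)
	}, 5*time.Minute, time.Second).Should(Succeed())
}
```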
Fixes #6893 Signed-off-by: Francesco Canovai --- tests/e2e/replica_mode_cluster_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index 88d606b0df..6c4b942bf9 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -226,6 +226,17 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { DatabaseName: sourceDBName, TableName: "new_test_table", } + Eventually(func() error { + _, err := postgres.RunExecOverForward(ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, clusterTwoName, sourceDBName, + apiv1.ApplicationUserSecretSuffix, + "SELECT 1;", + ) + return err + }, testTimeouts[timeouts.Short]).Should(Succeed()) AssertCreateTestData(env, tableLocator) }) From 4e55204bfd4edc09b4deee82a26eeb9954af64f7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 13:32:17 +0100 Subject: [PATCH 385/836] fix(deps): update all non-major go dependencies (main) (#6938) This PR contains the following updates: https://github.com/avast/retry-go `v4.6.0` -> `v4.6.1` https://github.com/grpc-ecosystem/go-grpc-middleware `v2.2.0` -> `v2.3.0` https://github.com/prometheus/client_golang `v1.20.5` -> `v1.21.0` https://github.com/spf13/cobra `v1.8.1` -> `v1.9.1` --- go.mod | 24 ++++++++++++------------ go.sum | 54 +++++++++++++++++++++++++++--------------------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/go.mod b/go.mod index 1e5e2c8ca2..c131e3af51 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.23.5 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/Masterminds/semver/v3 v3.3.1 - github.com/avast/retry-go/v4 v4.6.0 + github.com/avast/retry-go/v4 v4.6.1 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450 @@ -15,7 +15,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.11 github.com/go-logr/logr v1.4.2 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 github.com/jackc/pgx/v5 v5.7.2 github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 @@ -26,10 +26,10 @@ require ( github.com/onsi/ginkgo/v2 v2.22.2 github.com/onsi/gomega v1.36.2 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 - github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/client_golang v1.21.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 - github.com/spf13/cobra v1.8.1 + github.com/spf13/cobra v1.9.1 github.com/stern/stern v1.32.0 github.com/thoas/go-funk v0.9.3 go.uber.org/atomic v1.11.0 @@ -78,7 +78,7 @@ require ( github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect @@ -93,22 +93,22 @@ require ( github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_model v0.6.1 // 
indirect - github.com/prometheus/common v0.59.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - golang.org/x/crypto v0.32.0 // indirect - golang.org/x/net v0.34.0 // indirect + golang.org/x/crypto v0.33.0 // indirect + golang.org/x/net v0.35.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sync v0.10.0 // indirect + golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/text v0.22.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect - google.golang.org/protobuf v1.36.3 // indirect + google.golang.org/protobuf v1.36.4 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/go.sum b/go.sum index 9f30a06a43..6a7af17e01 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7r github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= -github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= +github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= +github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -24,7 +24,7 @@ github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NR github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY= github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 h1:RvwDA4W8K8NQNVQTOzrf9o8P328N7NXztvnq3cUncww= github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0/go.mod h1:pitcj6ztiuxfSFH5EbVHv8iCVxF+yQkzf9o9A1KoDvI= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -83,8 +83,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache 
v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -104,8 +104,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -156,12 +156,12 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 h1:DP+PUNVOc+Bkft8a4QunLzaZ0RspWuD3tBbcPHr2PeE= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA= +github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= @@ -173,10 +173,10 @@ github.com/sergi/go-diff v1.2.0 
h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU= github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stern/stern v1.32.0 h1:xNw0CizB7/4CkWpI46cAo8tArDnS14eYKLaaDevEnrM= github.com/stern/stern v1.32.0/go.mod h1:Nv6yoHcb2E1HvklagJyd4rjoysJM4WxvcGVQtE651Xw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -185,8 +185,8 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -216,23 +216,23 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
-golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -244,8 +244,8 @@ golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -264,8 +264,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From b63bda679c23607f1d3de6af53c7e5981c06e388 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: 
Mon, 24 Feb 2025 13:44:07 +0100 Subject: [PATCH 386/836] chore(deps): update agilepathway/pull-request-label-checker docker tag to v1.6.65 (main) (#6948) --- .github/workflows/require-labels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index 1bb64dfd17..078a677012 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Require labels - uses: docker://agilepathway/pull-request-label-checker:v1.6.61 + uses: docker://agilepathway/pull-request-label-checker:v1.6.65 with: any_of: "ok to merge :ok_hand:" none_of: "do not merge" From 7434655ed2d1887dab6f8e8909b8f2e857e6e707 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 24 Feb 2025 17:13:08 +0100 Subject: [PATCH 387/836] fix(cnpgi-plugins): add archive and backup capabilities fields to configuration (#6593) This patch provides the configuration of plugin capabilities for WAL archiving. Ensure that one and only one plugin can be used for WAL Archiving if the in-tree barman-cloud support is not used. Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Tao Li Signed-off-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Quaresima Co-authored-by: Tao Li Co-authored-by: Leonardo Cecchi --- .wordlist-en-custom.txt | 2 + api/v1/cluster_funcs.go | 12 +++++ api/v1/cluster_types.go | 6 +++ api/v1/zz_generated.deepcopy.go | 5 ++ .../bases/postgresql.cnpg.io_clusters.yaml | 12 +++++ docs/src/cloudnative-pg.v1.md | 8 +++ internal/controller/backup_controller.go | 18 ++++++- internal/webhook/v1/cluster_webhook.go | 37 ++++++++++++++ internal/webhook/v1/cluster_webhook_test.go | 51 +++++++++++++++++++ pkg/management/postgres/archiver/archiver.go | 18 ++++++- .../postgres/webserver/plugin_backup.go | 21 +++++++- 11 files changed, 185 insertions(+), 5 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 6ee17eac76..a65db04731 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -492,6 +492,7 @@ VolumeSnapshotConfiguration VolumeSnapshots WAL WAL's +WALArchiver WALBackupConfiguration WALCapabilities WALs @@ -877,6 +878,7 @@ ipcs ips isPrimary isTemplate +isWALArchiver issuecomment italy jdbc diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index c21acfe3c9..3fd8e8aacc 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -1481,3 +1481,15 @@ func (p *Probe) ApplyInto(k8sProbe *corev1.Probe) { k8sProbe.TerminationGracePeriodSeconds = p.TerminationGracePeriodSeconds } } + +// GetEnabledWALArchivePluginName returns the name of the enabled backup plugin or an empty string +// if no backup plugin is enabled +func (cluster *Cluster) GetEnabledWALArchivePluginName() string { + for _, plugin := range cluster.Spec.Plugins { + if plugin.IsEnabled() && plugin.IsWALArchiver != nil && *plugin.IsWALArchiver { + return plugin.Name + } + } + + return "" +} diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index a32b66d91e..97b9ed3b4c 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -2174,6 +2174,12 @@ type PluginConfiguration struct { // +optional Enabled *bool `json:"enabled,omitempty"` + // Only one plugin can be declared as WALArchiver. + // Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. 
+ // +kubebuilder:default:=false + // +optional + IsWALArchiver *bool `json:"isWALArchiver,omitempty"` + // Parameters is the configuration of the plugin // +optional Parameters map[string]string `json:"parameters,omitempty"` diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index bde8690abd..e6b7801fa0 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1839,6 +1839,11 @@ func (in *PluginConfiguration) DeepCopyInto(out *PluginConfiguration) { *out = new(bool) **out = **in } + if in.IsWALArchiver != nil { + in, out := &in.IsWALArchiver, &out.IsWALArchiver + *out = new(bool) + **out = **in + } if in.Parameters != nil { in, out := &in.Parameters, &out.Parameters *out = make(map[string]string, len(*in)) diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index c7de1196c6..ed7ece5b06 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -2942,6 +2942,12 @@ spec: default: true description: Enabled is true if this plugin will be used type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean name: description: Name is the plugin name type: string @@ -3968,6 +3974,12 @@ spec: default: true description: Enabled is true if this plugin will be used type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean name: description: Name is the plugin name type: string diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 1734270290..236213281c 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -3712,6 +3712,14 @@ cluster to be reconciled

    <p>Enabled is true if this plugin will be used</p>
 </td>
 </tr>
+<tr><td><code>isWALArchiver</code><br/>
+<i>bool</i>
+</td>
+<td>
+   <p>Only one plugin can be declared as WALArchiver.
+Cannot be active if ".spec.backup.barmanObjectStore" configuration is present.</p>
+</td>
+</tr>
 <tr><td><code>parameters</code><br/>
map[string]string diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 5f267d02b2..5d588b62af 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -99,7 +99,7 @@ func NewBackupReconciler( // +kubebuilder:rbac:groups="",resources=pods,verbs=get // Reconcile is the main reconciliation loop -// nolint: gocognit +// nolint: gocognit,gocyclo func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { contextLogger, ctx := log.SetupLogger(ctx) contextLogger.Debug(fmt.Sprintf("reconciling object %#q", req.NamespacedName)) @@ -135,6 +135,22 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, nil } + if backup.Spec.Method == apiv1.BackupMethodPlugin && len(cluster.Spec.Plugins) == 0 { + message := "cannot proceed with the backup as the cluster has no plugin configured" + contextLogger.Warning(message) + r.Recorder.Event(&backup, "Warning", "ClusterHasNoBackupExecutorPlugin", message) + tryFlagBackupAsFailed(ctx, r.Client, &backup, errors.New(message)) + return ctrl.Result{}, nil + } + + if backup.Spec.Method != apiv1.BackupMethodPlugin && cluster.Spec.Backup == nil { + message := "cannot proceed with the backup as the cluster has no backup section" + contextLogger.Warning(message) + r.Recorder.Event(&backup, "Warning", "ClusterHasBackupConfigured", message) + tryFlagBackupAsFailed(ctx, r.Client, &backup, errors.New(message)) + return ctrl.Result{}, nil + } + // Load the required plugins pluginClient, err := cnpgiClient.WithPlugins( ctx, diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 4be9f552b5..06945b4ed2 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -211,6 +211,7 @@ func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.Error v.validateHibernationAnnotation, v.validatePodPatchAnnotation, v.validatePromotionToken, + v.validatePluginConfiguration, } for _, validate := range validations { @@ -2383,3 +2384,39 @@ func (v *ClusterCustomValidator) validatePodPatchAnnotation(r *apiv1.Cluster) fi return nil } + +func (v *ClusterCustomValidator) validatePluginConfiguration(r *apiv1.Cluster) field.ErrorList { + if len(r.Spec.Plugins) == 0 { + return nil + } + isBarmanObjectStoreConfigured := r.Spec.Backup != nil && r.Spec.Backup.BarmanObjectStore != nil + var walArchiverEnabled []string + + for _, plugin := range r.Spec.Plugins { + if !plugin.IsEnabled() { + continue + } + if plugin.IsWALArchiver != nil && *plugin.IsWALArchiver { + walArchiverEnabled = append(walArchiverEnabled, plugin.Name) + } + } + + var errorList field.ErrorList + if isBarmanObjectStoreConfigured { + if len(walArchiverEnabled) > 0 { + errorList = append(errorList, field.Invalid( + field.NewPath("spec", "plugins"), + walArchiverEnabled, + "Cannot enable a WAL archiver plugin when barmanObjectStore is configured")) + } + } + + if len(walArchiverEnabled) > 1 { + errorList = append(errorList, field.Invalid( + field.NewPath("spec", "plugins"), + walArchiverEnabled, + "Cannot enable more than one WAL archiver plugin")) + } + + return errorList +} diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index a6828768cc..38e9b3b57c 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -4937,3 +4937,54 @@ var _ = 
Describe("validatePodPatchAnnotation", func() { Expect(v.validatePodPatchAnnotation(cluster)).To(BeNil()) }) }) + +var _ = Describe("validatePluginConfiguration", func() { + var v *ClusterCustomValidator + var cluster *apiv1.Cluster + walPlugin1 := apiv1.PluginConfiguration{ + Name: "walArchiverPlugin1", + Enabled: ptr.To(true), + IsWALArchiver: ptr.To(true), + } + walPlugin2 := apiv1.PluginConfiguration{ + Name: "walArchiverPlugin2", + Enabled: ptr.To(true), + IsWALArchiver: ptr.To(true), + } + + BeforeEach(func() { + v = &ClusterCustomValidator{} + cluster = &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Plugins: []apiv1.PluginConfiguration{}, + }, + } + }) + + It("returns no errors if no plugins are enabled", func() { + Expect(v.validatePluginConfiguration(cluster)).To(BeNil()) + }) + + It("returns an error if a WAL archiver plugin is enabled when barmanObjectStore is configured", func() { + cluster.Spec.Backup = &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}, + } + cluster.Spec.Plugins = append(cluster.Spec.Plugins, walPlugin1) + errs := v.validatePluginConfiguration(cluster) + Expect(errs).To(HaveLen(1)) + Expect(errs[0].Error()).To(ContainSubstring( + "Cannot enable a WAL archiver plugin when barmanObjectStore is configured")) + }) + + It("returns an error if more than one WAL archiver plugin is enabled", func() { + cluster.Spec.Plugins = append(cluster.Spec.Plugins, walPlugin1, walPlugin2) + errs := v.validatePluginConfiguration(cluster) + Expect(errs).To(HaveLen(1)) + Expect(errs[0].Error()).To(ContainSubstring("Cannot enable more than one WAL archiver plugin")) + }) + + It("returns no errors when WAL archiver is enabled", func() { + cluster.Spec.Plugins = append(cluster.Spec.Plugins, walPlugin1) + Expect(v.validatePluginConfiguration(cluster)).To(BeNil()) + }) +}) diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go index 9e6feed0a8..ea40498848 100644 --- a/pkg/management/postgres/archiver/archiver.go +++ b/pkg/management/postgres/archiver/archiver.go @@ -148,11 +148,19 @@ func internalRun( contextLog := log.FromContext(ctx) startTime := time.Now() - // Request the plugins to archive this WAL + // We allow plugins to archive WALs even if there is no plugin + // directly enabled by the user, to retain compatibility with + // the old API. if err := archiveWALViaPlugins(ctx, cluster, path.Join(pgData, walName)); err != nil { return err } + // If the used chosen a plugin to do WAL archiving, we don't + // trigger the legacy archiving process. 
+ if cluster.GetEnabledWALArchivePluginName() != "" { + return nil + } + // Request Barman Cloud to archive this WAL if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { // Backup not configured, skipping WAL @@ -266,11 +274,17 @@ func archiveWALViaPlugins( availablePluginNamesSet := stringset.From(availablePluginNames) enabledPluginNamesSet := stringset.From( apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)) + availableAndEnabled := stringset.From(availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()) + + enabledArchiverPluginName := cluster.GetEnabledWALArchivePluginName() + if enabledArchiverPluginName != "" && !availableAndEnabled.Has(enabledArchiverPluginName) { + return fmt.Errorf("wal archive plugin is not available: %s", enabledArchiverPluginName) + } client, err := pluginClient.WithPlugins( ctx, plugins, - availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()..., + availableAndEnabled.ToList()..., ) if err != nil { contextLogger.Error(err, "Error while loading required plugins") diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index f694f58716..44cd2afeba 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -18,10 +18,12 @@ package webserver import ( "context" + "fmt" "time" "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "github.com/cloudnative-pg/machinery/pkg/stringset" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" @@ -85,15 +87,30 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { "backupNamespace", b.Backup.Name) plugins := repository.New() - if _, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + availablePlugins, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) + if err != nil { contextLogger.Error(err, "Error while discovering plugins") } defer plugins.Close() + availablePluginNamesSet := stringset.From(availablePlugins) + + enabledPluginNamesSet := stringset.From( + apiv1.GetPluginConfigurationEnabledPluginNames(b.Cluster.Spec.Plugins)) + availableAndEnabled := stringset.From(availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()) + + if !availableAndEnabled.Has(b.Backup.Spec.PluginConfiguration.Name) { + b.markBackupAsFailed( + ctx, + fmt.Errorf("requested plugin is not available: %s", b.Backup.Spec.PluginConfiguration.Name), + ) + return + } + cli, err := pluginClient.WithPlugins( ctx, plugins, - apiv1.GetPluginConfigurationEnabledPluginNames(b.Cluster.Spec.Plugins)..., + availableAndEnabled.ToList()..., ) if err != nil { b.markBackupAsFailed(ctx, err) From 6d01b4ff7dacb9bf72e7fa91923b8ea2c2839c7d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 25 Feb 2025 09:45:12 +0100 Subject: [PATCH 388/836] feat: update default PostgreSQL version to 17.4 (#6960) Update default PostgreSQL version from 17.2 to 17.4 Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. 
Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 20 ++++++++++---------- docs/src/bootstrap.md | 10 +++++----- docs/src/declarative_hibernation.md | 2 +- docs/src/image_catalog.md | 4 ++-- docs/src/kubectl-plugin.md | 4 ++-- docs/src/monitoring.md | 2 +- docs/src/postgis.md | 2 +- docs/src/samples/cluster-example-full.yaml | 2 +- docs/src/scheduling.md | 2 +- docs/src/ssl_connections.md | 2 +- docs/src/troubleshooting.md | 4 ++-- pkg/versions/versions.go | 2 +- 12 files changed, 28 insertions(+), 28 deletions(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index a6a9696f2b..24c0505e80 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,22 +1,22 @@ { "17": [ - "17.2", - "17.1" + "17.4", + "17.2" ], "16": [ - "16.6", - "16.5" + "16.8", + "16.6" ], "15": [ - "15.10", - "15.9" + "15.12", + "15.10" ], "14": [ - "14.15", - "14.14" + "14.17", + "14.15" ], "13": [ - "13.18", - "13.17" + "13.20", + "13.18" ] } \ No newline at end of file diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 2a0518f67c..45b632240f 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -570,7 +570,7 @@ file on the source PostgreSQL instance: host replication streaming_replica all md5 ``` -The following manifest creates a new PostgreSQL 17.2 cluster, +The following manifest creates a new PostgreSQL 17.4 cluster, called `target-db`, using the `pg_basebackup` bootstrap method to clone an external PostgreSQL cluster defined as `source-db` (in the `externalClusters` array). As you can see, the `source-db` @@ -585,7 +585,7 @@ metadata: name: target-db spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.2 + imageName: ghcr.io/cloudnative-pg/postgresql:17.4 bootstrap: pg_basebackup: @@ -605,7 +605,7 @@ spec: ``` All the requirements must be met for the clone operation to work, including -the same PostgreSQL version (in our case 17.2). +the same PostgreSQL version (in our case 17.4). #### TLS certificate authentication @@ -620,7 +620,7 @@ in the same Kubernetes cluster. This example can be easily adapted to cover an instance that resides outside the Kubernetes cluster. -The manifest defines a new PostgreSQL 17.2 cluster called `cluster-clone-tls`, +The manifest defines a new PostgreSQL 17.4 cluster called `cluster-clone-tls`, which is bootstrapped using the `pg_basebackup` method from the `cluster-example` external cluster. 
The host is identified by the read/write service in the same cluster, while the `streaming_replica` user is authenticated @@ -635,7 +635,7 @@ metadata: name: cluster-clone-tls spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.2 + imageName: ghcr.io/cloudnative-pg/postgresql:17.4 bootstrap: pg_basebackup: diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md index 4df6e3403d..36eda0bf35 100644 --- a/docs/src/declarative_hibernation.md +++ b/docs/src/declarative_hibernation.md @@ -58,7 +58,7 @@ $ kubectl cnpg status Cluster Summary Name: cluster-example Namespace: default -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.2 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.4 Primary instance: cluster-example-2 Status: Cluster in healthy state Instances: 3 diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index 09e148209d..1f42752e80 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -32,7 +32,7 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 - image: ghcr.io/cloudnative-pg/postgresql:17.2 + image: ghcr.io/cloudnative-pg/postgresql:17.4 ``` **Example of a Cluster-Wide Catalog using `ClusterImageCatalog` Resource:** @@ -47,7 +47,7 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 - image: ghcr.io/cloudnative-pg/postgresql:17.2 + image: ghcr.io/cloudnative-pg/postgresql:17.4 ``` A `Cluster` resource has the flexibility to reference either an `ImageCatalog` diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 648ded33e2..1e4e101864 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -1022,7 +1022,7 @@ it from the actual pod. This means that you will be using the `postgres` user. ```console $ kubectl cnpg psql cluster-example -psql (17.2 (Debian 17.2-1.pgdg110+1)) +psql (17.4 (Debian 17.4-1.pgdg110+1)) Type "help" for help. postgres=# @@ -1034,7 +1034,7 @@ select to work against a replica by using the `--replica` option: ```console $ kubectl cnpg psql --replica cluster-example -psql (17.2 (Debian 17.2-1.pgdg110+1)) +psql (17.4 (Debian 17.4-1.pgdg110+1)) Type "help" for help. diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md index 3fa83cb2f6..06efd3a610 100644 --- a/docs/src/monitoring.md +++ b/docs/src/monitoring.md @@ -217,7 +217,7 @@ cnpg_collector_up{cluster="cluster-example"} 1 # HELP cnpg_collector_postgres_version Postgres version # TYPE cnpg_collector_postgres_version gauge -cnpg_collector_postgres_version{cluster="cluster-example",full="17.2"} 17.2 +cnpg_collector_postgres_version{cluster="cluster-example",full="17.4"} 17.4 # HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp # TYPE cnpg_collector_last_failed_backup_timestamp gauge diff --git a/docs/src/postgis.md b/docs/src/postgis.md index cd139ac5d7..9df998c16e 100644 --- a/docs/src/postgis.md +++ b/docs/src/postgis.md @@ -100,7 +100,7 @@ values from the ones in this document): ```console $ kubectl exec -ti postgis-example-1 -- psql app Defaulted container "postgres" out of: postgres, bootstrap-controller (init) -psql (17.2 (Debian 17.2-1.pgdg110+1)) +psql (17.4 (Debian 17.4-1.pgdg110+1)) Type "help" for help. 
app=# SELECT * FROM pg_available_extensions WHERE name ~ '^postgis' ORDER BY 1; diff --git a/docs/src/samples/cluster-example-full.yaml b/docs/src/samples/cluster-example-full.yaml index 321e94a2fe..1551e5318c 100644 --- a/docs/src/samples/cluster-example-full.yaml +++ b/docs/src/samples/cluster-example-full.yaml @@ -35,7 +35,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: ghcr.io/cloudnative-pg/postgresql:17.2 + imageName: ghcr.io/cloudnative-pg/postgresql:17.4 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/docs/src/scheduling.md b/docs/src/scheduling.md index a681f412a6..79ea6fcddd 100644 --- a/docs/src/scheduling.md +++ b/docs/src/scheduling.md @@ -40,7 +40,7 @@ metadata: name: cluster-example spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.2 + imageName: ghcr.io/cloudnative-pg/postgresql:17.4 affinity: enablePodAntiAffinity: true # Default value diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index 3762ab95d8..1fde48eeda 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -173,7 +173,7 @@ Output: version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 17.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 17.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index 6003f2ac96..721dd3ca35 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -220,7 +220,7 @@ Cluster in healthy state Name: cluster-example Namespace: default System ID: 7044925089871458324 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.2-3 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.4-3 Primary instance: cluster-example-1 Instances: 3 Ready instances: 3 @@ -288,7 +288,7 @@ kubectl describe cluster -n | grep "Image Name" Output: ```shell - Image Name: ghcr.io/cloudnative-pg/postgresql:17.2-3 + Image Name: ghcr.io/cloudnative-pg/postgresql:17.4-3 ``` !!! Note diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index c4b1c95414..a098860e52 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,7 +23,7 @@ const ( Version = "1.25.0" // DefaultImageName is the default image used by the operator to create pods - DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.2" + DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.4" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0" From e2bcca88f8bb1b65d7b60009a6216cecc89330f6 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 25 Feb 2025 09:48:07 +0100 Subject: [PATCH 389/836] fix(backup,snapshot): avoid parallel actions on endpoints (#6890) Multiple backup operations on the same instance were able to execute simultaneously, potentially causing conflicts and data inconsistencies. This patch introduces a mechanism to serialize these operations, ensuring that only one backup can proceed at a time per instance. 
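As an illustration only — a minimal, self-contained sketch of the TryLock-based
serialization idea, assuming Go >= 1.18 for `sync.Mutex.TryLock`; the handler
path and JSON bodies here are placeholders, while the `ANOTHER_REQUEST_IN_PROGRESS`
code mirrors the retryable error introduced by this patch:

```go
package main

import (
	"fmt"
	"net/http"
	"sync"
)

type endpoints struct {
	// ongoingRequest serializes backup operations on this instance.
	ongoingRequest sync.Mutex
}

func (e *endpoints) backup(w http.ResponseWriter, _ *http.Request) {
	// Reject, rather than queue, any request that overlaps a running one;
	// callers treat this error code as retryable and requeue shortly after.
	if !e.ongoingRequest.TryLock() {
		w.WriteHeader(http.StatusUnprocessableEntity)
		fmt.Fprint(w, `{"error":{"code":"ANOTHER_REQUEST_IN_PROGRESS"}}`)
		return
	}
	defer e.ongoingRequest.Unlock()

	// ... perform the backup start/stop work while holding the lock ...
	fmt.Fprint(w, `{"data":{"phase":"started"}}`)
}

func main() {
	e := &endpoints{}
	http.HandleFunc("/pg/mode/backup", e.backup) // placeholder path
	_ = http.ListenAndServe("localhost:8010", nil)
}
```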
Partially Closes #6761 Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Signed-off-by: Marco Nenciarini Co-authored-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini --- .../postgres/webserver/backup_connection.go | 78 +++++-------------- .../postgres/webserver/client/local/backup.go | 38 ++++++--- pkg/management/postgres/webserver/remote.go | 69 +++++++++++----- .../postgres/webserver/webserver.go | 15 ++-- .../backup/volumesnapshot/online.go | 51 +++++++++--- .../backup/volumesnapshot/online_test.go | 20 ++++- 6 files changed, 160 insertions(+), 111 deletions(-) diff --git a/pkg/management/postgres/webserver/backup_connection.go b/pkg/management/postgres/webserver/backup_connection.go index 0da0096bcd..60b3559918 100644 --- a/pkg/management/postgres/webserver/backup_connection.go +++ b/pkg/management/postgres/webserver/backup_connection.go @@ -57,7 +57,6 @@ const ( var replicationSlotInvalidCharacters = regexp.MustCompile(`[^a-z0-9_]`) type backupConnection struct { - sync sync.Mutex immediateCheckpoint bool waitForArchive bool conn *sql.Conn @@ -66,44 +65,6 @@ type backupConnection struct { err error } -func (bc *backupConnection) setPhase(phase BackupConnectionPhase, backupName string) { - bc.sync.Lock() - defer bc.sync.Unlock() - if backupName != bc.data.BackupName { - return - } - bc.data.Phase = phase -} - -func (bc *backupConnection) closeConnection(backupName string) error { - bc.sync.Lock() - defer bc.sync.Unlock() - if backupName != bc.data.BackupName { - return nil - } - - return bc.conn.Close() -} - -func (bc *backupConnection) forceCloseConnection() error { - bc.sync.Lock() - defer bc.sync.Unlock() - - return bc.conn.Close() -} - -func (bc *backupConnection) executeWithLock(backupName string, cb func() error) { - bc.sync.Lock() - defer bc.sync.Unlock() - if backupName != bc.data.BackupName { - return - } - - if err := cb(); err != nil { - bc.err = err - } -} - func newBackupConnection( ctx context.Context, instance *postgres.Instance, @@ -139,8 +100,10 @@ func newBackupConnection( }, nil } -func (bc *backupConnection) startBackup(ctx context.Context, backupName string) { +func (bc *backupConnection) startBackup(ctx context.Context, sync *sync.Mutex) { contextLogger := log.FromContext(ctx).WithValues("step", "start") + sync.Lock() + defer sync.Unlock() if bc == nil { return @@ -152,7 +115,7 @@ func (bc *backupConnection) startBackup(ctx context.Context, backupName string) } contextLogger.Error(bc.err, "encountered error while starting backup") - if err := bc.closeConnection(backupName); err != nil { + if err := bc.conn.Close(); err != nil { if !errors.Is(err, sql.ErrConnDone) { contextLogger.Error(err, "while closing backup connection") } @@ -180,25 +143,25 @@ func (bc *backupConnection) startBackup(ctx context.Context, backupName string) bc.immediateCheckpoint) } - bc.executeWithLock(backupName, func() error { - if err := row.Scan(&bc.data.BeginLSN); err != nil { - return fmt.Errorf("while scanning backup start: %w", err) - } - bc.data.Phase = Started + if err := row.Scan(&bc.data.BeginLSN); err != nil { + bc.err = fmt.Errorf("while scanning backup start: %w", err) + return + } - return nil - }) + bc.data.Phase = Started } -func (bc *backupConnection) stopBackup(ctx context.Context, backupName string) { +func (bc *backupConnection) stopBackup(ctx context.Context, sync *sync.Mutex) { contextLogger := log.FromContext(ctx).WithValues("step", "stop") + sync.Lock() + defer sync.Unlock() if bc == nil { return } defer func() { - if err := 
bc.closeConnection(backupName); err != nil { + if err := bc.conn.Close(); err != nil { if !errors.Is(err, sql.ErrConnDone) { contextLogger.Error(err, "while closing backup connection") } @@ -218,12 +181,11 @@ func (bc *backupConnection) stopBackup(ctx context.Context, backupName string) { "SELECT lsn, labelfile, spcmapfile FROM pg_catalog.pg_backup_stop(wait_for_archive => $1);", bc.waitForArchive) } - bc.executeWithLock(backupName, func() error { - if err := row.Scan(&bc.data.EndLSN, &bc.data.LabelFile, &bc.data.SpcmapFile); err != nil { - contextLogger.Error(err, "while stopping PostgreSQL physical backup") - return fmt.Errorf("while scanning backup stop: %w", err) - } - bc.data.Phase = Completed - return nil - }) + if err := row.Scan(&bc.data.EndLSN, &bc.data.LabelFile, &bc.data.SpcmapFile); err != nil { + contextLogger.Error(err, "while stopping PostgreSQL physical backup") + bc.err = fmt.Errorf("while scanning backup stop: %w", err) + return + } + + bc.data.Phase = Completed } diff --git a/pkg/management/postgres/webserver/client/local/backup.go b/pkg/management/postgres/webserver/client/local/backup.go index 8a7d4eb57f..0e30140738 100644 --- a/pkg/management/postgres/webserver/client/local/backup.go +++ b/pkg/management/postgres/webserver/client/local/backup.go @@ -33,8 +33,16 @@ import ( // BackupClient is the interface to interact with the backup endpoints type BackupClient interface { StatusWithErrors(ctx context.Context, pod *corev1.Pod) (*webserver.Response[webserver.BackupResultData], error) - Start(ctx context.Context, pod *corev1.Pod, sbq webserver.StartBackupRequest) error - Stop(ctx context.Context, pod *corev1.Pod, sbq webserver.StopBackupRequest) error + Start( + ctx context.Context, + pod *corev1.Pod, + sbq webserver.StartBackupRequest, + ) (*webserver.Response[webserver.BackupResultData], error) + Stop( + ctx context.Context, + pod *corev1.Pod, + sbq webserver.StopBackupRequest, + ) (*webserver.Response[webserver.BackupResultData], error) } // backupClientImpl a client to interact with the instance backup endpoints @@ -59,40 +67,46 @@ func (c *backupClientImpl) StatusWithErrors( } // Start runs the pg_start_backup -func (c *backupClientImpl) Start(ctx context.Context, pod *corev1.Pod, sbq webserver.StartBackupRequest) error { +func (c *backupClientImpl) Start( + ctx context.Context, + pod *corev1.Pod, + sbq webserver.StartBackupRequest, +) (*webserver.Response[webserver.BackupResultData], error) { scheme := remote.GetStatusSchemeFromPod(pod) httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) // Marshalling the payload to JSON jsonBody, err := json.Marshal(sbq) if err != nil { - return fmt.Errorf("failed to marshal start payload: %w", err) + return nil, fmt.Errorf("failed to marshal start payload: %w", err) } req, err := http.NewRequestWithContext(ctx, "POST", httpURL, bytes.NewReader(jsonBody)) if err != nil { - return err + return nil, err } req.Header.Set("Content-Type", "application/json") - _, err = executeRequestWithError[struct{}](ctx, c.cli, req, false) - return err + return executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, true) } // Stop runs the command pg_stop_backup -func (c *backupClientImpl) Stop(ctx context.Context, pod *corev1.Pod, sbq webserver.StopBackupRequest) error { +func (c *backupClientImpl) Stop( + ctx context.Context, + pod *corev1.Pod, + sbq webserver.StopBackupRequest, +) (*webserver.Response[webserver.BackupResultData], error) { scheme := remote.GetStatusSchemeFromPod(pod) 
httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) // Marshalling the payload to JSON jsonBody, err := json.Marshal(sbq) if err != nil { - return fmt.Errorf("failed to marshal stop payload: %w", err) + return nil, fmt.Errorf("failed to marshal stop payload: %w", err) } req, err := http.NewRequestWithContext(ctx, "PUT", httpURL, bytes.NewReader(jsonBody)) if err != nil { - return err + return nil, err } - _, err = executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, false) - return err + return executeRequestWithError[webserver.BackupResultData](ctx, c.cli, req, true) } diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index 68d5097302..d0c26c702d 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -27,11 +27,13 @@ import ( "os" "os/exec" "path" + "sync" "time" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + "go.uber.org/multierr" apierrs "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" @@ -46,11 +48,22 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) +const errCodeAnotherRequestInProgress = "ANOTHER_REQUEST_IN_PROGRESS" + +// IsRetryableError checks if the error is retryable +func IsRetryableError(err *Error) bool { + if err == nil { + return false + } + return err.Code == errCodeAnotherRequestInProgress +} + type remoteWebserverEndpoints struct { typedClient client.Client instance *postgres.Instance currentBackup *backupConnection readinessChecker *readiness.Data + ongoingRequest sync.Mutex } // StartBackupRequest the required data to execute the pg_start_backup @@ -128,6 +141,7 @@ func (ws *remoteWebserverEndpoints) cleanupStaleCollections(ctx context.Context) log.Warning("Closing stale PostgreSQL backup connection") if err := bc.conn.Close(); err != nil { + bc.err = multierr.Append(bc.err, err) log.Error(err, "Error while closing stale PostgreSQL backup connection") } bc.data.Phase = Completed @@ -142,13 +156,13 @@ func (ws *remoteWebserverEndpoints) cleanupStaleCollections(ctx context.Context) return } + ws.ongoingRequest.Lock() + defer ws.ongoingRequest.Unlock() + if bc.data.Phase == Completed || bc.data.BackupName == "" { return } - bc.sync.Lock() - defer bc.sync.Unlock() - if bc.err != nil { closeBackupConnection(bc) return @@ -317,6 +331,11 @@ func (ws *remoteWebserverEndpoints) updateInstanceManager( // nolint: gocognit func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Request) { log.Trace("request method", "method", req.Method) + if !ws.ongoingRequest.TryLock() { + sendUnprocessableEntityJSONResponse(w, errCodeAnotherRequestInProgress, "") + return + } + defer ws.ongoingRequest.Unlock() switch req.Method { case http.MethodGet: @@ -351,10 +370,10 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ } }() if ws.currentBackup != nil { - log.Info("trying to close the current backup connection", + log.Debug("trying to close the current backup connection", "backupName", ws.currentBackup.data.BackupName, ) - if err := ws.currentBackup.forceCloseConnection(); err != nil { + if err := ws.currentBackup.conn.Close(); err != nil { if !errors.Is(err, sql.ErrConnDone) { log.Error(err, "Error while closing backup connection (start)") } @@ -371,8 +390,12 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, 
req *http.Requ sendUnprocessableEntityJSONResponse(w, "CANNOT_INITIALIZE_CONNECTION", err.Error()) return } - go ws.currentBackup.startBackup(context.Background(), p.BackupName) - sendJSONResponseWithData(w, 200, struct{}{}) + go ws.currentBackup.startBackup(context.Background(), &ws.ongoingRequest) + + res := Response[BackupResultData]{ + Data: &ws.currentBackup.data, + } + sendJSONResponseWithData(w, 200, res) return case http.MethodPut: @@ -398,8 +421,23 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ return } + if ws.currentBackup.err != nil { + if err := ws.currentBackup.conn.Close(); err != nil { + if !errors.Is(err, sql.ErrConnDone) { + log.Error(err, "Error while closing backup connection (stop)") + } + } + + sendUnprocessableEntityJSONResponse(w, "BACKUP_FAILED", ws.currentBackup.err.Error()) + return + } + + res := Response[BackupResultData]{ + Data: &ws.currentBackup.data, + } + if ws.currentBackup.data.Phase == Closing { - sendJSONResponseWithData(w, 200, struct{}{}) + sendJSONResponseWithData(w, 200, res) return } @@ -409,19 +447,10 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ return } - if ws.currentBackup.err != nil { - if err := ws.currentBackup.closeConnection(p.BackupName); err != nil { - if !errors.Is(err, sql.ErrConnDone) { - log.Error(err, "Error while closing backup connection (stop)") - } - } + ws.currentBackup.data.Phase = Closing - sendJSONResponseWithData(w, 200, struct{}{}) - return - } - ws.currentBackup.setPhase(Closing, p.BackupName) - go ws.currentBackup.stopBackup(context.Background(), p.BackupName) - sendJSONResponseWithData(w, 200, struct{}{}) + go ws.currentBackup.stopBackup(context.Background(), &ws.ongoingRequest) + sendJSONResponseWithData(w, 200, res) return } } diff --git a/pkg/management/postgres/webserver/webserver.go b/pkg/management/postgres/webserver/webserver.go index a358dc5ae6..a9ff87d4b3 100644 --- a/pkg/management/postgres/webserver/webserver.go +++ b/pkg/management/postgres/webserver/webserver.go @@ -50,18 +50,19 @@ type Response[T interface{}] struct { Error *Error `json:"error,omitempty"` } -// EnsureDataIsPresent returns an error if the data is field is nil -func (body Response[T]) EnsureDataIsPresent() error { - status := body.Data - if status != nil { - return nil - } - +// GetError returns an error if an error response is detected or if the data +// field is nil +func (body Response[T]) GetError() error { if body.Error != nil { return fmt.Errorf("encountered a body error while preparing, code: '%s', message: %s", body.Error.Code, body.Error.Message) } + status := body.Data + if status != nil { + return nil + } + return fmt.Errorf("encounteered an empty body while expecting it to not be empty") } diff --git a/pkg/reconciler/backup/volumesnapshot/online.go b/pkg/reconciler/backup/volumesnapshot/online.go index 6611d1b3bf..99dba6b6ec 100644 --- a/pkg/reconciler/backup/volumesnapshot/online.go +++ b/pkg/reconciler/backup/volumesnapshot/online.go @@ -43,16 +43,20 @@ func (o *onlineExecutor) finalize( backup *apiv1.Backup, targetPod *corev1.Pod, ) (*ctrl.Result, error) { - body, err := o.backupClient.StatusWithErrors(ctx, targetPod) + statusBody, err := o.backupClient.StatusWithErrors(ctx, targetPod) if err != nil { return nil, fmt.Errorf("while getting status while finalizing: %w", err) } - if err := body.EnsureDataIsPresent(); err != nil { + if webserver.IsRetryableError(statusBody.Error) { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + + if err 
:= statusBody.GetError(); err != nil { return nil, err } - status := body.Data + status := statusBody.Data if status.BackupName != backup.Name { return nil, fmt.Errorf("trying to stop backup with name: %s, while reconciling backup with name: %s", status.BackupName, @@ -72,9 +76,19 @@ func (o *onlineExecutor) finalize( switch status.Phase { case webserver.Started: - if err := o.backupClient.Stop(ctx, targetPod, *webserver.NewStopBackupRequest(backup.Name)); err != nil { + res, err := o.backupClient.Stop(ctx, targetPod, *webserver.NewStopBackupRequest(backup.Name)) + if err != nil { return nil, fmt.Errorf("while stopping the backup client: %w", err) } + + if webserver.IsRetryableError(res.Error) { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + + if err := res.GetError(); err != nil { + return nil, err + } + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil case webserver.Closing: return &ctrl.Result{RequeueAfter: time.Second * 5}, nil @@ -95,29 +109,46 @@ func (o *onlineExecutor) prepare( volumeSnapshotConfig := backup.GetVolumeSnapshotConfiguration(*cluster.Spec.Backup.VolumeSnapshot) // Handle hot snapshots - body, err := o.backupClient.StatusWithErrors(ctx, targetPod) + statusBody, err := o.backupClient.StatusWithErrors(ctx, targetPod) if err != nil { return nil, fmt.Errorf("while getting status while preparing: %w", err) } - if err := body.EnsureDataIsPresent(); err != nil { - return nil, err + if webserver.IsRetryableError(statusBody.Error) { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil } - status := body.Data + status := statusBody.Data // if the backupName doesn't match it means we have an old stuck pending backup that we have to force out. - if backup.Name != status.BackupName || status.Phase == "" { + if status != nil && (backup.Name != status.BackupName || status.Phase == "") { req := webserver.StartBackupRequest{ ImmediateCheckpoint: volumeSnapshotConfig.OnlineConfiguration.GetImmediateCheckpoint(), WaitForArchive: volumeSnapshotConfig.OnlineConfiguration.GetWaitForArchive(), BackupName: backup.Name, } - if err := o.backupClient.Start(ctx, targetPod, req); err != nil { + res, err := o.backupClient.Start(ctx, targetPod, req) + if err != nil { return nil, fmt.Errorf("while trying to start the backup: %w", err) } + + if webserver.IsRetryableError(res.Error) { + return &ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + + if err := res.GetError(); err != nil { + return nil, err + } + return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil } + // If we are here, the status either contains errors + // or the running backup is the desired one. 
+ // Handle the error case first + if err := statusBody.GetError(); err != nil { + return nil, err + } + switch status.Phase { case webserver.Starting: return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil diff --git a/pkg/reconciler/backup/volumesnapshot/online_test.go b/pkg/reconciler/backup/volumesnapshot/online_test.go index a0f70cceaa..7c00dd17f4 100644 --- a/pkg/reconciler/backup/volumesnapshot/online_test.go +++ b/pkg/reconciler/backup/volumesnapshot/online_test.go @@ -51,14 +51,26 @@ func (f *fakeBackupClient) StatusWithErrors( return f.response, f.injectStatusError } -func (f *fakeBackupClient) Start(_ context.Context, _ *corev1.Pod, _ webserver.StartBackupRequest) error { +func (f *fakeBackupClient) Start( + _ context.Context, + _ *corev1.Pod, + _ webserver.StartBackupRequest, +) (*webserver.Response[webserver.BackupResultData], error) { f.startCalled = true - return f.injectStartError + return &webserver.Response[webserver.BackupResultData]{ + Data: &webserver.BackupResultData{}, + }, f.injectStartError } -func (f *fakeBackupClient) Stop(_ context.Context, _ *corev1.Pod, _ webserver.StopBackupRequest) error { +func (f *fakeBackupClient) Stop( + _ context.Context, + _ *corev1.Pod, + _ webserver.StopBackupRequest, +) (*webserver.Response[webserver.BackupResultData], error) { f.stopCalled = true - return f.injectStopError + return &webserver.Response[webserver.BackupResultData]{ + Data: &webserver.BackupResultData{}, + }, f.injectStopError } var _ = Describe("onlineExecutor prepare", func() { From 66e6e2f07bf2b2dafc205e05f23fd8e4bda79786 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Tue, 25 Feb 2025 16:02:20 +0100 Subject: [PATCH 390/836] test(e2e): fix concurrency issue in pgbouncer tests (#6919) Fix an issue in the pgbouncer tests where the tests could fail if multiple pgbouncer tests were running in parallel. 
Fixes #6905 Signed-off-by: Francesco Canovai --- tests/e2e/pgbouncer_test.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/e2e/pgbouncer_test.go b/tests/e2e/pgbouncer_test.go index edbd2560d9..c8c21fbd69 100644 --- a/tests/e2e/pgbouncer_test.go +++ b/tests/e2e/pgbouncer_test.go @@ -84,11 +84,9 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), }) By("executing psql within the pgbouncer pod", func() { - pod, err := getPgbouncerPod(poolerBasicAuthRWSampleFile) + pod, err := getPgbouncerPod(namespace, poolerBasicAuthRWSampleFile) Expect(err).ToNot(HaveOccurred()) - GinkgoWriter.Println(pod.Name) - err = runShowHelpInPod(pod) Expect(err).ToNot(HaveOccurred()) }) @@ -194,7 +192,7 @@ var _ = Describe("PGBouncer Connections", Label(tests.LabelServiceConnectivity), }) }) -func getPgbouncerPod(sampleFile string) (*corev1.Pod, error) { +func getPgbouncerPod(namespace, sampleFile string) (*corev1.Pod, error) { poolerKey, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) if err != nil { return nil, err @@ -203,7 +201,7 @@ func getPgbouncerPod(sampleFile string) (*corev1.Pod, error) { Expect(err).ToNot(HaveOccurred()) var podList corev1.PodList - err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(""), + err = env.Client.List(env.Ctx, &podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerKey}) Expect(err).ToNot(HaveOccurred()) Expect(len(podList.Items)).Should(BeEquivalentTo(1)) From f01bcc823dd8325a48f81760dd140e9b95ef572e Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Tue, 25 Feb 2025 18:15:23 +0100 Subject: [PATCH 391/836] docs: clarify the bootstrap section (#6749) Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- docs/src/bootstrap.md | 87 ++++++++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 35 deletions(-) diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 45b632240f..8315dafa23 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -1,6 +1,6 @@ # Bootstrap -This section describes the options you have to create a new +This section describes the options available to create a new PostgreSQL cluster and the design rationale behind them. There are primarily two ways to bootstrap a new cluster: @@ -8,22 +8,23 @@ There are primarily two ways to bootstrap a new cluster: - from an existing PostgreSQL cluster, either directly (`pg_basebackup`) or indirectly through a physical base backup (`recovery`) -The `initdb` bootstrap also offers the possibility to import one or more -databases from an existing Postgres cluster, even outside Kubernetes, and -having a different major version of Postgres. +The `initdb` bootstrap also provides the option to import one or more +databases from an existing PostgreSQL cluster, even if it's outside +Kubernetes or running a different major version of PostgreSQL. For more detailed information about this feature, please refer to the ["Importing Postgres databases"](database_import.md) section. !!! Important - Bootstrapping from an existing cluster opens up the possibility - to create a **replica cluster**, that is an independent PostgreSQL - cluster which is in continuous recovery, synchronized with the source - and that accepts read-only connections. 
+ Bootstrapping from an existing cluster enables the creation of a + **replica cluster**—an independent PostgreSQL cluster that remains in + continuous recovery, stays synchronized with the source cluster, and + accepts read-only connections. + For more details, refer to the [Replica Cluster section](replica_cluster.md). !!! Warning CloudNativePG requires both the `postgres` user and database to - always exists. Using the local Unix Domain Socket, it needs to connect - as `postgres` user to the `postgres` database via `peer` authentication in + always exist. Using the local Unix Domain Socket, it needs to connect + as the `postgres` user to the `postgres` database via `peer` authentication in order to perform administrative tasks on the cluster. **DO NOT DELETE** the `postgres` user or the `postgres` database!!! @@ -45,18 +46,22 @@ specification. CloudNativePG currently supports the following bootstrap methods: existing cluster and, if needed, replaying all the available WAL files or up to a given *point in time* - `pg_basebackup`: create a PostgreSQL cluster by cloning an existing one of - the same major version using `pg_basebackup` via streaming replication protocol - - useful if you want to migrate databases to CloudNativePG, even - from outside Kubernetes. - -Differently from the `initdb` method, both `recovery` and `pg_basebackup` + the same major version using `pg_basebackup` through the streaming + replication protocol. This method is particularly useful for migrating + databases to CloudNativePG, although meeting all requirements can be + challenging. Be sure to review the warnings in the + [`pg_basebackup` subsection](#bootstrap-from-a-live-cluster-pg_basebackup) + carefully. + +In contrast to the `initdb` method, both `recovery` and `pg_basebackup` create a new cluster based on another one (either offline or online) and can be used to spin up replica clusters. They both rely on the definition of external clusters. +Refer to the [replica cluster section](replica_cluster.md) for more information. -Given that there are several possible backup methods and combinations of backup -storage that the CloudNativePG operator provides, please refer to the -["Recovery" section](recovery.md) for guidance on each method. +Given the amount of possible backup methods and combinations of backup +storage that the CloudNativePG operator provides for `recovery`, please refer to +the dedicated ["Recovery" section](recovery.md) for guidance on each method. !!! Seealso "API reference" Please refer to the ["API reference for the `bootstrap` section](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BootstrapConfiguration) @@ -64,9 +69,9 @@ storage that the CloudNativePG operator provides, please refer to the ## The `externalClusters` section -The `externalClusters` section provides a mechanism for specifying one or more -PostgreSQL clusters associated with the current configuration. Its primary use -cases include: +The `externalClusters` section of the cluster manifest can be used to configure +access to one or more PostgreSQL clusters as *sources*. +The primary use cases include: 1. **Importing Databases:** Specify an external source to be utilized during the [importation of databases](database_import.md) via logical backup and @@ -87,7 +92,7 @@ As far as bootstrapping is concerned, `externalClusters` can be used to define the source PostgreSQL cluster for either the `pg_basebackup` method or the `recovery` one. 
An external cluster needs to have: -- a name that identifies the origin cluster, to be used as a reference via the +- a name that identifies the external cluster, to be used as a reference via the `source` option - at least one of the following: @@ -98,13 +103,20 @@ method or the `recovery` one. An external cluster needs to have: - the catalog of physical base backups for the Postgres cluster !!! Note - A recovery object store is normally an AWS S3, or an Azure Blob Storage, - or a Google Cloud Storage source that is managed by Barman Cloud. + A recovery object store is normally an AWS S3, Azure Blob Storage, + or Google Cloud Storage source that is managed by Barman Cloud. When only the streaming connection is defined, the source can be used for the `pg_basebackup` method. When only the recovery object store is defined, the -source can be used for the `recovery` method. When both are defined, any of the -two bootstrap methods can be chosen. +source can be used for the `recovery` method. When both are defined, any of +the two bootstrap methods can be chosen. The following table summarizes your +options: + +| Content of externalClusters | pg_basebackup | recovery | +|:----------------------------|:-------------:|:--------:| +| Only streaming | ✓ | | +| Only object store | | ✓ | +| Streaming and object store | ✓ | ✓ | Furthermore, in case of `pg_basebackup` or full `recovery` point in time, the cluster is eligible for replica cluster mode. This means that the cluster is @@ -121,7 +133,7 @@ Whenever a password is supplied within an `externalClusters` entry, CloudNativePG autonomously manages a [PostgreSQL password file](https://www.postgresql.org/docs/current/libpq-pgpass.html) for it, residing at `/controller/external/NAME/pgpass` in each instance. -This approach empowers CloudNativePG to securely establish connections with an +This approach enables CloudNativePG to securely establish connections with an external server without exposing any passwords in the connection string. Instead, the connection safely references the aforementioned file through the `passfile` connection parameter. @@ -415,9 +427,9 @@ spec: ## Bootstrap from another cluster -CloudNativePG enables the bootstrap of a cluster starting from +CloudNativePG enables bootstrapping a cluster starting from another one of the same major version. -This operation can happen by connecting directly to the source cluster via +This operation can be carried out either connecting directly to the source cluster via streaming replication (`pg_basebackup`), or indirectly via an existing physical *base backup* (`recovery`). @@ -434,9 +446,10 @@ by `name` (our recommendation is to use the same `name` of the origin cluster). ### Bootstrap from a backup (`recovery`) -Given the several possibilities, methods, and combinations that the -CloudNativePG operator provides in terms of backup and recovery, please refer -to the ["Recovery" section](recovery.md). +Given the variety of backup methods and combinations of backup storage +options provided by the CloudNativePG operator for `recovery`, please refer +to the dedicated ["Recovery" section](recovery.md) for detailed guidance on +each method. ### Bootstrap from a live cluster (`pg_basebackup`) @@ -460,10 +473,11 @@ The primary use cases for this method include: !!! 
Important Avoid using this method, based on physical replication, to migrate an - existing PostgreSQL cluster outside of Kubernetes into CloudNativePG unless you - are completely certain that all requirements are met and the operation has been + existing PostgreSQL cluster outside of Kubernetes into CloudNativePG, unless you + are completely certain that all [requirements](#requirements) are met and + the operation has been thoroughly tested. The CloudNativePG community does not endorse this approach - for such use cases and recommends using logical import instead. It is + for such use cases, and recommends using logical import instead. It is exceedingly rare that all requirements for physical replication are met in a way that seamlessly works with CloudNativePG. @@ -718,6 +732,9 @@ and diverge from the source. For this reason, it is advised to stop all write operations to the source database before migrating to the target database. +Note that this limitation applies only if the target cluster is not defined as +a replica cluster. + !!! Important Before you attempt a migration, you must test both the procedure and the applications. In particular, it is fundamental that you run the migration From c98e0c3995ee72e2e61f034c14051286aae204c8 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Tue, 25 Feb 2025 18:29:18 +0100 Subject: [PATCH 392/836] test: fix race in metrics tests (#6969) Wait for the GRANT to be replicated on the replica server before querying the metrics endpoint. Closes #6968 Signed-off-by: Francesco Canovai --- tests/e2e/metrics_test.go | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go index 430c89fa17..00675f9b8c 100644 --- a/tests/e2e/metrics_test.go +++ b/tests/e2e/metrics_test.go @@ -21,6 +21,7 @@ import ( "regexp" "strconv" "strings" + "time" corev1 "k8s.io/api/core/v1" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -29,8 +30,10 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/proxy" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" . 
"github.com/onsi/ginkgo/v2" @@ -328,10 +331,32 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { // Gather metrics in each pod expectedMetric := fmt.Sprintf("cnpg_%v_row_count 3", testTableName) for _, pod := range podList.Items { + // Wait a few seconds for the GRANT to be replicated + Eventually(func(g Gomega) { + out, _, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: pod.Namespace, + PodName: pod.Name, + }, + srcClusterDatabaseName, + fmt.Sprintf( + "SELECT has_table_privilege('pg_monitor', '%v', 'SELECT')", + testTableName, + ), + ) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.TrimSpace(out)).To(BeEquivalentTo("t")) + }).WithTimeout( + time.Duration(timeouts.DefaultTestTimeouts[timeouts.Short])*time.Second, + ).WithPolling(time.Second).Should(Succeed(), fmt.Sprintf("on pod %v", pod.Name)) + out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, replicaCluster.IsMetricsTLSEnabled()) - Expect(err).Should(Not(HaveOccurred())) - Expect(strings.Split(out, "\n")).Should(ContainElement(expectedMetric)) + Expect(err).ShouldNot(HaveOccurred(), + fmt.Sprintf("while getting pod metrics for pod: %v", pod.Name)) + Expect(strings.Split(out, "\n")).Should(ContainElement(expectedMetric), + fmt.Sprintf("expected metric %v not found in pod %v", expectedMetric, pod.Name)) } }) From d309193a4fd92376d0ce29acf20c87f5f723e6ad Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 25 Feb 2025 18:30:08 +0100 Subject: [PATCH 393/836] fix: remove any replaces or skipRange in the OLM config (#6967) Since the operator shouldn't be using anything else but the new file based catalog, we need to remove anything related to the old catalog system. Closes #6966 Signed-off-by: Jonathan Gonzalez V. --- Makefile | 2 -- .../bases/cloudnative-pg.clusterserviceversion.yaml | 2 -- 2 files changed, 4 deletions(-) diff --git a/Makefile b/Makefile index ed76622910..f0377b0d9e 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,6 @@ INDEX_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:index-/') COMMIT := $(shell git rev-parse --short HEAD || echo unknown) DATE := $(shell git log -1 --pretty=format:'%ad' --date short) VERSION := $(shell git describe --tags --match 'v*' | sed -e 's/^v//; s/-g[0-9a-f]\+$$//; s/-\([0-9]\+\)$$/-dev\1/') -REPLACE_VERSION := $(shell git describe --tags --abbrev=0 $(shell git describe --tags --match 'v*' --abbrev=0)^) LDFLAGS= "-X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildVersion=${VERSION} $\ -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildCommit=${COMMIT} $\ -X github.com/cloudnative-pg/cloudnative-pg/pkg/versions.buildDate=${DATE}" @@ -161,7 +160,6 @@ olm-bundle: manifests kustomize operator-sdk ## Build the bundle for OLM install rm -fr bundle bundle.Dockerfile ;\ sed -i -e "s/ClusterRole/Role/" "$${CONFIG_TMP_DIR}/config/rbac/role.yaml" "$${CONFIG_TMP_DIR}/config/rbac/role_binding.yaml" ;\ ($(KUSTOMIZE) build "$${CONFIG_TMP_DIR}/config/olm-manifests") | \ - sed -e "s@\$${VERSION}@${VERSION}@g; s@\$${REPLACE_VERSION}@${REPLACE_VERSION}@g" | \ $(OPERATOR_SDK) generate bundle --verbose --overwrite --manifests --metadata --package cloudnative-pg --channels stable-v1 --use-image-digests --default-channel stable-v1 --version "${VERSION}" ; \ echo -e "\n # OpenShift annotations." 
>> bundle/metadata/annotations.yaml ;\ echo -e " com.redhat.openshift.versions: $(OPENSHIFT_VERSIONS)" >> bundle/metadata/annotations.yaml ;\ diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index b78e7a927d..cf046f2cbb 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -12,7 +12,6 @@ metadata: containerImage: $(OPERATOR_IMAGE_NAME) repository: https://github.com/cloudnative-pg/cloudnative-pg support: Community - olm.skipRange: '>= 1.18.0 < ${VERSION}' features.operators.openshift.io/disconnected: "true" features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" @@ -116,7 +115,6 @@ spec: maturity: stable provider: name: The CloudNativePG Contributors - replaces: 'cloudnative-pg.${REPLACE_VERSION}' icon: - base64data: <?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.6.1, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 viewBox="0 0 420 500" style="enable-background:new 0 0 420 500;" xml:space="preserve">
<style type="text/css">
	.st0{fill:#121646;}
	.st1{fill:url(#SVGID_1_);}
	.st2{fill:url(#SVGID_00000155867176934933239400000010055260633525782145_);}
</style>
<g id="Layer_2">
</g>
<g id="Layer_1">
	<g>
		<g>
			<g>
				<path class="st0" d="M359,369.61c-3.49-8.49-6.07-17.37-8.68-26.19c-2.98-10.05-5.44-20.24-8.39-30.3
					c-0.87-2.96-2.34-5.84-4.04-8.43c-1.69-2.58-3.44-2.19-4.33,0.73c-2.4,7.81-4.44,15.75-7.12,23.46
					c-4.83,13.92-11.42,26.87-20.43,38.72c-4.5,5.91-9.53,11.39-14.73,16.68c-2.6,2.65-5.26,5.25-7.91,7.84
					c-2.33,2.27-4.76,4.65-3.12,8.1c1.27,2.67,4.11,2.93,6.73,2.91c0.38,0,0.75-0.01,1.12-0.01c13.44-0.22,26.88,0.15,40.32,0.3
					c9.38,0.11,18.77,0.49,28.15,0.28c3.96-0.09,8.3,0.04,11.52-3.2c3.62-3.64,4.2-6.42,1.6-10.87
					C365.86,383.09,361.86,376.57,359,369.61z"/>
				<path class="st0" d="M385.31,258.33c-1.75-0.43-3.46-0.9-5.12-1.44c-17.21-5.5-29.1-16.67-42.19-29.26
					c-2-1.93-2.65-4.13-2.51-6.89c0.74-12.86-0.81-25.56-3.07-38.17c-3.19-17.59-8.46-34.6-15.64-51.01
					c-1.62-3.68-3.77-6.93-4.89-10.84c-0.11-0.43-0.2-0.92,0.09-1.26c0.31-0.38,0.94-0.29,1.41-0.11
					c4.17,1.71,10.82,20.58,16.18,15.75c1.14-1.03,1.57-2.6,1.86-4.11c4.56-22.48-1.19-57.13-21.27-71.04
					c-9.22-6.4-19.34-12.61-30.41-15.1c-5.66-1.26-11.8-1.19-17.01,1.37c-2.58,1.26-4.91,3.12-7.72,3.79
					c-5,1.23-9.99-1.57-14.92-3.07c-5.99-1.8-12.52-1.71-18.45,0.29c-4.76,1.59-9.51,4.42-14.45,3.5
					c-6.84-1.28-11.78-9.18-18.87-11.6c-8.5-2.92-17.95-2.92-26.66-0.96c-25,5.61-48.47,23.16-65.95,41.22
					c-19.19,19.81-26.82,46.02-9.85,69.94c6.06,8.55,14.97,15.21,25.13,17.84c1.97,0.52,4.06,0.92,5.7,2.13
					c6.46,4.8-7.18,17.66-10.1,21.83c-6.28,9.04-12.14,18.42-16.49,28.57c-8.08,18.78-11.6,37.68-14.43,57.78
					c-4.89,34.78-11.44,70.53-27.71,101.94c-2.63,5.05-5.9,10.08-7.97,15.37c-0.49,1.26-0.65,2.74,0,3.9
					c0.38,0.67,1.01,1.19,1.68,1.55c3.64,2.02,9.16,1.57,13.17,1.59c12.12,0.04,24.24,0.09,36.37,0.11
					c6.17,0.02,12.34,0.04,18.51,0.04c4.44,0,10.97,1.12,15.01-1.01c2.6-1.37,4.04-4.29,4.76-7.16c1.05-4.22,1.73-8.55,2.85-12.77
					c3.46-12.9,8.37-25.18,14.68-36.98c10.01-18.74,20.46-39.92,1.88-57.02c-4.89-4.49-10.52-8.06-15.78-12.09
					c-1.01-0.79-2-2.38-0.94-3.1c0.4-0.29,0.97-0.22,1.48-0.11c20.26,4.24,39,26.01,37.74,46.85c-0.67,10.88-5.05,20.64-8.37,30.74
					c-3.37,10.26-7.32,20.4-8.26,31.3c-0.38,4.31-0.83,8.66-0.88,12.99c-0.07,5.61,2.04,8.5,8.42,8.53
					c23.32,0.04,46.63,0.4,69.95,0.63c5.92,0.07,18.09,2.02,20.46-5.63c0.88-2.83-0.25-5.86-1.71-8.42
					c-1.46-2.56-3.32-4.96-4.11-7.81c-1.44-5.09,0.81-10.46,3.19-15.19c2.76-5.48,6.19-10.61,8.93-16.09
					c3.21-5.79,6.98-11.18,9.87-17.17c3.05-6.26,5.52-12.79,7.56-19.43c2.76-8.91,4.76-18.04,6.33-27.22
					c0.34-1.99,0.55-3.99,0.79-5.99c0.12-1.01,0.24-2.01,0.39-3.02c0.16-1.13,0.41-2.46,1.87-2.35c8.7,0.61-4.36,50.4-5.5,53.44
					c-2.67,7.14-5.88,14.07-9.65,20.71c-4.73,8.33-13.44,15.89-11.31,26.41c0.76,3.73,1.66,9.07,5.03,11.42
					c4.62,3.25,10.57-2.15,13.85-5.09c5.16-4.65,9.31-10.19,13.67-15.57c11.98-14.81,21.21-31.66,27.92-49.46
					c7.29-19.43,10.55-38.75,13.1-59.22c0.16-1.17,0.4-2.49,1.39-3.12c1.12-0.72,2.58-0.18,3.79,0.36
					c14.81,6.55,30.81,11.85,46.92,10.3c1.97-0.2,4.44-1.03,4.49-3.01C389.61,259.72,387.21,258.78,385.31,258.33z M306.52,248.21
					c-0.16,0.56-0.58,1.05-1.08,1.37c-0.94,0.58-2.2,0.74-3.28,0.85c-7.18,0.74-14.25-1.46-20.71-4.38
					c-1.03-0.45-2.06-0.92-3.1-1.44c-9.85-4.89-19.01-11.87-26.57-19.86c-6.42-6.78-11.49-14.81-14.97-23.47
					c-0.74-1.84-1.62-3.81-2.04-5.77c-0.36-1.66-0.13-3.61,1.14-4.76c0.81-0.72,1.91-0.99,2.96-1.21c3.52-0.7,7.27-1.08,10.84-0.76
					c1.46,0.09,2.98,0.47,4.02,1.48c0.92,0.87,1.37,2.11,1.77,3.32c1.75,5.25,3.21,10.66,5.7,15.64c2.51,5.05,5.83,9.78,9.29,14.23
					c1.26,1.62,2.56,3.19,3.9,4.73c6.66,7.65,15.42,14.5,25.65,16.45c0.4,0.09,0.81,0.16,1.23,0.22c1.41,0.2,2.9,0.34,4.11,1.12
					c0.58,0.38,1.1,0.99,1.17,1.68C306.56,247.85,306.56,248.03,306.52,248.21z"/>
			</g>
		</g>
		<g>
			<g>
				<radialGradient id="SVGID_1_" cx="-302.7225" cy="-304.0343" r="1177.4547" gradientUnits="userSpaceOnUse">
					<stop  offset="0" style="stop-color:#732DD9"/>
					<stop  offset="0.1185" style="stop-color:#6A2BCB"/>
					<stop  offset="0.3434" style="stop-color:#5125A5"/>
					<stop  offset="0.6486" style="stop-color:#291C69"/>
					<stop  offset="0.8139" style="stop-color:#121646"/>
					<stop  offset="1" style="stop-color:#121646"/>
				</radialGradient>
				<path class="st1" d="M359,369.61c-3.49-8.49-6.07-17.37-8.68-26.19c-2.98-10.05-5.44-20.24-8.39-30.3
					c-0.87-2.96-2.34-5.84-4.04-8.43c-1.69-2.58-3.44-2.19-4.33,0.73c-2.4,7.81-4.44,15.75-7.12,23.46
					c-4.83,13.92-11.42,26.87-20.43,38.72c-4.5,5.91-9.53,11.39-14.73,16.68c-2.6,2.65-5.26,5.25-7.91,7.84
					c-2.33,2.27-4.76,4.65-3.12,8.1c1.27,2.67,4.11,2.93,6.73,2.91c0.38,0,0.75-0.01,1.12-0.01c13.44-0.22,26.88,0.15,40.32,0.3
					c9.38,0.11,18.77,0.49,28.15,0.28c3.96-0.09,8.3,0.04,11.52-3.2c3.62-3.64,4.2-6.42,1.6-10.87
					C365.86,383.09,361.86,376.57,359,369.61z"/>
				
					<radialGradient id="SVGID_00000142875328974202796550000005776013639545705360_" cx="-302.7225" cy="-304.0343" r="1177.4547" gradientUnits="userSpaceOnUse">
					<stop  offset="0" style="stop-color:#732DD9"/>
					<stop  offset="0.1185" style="stop-color:#6A2BCB"/>
					<stop  offset="0.3434" style="stop-color:#5125A5"/>
					<stop  offset="0.6486" style="stop-color:#291C69"/>
					<stop  offset="0.8139" style="stop-color:#121646"/>
					<stop  offset="1" style="stop-color:#121646"/>
				</radialGradient>
				<path style="fill:url(#SVGID_00000142875328974202796550000005776013639545705360_);" d="M385.31,258.33
					c-1.75-0.43-3.46-0.9-5.12-1.44c-17.21-5.5-29.1-16.67-42.19-29.26c-2-1.93-2.65-4.13-2.51-6.89
					c0.74-12.86-0.81-25.56-3.07-38.17c-3.19-17.59-8.46-34.6-15.64-51.01c-1.62-3.68-3.77-6.93-4.89-10.84
					c-0.11-0.43-0.2-0.92,0.09-1.26c0.31-0.38,0.94-0.29,1.41-0.11c4.17,1.71,10.82,20.58,16.18,15.75c1.14-1.03,1.57-2.6,1.86-4.11
					c4.56-22.48-1.19-57.13-21.27-71.04c-9.22-6.4-19.34-12.61-30.41-15.1c-5.66-1.26-11.8-1.19-17.01,1.37
					c-2.58,1.26-4.91,3.12-7.72,3.79c-5,1.23-9.99-1.57-14.92-3.07c-5.99-1.8-12.52-1.71-18.45,0.29c-4.76,1.59-9.51,4.42-14.45,3.5
					c-6.84-1.28-11.78-9.18-18.87-11.6c-8.5-2.92-17.95-2.92-26.66-0.96c-25,5.61-48.47,23.16-65.95,41.22
					c-19.19,19.81-26.82,46.02-9.85,69.94c6.06,8.55,14.97,15.21,25.13,17.84c1.97,0.52,4.06,0.92,5.7,2.13
					c6.46,4.8-7.18,17.66-10.1,21.83c-6.28,9.04-12.14,18.42-16.49,28.57c-8.08,18.78-11.6,37.68-14.43,57.78
					c-4.89,34.78-11.44,70.53-27.71,101.94c-2.63,5.05-5.9,10.08-7.97,15.37c-0.49,1.26-0.65,2.74,0,3.9
					c0.38,0.67,1.01,1.19,1.68,1.55c3.64,2.02,9.16,1.57,13.17,1.59c12.12,0.04,24.24,0.09,36.37,0.11
					c6.17,0.02,12.34,0.04,18.51,0.04c4.44,0,10.97,1.12,15.01-1.01c2.6-1.37,4.04-4.29,4.76-7.16c1.05-4.22,1.73-8.55,2.85-12.77
					c3.46-12.9,8.37-25.18,14.68-36.98c10.01-18.74,20.46-39.92,1.88-57.02c-4.89-4.49-10.52-8.06-15.78-12.09
					c-1.01-0.79-2-2.38-0.94-3.1c0.4-0.29,0.97-0.22,1.48-0.11c20.26,4.24,39,26.01,37.74,46.85c-0.67,10.88-5.05,20.64-8.37,30.74
					c-3.37,10.26-7.32,20.4-8.26,31.3c-0.38,4.31-0.83,8.66-0.88,12.99c-0.07,5.61,2.04,8.5,8.42,8.53
					c23.32,0.04,46.63,0.4,69.95,0.63c5.92,0.07,18.09,2.02,20.46-5.63c0.88-2.83-0.25-5.86-1.71-8.42
					c-1.46-2.56-3.32-4.96-4.11-7.81c-1.44-5.09,0.81-10.46,3.19-15.19c2.76-5.48,6.19-10.61,8.93-16.09
					c3.21-5.79,6.98-11.18,9.87-17.17c3.05-6.26,5.52-12.79,7.56-19.43c2.76-8.91,4.76-18.04,6.33-27.22
					c0.34-1.99,0.55-3.99,0.79-5.99c0.12-1.01,0.24-2.01,0.39-3.02c0.16-1.13,0.41-2.46,1.87-2.35c8.7,0.61-4.36,50.4-5.5,53.44
					c-2.67,7.14-5.88,14.07-9.65,20.71c-4.73,8.33-13.44,15.89-11.31,26.41c0.76,3.73,1.66,9.07,5.03,11.42
					c4.62,3.25,10.57-2.15,13.85-5.09c5.16-4.65,9.31-10.19,13.67-15.57c11.98-14.81,21.21-31.66,27.92-49.46
					c7.29-19.43,10.55-38.75,13.1-59.22c0.16-1.17,0.4-2.49,1.39-3.12c1.12-0.72,2.58-0.18,3.79,0.36
					c14.81,6.55,30.81,11.85,46.92,10.3c1.97-0.2,4.44-1.03,4.49-3.01C389.61,259.72,387.21,258.78,385.31,258.33z M306.52,248.21
					c-0.16,0.56-0.58,1.05-1.08,1.37c-0.94,0.58-2.2,0.74-3.28,0.85c-7.18,0.74-14.25-1.46-20.71-4.38
					c-1.03-0.45-2.06-0.92-3.1-1.44c-9.85-4.89-19.01-11.87-26.57-19.86c-6.42-6.78-11.49-14.81-14.97-23.47
					c-0.74-1.84-1.62-3.81-2.04-5.77c-0.36-1.66-0.13-3.61,1.14-4.76c0.81-0.72,1.91-0.99,2.96-1.21c3.52-0.7,7.27-1.08,10.84-0.76
					c1.46,0.09,2.98,0.47,4.02,1.48c0.92,0.87,1.37,2.11,1.77,3.32c1.75,5.25,3.21,10.66,5.7,15.64c2.51,5.05,5.83,9.78,9.29,14.23
					c1.26,1.62,2.56,3.19,3.9,4.73c6.66,7.65,15.42,14.5,25.65,16.45c0.4,0.09,0.81,0.16,1.23,0.22c1.41,0.2,2.9,0.34,4.11,1.12
					c0.58,0.38,1.1,0.99,1.17,1.68C306.56,247.85,306.56,248.03,306.52,248.21z"/>
			</g>
		</g>
	</g>
	<g>
		<g>
			<g>
				<path class="st0" d="M49.62,435.45c1.26-1.81,3.15-2.72,5.67-2.72c2.33,0,4.06,0.54,5.19,1.6c1.14,1.07,1.8,2.79,1.98,5.17h7.58
					c-0.4-4.11-1.9-7.3-4.5-9.58c-2.6-2.27-6.02-3.41-10.26-3.41c-3.07,0-5.76,0.72-8.08,2.16c-2.32,1.44-4.1,3.51-5.35,6.21
					c-1.25,2.7-1.87,5.81-1.87,9.35v2.25c0,5.54,1.35,9.9,4.06,13.08c2.7,3.18,6.42,4.76,11.13,4.76c4.31,0,7.79-1.14,10.43-3.41
					c2.64-2.27,4.1-5.39,4.39-9.35h-7.58c-0.17,2.24-0.83,3.9-1.98,4.97c-1.15,1.07-2.9,1.61-5.24,1.61c-2.6,0-4.49-0.92-5.7-2.76
					c-1.2-1.84-1.81-4.74-1.81-8.72v-2.78C47.72,440.07,48.36,437.26,49.62,435.45z"/>
				<rect x="74.88" y="425" class="st0" width="7.33" height="38.82"/>
				<path class="st0" d="M109.04,439.36c-2.33-2.26-5.35-3.39-9.04-3.39c-2.6,0-4.87,0.58-6.84,1.74c-1.96,1.16-3.47,2.81-4.51,4.93
					c-1.04,2.12-1.57,4.54-1.57,7.25v0.33c0,4.35,1.18,7.79,3.53,10.31c2.35,2.53,5.5,3.79,9.44,3.79c3.94,0,7.09-1.27,9.43-3.8
					c2.34-2.54,3.51-5.9,3.51-10.1l-0.05-1.87C112.68,444.68,111.38,441.62,109.04,439.36z M104.2,456.4
					c-0.99,1.36-2.38,2.04-4.15,2.04c-1.82,0-3.22-0.69-4.2-2.06c-0.98-1.38-1.47-3.36-1.47-5.96c0-2.92,0.49-5.08,1.47-6.47
					c0.98-1.39,2.36-2.09,4.15-2.09c1.8,0,3.2,0.7,4.2,2.1c0.99,1.4,1.49,3.38,1.49,5.92C105.69,452.87,105.2,455.04,104.2,456.4z"
					/>
				<path class="st0" d="M133.62,455.86c-0.93,1.72-2.63,2.58-5.11,2.58c-2.6,0-3.89-1.43-3.89-4.28v-17.69h-7.3v17.92
					c0.03,3.25,0.82,5.72,2.36,7.41c1.54,1.68,3.78,2.53,6.71,2.53c3.18,0,5.68-1.1,7.48-3.29l0.2,2.78h6.88v-27.35h-7.33V455.86z"
					/>
				<path class="st0" d="M162.64,438.95c-1.74-1.99-3.93-2.98-6.6-2.98c-3.32,0-5.94,1.26-7.85,3.79
					c-1.91,2.53-2.87,5.92-2.87,10.19c0,4.52,0.97,8.04,2.92,10.58c1.95,2.54,4.53,3.8,7.75,3.8c2.86,0,5.21-1.14,7.03-3.41
					l0.35,2.91h6.6V425h-7.33V438.95z M162.64,455.58c-0.93,1.91-2.52,2.86-4.78,2.86c-1.7,0-3-0.68-3.89-2.05
					c-0.89-1.37-1.34-3.33-1.34-5.9c0-5.72,1.76-8.58,5.28-8.58c2.24,0,3.82,0.95,4.73,2.86V455.58z"/>
				<polygon class="st0" points="198.48,451.29 183.7,427.02 176.11,427.02 176.11,463.82 183.7,463.82 183.7,439.61 198.46,463.82 
					206.04,463.82 206.04,427.02 198.48,427.02 				"/>
				<path class="st0" d="M234.32,445.65c0-3.07-0.99-5.45-2.97-7.14c-1.98-1.69-4.66-2.54-8.03-2.54c-2.22,0-4.23,0.38-6.02,1.15
					c-1.79,0.77-3.17,1.82-4.15,3.17c-0.98,1.35-1.47,2.81-1.47,4.4h7.3c0-1.03,0.35-1.84,1.05-2.43c0.7-0.59,1.66-0.88,2.89-0.88
					c1.4,0,2.42,0.38,3.07,1.14c0.65,0.76,0.97,1.77,0.97,3.03v1.57h-3.36c-4.06,0.02-7.17,0.8-9.31,2.35
					c-2.15,1.55-3.22,3.77-3.22,6.67c0,2.36,0.88,4.31,2.64,5.86c1.76,1.55,3.98,2.33,6.66,2.33c2.83,0,5.13-0.99,6.9-2.96
					c0.15,0.98,0.4,1.79,0.73,2.45h7.38v-0.43c-0.71-1.33-1.07-3.29-1.09-5.89V445.65z M226.99,456.09
					c-0.44,0.81-1.11,1.45-2.02,1.93c-0.91,0.48-1.9,0.72-2.98,0.72c-1.11,0-1.99-0.29-2.63-0.88c-0.64-0.59-0.96-1.34-0.96-2.25
					l0.03-0.43c0.24-2.53,2.18-3.79,5.84-3.79h2.73V456.09z"/>
				<path class="st0" d="M249.26,457.66c-0.39-0.44-0.58-1.16-0.58-2.17v-13.65h4.68v-5.36h-4.68v-6.72h-7.3v6.72h-3.99v5.36h3.99
					v14.74c0.1,5.17,2.71,7.76,7.84,7.76c1.52,0,2.99-0.22,4.42-0.66v-5.54c-0.62,0.12-1.34,0.18-2.15,0.18
					C250.39,458.31,249.65,458.1,249.26,457.66z"/>
				<path class="st0" d="M261.47,425.63c-1.26,0-2.26,0.35-3,1.06c-0.73,0.71-1.1,1.61-1.1,2.7c0,1.11,0.37,2.02,1.12,2.73
					c0.75,0.71,1.74,1.06,2.97,1.06c1.23,0,2.22-0.35,2.97-1.06c0.75-0.71,1.12-1.62,1.12-2.73c0-1.1-0.37-2-1.11-2.7
					C263.71,425.99,262.72,425.63,261.47,425.63z"/>
				<rect x="257.81" y="436.48" class="st0" width="7.33" height="27.35"/>
				<polygon class="st0" points="281.39,454.9 276.31,436.48 268.67,436.48 277.9,463.82 284.88,463.82 294.1,436.48 286.47,436.48 
									"/>
				<path class="st0" d="M320.82,449.54c0-4.33-1.07-7.68-3.22-10.03c-2.15-2.36-5.09-3.54-8.83-3.54c-2.49,0-4.71,0.59-6.66,1.76
					c-1.95,1.17-3.45,2.84-4.51,5c-1.06,2.17-1.59,4.62-1.59,7.37v0.71c0,4.09,1.26,7.37,3.78,9.83c2.52,2.46,5.78,3.69,9.79,3.69
					c2.27,0,4.35-0.43,6.22-1.3c1.87-0.87,3.35-2.08,4.45-3.63l-3.59-4.02c-1.58,2.04-3.78,3.06-6.6,3.06
					c-1.82,0-3.32-0.54-4.51-1.62c-1.19-1.08-1.9-2.51-2.14-4.3h17.42V449.54z M313.64,447.55h-10.16c0.25-1.83,0.83-3.23,1.73-4.21
					c0.9-0.98,2.08-1.47,3.53-1.47c1.55,0,2.75,0.44,3.59,1.33c0.84,0.89,1.28,2.15,1.31,3.77V447.55z"/>
			</g>
			<g>
				<path class="st0" d="M49.62,435.45c1.26-1.81,3.15-2.72,5.67-2.72c2.33,0,4.06,0.54,5.19,1.6c1.14,1.07,1.8,2.79,1.98,5.17h7.58
					c-0.4-4.11-1.9-7.3-4.5-9.58c-2.6-2.27-6.02-3.41-10.26-3.41c-3.07,0-5.76,0.72-8.08,2.16c-2.32,1.44-4.1,3.51-5.35,6.21
					c-1.25,2.7-1.87,5.81-1.87,9.35v2.25c0,5.54,1.35,9.9,4.06,13.08c2.7,3.18,6.42,4.76,11.13,4.76c4.31,0,7.79-1.14,10.43-3.41
					c2.64-2.27,4.1-5.39,4.39-9.35h-7.58c-0.17,2.24-0.83,3.9-1.98,4.97c-1.15,1.07-2.9,1.61-5.24,1.61c-2.6,0-4.49-0.92-5.7-2.76
					c-1.2-1.84-1.81-4.74-1.81-8.72v-2.78C47.72,440.07,48.36,437.26,49.62,435.45z"/>
				<rect x="74.88" y="425" class="st0" width="7.33" height="38.82"/>
				<path class="st0" d="M109.04,439.36c-2.33-2.26-5.35-3.39-9.04-3.39c-2.6,0-4.87,0.58-6.84,1.74c-1.96,1.16-3.47,2.81-4.51,4.93
					c-1.04,2.12-1.57,4.54-1.57,7.25v0.33c0,4.35,1.18,7.79,3.53,10.31c2.35,2.53,5.5,3.79,9.44,3.79c3.94,0,7.09-1.27,9.43-3.8
					c2.34-2.54,3.51-5.9,3.51-10.1l-0.05-1.87C112.68,444.68,111.38,441.62,109.04,439.36z M104.2,456.4
					c-0.99,1.36-2.38,2.04-4.15,2.04c-1.82,0-3.22-0.69-4.2-2.06c-0.98-1.38-1.47-3.36-1.47-5.96c0-2.92,0.49-5.08,1.47-6.47
					c0.98-1.39,2.36-2.09,4.15-2.09c1.8,0,3.2,0.7,4.2,2.1c0.99,1.4,1.49,3.38,1.49,5.92C105.69,452.87,105.2,455.04,104.2,456.4z"
					/>
				<path class="st0" d="M133.62,455.86c-0.93,1.72-2.63,2.58-5.11,2.58c-2.6,0-3.89-1.43-3.89-4.28v-17.69h-7.3v17.92
					c0.03,3.25,0.82,5.72,2.36,7.41c1.54,1.68,3.78,2.53,6.71,2.53c3.18,0,5.68-1.1,7.48-3.29l0.2,2.78h6.88v-27.35h-7.33V455.86z"
					/>
				<path class="st0" d="M162.64,438.95c-1.74-1.99-3.93-2.98-6.6-2.98c-3.32,0-5.94,1.26-7.85,3.79
					c-1.91,2.53-2.87,5.92-2.87,10.19c0,4.52,0.97,8.04,2.92,10.58c1.95,2.54,4.53,3.8,7.75,3.8c2.86,0,5.21-1.14,7.03-3.41
					l0.35,2.91h6.6V425h-7.33V438.95z M162.64,455.58c-0.93,1.91-2.52,2.86-4.78,2.86c-1.7,0-3-0.68-3.89-2.05
					c-0.89-1.37-1.34-3.33-1.34-5.9c0-5.72,1.76-8.58,5.28-8.58c2.24,0,3.82,0.95,4.73,2.86V455.58z"/>
				<polygon class="st0" points="198.48,451.29 183.7,427.02 176.11,427.02 176.11,463.82 183.7,463.82 183.7,439.61 198.46,463.82 
					206.04,463.82 206.04,427.02 198.48,427.02 				"/>
				<path class="st0" d="M234.32,445.65c0-3.07-0.99-5.45-2.97-7.14c-1.98-1.69-4.66-2.54-8.03-2.54c-2.22,0-4.23,0.38-6.02,1.15
					c-1.79,0.77-3.17,1.82-4.15,3.17c-0.98,1.35-1.47,2.81-1.47,4.4h7.3c0-1.03,0.35-1.84,1.05-2.43c0.7-0.59,1.66-0.88,2.89-0.88
					c1.4,0,2.42,0.38,3.07,1.14c0.65,0.76,0.97,1.77,0.97,3.03v1.57h-3.36c-4.06,0.02-7.17,0.8-9.31,2.35
					c-2.15,1.55-3.22,3.77-3.22,6.67c0,2.36,0.88,4.31,2.64,5.86c1.76,1.55,3.98,2.33,6.66,2.33c2.83,0,5.13-0.99,6.9-2.96
					c0.15,0.98,0.4,1.79,0.73,2.45h7.38v-0.43c-0.71-1.33-1.07-3.29-1.09-5.89V445.65z M226.99,456.09
					c-0.44,0.81-1.11,1.45-2.02,1.93c-0.91,0.48-1.9,0.72-2.98,0.72c-1.11,0-1.99-0.29-2.63-0.88c-0.64-0.59-0.96-1.34-0.96-2.25
					l0.03-0.43c0.24-2.53,2.18-3.79,5.84-3.79h2.73V456.09z"/>
				<path class="st0" d="M249.26,457.66c-0.39-0.44-0.58-1.16-0.58-2.17v-13.65h4.68v-5.36h-4.68v-6.72h-7.3v6.72h-3.99v5.36h3.99
					v14.74c0.1,5.17,2.71,7.76,7.84,7.76c1.52,0,2.99-0.22,4.42-0.66v-5.54c-0.62,0.12-1.34,0.18-2.15,0.18
					C250.39,458.31,249.65,458.1,249.26,457.66z"/>
				<path class="st0" d="M261.47,425.63c-1.26,0-2.26,0.35-3,1.06c-0.73,0.71-1.1,1.61-1.1,2.7c0,1.11,0.37,2.02,1.12,2.73
					c0.75,0.71,1.74,1.06,2.97,1.06c1.23,0,2.22-0.35,2.97-1.06c0.75-0.71,1.12-1.62,1.12-2.73c0-1.1-0.37-2-1.11-2.7
					C263.71,425.99,262.72,425.63,261.47,425.63z"/>
				<rect x="257.81" y="436.48" class="st0" width="7.33" height="27.35"/>
				<polygon class="st0" points="281.39,454.9 276.31,436.48 268.67,436.48 277.9,463.82 284.88,463.82 294.1,436.48 286.47,436.48 
									"/>
				<path class="st0" d="M320.82,449.54c0-4.33-1.07-7.68-3.22-10.03c-2.15-2.36-5.09-3.54-8.83-3.54c-2.49,0-4.71,0.59-6.66,1.76
					c-1.95,1.17-3.45,2.84-4.51,5c-1.06,2.17-1.59,4.62-1.59,7.37v0.71c0,4.09,1.26,7.37,3.78,9.83c2.52,2.46,5.78,3.69,9.79,3.69
					c2.27,0,4.35-0.43,6.22-1.3c1.87-0.87,3.35-2.08,4.45-3.63l-3.59-4.02c-1.58,2.04-3.78,3.06-6.6,3.06
					c-1.82,0-3.32-0.54-4.51-1.62c-1.19-1.08-1.9-2.51-2.14-4.3h17.42V449.54z M313.64,447.55h-10.16c0.25-1.83,0.83-3.23,1.73-4.21
					c0.9-0.98,2.08-1.47,3.53-1.47c1.55,0,2.75,0.44,3.59,1.33c0.84,0.89,1.28,2.15,1.31,3.77V447.55z"/>
			</g>
		</g>
		<g>
			<path class="st0" d="M334.21,450.86v12.97h-7.58v-36.8h14.36c2.76,0,5.19,0.51,7.29,1.52c2.1,1.01,3.71,2.45,4.84,4.31
				c1.13,1.86,1.69,3.98,1.69,6.36c0,3.61-1.23,6.45-3.7,8.53c-2.47,2.08-5.89,3.12-10.25,3.12H334.21z M334.21,444.72h6.77
				c2.01,0,3.53-0.47,4.59-1.42c1.05-0.94,1.58-2.29,1.58-4.04c0-1.8-0.53-3.26-1.59-4.37s-2.53-1.68-4.4-1.72h-6.95V444.72z"/>
			<path class="st0" d="M389.14,459.17c-1.36,1.63-3.29,2.9-5.79,3.8c-2.49,0.9-5.26,1.35-8.29,1.35c-3.18,0-5.98-0.7-8.38-2.09
				c-2.4-1.39-4.25-3.41-5.56-6.05c-1.31-2.65-1.98-5.75-2.01-9.33v-2.5c0-3.67,0.62-6.85,1.86-9.54c1.24-2.69,3.02-4.74,5.36-6.17
				c2.33-1.42,5.07-2.14,8.2-2.14c4.36,0,7.78,1.04,10.24,3.12c2.46,2.08,3.92,5.11,4.37,9.09h-7.38c-0.34-2.11-1.08-3.65-2.24-4.63
				c-1.15-0.98-2.74-1.47-4.76-1.47c-2.58,0-4.54,0.97-5.89,2.91c-1.35,1.94-2.03,4.82-2.05,8.64v2.35c0,3.86,0.73,6.77,2.2,8.75
				c1.47,1.97,3.61,2.96,6.45,2.96c2.85,0,4.88-0.61,6.09-1.82v-6.34h-6.9v-5.59h14.48V459.17z"/>
		</g>
	</g>
</g>
</svg>
 mediatype: image/svg+xml From 918f9b087423cf386adef18dd6917c8928deaab7 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 25 Feb 2025 18:37:25 +0100 Subject: [PATCH 394/836] fix(plugin): do not append pgData if walName already contains it (#6964) This patch prevents the plugin executor from appending `$PGDATA/pg_wal` two times when the archiving of a WAL file is requested. This was happening when archiving the ready WALs in a former primary. Fixes: #6965 Fixes: cloudnative-pg/plugin-barman-cloud#164 Signed-off-by: Armando Ruocco --- pkg/management/postgres/archiver/archiver.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go index ea40498848..5e8c193b41 100644 --- a/pkg/management/postgres/archiver/archiver.go +++ b/pkg/management/postgres/archiver/archiver.go @@ -151,7 +151,7 @@ func internalRun( // We allow plugins to archive WALs even if there is no plugin // directly enabled by the user, to retain compatibility with // the old API. - if err := archiveWALViaPlugins(ctx, cluster, path.Join(pgData, walName)); err != nil { + if err := archiveWALViaPlugins(ctx, cluster, pgData, walName); err != nil { return err } @@ -260,10 +260,16 @@ func internalRun( func archiveWALViaPlugins( ctx context.Context, cluster *apiv1.Cluster, + pgData string, walName string, ) error { contextLogger := log.FromContext(ctx) + // check if the `walName` is an absolute path or just the filename + if !filepath.IsAbs(walName) { + walName = filepath.Join(pgData, walName) + } + plugins := repository.New() availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) if err != nil { From 0b97551ace3af202dd5d49db4d1ce41776a3a7d8 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 25 Feb 2025 19:52:44 +0100 Subject: [PATCH 395/836] chore(log): include pg_ctl options in the instance manager log (#6943) This will help debugging the PostgreSQL shutdown process. Signed-off-by: Leonardo Cecchi --- pkg/management/postgres/instance.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index f7402cc49d..02906ac396 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -507,6 +507,7 @@ func (instance *Instance) Shutdown(ctx context.Context, options shutdownOptions) "pgdata", instance.PgData, "mode", options.Mode, "timeout", options.Timeout, + "pgCtlOptions", pgCtlOptions, ) pgCtlCmd := exec.Command(pgCtlName, pgCtlOptions...) 
// #nosec
@@ -621,8 +622,10 @@ func (instance *Instance) Reload(ctx context.Context) error {
 		"reload",
 	}
 
-	contextLogger.Info("Requesting configuration reload",
-		"pgdata", instance.PgData)
+	contextLogger.Info(
+		"Requesting configuration reload",
+		"pgdata", instance.PgData,
+		"pgCtlOptions", options)
 
 	// Need to reload certificates if they changed
 	if instance.primaryPool != nil {

From e5c9b413951e2ce6b6a95fd835e9fff12a967a65 Mon Sep 17 00:00:00 2001
From: Jeff Mealo
Date: Wed, 26 Feb 2025 03:16:05 -0500
Subject: [PATCH 396/836] feat: classify known Azure CSI snapshot errors as
 retriable (#6906)

Fixes: #6901

Signed-off-by: Jeff Mealo
Signed-off-by: Armando Ruocco
Signed-off-by: Leonardo Cecchi
Co-authored-by: Armando Ruocco
Co-authored-by: Leonardo Cecchi
---
 .wordlist-en-custom.txt                       |  1 +
 .../backup/volumesnapshot/errors.go           | 89 +++++++++++++++++++
 .../backup/volumesnapshot/errors_test.go      | 37 ++++++++
 .../backup/volumesnapshot/resources.go        | 21 ++---
 4 files changed, 132 insertions(+), 16 deletions(-)
 create mode 100644 pkg/reconciler/backup/volumesnapshot/errors.go
 create mode 100644 pkg/reconciler/backup/volumesnapshot/errors_test.go

diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index a65db04731..f13ec7891d 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -1147,6 +1147,7 @@ rehydrated
 rehydration
 relabelings
 relatime
+retryable
 replicationSecretVersion
 replicationSlots
 replicationTLSSecret
diff --git a/pkg/reconciler/backup/volumesnapshot/errors.go b/pkg/reconciler/backup/volumesnapshot/errors.go
new file mode 100644
index 0000000000..e5d8b1159d
--- /dev/null
+++ b/pkg/reconciler/backup/volumesnapshot/errors.go
@@ -0,0 +1,89 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volumesnapshot
+
+import (
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	retryableStatusCodes = []int{408, 429, 500, 502, 503, 504}
+	httpStatusCodeRegex  = regexp.MustCompile(`HTTPStatusCode:\s(\d{3})`)
+)
+
+// isRetriableErrorMessage detects if a certain error message belongs
+// to a retriable error or not. This is obviously a heuristic, but
+// unfortunately that information is exposed neither by the Kubernetes
+// VolumeSnapshot API nor by the CSI drivers.
+func isRetriableErrorMessage(msg string) bool {
+	isRetryableFuncs := []func(string) bool{
+		isExplicitlyRetriableError,
+		isRetryableHTTPError,
+		isConflictError,
+	}
+
+	for _, isRetryableFunc := range isRetryableFuncs {
+		if isRetryableFunc(msg) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// isConflictError detects optimistic locking errors
+func isConflictError(msg string) bool {
+	// Obviously this is a heuristic, but unfortunately we don't have
+	// the information we need.
+	// We're trying to handle the cases where the external-snapshotter
+	// controller failed on a conflict with the following error:
+	//
+	// > the object has been modified; please apply your changes to the
+	// > latest version and try again
+
+	return strings.Contains(msg, "the object has been modified")
+}
+
+// isExplicitlyRetriableError detects explicitly retriable errors as raised
+// by the Azure CSI driver. These errors contain the "Retriable: true"
+// string.
+func isExplicitlyRetriableError(msg string) bool {
+	return strings.Contains(msg, "Retriable: true")
+}
+
+// isRetryableHTTPError returns true when the message carries one of the
+// following retryable HTTP status codes:
+// - 408: Request Timeout
+// - 429: Too Many Requests
+// - 500: Internal Server Error
+// - 502: Bad Gateway
+// - 503: Service Unavailable
+// - 504: Gateway Timeout
+func isRetryableHTTPError(msg string) bool {
+	if matches := httpStatusCodeRegex.FindStringSubmatch(msg); len(matches) == 2 {
+		if code, err := strconv.Atoi(matches[1]); err == nil {
+			for _, retryableCode := range retryableStatusCodes {
+				if code == retryableCode {
+					return true
+				}
+			}
+		}
+	}
+
+	return false
+}
diff --git a/pkg/reconciler/backup/volumesnapshot/errors_test.go b/pkg/reconciler/backup/volumesnapshot/errors_test.go
new file mode 100644
index 0000000000..1652fd062f
--- /dev/null
+++ b/pkg/reconciler/backup/volumesnapshot/errors_test.go
@@ -0,0 +1,37 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volumesnapshot
+
+import (
+	. "github.com/onsi/ginkgo/v2"
+	.
"github.com/onsi/gomega" +) + +var _ = Describe("Retriable error messages", func() { + DescribeTable( + "Retriable error messages", + func(msg string, isRetriable bool) { + Expect(isRetriableErrorMessage(msg)).To(Equal(isRetriable)) + }, + Entry("conflict", "Hey, the object has been modified!", true), + Entry("non-retriable error", "VolumeSnapshotClass not found", false), + Entry("explicitly retriable error", "Retriable: true, the storage is gone away forever", true), + Entry("explicitly non-retriable error", "Retriable: false because my pod is working", false), + Entry("error code 502 - retriable", "RetryAfter: 0s, HTTPStatusCode: 502, RawError: Internal Server Error", true), + Entry("error code 404 - non retriable", "RetryAfter: 0s, HTTPStatusCode: 404, RawError: Not found", false), + ) +}) diff --git a/pkg/reconciler/backup/volumesnapshot/resources.go b/pkg/reconciler/backup/volumesnapshot/resources.go index 5e8231f94e..eaee26cf81 100644 --- a/pkg/reconciler/backup/volumesnapshot/resources.go +++ b/pkg/reconciler/backup/volumesnapshot/resources.go @@ -19,7 +19,6 @@ package volumesnapshot import ( "context" "fmt" - "strings" storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -69,26 +68,16 @@ func (err volumeSnapshotError) Error() string { // IsRetryable returns true if the external snapshotter controller // will retry taking the snapshot func (err volumeSnapshotError) isRetryable() bool { - if err.InternalError.Message == nil { - return false - } - - // Obviously this is a heuristic, but unfortunately we don't have - // the information we need. - // We're trying to handle the cases where the external-snapshotter - // controller failed on a conflict with the following error: - // - // > the object has been modified; please apply your changes to the - // > latest version and try again - // TODO: instead of blindingly retry on matching errors, we // should enhance our CRD with a configurable deadline. After // the deadline have been met on err.InternalError.CreatedAt // the backup can be marked as failed - return strings.Contains( - *err.InternalError.Message, - "the object has been modified") + if err.InternalError.Message == nil { + return false + } + + return isRetriableErrorMessage(*err.InternalError.Message) } // slice represents a slice of []storagesnapshotv1.VolumeSnapshot From 0ca8b40715461bf7e1eeeea01c909c6c51a166fe Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 26 Feb 2025 09:30:42 +0100 Subject: [PATCH 397/836] chore: fix word list (#6982) Signed-off-by: Leonardo Cecchi --- .wordlist-en-custom.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index f13ec7891d..a7c81ba57f 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1147,7 +1147,6 @@ rehydrated rehydration relabelings relatime -retryable replicationSecretVersion replicationSlots replicationTLSSecret @@ -1165,6 +1164,7 @@ restoreAdditionalCommandArgs restoreJobHookCapabilities resync retentionPolicy +retryable reusePVC ro robfig From 8a4e21e096cef22b466658694a39a5222ee8e09b Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Wed, 26 Feb 2025 09:54:34 +0100 Subject: [PATCH 398/836] test: log the pod in the replication slot aligned check (#6974) Add more information in case of e2e failure. Also, reduce the polling frequency to not overload the API server. 
Closes #6973

Signed-off-by: Francesco Canovai
---
 tests/e2e/asserts_test.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index 32a01e734e..5e4ef696dc 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -2831,17 +2831,17 @@ func AssertClusterReplicationSlotsAligned(
 ) {
 	podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName)
 	Expect(err).ToNot(HaveOccurred())
-	Eventually(func() bool {
+	Eventually(func(g Gomega) {
 		var lsnList []string
 		for _, pod := range podList.Items {
 			out, err := replicationslot.GetReplicationSlotLsnsOnPod(
 				env.Ctx, env.Client, env.Interface, env.RestClientConfig,
 				namespace, clusterName, postgres.AppDBName, pod)
-			Expect(err).ToNot(HaveOccurred())
+			g.Expect(err).ToNot(HaveOccurred(), "error getting replication slot lsn on pod %v", pod.Name)
 			lsnList = append(lsnList, out...)
 		}
-		return replicationslot.AreSameLsn(lsnList)
-	}, 300).Should(BeEquivalentTo(true),
+		g.Expect(replicationslot.AreSameLsn(lsnList)).To(BeTrue())
+	}).WithTimeout(300*time.Second).WithPolling(2*time.Second).Should(Succeed(),
 		func() string {
 			return replicationslot.PrintReplicationSlots(
 				env.Ctx, env.Client, env.Interface, env.RestClientConfig,

From ead66724bd2fbc74f0132bb2dfa7188a2cfebf67 Mon Sep 17 00:00:00 2001
From: Peggie
Date: Wed, 26 Feb 2025 10:51:31 +0100
Subject: [PATCH 399/836] feat: Public Cloud K8S versions update (#6980)

Update the versions used to test the operator on public cloud providers

Signed-off-by: github-actions[bot]
 <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: public-cloud-k8s-versions-check
---
 .github/aks_versions.json | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/aks_versions.json b/.github/aks_versions.json
index c1825b7a7e..9ba3fc5a09 100644
--- a/.github/aks_versions.json
+++ b/.github/aks_versions.json
@@ -1,6 +1,6 @@
 [
-  "1.31.3",
-  "1.30.7",
-  "1.29.9",
-  "1.28.9"
+  "1.32.0",
+  "1.31.5",
+  "1.30.9",
+  "1.29.9"
 ]

From 6e46e6c9eed9460f0e9532523a84393a63513e0d Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Wed, 26 Feb 2025 13:58:31 +0100
Subject: [PATCH 400/836] test: fix tests remaining stuck waiting for portforward (#6987)

Error out if ForwardPorts exits with an error, instead of going into a
blocking select.
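For context, the anti-pattern being removed is a `select` that waits only on
the ready channel, so a forwarder that dies first leaves the caller stuck
forever. Below is a minimal sketch of the corrected shape, simplified and
invented for illustration: the channel wiring stands in for the helper's
ready channel and forwarder goroutine, and the error text is made up.

```go
package main

import (
	"errors"
	"fmt"
)

// waitForForwarder returns as soon as the port-forwarder signals readiness,
// or surfaces its startup failure instead of blocking on a select that
// could otherwise never fire.
func waitForForwarder(ready <-chan struct{}, errCh <-chan error) error {
	select {
	case <-ready:
		return nil
	case err := <-errCh:
		return fmt.Errorf("error starting port-forward: %w", err)
	}
}

func main() {
	ready := make(chan struct{})
	errCh := make(chan error, 1)
	// Simulate the forwarder goroutine dying before it becomes ready.
	errCh <- errors.New("listener creation failed")
	fmt.Println(waitForForwarder(ready, errCh))
}
```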
Closes #6985 Signed-off-by: Francesco Canovai --- tests/utils/forwardconnection/forwardconnection.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/utils/forwardconnection/forwardconnection.go b/tests/utils/forwardconnection/forwardconnection.go index 77b774ffc1..9b70f7c52b 100644 --- a/tests/utils/forwardconnection/forwardconnection.go +++ b/tests/utils/forwardconnection/forwardconnection.go @@ -127,6 +127,9 @@ func (fc *ForwardConnection) StartAndWait() error { return } }() + if err != nil { + return fmt.Errorf("error starting port-forward: %w", err) + } select { case <-fc.readyChannel: ginkgo.GinkgoWriter.Println("port-forward ready") From 96f60f77013f9939fb751cb52d436a3375636d4f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 26 Feb 2025 22:23:52 +0100 Subject: [PATCH 401/836] fix(deps): update module github.com/cloudnative-pg/cnpg-i to v0.1.0 (main) (#6994) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index c131e3af51..ab65f3f309 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450 - github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc + github.com/cloudnative-pg/cnpg-i v0.1.0 github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 @@ -108,7 +108,7 @@ require ( golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect - google.golang.org/protobuf v1.36.4 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/go.sum b/go.sum index 6a7af17e01..ec50f709eb 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450 h1:u11mKIHmbEGQWLsAb5hguwgGOOddA8lpPFAViBpbkt8= github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc h1:wo0KfZ4NRhA2/COjz8vTd1P+K/tMUMBPLtbfYQx138A= -github.com/cloudnative-pg/cnpg-i v0.0.0-20241224161104-7e2cfa59debc/go.mod h1:wmXfeji9qPPW+F/1OgDHdkI97ISN1I2f8vJKv/7sssY= +github.com/cloudnative-pg/cnpg-i v0.1.0 h1:QH2xTsrODMhEEc6B25GbOYe7ZIttDmSkYvXotfU5dfs= +github.com/cloudnative-pg/cnpg-i v0.1.0/go.mod h1:G28BhgUEHqrxEyyQeHz8BbpMVAsGuLhJm/tHUbDi8Sw= github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 h1:RvwDA4W8K8NQNVQTOzrf9o8P328N7NXztvnq3cUncww= github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0/go.mod h1:pitcj6ztiuxfSFH5EbVHv8iCVxF+yQkzf9o9A1KoDvI= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -264,8 +264,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1: google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= -google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= -google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From aef79141c26ea20531ee9351dc3773845f085c88 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 26 Feb 2025 22:38:10 +0100 Subject: [PATCH 402/836] fix(deps): update module github.com/cloudnative-pg/barman-cloud to v0.1.0 (main) (#6993) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index ab65f3f309..31362d6515 100644 --- a/go.mod +++ b/go.mod @@ -8,9 +8,9 @@ require ( github.com/avast/retry-go/v4 v4.6.1 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450 + github.com/cloudnative-pg/barman-cloud v0.1.0 github.com/cloudnative-pg/cnpg-i v0.1.0 - github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 + github.com/cloudnative-pg/machinery v0.1.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index ec50f709eb..0f0139aeec 100644 --- a/go.sum +++ b/go.sum @@ -18,12 +18,12 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450 h1:u11mKIHmbEGQWLsAb5hguwgGOOddA8lpPFAViBpbkt8= -github.com/cloudnative-pg/barman-cloud v0.0.0-20250104195650-c1472628b450/go.mod h1:HPGwXHlatQEnb2HdsbGTZLEo8qlxKLdxTHiTeF9TTqw= +github.com/cloudnative-pg/barman-cloud v0.1.0 h1:e/z52CehMBIh1LjZqNBJnncWJbS+1JYvRMBR8Js6Uiw= +github.com/cloudnative-pg/barman-cloud v0.1.0/go.mod h1:rJUJO/f1yNckLZiVxHAyRmKY+4EPJkYRJsGbTZRJQSY= github.com/cloudnative-pg/cnpg-i v0.1.0 h1:QH2xTsrODMhEEc6B25GbOYe7ZIttDmSkYvXotfU5dfs= github.com/cloudnative-pg/cnpg-i v0.1.0/go.mod h1:G28BhgUEHqrxEyyQeHz8BbpMVAsGuLhJm/tHUbDi8Sw= -github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0 h1:RvwDA4W8K8NQNVQTOzrf9o8P328N7NXztvnq3cUncww= -github.com/cloudnative-pg/machinery v0.0.0-20250102082645-95c37fe624d0/go.mod h1:pitcj6ztiuxfSFH5EbVHv8iCVxF+yQkzf9o9A1KoDvI= +github.com/cloudnative-pg/machinery v0.1.0 h1:tjRmsqQmsO/OlaT0uFmkEtVqgr+SGPM88cKZOHYKLBo= +github.com/cloudnative-pg/machinery v0.1.0/go.mod 
h1:0V3vm44FaIsY+x4pm8ORry7xCC3AJiO+ebfPNxeP5Ck=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
From 5bc22e7cee562b41ff9e354c37d25c8eab58f858 Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Wed, 26 Feb 2025 22:52:56 +0100
Subject: [PATCH 403/836] test: fix race on drain e2e (#6972)

If the storage was not local to the node, it was possible for the
drained pod to be recreated on a different node before we were able to
verify its pending state.
We now accept either behaviour: the pod being "pending" or already
running on a different node.

Closes #6971

Signed-off-by: Francesco Canovai
---
 tests/e2e/drain_node_test.go | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go
index 5b2055cdd1..a5dd5d350b 100644
--- a/tests/e2e/drain_node_test.go
+++ b/tests/e2e/drain_node_test.go
@@ -18,6 +18,7 @@ package e2e

 import (
 	"fmt"
+	"time"

 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -462,16 +463,20 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
 	It("can drain the primary node and recover the cluster when uncordoned", func() {
 		AssertCreateCluster(namespace, clusterName, sampleFile, env)

+		var drainedNodeName string
 		By("waiting for the jobs to be removed", func() {
 			// Wait for jobs to be removed
 			timeout := 180
+			var podList *corev1.PodList
 			Eventually(func() (int, error) {
-				podList, err := pods.List(env.Ctx, env.Client, namespace)
+				var err error
+				podList, err = pods.List(env.Ctx, env.Client, namespace)
 				if err != nil {
 					return 0, err
 				}
 				return len(podList.Items), err
 			}, timeout).Should(BeEquivalentTo(1))
+			drainedNodeName = podList.Items[0].Spec.NodeName
 		})

 		// Load test data
@@ -490,16 +495,15 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
 			testTimeouts[testsUtils.DrainNode],
 		)

-		By("verifying the primary is now pending", func() {
-			timeout := 180
-			// Expect a failover to have happened
-			Eventually(func() (string, error) {
+		By("verifying the primary is now pending or somewhere else", func() {
+			Eventually(func(g Gomega) {
 				pod, err := pods.Get(env.Ctx, env.Client, namespace, clusterName+"-1")
-				if err != nil {
-					return "", err
-				}
-				return string(pod.Status.Phase), err
-			}, timeout).Should(BeEquivalentTo("Pending"))
+				g.Expect(err).ToNot(HaveOccurred())
+				g.Expect(pod).Should(SatisfyAny(
+					HaveField("Spec.NodeName", Not(BeEquivalentTo(drainedNodeName))),
+					HaveField("Status.Phase", BeEquivalentTo("Pending")),
+				))
+			}).WithTimeout(180 * time.Second).WithPolling(PollingTime * time.Second).Should(Succeed())
 		})

 		By("uncordoning all nodes", func() {

From 0facc48cbbb026becb7295de329f1029a1c0d825 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Thu, 27 Feb 2025 09:22:20 +0100
Subject: [PATCH 404/836] chore(deps): downgrade go-grpc-middleware to v2.2.0 (#7002)

This avoids requiring github.com/google/cel-go v0.23.0, which is
incompatible with k8s.io/apiserver v0.32.2, a dependency of
controller-runtime v0.20.2.

Signed-off-by: Marco Nenciarini
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 31362d6515..7bee0fc5ac 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,7 @@ require (
 	github.com/evanphx/json-patch/v5 v5.9.11
 	github.com/go-logr/logr v1.4.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 github.com/jackc/pgx/v5 v5.7.2 github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 diff --git a/go.sum b/go.sum index 0f0139aeec..cecd9e5f67 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= From 34ab236e0a442e7bd30b401257824926660d1ae3 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 27 Feb 2025 16:18:28 +0100 Subject: [PATCH 405/836] ci: move the container build process to bake (#6806) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Redefine the build process to use bake. Generate SBOMs and attestations, and add OCI annotations. Closes #6804 --------- Signed-off-by: Jonathan Gonzalez V. 
Signed-off-by: Niccolò Fei Signed-off-by: Francesco Canovai Co-authored-by: Niccolò Fei Co-authored-by: Francesco Canovai --- .github/workflows/continuous-delivery.yml | 130 ++++++--------- .github/workflows/continuous-integration.yml | 157 +++++++++---------- .github/workflows/release-publish.yml | 97 +++++------- Dockerfile | 35 +---- Dockerfile-ubi9 | 29 ---- Makefile | 12 +- docker-bake.hcl | 149 ++++++++++++++++++ hack/setup-cluster.sh | 50 ++++-- pkg/utils/discovery.go | 2 +- 9 files changed, 359 insertions(+), 302 deletions(-) delete mode 100644 Dockerfile-ubi9 create mode 100644 docker-bake.hcl diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 303f3f1b3f..d11129650e 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -47,6 +47,7 @@ env: REGISTRY_USER: ${{ github.actor }} REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }} REPOSITORY_OWNER: "cloudnative-pg" + SIGN_IMAGES: "true" SLACK_USERNAME: "cnpg-bot" BUILD_MANAGER_RELEASE_ARGS: "build --skip=validate --clean --id manager" # Keep in mind that adding more platforms (architectures) will increase the building @@ -248,8 +249,8 @@ jobs: contents: read packages: write pull-requests: read + id-token: write outputs: - image: ${{ steps.image-meta.outputs.image }} # 'branch_name' is used in 'GetMostRecentReleaseTag' in the Go code branch_name: ${{ steps.build-meta.outputs.branch_name }} upload_artifacts: ${{ steps.build-meta.outputs.upload_artifacts }} @@ -258,7 +259,7 @@ jobs: author_name: ${{ steps.build-meta.outputs.author_name }} author_email: ${{ steps.build-meta.outputs.author_email }} controller_img: ${{ env.CONTROLLER_IMG }} - controller_img_ubi9: ${{ env.CONTROLLER_IMG_UBI9 }} + controller_img_ubi: ${{ env.CONTROLLER_IMG_UBI }} index_img: ${{ env.INDEX_IMG }} bundle_img: ${{ env.BUNDLE_IMG }} catalog_img: ${{ env.CATALOG_IMG }} @@ -282,9 +283,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - images='${{ env.OPERATOR_IMAGE_NAME }}' - tags='' - labels='' commit_sha=${{ needs.evaluate_options.outputs.git_ref }} commit_date=$(git log -1 --pretty=format:'%ad' --date short "${commit_sha}" || : ) # use git describe to get the nearest tag and use that to build the version (e.g. 
1.4.0-dev24 or 1.4.0) @@ -314,25 +312,23 @@ jobs: fi # extract tag from branch name - tag_name=$(echo "$branch_name" | sed 's/[^a-zA-Z0-9]/-/g') + tag_name=$(echo "$branch_name" | tr / -) upload_artifacts=false if [[ ${branch_name} == main || ${branch_name} =~ ^release- ]]; then upload_artifacts=true fi - echo "IMAGES=${images}" >> $GITHUB_ENV - echo "TAGS=${tags}" >> $GITHUB_ENV - echo "LABELS=${labels}" >> $GITHUB_ENV echo "DATE=${commit_date}" >> $GITHUB_ENV echo "VERSION=${commit_version}" >> $GITHUB_ENV echo "COMMIT=${commit_short}" >> $GITHUB_ENV + echo "IMAGE_TAG=${tag_name,,}" >> $GITHUB_ENV + echo "REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV echo "commit_sha=${commit_sha}" >> $GITHUB_OUTPUT echo "commit_msg=${commit_message}" >> $GITHUB_OUTPUT echo "author_name=${author_name}" >> $GITHUB_OUTPUT echo "author_email=${author_email}" >> $GITHUB_OUTPUT echo "branch_name=${branch_name}" >> $GITHUB_OUTPUT - echo "tag_name=${tag_name,,}" >> $GITHUB_OUTPUT echo "upload_artifacts=${upload_artifacts}" >> $GITHUB_OUTPUT - name: Set GoReleaser environment @@ -350,24 +346,6 @@ jobs: DATE: ${{ env.DATE }} COMMIT: ${{ env.COMMIT }} VERSION: ${{ env.VERSION }} - - - name: Docker meta - id: docker-meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.IMAGES }} - tags: | - type=raw,value=${{ steps.build-meta.outputs.tag_name }} - - - name: Docker meta UBI9 - id: docker-meta-ubi9 - uses: docker/metadata-action@v5 - with: - images: ${{ env.IMAGES }} - flavor: | - suffix=-ubi9 - tags: | - type=raw,value=${{ steps.build-meta.outputs.tag_name }} - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -385,63 +363,50 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile - push: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta.outputs.tags }} - labels: ${{ env.LABELS }} - provenance: ${{ env.BUILD_PUSH_PROVENANCE }} - cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} - cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} - - - name: Build and push UBI9 - uses: docker/build-push-action@v6 + uses: docker/bake-action@v6 + id: bake-push + env: + environment: "testing" + buildVersion: ${{ env.VERSION }} + tag: ${{ env.IMAGE_TAG }} + registry: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }} + revision: ${{ env.COMMIT }} with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile-ubi9 + source: . push: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi9.outputs.tags }} - labels: ${{ env.LABELS }} - provenance: ${{ env.BUILD_PUSH_PROVENANCE }} - cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} - cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} - - - name: Image Meta - id: image-meta - env: - TAGS: ${{ steps.docker-meta.outputs.tags }} + no-cache: true + targets: "default" + - + name: Install cosign + if: env.SIGN_IMAGES == 'true' + uses: sigstore/cosign-installer@v3 + # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ + # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on + # how to use cosign. 
+ - + name: Sign images + if: env.SIGN_IMAGES == 'true' run: | - # If there is more than one tag, take the first one - # TAGS could be separated by newlines or commas - image=$(sed -n '1{s/,.*//; p}' <<< "$TAGS") - echo "image=${image}" >> $GITHUB_OUTPUT + images=$(echo '${{ steps.bake-push.outputs.metadata }}' | + jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' + ) + cosign sign --yes ${images} - name: Output images env: - TAGS: ${{ steps.docker-meta.outputs.tags }} - TAGS_UBI9: ${{ steps.docker-meta-ubi9.outputs.tags }} + DISTROLESS: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['image.name'] }} + UBI: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }} run: | - LOWERCASE_OPERATOR_IMAGE_NAME=${OPERATOR_IMAGE_NAME,,} - TAG=${TAGS#*:} - TAG_UBI=${TAGS_UBI9#*:} - echo "CONTROLLER_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG}" >> $GITHUB_ENV - echo "CONTROLLER_IMG_UBI9=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV - echo "BUNDLE_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:bundle-${TAG}" >> $GITHUB_ENV - echo "INDEX_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:index-${TAG}" >> $GITHUB_ENV - echo "CATALOG_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:catalog-${TAG}" >> $GITHUB_ENV + echo "CONTROLLER_IMG=${DISTROLESS}" >> $GITHUB_ENV + echo "CONTROLLER_IMG_UBI=${UBI}" >> $GITHUB_ENV + echo "BUNDLE_IMG=${UBI}-bundle" >> $GITHUB_ENV + echo "INDEX_IMG=${UBI}-index" >> $GITHUB_ENV + echo "CATALOG_IMG=${UBI}-catalog" >> $GITHUB_ENV - name: Generate manifest for operator deployment id: generate-manifest env: - CONTROLLER_IMG: ${{ steps.image-meta.outputs.image }} + CONTROLLER_IMG: ${{ env.CONTROLLER_IMG }} run: | make generate-manifest - @@ -493,8 +458,7 @@ jobs: push: true build-args: | VERSION=${{ env.VERSION }}-prime - tags: ${{ steps.docker-meta.outputs.tags }}-prime - labels: ${{ env.LABELS }} + tags: ${{ env.CONTROLLER_IMG }}-prime provenance: ${{ env.BUILD_PUSH_PROVENANCE }} cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} @@ -577,7 +541,7 @@ jobs: needs.buildx.result == 'success' runs-on: ubuntu-24.04 outputs: - image: ${{ needs.buildx.outputs.image }} + controller_img: ${{ needs.buildx.outputs.controller_img }} localMatrix: ${{ steps.generate-jobs.outputs.localMatrix }} localEnabled: ${{ steps.generate-jobs.outputs.localEnabled }} localTimeout: ${{ steps.generate-jobs.outputs.localE2ETimeout }} @@ -642,7 +606,7 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} E2E_DEFAULT_STORAGE_CLASS: standard E2E_CSI_STORAGE_CLASS: csi-hostpath-sc E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-hostpath-snapclass @@ -895,7 +859,7 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} E2E_DEFAULT_STORAGE_CLASS: rook-ceph-block E2E_CSI_STORAGE_CLASS: rook-ceph-block E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-rbdplugin-snapclass @@ -1233,7 +1197,7 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} E2E_DEFAULT_STORAGE_CLASS: gp3 E2E_CSI_STORAGE_CLASS: gp3 E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: ebs-csi-snapclass @@ -1621,7 +1585,7 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ 
needs.generate-jobs.outputs.controller_img }} E2E_DEFAULT_STORAGE_CLASS: standard-rwo E2E_CSI_STORAGE_CLASS: standard-rwo E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: pd-csi-snapclass @@ -1918,7 +1882,7 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" - CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.image }} + CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} E2E_DEFAULT_STORAGE_CLASS: gp3-csi E2E_CSI_STORAGE_CLASS: gp3-csi E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-aws-vsc @@ -1971,7 +1935,7 @@ jobs: - name: Build and push the operator and catalog env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi9 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi }} BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} INDEX_IMG: ${{ needs.buildx.outputs.index_img }} CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }} diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 66082c715f..6aaa3cf633 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -35,6 +35,7 @@ env: REGISTRY: "ghcr.io" REGISTRY_USER: ${{ github.actor }} REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }} + SIGN_IMAGES: "true" OPP_SCRIPT_URL: "https://raw.githubusercontent.com/redhat-openshift-ecosystem/community-operators-pipeline/ci/latest/ci/scripts/opp.sh" defaults: @@ -120,7 +121,7 @@ jobs: - '.github/workflows/continuous-integration.yml' - '.goreleaser*.yml' - 'Dockerfile' - - 'Dockerfile-ubi9' + - 'docker-bake.hcl' - 'Makefile' - 'go.mod' - 'go.sum' @@ -408,11 +409,12 @@ jobs: contents: read packages: write security-events: write + id-token: write outputs: commit_version: ${{ env.VERSION }} commit: ${{ env.COMMIT_SHA }} controller_img: ${{ env.CONTROLLER_IMG }} - controller_img_ubi9: ${{ env.CONTROLLER_IMG_UBI9 }} + controller_img_ubi: ${{ env.CONTROLLER_IMG_UBI }} bundle_img: ${{ env.BUNDLE_IMG }} catalog_img: ${{ env.CATALOG_IMG }} push: ${{ env.PUSH }} @@ -431,6 +433,8 @@ jobs: - name: Build meta id: build-meta + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | commit_sha=${{ github.event.pull_request.head.sha || github.sha }} commit_date=$(git log -1 --pretty=format:'%ad' --date short "${commit_sha}" || : ) @@ -441,10 +445,22 @@ jobs: # shortened commit sha commit_short=$(git rev-parse --short "${commit_sha}") + # extract branch name + branch_name=${GITHUB_REF#refs/heads/} + if [[ ${{ github.event_name }} == 'pull_request' ]] + then + branch_name=$(gh pr view "${{ github.event.pull_request.number }}" --json headRefName -q '.headRefName' 2>/dev/null) + fi + + # extract tag from branch name + tag_name=$(echo "$branch_name" | tr / -) + echo "DATE=${commit_date}" >> $GITHUB_ENV echo "VERSION=${commit_version}" >> $GITHUB_ENV echo "COMMIT=${commit_short}" >> $GITHUB_ENV echo "COMMIT_SHA=${commit_sha}" >> $GITHUB_ENV + echo "IMAGE_TAG=${tag_name,,}" >> $GITHUB_ENV + echo "REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV # By default the container image is being pushed to the registry echo "PUSH=true" >> $GITHUB_ENV @@ -519,30 +535,11 @@ jobs: COMMIT: ${{ env.COMMIT }} VERSION: ${{ env.VERSION }} - - name: Docker meta - id: docker-meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.OPERATOR_IMAGE_NAME }} - tags: | - type=ref,event=branch - type=ref,event=pr - - - name: Docker meta UBI9 - id: docker-meta-ubi9 - uses: docker/metadata-action@v5 - with: - images: ${{ env.OPERATOR_IMAGE_NAME }} - flavor: | - suffix=-ubi9 - tags: | - type=ref,event=branch - type=ref,event=pr - - 
name: Set up QEMU uses: docker/setup-qemu-action@v3 with: platforms: ${{ env.PLATFORMS }} + cache-image: false - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -554,44 +551,48 @@ jobs: username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - - name: Build for scan distroless image - uses: docker/build-push-action@v6 + - name: Build and push + uses: docker/bake-action@v6 + id: bake-push + env: + environment: "testing" + buildVersion: ${{ env.VERSION }} + tag: ${{ env.IMAGE_TAG }} + registry: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }} + revision: ${{ env.COMMIT }} with: - platforms: "linux/amd64" - context: . - file: Dockerfile - push: false - load: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta.outputs.tags }} + source: . + push: ${{ env.PUSH }} + no-cache: true + targets: "default" + + - name: Output images + if: env.PUSH == 'true' + env: + DISTROLESS: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['image.name'] }} + UBI: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }} + run: | + echo "CONTROLLER_IMG=${DISTROLESS}" >> $GITHUB_ENV + echo "CONTROLLER_IMG_UBI=${UBI}" >> $GITHUB_ENV + echo "BUNDLE_IMG=${UBI}-bundle" >> $GITHUB_ENV + echo "CATALOG_IMG=${UBI}-catalog" >> $GITHUB_ENV - name: Dockle scan distroless image uses: erzz/dockle-action@v1 + if: env.PUSH == 'true' with: - image: ${{ steps.docker-meta.outputs.tags }} + image: ${{ env.CONTROLLER_IMG }} exit-code: '1' failure-threshold: WARN accept-keywords: key - - name: Build for scan UBI9 image - uses: docker/build-push-action@v6 - with: - platforms: "linux/amd64" - context: . - file: Dockerfile-ubi9 - push: false - load: true - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi9.outputs.tags }} - - - name: Dockle scan UBI9 image + - name: Dockle scan UBI image uses: erzz/dockle-action@v1 + if: env.PUSH == 'true' env: DOCKLE_IGNORES: CIS-DI-0009 with: - image: ${{ steps.docker-meta-ubi9.outputs.tags }} + image: ${{ env.CONTROLLER_IMG_UBI }} exit-code: '1' failure-threshold: WARN accept-keywords: key @@ -605,7 +606,7 @@ jobs: env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} with: - image: ${{ steps.docker-meta.outputs.tags }} + image: ${{ env.CONTROLLER_IMG }} args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning @@ -617,43 +618,25 @@ jobs: with: sarif_file: snyk.sarif - - name: Build and push - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile - push: ${{ env.PUSH }} - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta.outputs.tags }} - provenance: ${{ env.BUILD_PUSH_PROVENANCE }} - cache-from: ${{ env.BUILD_PUSH_CACHE_FROM }} - cache-to: ${{ env.BUILD_PUSH_CACHE_TO }} - - - name: Build and push UBI9 - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . 
- file: Dockerfile-ubi9 - push: ${{ env.PUSH }} - build-args: | - VERSION=${{ env.VERSION }} - tags: ${{ steps.docker-meta-ubi9.outputs.tags }} - - - name: Output images - env: - TAGS: ${{ steps.docker-meta.outputs.tags }} - TAGS_UBI9: ${{ steps.docker-meta-ubi9.outputs.tags }} + - name: Install cosign + if: | + env.SIGN_IMAGES == 'true' && + env.PUSH == 'true' + uses: sigstore/cosign-installer@v3 + # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ + # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on + # how to use cosign. + + - name: Sign images + if: | + env.SIGN_IMAGES == 'true' && + env.PUSH == 'true' run: | - LOWERCASE_OPERATOR_IMAGE_NAME=${OPERATOR_IMAGE_NAME,,} - TAG=${TAGS#*:} - TAG_UBI=${TAGS_UBI9#*:} - echo "CONTROLLER_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG}" >> $GITHUB_ENV - echo "CONTROLLER_IMG_UBI9=${LOWERCASE_OPERATOR_IMAGE_NAME}:${TAG_UBI}" >> $GITHUB_ENV - echo "BUNDLE_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:bundle-${TAG}" >> $GITHUB_ENV - echo "CATALOG_IMG=${LOWERCASE_OPERATOR_IMAGE_NAME}:catalog-${TAG}" >> $GITHUB_ENV + images=$(echo '${{ steps.bake-push.outputs.metadata }}' | + jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' + ) + cosign sign --yes ${images} + olm-bundle: name: Create OLM bundle and catalog @@ -678,6 +661,7 @@ jobs: uses: docker/setup-qemu-action@v3 with: platforms: ${{ env.PLATFORMS }} + cache-image: false - name: Install Go uses: actions/setup-go@v5 @@ -697,7 +681,7 @@ jobs: - name: Create bundle env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi9 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi }} BUNDLE_IMG: ${{ needs.buildx.outputs.bundle_img }} CATALOG_IMG: ${{ needs.buildx.outputs.catalog_img }} run: | @@ -745,7 +729,7 @@ jobs: - name: Run preflight container test env: - CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi9 }} + CONTROLLER_IMG: ${{ needs.buildx.outputs.controller_img_ubi }} PFLT_ARTIFACTS: "preflight_results" run: | bin/preflight check container ${CONTROLLER_IMG} \ @@ -793,6 +777,7 @@ jobs: uses: docker/setup-qemu-action@v3 with: platforms: ${{ env.PLATFORMS }} + cache-image: false - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 590e81febe..909e1b6d01 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -9,7 +9,7 @@ on: env: GOLANG_VERSION: "1.24.x" - CNPG_IMAGE_NAME: "ghcr.io/${{ github.repository }}" + REGISTRY: "ghcr.io" permissions: contents: write @@ -91,11 +91,11 @@ jobs: needs: - check-version outputs: - version: ${{ steps.build-meta.outputs.version }} + version: ${{ env.IMAGE_TAG }} author_name: ${{ steps.build-meta.outputs.author_name }} author_email: ${{ steps.build-meta.outputs.author_email }} - digest: ${{ steps.build.outputs.digest }} platforms: ${{ env.PLATFORMS }} + ubi_img: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }} steps: - name: Checkout @@ -113,10 +113,9 @@ jobs: name: Build meta id: build-meta run: | - images='ghcr.io/cloudnative-pg/cloudnative-pg' - images="${images},ghcr.io/cloudnative-pg/cloudnative-pg-testing" commit_sha=${{ github.sha }} commit_date=$(git log -1 --pretty=format:'%ad' --date short "${commit_sha}") + tag="${GITHUB_REF#refs/tags/v}" # get git user and email author_name=$(git show -s --format='%an' "${commit_sha}") @@ -125,9 +124,11 
@@ jobs: # use git describe to get the nearest tag and use that to build the version (e.g. 1.4.0-dev24 or 1.4.0) commit_version=$(git describe --tags --match 'v*' "${commit_sha}"| sed -e 's/^v//; s/-g[0-9a-f]\+$//; s/-\([0-9]\+\)$/-dev\1/') commit_short=$(git rev-parse --short "${commit_sha}") - echo "IMAGES=${images}" >> $GITHUB_ENV + echo "DATE=${commit_date}" >> $GITHUB_ENV - echo "version=${commit_version}" >> $GITHUB_OUTPUT + echo "VERSION=${commit_version}" >> $GITHUB_ENV + echo "IMAGE_TAG=${tag}" >> $GITHUB_ENV + echo "REPO_OWNER=${GITHUB_REPOSITORY_OWNER,,}" >> $GITHUB_ENV echo "COMMIT=${commit_short}" >> $GITHUB_ENV echo "author_name=${author_name}" >> $GITHUB_OUTPUT echo "author_email=${author_email}" >> $GITHUB_OUTPUT @@ -156,7 +157,7 @@ jobs: env: DATE: ${{ env.DATE }} COMMIT: ${{ env.COMMIT }} - VERSION: ${{ steps.build-meta.outputs.version }} + VERSION: ${{ env.VERSION }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} NFPM_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} @@ -168,29 +169,6 @@ jobs: uses: rajatjindal/krew-release-bot@v0.0.47 with: krew_template_file: dist/krew/cnpg.yaml - - - name: Docker meta - id: docker-meta - uses: docker/metadata-action@v5 - env: - IS_LATEST: ${{ needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' }} - with: - images: ${{ env.IMAGES }} - flavor: | - latest=${{ env.IS_LATEST }} - tags: | - type=semver,pattern={{version}} - - - name: Docker meta UBI9 - id: docker-meta-ubi9 - uses: docker/metadata-action@v5 - with: - images: ${{ env.IMAGES }} - flavor: | - latest=false - suffix=-ubi9 - tags: | - type=semver,pattern={{version}} - name: Detect platforms run: | @@ -210,31 +188,37 @@ jobs: name: Login to ghcr.io uses: docker/login-action@v3 with: - registry: ghcr.io + registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/bake-action@v6 + id: bake-push + env: + environment: "production" + buildVersion: ${{ env.VERSION }} + tag: ${{ env.IMAGE_TAG }} + registry: ${{ env.REGISTRY }}/${{ env.REPO_OWNER }} + revision: ${{ env.COMMIT }} + latest: ${{ needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' }} with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile + source: . push: true - build-args: | - VERSION=${{ steps.build-meta.outputs.version }} - tags: ${{ steps.docker-meta.outputs.tags }} + no-cache: true + targets: "default" - - name: Build and push UBI9 - uses: docker/build-push-action@v6 - with: - platforms: ${{ env.PLATFORMS }} - context: . - file: Dockerfile-ubi9 - push: true - build-args: | - VERSION=${{ steps.build-meta.outputs.version }} - tags: ${{ steps.docker-meta-ubi9.outputs.tags }} + name: Install cosign + uses: sigstore/cosign-installer@v3 + # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ + # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on + # how to use cosign. 
+ - name: Sign images + run: | + images=$(echo '${{ steps.bake-push.outputs.metadata }}' | + jq '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' + ) + cosign sign --yes ${images} olm-bundle: name: Create OLM bundle and catalog @@ -270,23 +254,20 @@ jobs: - name: Login to ghcr.io uses: docker/login-action@v3 with: - registry: ghcr.io + registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set bundle variables + env: + UBI_IMG: ${{ needs.release-binaries.outputs.ubi_img }} run: | - tag="${GITHUB_REF#refs/tags/v}" - version="${tag#v}" - LOWERCASE_CNPG_IMAGE_NAME=${CNPG_IMAGE_NAME,,} - echo "IMAGE_NAME=${LOWERCASE_CNPG_IMAGE_NAME}" >> $GITHUB_ENV - echo "CONTROLLER_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:${version}-ubi9" >> $GITHUB_ENV - echo "BUNDLE_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:bundle-${version}" >> $GITHUB_ENV - echo "CATALOG_IMG=${LOWERCASE_CNPG_IMAGE_NAME}:catalog-${version}" >> $GITHUB_ENV + echo "CONTROLLER_IMG=${UBI_IMG}" >> $GITHUB_ENV + echo "BUNDLE_IMG=${UBI_IMG}-bundle" >> $GITHUB_ENV + echo "CATALOG_IMG=${UBI_IMG}-catalog" >> $GITHUB_ENV - name: Create bundle env: - IMAGE_NAME: ${{ env.IMAGE_NAME }} CONTROLLER_IMG: ${{ env.CONTROLLER_IMG }} BUNDLE_IMG: ${{ env.BUNDLE_IMG }} CATALOG_IMG: ${{ env.CATALOG_IMG }} diff --git a/Dockerfile b/Dockerfile index e6d787a93b..e85f152f8b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,36 +1,17 @@ +ARG BASE=gcr.io/distroless/static-debian12:nonroot + # This builder stage it's only because we need a command -# to create a symlink and reduce the size of the image +# to create a symlink and we do not have it in a distroless image FROM gcr.io/distroless/static-debian12:debug-nonroot AS builder ARG TARGETARCH - SHELL ["/busybox/sh", "-c"] -COPY --chown=nonroot:nonroot --chmod=0755 dist/manager/* bin/ -RUN ln -sf bin/manager_${TARGETARCH} manager - -FROM gcr.io/distroless/static-debian12:nonroot -ARG VERSION="dev" -ARG TARGETARCH - -ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." \ - MAINTAINER="CloudNativePG Contributors." - -LABEL summary="$SUMMARY" \ - description="$DESCRIPTION" \ - io.k8s.display-name="$SUMMARY" \ - io.k8s.description="$DESCRIPTION" \ - name="CloudNativePG Operator" \ - vendor="$MAINTAINER" \ - maintainer="$MAINTAINER" \ - url="https://cloudnative-pg.io/" \ - version="$VERSION" \ - release="1" +RUN ln -sf operator/manager_${TARGETARCH} manager +FROM ${BASE} WORKDIR / - -# Needs to copy the entire content, otherwise, it will not -# copy the symlink properly. +COPY --chown=nonroot:nonroot --chmod=0755 dist/manager/* operator/ COPY --from=builder /home/nonroot/ . +COPY licenses /licenses +COPY LICENSE /licenses USER 65532:65532 - ENTRYPOINT ["/manager"] diff --git a/Dockerfile-ubi9 b/Dockerfile-ubi9 deleted file mode 100644 index 0d846d91b0..0000000000 --- a/Dockerfile-ubi9 +++ /dev/null @@ -1,29 +0,0 @@ -FROM registry.access.redhat.com/ubi9/ubi-micro -ARG VERSION="dev" -ARG TARGETARCH - -ENV SUMMARY="CloudNativePG Operator Container Image." \ - DESCRIPTION="This Docker image contains CloudNativePG Operator." \ - MAINTAINER="CloudNativePG Contributors." 
- -LABEL summary="$SUMMARY" \ - description="$DESCRIPTION" \ - io.k8s.display-name="$SUMMARY" \ - io.k8s.description="$DESCRIPTION" \ - name="CloudNativePG Operator" \ - vendor="$MAINTAINER" \ - maintainer="$MAINTAINER" \ - url="https://cloudnative-pg.io/" \ - version="$VERSION" \ - release="1" - -COPY licenses /licenses -COPY LICENSE /licenses - -WORKDIR / - -COPY dist/manager/* /bin/ -RUN ln -sf /bin/manager_${TARGETARCH} manager -USER 65532:65532 - -ENTRYPOINT ["/manager"] diff --git a/Makefile b/Makefile index f0377b0d9e..67f42b1563 100644 --- a/Makefile +++ b/Makefile @@ -142,11 +142,13 @@ run: generate fmt vet manifests ## Run against the configured Kubernetes cluster docker-build: go-releaser ## Build the docker image. GOOS=linux GOARCH=${ARCH} GOPATH=$(go env GOPATH) DATE=${DATE} COMMIT=${COMMIT} VERSION=${VERSION} \ - $(GO_RELEASER) build --skip=validate --clean --single-target $(if $(VERSION),,--snapshot) - DOCKER_BUILDKIT=1 docker build . -t ${CONTROLLER_IMG} --build-arg VERSION=${VERSION} - -docker-push: ## Push the docker image. - docker push ${CONTROLLER_IMG} + $(GO_RELEASER) build --skip=validate --clean --single-target $(if $(VERSION),,--snapshot); \ + builder_name_option=""; \ + if [ -n "${BUILDER_NAME}" ]; then \ + builder_name_option="--builder ${BUILDER_NAME}"; \ + fi; \ + DOCKER_BUILDKIT=1 tag=${IMAGE_TAG} buildVersion=${VERSION} revision=${COMMIT} \ + docker buildx bake $${builder_name_option} --set=*.platform="linux/${ARCH}" --push olm-bundle: manifests kustomize operator-sdk ## Build the bundle for OLM installation set -xeEuo pipefail ;\ diff --git a/docker-bake.hcl b/docker-bake.hcl new file mode 100644 index 0000000000..40407a0281 --- /dev/null +++ b/docker-bake.hcl @@ -0,0 +1,149 @@ +# +# Copyright The CloudNativePG Contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +variable "environment" { + default = "testing" + validation { + condition = contains(["testing", "production"], environment) + error_message = "environment must be either testing or production" + } +} + +variable "registry" { + default = "localhost:5000" +} + +variable "insecure" { + default = "false" +} + +variable "latest" { + default = "false" +} + +variable "tag" { + default = "dev" +} + +variable "buildVersion" { + default = "dev" +} + +variable "revision" { + default = "" +} + +suffix = (environment == "testing") ? "-testing" : "" + +title = "CloudNativePG Operator" +description = "This Docker image contains CloudNativePG Operator." 
+authors = "The CloudNativePG Contributors" +url = "https://github.com/cloudnative-pg/cloudnative-pg" +documentation = "https://cloudnative-pg.io/documentation/current/" +license = "Apache-2.0" +now = timestamp() + +distros = { + distroless = { + baseImage = "gcr.io/distroless/static-debian12:nonroot@sha256:6ec5aa99dc335666e79dc64e4a6c8b89c33a543a1967f20d360922a80dd21f02", + tag = "" + } + ubi = { + baseImage = "registry.access.redhat.com/ubi9/ubi-micro:latest@sha256:7e85855f6925e03f91b5c51f07886ff1c18c6ec69b5fc65491428a899da914a2", + tag = "-ubi9" + } +} + +target "default" { + matrix = { + distro = [ + "distroless", + "ubi" + ] + } + + name = "${distro}" + platforms = ["linux/amd64", "linux/arm64"] + tags = [ + "${registry}/cloudnative-pg${suffix}:${tag}${distros[distro].tag}", + latest("${registry}/cloudnative-pg${suffix}", "${latest}"), + ] + + dockerfile = "Dockerfile" + + context = "." + + args = { + BASE = "${distros[distro].baseImage}" + } + + output = [ + "type=registry,registry.insecure=${insecure}", + ] + + attest = [ + "type=provenance,mode=max", + "type=sbom" + ] + annotations = [ + "index,manifest:org.opencontainers.image.created=${now}", + "index,manifest:org.opencontainers.image.url=${url}", + "index,manifest:org.opencontainers.image.source=${url}", + "index,manifest:org.opencontainers.image.version=${buildVersion}", + "index,manifest:org.opencontainers.image.revision=${revision}", + "index,manifest:org.opencontainers.image.vendor=${authors}", + "index,manifest:org.opencontainers.image.title=${title}", + "index,manifest:org.opencontainers.image.description=${description}", + "index,manifest:org.opencontainers.image.documentation=${documentation}", + "index,manifest:org.opencontainers.image.authors=${authors}", + "index,manifest:org.opencontainers.image.licenses=${license}", + "index,manifest:org.opencontainers.image.base.name=${distros[distro].baseImage}", + "index,manifest:org.opencontainers.image.base.digest=${digest(distros[distro].baseImage)}", + ] + labels = { + "org.opencontainers.image.created" = "${now}", + "org.opencontainers.image.url" = "${url}", + "org.opencontainers.image.source" = "${url}", + "org.opencontainers.image.version" = "${buildVersion}", + "org.opencontainers.image.revision" = "${revision}", + "org.opencontainers.image.vendor" = "${authors}", + "org.opencontainers.image.title" = "${title}", + "org.opencontainers.image.description" = "${description}", + "org.opencontainers.image.documentation" = "${documentation}", + "org.opencontainers.image.authors" = "${authors}", + "org.opencontainers.image.licenses" = "${license}", + "org.opencontainers.image.base.name" = "${distros[distro].baseImage}", + "org.opencontainers.image.base.digest" = "${digest(distros[distro].baseImage)}", + "name" = "${title}", + "maintainer" = "${authors}", + "vendor" = "${authors}", + "version" = "${buildVersion}", + "release" = "1", + "description" = "${description}", + "summary" = "${description}", + } + +} + +function digest { + params = [ imageNameWithSha ] + result = index(split("@", imageNameWithSha), 1) +} + +function latest { + params = [ image, latest ] + result = (latest == "true") ? 
"${image}:latest" : "" +} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 229f28c013..e254a84fc9 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -74,6 +74,8 @@ export DOCKER_DEFAULT_PLATFORM # Constants registry_volume=registry_dev_data registry_name=registry.dev +registry_net=registry +builder_name=cnpg-builder # ######################################################################### # IMPORTANT: here we build a catalog of images that will be needed in the @@ -350,8 +352,12 @@ ensure_registry() { docker volume create "${registry_volume}" fi + if ! docker network inspect "${registry_net}" &>/dev/null; then + docker network create "${registry_net}" + fi + if ! docker inspect "${registry_name}" &>/dev/null; then - docker container run -d --name "${registry_name}" -v "${registry_volume}:/var/lib/registry" --restart always -p 5000:5000 registry:2 + docker container run -d --name "${registry_name}" --network "${registry_net}" -v "${registry_volume}:/var/lib/registry" --restart always -p 5000:5000 registry:2 fi } @@ -361,6 +367,20 @@ check_registry() { jq -r ".[].Containers | .[] | select(.Name==\"${registry_name}\") | .Name" } +# An existing builder will not have any knowledge of the local registry or the +# any host outside the builder, but when having the builder inside Kubernetes +# this is fixed since we already solved the issue of the kubernetes cluster reaching +# out the local registry. The following functions will handle that builder +create_builder() { + docker buildx rm "${builder_name}" &>/dev/null || true + # If ENABLE_REGISTRY is not set, we don't need to define driver-opt network + if [ -n "${ENABLE_REGISTRY:-}" ]; then + docker buildx create --name "${builder_name}" --driver-opt "network=${registry_net}" + else + docker buildx create --name "${builder_name}" + fi +} + deploy_fluentd() { local FLUENTD_IMAGE=fluent/fluentd-kubernetes-daemonset:v1.14.3-debian-forward-1.0 local FLUENTD_LOCAL_IMAGE="${registry_name}:5000/fluentd-kubernetes-daemonset:local" @@ -616,15 +636,15 @@ load() { ENABLE_REGISTRY=true fi + create_builder + echo "${bright}Building operator from current worktree${reset}" - CONTROLLER_IMG="$(ENABLE_REGISTRY="${ENABLE_REGISTRY}" print_image)" - make -C "${ROOT_DIR}" CONTROLLER_IMG="${CONTROLLER_IMG}" ARCH="${ARCH}" docker-build + make -C "${ROOT_DIR}" IMAGE_TAG="$(print_tag)" registry="${registry_name}:5000" insecure="true" \ + ARCH="${ARCH}" BUILDER_NAME=${builder_name} docker-build echo "${bright}Loading new operator image on cluster ${CLUSTER_NAME}${reset}" - load_image "${CLUSTER_NAME}" "${CONTROLLER_IMG}" - echo "${bright}Done loading new operator image on cluster ${CLUSTER_NAME}${reset}" if [[ "${TEST_UPGRADE_TO_V1}" != "false" ]]; then @@ -635,16 +655,16 @@ load() { echo "${bright}Building a 'prime' operator from current worktree${reset}" - PRIME_CONTROLLER_IMG="${CONTROLLER_IMG}-prime" CURRENT_VERSION=$(make -C "${ROOT_DIR}" -s print-version) PRIME_VERSION="${CURRENT_VERSION}-prime" - make -C "${ROOT_DIR}" CONTROLLER_IMG="${PRIME_CONTROLLER_IMG}" VERSION="${PRIME_VERSION}" \ - ARCH="${ARCH}" docker-build - - load_image "${CLUSTER_NAME}" "${PRIME_CONTROLLER_IMG}" + PRIME_TAG="$(print_tag)-prime" + make -C "${ROOT_DIR}" IMAGE_TAG="${PRIME_TAG}" VERSION="${PRIME_VERSION}" registry="${registry_name}:5000" insecure="true" \ + ARCH="${ARCH}" BUILDER_NAME="${builder_name}" docker-build echo "${bright}Done loading new 'prime' operator image on cluster ${CLUSTER_NAME}${reset}" fi + + docker buildx rm "${builder_name}" } deploy() { 
@@ -661,12 +681,16 @@ deploy() { echo "${bright}Done deploying manifests from current worktree on cluster ${CLUSTER_NAME}${reset}" } -print_image() { +print_tag() { local tag=devel if [ -n "${ENABLE_REGISTRY:-}" ] || "check_registry_${ENGINE}"; then tag=latest fi - echo "${registry_name}:5000/cloudnative-pg:${tag}" + echo "${tag}" +} + +print_image() { + echo "${registry_name}:5000/cloudnative-pg-testing:$(print_tag)" } export_logs() { @@ -766,7 +790,7 @@ main() { fi KUBECTL_VERSION=${KUBECTL_VERSION:-$K8S_VERSION} - # Only here the K8S_VERSION veriable contains its final value + # Only here the K8S_VERSION variable contains its final value # so we can set the default cluster name CLUSTER_NAME=${CLUSTER_NAME:-pg-operator-e2e-${K8S_VERSION//./-}} diff --git a/pkg/utils/discovery.go b/pkg/utils/discovery.go index b0479db081..9f4fc4637d 100644 --- a/pkg/utils/discovery.go +++ b/pkg/utils/discovery.go @@ -225,7 +225,7 @@ func detectAvailableArchitectures(filepathGlob string) error { // DetectAvailableArchitectures detects the architectures available in the cluster func DetectAvailableArchitectures() error { - return detectAvailableArchitectures("bin/manager_*") + return detectAvailableArchitectures("operator/manager_*") } // DetectOLM looks for the operators.coreos.com operators resource in the current From e68df5b4fee2ba6c673dfe43ae05369a177120de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Thu, 27 Feb 2025 18:43:22 +0100 Subject: [PATCH 406/836] ci: fix buildx bake output type (#7013) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Output type registry always tries to push images. Switch to `type=image`. Closes #7016 Signed-off-by: Niccolò Fei --- docker-bake.hcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-bake.hcl b/docker-bake.hcl index 40407a0281..1d5fdcf47c 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -91,7 +91,7 @@ target "default" { } output = [ - "type=registry,registry.insecure=${insecure}", + "type=image,registry.insecure=${insecure}", ] attest = [ From 19fbc526f5309a8a21e61f5a6720eea95a23d2f9 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 28 Feb 2025 09:10:47 +0100 Subject: [PATCH 407/836] docs: update Slack channel invite (#7019) Closes #7018 Signed-off-by: Gabriele Bartolini --- .github/ISSUE_TEMPLATE/config.yml | 2 +- CONTRIBUTING.md | 3 +-- README.md | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 17f1e503e7..237e748a6b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -4,5 +4,5 @@ contact_links: url: https://github.com/cloudnative-pg/cloudnative-pg/discussions about: Please ask and answer questions here. - name: Slack chat - url: https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A + url: https://github.com/cloudnative-pg/cloudnative-pg?tab=readme-ov-file#communications about: Please join the slack channel and interact with our community diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e6b74b5db6..acb70e94e2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,8 +40,7 @@ For development contributions, please refer to the separate section called ## Ask for Help The best way to reach us with a question when contributing is to drop a line in -our [Slack channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A), or -start a new Github discussion. 
+our [Slack channel](README.md#communications), or start a new Github discussion. ## Raising Issues diff --git a/README.md b/README.md index e9e3eda545..748708e7af 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ MariaDB cluster). ## Communications -- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-2vedd06pe-vMZf4wJ3l_H_hB3YCZ947A) +- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-30a6l6bp3-u1lNAmh~N02Cfiv2utKTFg) - [Github Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions) - [Twitter](https://twitter.com/CloudNativePg) From 14854247b022a5f7dc93a9beac7bec2b955a873c Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 28 Feb 2025 09:16:09 +0100 Subject: [PATCH 408/836] docs: streamline documentation on failure modes (#6945) This page included specific failure scenarios. Since these largely follow standard Kubernetes behavior, we have streamlined the content to avoid duplication of information that belongs to the underlying Kubernetes stack and is not specific to CloudNativePG. Signed-off-by: Marco Nenciarini Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Bartolini Co-authored-by: Jaime Silvela Co-authored-by: Gabriele Bartolini --- docs/src/failure_modes.md | 212 ++++++++------------------------------ 1 file changed, 43 insertions(+), 169 deletions(-) diff --git a/docs/src/failure_modes.md b/docs/src/failure_modes.md index 4dd6df6c9b..38eb79a5c9 100644 --- a/docs/src/failure_modes.md +++ b/docs/src/failure_modes.md @@ -1,183 +1,59 @@ # Failure Modes -This section provides an overview of the major failure scenarios that -PostgreSQL can face on a Kubernetes cluster during its lifetime. - -!!! Important - In case the failure scenario you are experiencing is not covered by this - section, please immediately seek for [professional support](https://cloudnative-pg.io/support/). - -!!! Seealso "Postgres instance manager" - Please refer to the ["Postgres instance manager" section](instance_manager.md) - for more information the liveness and readiness probes implemented by - CloudNativePG. - -## Storage space usage - -The operator will instantiate one PVC for every PostgreSQL instance to store the `PGDATA` content. -A second PVC dedicated to the WAL storage will be provisioned in case `.spec.walStorage` is -specified during cluster initialization. - -Such storage space is set for reuse in two cases: - -- when the corresponding Pod is deleted by the user (and a new Pod will be recreated) -- when the corresponding Pod is evicted and scheduled on another node - -If you want to prevent the operator from reusing a certain PVC you need to -remove the PVC before deleting the Pod. For this purpose, you can use the -following command: - -```sh -kubectl delete -n [namespace] pvc/[cluster-name]-[serial] pod/[cluster-name]-[serial] -``` - !!! Note - If you specified a dedicated WAL volume, it will also have to be deleted during this process. 
- -```sh -kubectl delete -n [namespace] pvc/[cluster-name]-[serial] pvc/[cluster-name]-[serial]-wal pod/[cluster-name]-[serial] -``` - -For example: - -```sh -$ kubectl delete -n default pvc/cluster-example-1 pvc/cluster-example-1-wal pod/cluster-example-1 -persistentvolumeclaim "cluster-example-1" deleted -persistentvolumeclaim "cluster-example-1-wal" deleted -pod "cluster-example-1" deleted -``` - -## Failure modes - -A pod belonging to a `Cluster` can fail in the following ways: - -* the pod is explicitly deleted by the user; -* the readiness probe on its `postgres` container fails; -* the liveness probe on its `postgres` container fails; -* the Kubernetes worker node is drained; -* the Kubernetes worker node where the pod is scheduled fails. - -Each one of these failures has different effects on the `Cluster` and the -services managed by the operator. - -### Pod deleted by the user - -The operator is notified of the deletion. A new pod belonging to the -`Cluster` will be automatically created reusing the existing PVC, if available, -or starting from a physical backup of the *primary* otherwise. + In previous versions of CloudNativePG, this page included specific failure + scenarios. Since these largely follow standard Kubernetes behavior, we have + streamlined the content to avoid duplication of information that belongs to the + underlying Kubernetes stack and is not specific to CloudNativePG. + +CloudNativePG adheres to standard Kubernetes principles for self-healing and +high availability. We assume familiarity with core Kubernetes concepts such as +storage classes, PVCs, nodes, and Pods. For CloudNativePG-specific details, +refer to the ["Postgres Instance Manager" section](instance_manager.md), which +covers startup, liveness, and readiness probes, as well as the +[self-healing](#self-healing) section below. !!! Important - In case of deliberate deletion of a pod, `PodDisruptionBudget` policies - will not be enforced. - -Self-healing will happen as soon as the *apiserver* is notified. - -You can trigger a sudden failure on a given pod of the cluster using the -following generic command: - -```sh -kubectl delete -n [namespace] \ - pod/[cluster-name]-[serial] --grace-period=1 -``` - -For example, if you want to simulate a real failure on the primary and trigger -the failover process, you can run: + If you are running CloudNativePG in production, we strongly recommend + seeking [professional support](https://cloudnative-pg.io/support/). -```sh -kubectl delete pod [primary pod] --grace-period=1 -``` - -!!! Warning - Never use `--grace-period=0` in your failover simulation tests, as this - might produce misleading results with your PostgreSQL cluster. A grace - period of 0 guarantees that the pod is immediately removed from the - Kubernetes API server, without first ensuring that the PID 1 process of - the `postgres` container (the instance manager) is shut down - contrary - to what would happen in case of a real failure (e.g. unplug the power cord - cable or network partitioning). - As a result, the operator doesn't see the pod of the primary anymore, and - triggers a failover promoting the most aligned standby, without - the guarantee that the primary had been shut down. - -### Liveness Probe Failure - -By default, after three consecutive liveness probe failures, the `postgres` -container will be considered failed. The Pod will remain part of the `Cluster`, -but the *kubelet* will attempt to restart the failed container. 
If the issue -causing the failure persists and cannot be resolved, you can manually delete -the Pod. - -In both cases, self-healing occurs automatically once the underlying issues are -resolved. - -### Readiness Probe Failure - -By default, after three consecutive readiness probe failures, the Pod will be -marked as *not ready*. It will remain part of the `Cluster`, and no new Pod -will be created. If the issue causing the failure cannot be resolved, you can -manually delete the Pod. Once the failure is addressed, the Pod will -automatically regain its previous role. - -### Worker node drained - -The pod will be evicted from the worker node and removed from the service. A -new pod will be created on a different worker node from a physical backup of the -*primary* if the `reusePVC` option of the `nodeMaintenanceWindow` parameter -is set to `off` (default: `on` during maintenance windows, `off` otherwise). - -The `PodDisruptionBudget` may prevent the pod from being evicted if there -is at least another pod that is not ready. - -!!! Note - Single instance clusters prevent node drain when `reusePVC` is - set to `false`. Refer to the [Kubernetes Upgrade section](kubernetes_upgrade.md). +## Self-Healing -Self-healing will happen as soon as the *apiserver* is notified. +### Primary Failure -### Worker node failure +If the primary Pod fails: -Since the node is failed, the *kubelet* won't execute the liveness and -the readiness probes. The pod will be marked for deletion after the -toleration seconds configured by the Kubernetes cluster administrator for -that specific failure cause. Based on how the Kubernetes cluster is configured, -the pod might be removed from the service earlier. +- The operator promotes the most up-to-date standby with the lowest replication + lag. +- The `-rw` service is updated to point to the new primary. +- The failed Pod is removed from the `-r` and `-rw` services. +- Standby Pods begin replicating from the new primary. +- The former primary uses `pg_rewind` to re-synchronize if its PVC is available; + otherwise, a new standby is created from a backup of the new primary. -A new pod will be created on a different worker node from a physical backup -of the *primary*. The default value for that parameter in a Kubernetes -cluster is 5 minutes. +### Standby Failure -Self-healing will happen after `tolerationSeconds`. +If a standby Pod fails: -## Self-healing +- It is removed from the `-r` and `-ro` services. +- The Pod is restarted using its PVC if available; otherwise, a new Pod is + created from a backup of the current primary. +- Once ready, the Pod is re-added to the `-r` and `-ro` services. -If the failed pod is a standby, the pod is removed from the `-r` service -and from the `-ro` service. -The pod is then restarted using its PVC if available; otherwise, a new -pod will be created from a backup of the current primary. The pod -will be added again to the `-r` service and to the `-ro` service when ready. +## Manual Intervention -If the failed pod is the primary, the operator will promote the active pod -with status ready and the lowest replication lag, then point the `-rw` service -to it. The failed pod will be removed from the `-r` service and from the -`-rw` service. -Other standbys will start replicating from the new primary. The former -primary will use `pg_rewind` to synchronize itself with the new one if its -PVC is available; otherwise, a new standby will be created from a backup of the -current primary. 
- -## Manual intervention - -In the case of undocumented failure, it might be necessary to intervene -to solve the problem manually. +For failure scenarios not covered by automated recovery, manual intervention +may be required. !!! Important - In such cases, please do not perform any manual operation without - [professional support](https://cloudnative-pg.io/support/). + Do not perform manual operations without [professional support](https://cloudnative-pg.io/support/). -You can use the `cnpg.io/reconciliationLoop` annotation to temporarily disable -the reconciliation loop for a specific PostgreSQL cluster, as shown below: +### Disabling Reconciliation -``` yaml +To temporarily disable the reconciliation loop for a PostgreSQL cluster, use +the `cnpg.io/reconciliationLoop` annotation: + +```yaml metadata: name: cluster-example-no-reconcile annotations: @@ -186,12 +62,10 @@ spec: # ... ``` -The `cnpg.io/reconciliationLoop` must be used with extreme care -and for the sole duration of the extraordinary/emergency operation. +Use this annotation **with extreme caution** and only during emergency +operations. !!! Warning - Please make sure that you use this annotation only for a limited period of - time and you remove it when the emergency has finished. Leaving this annotation - in a cluster will prevent the operator from issuing any self-healing operation, - such as a failover. - + This annotation should be removed as soon as the issue is resolved. Leaving + it in place prevents the operator from executing self-healing actions, + including failover. From b679109e4b4070c38dbd84b6ae08f7aa9bf4f764 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 28 Feb 2025 09:22:28 +0100 Subject: [PATCH 409/836] fix: update dependencies (#7015) Update dependencies to prevent security scan failures: * golang.org/x/crypto -> v0.35.0 * golang.org/x/oauth2 -> v0.27.0 Closes #7014 Signed-off-by: Jonathan Gonzalez V. 
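For anyone reproducing this kind of bump locally, the sketch below shows one way to do it with the standard Go module tooling. It is illustrative only and not part of this patch; the module versions are simply the ones listed above.

```shell
# Illustrative sketch, not part of this patch: bump the two modules
# to the versions referenced in this commit message.
go get golang.org/x/crypto@v0.35.0
go get golang.org/x/oauth2@v0.27.0

# Rewrite go.mod and go.sum to match the updated module graph.
go mod tidy

# Sanity check: print the versions now resolved for both modules.
go list -m golang.org/x/crypto golang.org/x/oauth2
```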
--- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7bee0fc5ac..2056e6d258 100644 --- a/go.mod +++ b/go.mod @@ -98,9 +98,9 @@ require ( github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - golang.org/x/crypto v0.33.0 // indirect + golang.org/x/crypto v0.35.0 // indirect golang.org/x/net v0.35.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/text v0.22.0 // indirect diff --git a/go.sum b/go.sum index cecd9e5f67..276e5926bc 100644 --- a/go.sum +++ b/go.sum @@ -216,8 +216,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -226,8 +226,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 5282336ac15fbff1683a8d44550d90f3792e78a5 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Fri, 28 Feb 2025 10:35:10 +0100 Subject: [PATCH 410/836] docs: improve tcp_syn_retry troubleshooting section (#7011) Removed the part about setting values on the host. The sysctl is namespaced so altering the host configuration has no effect. 
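Because the sysctl is scoped to the network namespace, any inspection or override has to happen inside the Pod rather than on the host. The sketch below is illustrative only and not part of this patch: `<standby-pod>` is a placeholder, and it assumes an operand image that ships `cat`. Overriding the value per Pod would normally go through `securityContext.sysctls`, which for a sysctl outside the default safe set generally requires a kubelet allowlist; treat that as an assumption to verify against your cluster.

```shell
# Illustrative sketch, not part of this patch; <standby-pod> is a
# placeholder. The value inside the Pod's network namespace is the
# one that governs the retry behavior described above:
kubectl exec <standby-pod> -- cat /proc/sys/net/ipv4/tcp_syn_retries

# The same path read on the host node reflects only the host's own
# namespace, which is why changing it there has no effect on Pods:
cat /proc/sys/net/ipv4/tcp_syn_retries
```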
Closes #7017 Signed-off-by: Francesco Canovai --- .wordlist-en-custom.txt | 1 + docs/src/troubleshooting.md | 28 ++++++++++++++-------------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index a7c81ba57f..914058d14a 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -563,6 +563,7 @@ azurite ba backend backends +backoff backport backported backporting diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index 721dd3ca35..664bdc0272 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -797,18 +797,18 @@ you have sidecar injection enabled, retry with injection disabled. ### Replicas take over two minutes to reconnect after a failover -When the primary instance fails, the operator promotes the most advanced -standby to the primary role. Other standby instances then attempt to reconnect -to the `-rw` service for replication. However, during this reconnection -process, `kube-proxy` may not yet have updated its routing information. -As a result, the initial `SYN` packet sent by the standby instances can fail -to reach the intended destination. - -On Linux systems, the default value for the `tcp_syn_retries` kernel parameter -is set to 6. This configuration means the system will retry a failed connection -for approximately 127 seconds before giving up. This extended retry period can -significantly delay the reconnection process. For more details, consult the +When the primary instance fails, the operator promotes the most advanced standby +to the primary role. Other standby instances then attempt to reconnect to the +`-rw` service for replication. However, during this reconnection process, +`kube-proxy` may not have updated its routing information yet. As a result, the +initial `SYN` packet sent by the standby instances might fail to reach its +intended destination. + +If the network is configured to silently drop packets instead of rejecting them, +standby instances will not receive a response and will retry the connection +after an exponential backoff period. On Linux systems, the default value for the +`tcp_syn_retries` kernel parameter is 6, meaning the system will attempt to +establish the connection for approximately 127 seconds before giving up. This +prolonged retry period can significantly delay the reconnection process. +For more details, consult the [tcp_syn_retries documentation](https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt). - -Altering this behavior will require changing the `tcp_syn_retries` -parameter on the host node. From 2be105b0fa4871b8f423e06d41bbbc3dcb5c8cda Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 28 Feb 2025 11:26:20 +0100 Subject: [PATCH 411/836] fix: provide the operator extra time to start (#7008) When the operator starts, it calculates the hash code of its binary for each architecture included in the image. This process occurs early in the initialization, before the web server that serves the probes is launched. As a result, the liveness probe may fail. If the hash code calculation takes too long, the Kubelet will restart the operator. This patch adds a startup probe to the operator deployment, allowing up to 30 seconds to start up. 
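For reference, the time budget granted by a startup probe is failureThreshold multiplied by periodSeconds; with the values in the manifest below, that is 6 * 5s = 30s before the kubelet considers startup failed and restarts the container. The sketch below shows one way to observe the rollout from the outside. It is illustrative only and not part of this patch, and it assumes the default manifest names (`cnpg-system` namespace, `cnpg-controller-manager` deployment, and the `app.kubernetes.io/name` label); adjust them to your installation.

```shell
# Illustrative sketch, not part of this patch; the namespace,
# deployment name, and label are assumed defaults.
# The rollout now has up to failureThreshold * periodSeconds,
# i.e. 6 * 5s = 30s, to pass the startup probe.
kubectl -n cnpg-system rollout status deployment/cnpg-controller-manager

# Any startup or liveness probe failures surface as events on the Pod:
kubectl -n cnpg-system describe pod \
  -l app.kubernetes.io/name=cloudnative-pg
```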
Fixes: #7007 Signed-off-by: Leonardo Cecchi --- config/manager/manager.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 312cdc57cd..bc8b23dc20 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -69,6 +69,13 @@ spec: port: 9443 scheme: HTTPS path: /readyz + startupProbe: + failureThreshold: 6 + periodSeconds: 5 + httpGet: + port: 9443 + scheme: HTTPS + path: /readyz readinessProbe: httpGet: port: 9443 From 3c02d85a8bc0eb8cff3c83d29160ff87ca5c269a Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Fri, 28 Feb 2025 11:54:03 +0100 Subject: [PATCH 412/836] test(e2e): retry failures on imperative backup (#6878) Retry backup execution through the plugin in case of a failure. Adding the retry works around short-lived network issues with the api-server. Closes #6412 Signed-off-by: Francesco Canovai --- tests/e2e/tablespaces_test.go | 17 +++++++++-------- tests/e2e/volume_snapshot_test.go | 17 +++++++++-------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index d3e6314bc6..650270be57 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -460,14 +460,15 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, backupName = clusterName + pgTime.GetCurrentTimestampWithFormat("20060102150405") By("creating a volumeSnapshot and waiting until it's completed", func() { - err := backups.CreateOnDemandBackupViaKubectlPlugin( - namespace, - clusterName, - backupName, - apiv1.BackupTargetStandby, - apiv1.BackupMethodVolumeSnapshot, - ) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return backups.CreateOnDemandBackupViaKubectlPlugin( + namespace, + clusterName, + backupName, + apiv1.BackupTargetStandby, + apiv1.BackupMethodVolumeSnapshot, + ) + }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) // TODO: this is to force a CHECKPOINT when we run the backup on standby. // This should probably be moved elsewhere diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index 2dff661848..dcc6ea808d 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -110,14 +110,15 @@ var _ = Describe("Verify Volume Snapshot", It("can create a Volume Snapshot", func() { var backupObject apiv1.Backup By("creating a volumeSnapshot and waiting until it's completed", func() { - err := backups.CreateOnDemandBackupViaKubectlPlugin( - namespace, - clusterName, - "", - apiv1.BackupTargetStandby, - apiv1.BackupMethodVolumeSnapshot, - ) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() error { + return backups.CreateOnDemandBackupViaKubectlPlugin( + namespace, + clusterName, + "", + apiv1.BackupTargetStandby, + apiv1.BackupMethodVolumeSnapshot, + ) + }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) // trigger a checkpoint as the backup may run on standby CheckPointAndSwitchWalOnPrimary(namespace, clusterName) From 8d69ddcb49ee23d83aaafc359c404f89a12b167b Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 28 Feb 2025 14:16:49 +0100 Subject: [PATCH 413/836] docs(security): image signatures and attestations (#7023) Document image signatures and OCI attestations, providing SBOMs and provenance. Closes #7022 Signed-off-by: Jonathan Gonzalez V. 
Signed-off-by: Gabriele Bartolini Signed-off-by: Francesco Canovai Co-authored-by: Gabriele Bartolini Co-authored-by: Francesco Canovai --- .wordlist-en-custom.txt | 7 ++++ docs/src/security.md | 72 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 72 insertions(+), 7 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 914058d14a..6483161f7a 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -272,6 +272,7 @@ OngoingSnapshotBackups OnlineConfiguration OnlineUpdateEnabled OnlineUpgrading +OpenID OpenSSL OpenShift Openshift @@ -389,12 +390,15 @@ RuntimeDefault Ruocco SANs SAS +SBOM SCC SCCs SDK SELinux SHA SLA +SLSA +SPDX SPoF SQLQuery SQLRefs @@ -691,6 +695,7 @@ creds cron crt cryptographic +cryptographically csvlog csvs ctl @@ -1226,6 +1231,7 @@ shmmax shutdownCheckpointToken sig sigs +sigstore singlenamespace skipRange slotPrefix @@ -1333,6 +1339,7 @@ tolerations topologies topologyKey topologySpreadConstraints +toto transactionID transactional transactionid diff --git a/docs/src/security.md b/docs/src/security.md index c5058c7f4a..47df5292f6 100644 --- a/docs/src/security.md +++ b/docs/src/security.md @@ -58,20 +58,78 @@ please use this medium to report it. ## Container Every container image in CloudNativePG is automatically built via CI/CD -pipelines following every commit. These images include not only the operator's +pipelines after every commit. These images include not only the operator's image but also the operands' images, specifically for every supported -PostgreSQL version. During the CI/CD process, images undergo scanning with the -following tools: +PostgreSQL version. + +!!! Important + All operand images are automatically and regularly rebuilt by our pipelines + to incorporate the latest security updates at both the base image and package + levels. This ensures that container images distributed to the community receive + **patch-level updates** regularly. + +During the CI/CD process, images are scanned using the following tools: - **[Dockle](https://github.com/goodwithtech/dockle):** Ensures best practices in the container build process. - **[Snyk](https://snyk.io/):** Detects security issues within the container and reports findings via the GitHub interface. -!!! Important - All operand images are automatically rebuilt daily by our pipelines to - incorporate security updates at the base image and package level, providing - **patch-level updates** for the container images distributed to the community. +### Image Signatures + +The operator and [operand +images](https://github.com/cloudnative-pg/postgres-containers) are +cryptographically signed using [cosign](https://github.com/sigstore/cosign), a +signature tool from [sigstore](https://www.sigstore.dev/). +This process is automated via GitHub Actions and leverages +[short-lived tokens issued through OpenID Connect](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/about-security-hardening-with-openid-connect). + +The token issuer is `https://token.actions.githubusercontent.com`, and the +signing identity corresponds to a GitHub workflow executed under the +[cloudnative-pg](https://github.com/cloudnative-pg/cloudnative-pg/) repository. +This workflow uses the [cosign-installer action](https://github.com/marketplace/actions/cosign-installer) +to streamline the signing process. 
+
+To verify the authenticity of an operator image, use the following `cosign`
+command with the image digest:
+
+```shell
+cosign verify ghcr.io/cloudnative-pg/cloudnative-pg@sha256:<digest> \
+  --certificate-identity-regexp="^https://github.com/cloudnative-pg/cloudnative-pg/" \
+  --certificate-oidc-issuer="https://token.actions.githubusercontent.com"
+```
+
+### Attestations
+
+Container images include the following attestations for transparency and
+traceability:
+
+- **[Software Bill of Materials
+  (SBOM)](https://docs.docker.com/build/metadata/attestations/sbom/):** A
+  comprehensive list of software artifacts included in the image or used during
+  its build process, formatted using the
+  [in-toto SPDX predicate standard](https://github.com/in-toto/attestation/blob/main/spec/predicates/spdx.md).
+- **[Provenance](https://docs.docker.com/build/metadata/attestations/slsa-provenance/):**
+  Metadata detailing how the image was built, following the [SLSA Provenance](https://slsa.dev)
+  framework.
+
+You can retrieve the SBOM for a specific image and platform using the following
+command:
+
+```shell
+docker buildx imagetools inspect <image> \
+  --format '{{ json (index .SBOM "<platform>").SPDX }}'
+```
+
+This command outputs the SBOM in JSON format, providing a detailed view of the
+software components and build dependencies.
+
+For the provenance, use:
+
+```shell
+docker buildx imagetools inspect <image> \
+  --format '{{ json (index .Provenance "<platform>").SLSA }}'
+```

 ### Guidelines and Frameworks for Container Security

From 7e2ba907589edcdbd6b0c00255e59e5d9f49a696 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Niccol=C3=B2=20Fei?=
Date: Fri, 28 Feb 2025 15:09:10 +0100
Subject: [PATCH 414/836] chore: align OLM images' tags in the Makefile (#7028)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixing some leftovers in the Makefile that are still using the old format.
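For illustration, with a hypothetical `CONTROLLER_IMG=ghcr.io/example/cloudnative-pg:1.25.1`,
the old `sed` substitution rewrote the tag prefix, while the new form simply
appends a suffix to the full image reference:

```shell
# old: sed -e 's/:/:catalog-/'    ->  ghcr.io/example/cloudnative-pg:catalog-1.25.1
# new: ${CONTROLLER_IMG}-catalog  ->  ghcr.io/example/cloudnative-pg:1.25.1-catalog
```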
Closes #7027 Signed-off-by: Niccolò Fei --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 67f42b1563..e5f512438f 100644 --- a/Makefile +++ b/Makefile @@ -25,9 +25,9 @@ ifneq (,${IMAGE_TAG}) CONTROLLER_IMG = ${IMAGE_NAME}:${IMAGE_TAG} endif endif -CATALOG_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:catalog-/') -BUNDLE_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:bundle-/') -INDEX_IMG ?= $(shell echo "${CONTROLLER_IMG}" | sed -e 's/:/:index-/') +CATALOG_IMG ?= ${CONTROLLER_IMG}-catalog +BUNDLE_IMG ?= ${CONTROLLER_IMG}-bundle +INDEX_IMG ?= ${CONTROLLER_IMG}-index COMMIT := $(shell git rev-parse --short HEAD || echo unknown) DATE := $(shell git log -1 --pretty=format:'%ad' --date short) From 94b0605dfb53adcbf5b6814ba0f8ce61fb0b7400 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 28 Feb 2025 15:27:35 +0100 Subject: [PATCH 415/836] docs: release notes for 1.25.1 and 1.24.2 (#6922) Closes #6892 Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 3 +- contribute/release-notes-template.md | 4 +- docs/src/installation_upgrade.md | 66 ++--------------------- docs/src/release_notes/v1.24.md | 75 ++++++++++++++++++++++++++ docs/src/release_notes/v1.25.md | 79 ++++++++++++++++++++++++++++ docs/src/release_notes/v1.26.md | 52 ++++++++++++++++++ 6 files changed, 213 insertions(+), 66 deletions(-) create mode 100644 docs/src/release_notes/v1.26.md diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 6483161f7a..a0f5490eb9 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -261,6 +261,7 @@ NodeSelector NodesUsed Noland O'Reilly +OCI OLAP OLTP OOM @@ -866,7 +867,6 @@ init initDB initdb initialDelaySeconds -initialise initializingPVC inplace installModes @@ -1032,6 +1032,7 @@ operatorgroups operatorhub osdk ou +overridable ownerMetadata ownerReference packagemanifests diff --git a/contribute/release-notes-template.md b/contribute/release-notes-template.md index 53501c9492..e09753a725 100644 --- a/contribute/release-notes-template.md +++ b/contribute/release-notes-template.md @@ -5,10 +5,10 @@ sure you remove this comment. Create a spreadsheet with the list of commits since the last minor release: -Use the last known tag on `main` branch as a start (e.g. LAST_TAG=v1.24.0). +Use the last known tag on `main` branch as a start (e.g. LAST_TAG=v1.25.1). ```bash -LAST_TAG=v1.24.0 +LAST_TAG=v1.25.1 git checkout main git log ${LAST_TAG}.. --oneline --pretty="format:%h;%s" > log.csv ``` diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 5acbdbd854..a93e164596 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -251,11 +251,11 @@ When versions are not directly upgradable, the old version needs to be removed before installing the new one. This won't affect user data but only the operator itself. -### Upgrading to 1.25.0 or 1.24.2 +### Upgrading to 1.25 from a previous minor version !!! Important - We encourage all existing users of CloudNativePG to upgrade to version - 1.25.0 or at least to the latest stable version of the minor release you are + We strongly recommend that all CloudNativePG users upgrade to version + 1.25.1 or at least to the latest stable version of the minor release you are currently using (namely 1.24.x). !!! Warning @@ -309,63 +309,3 @@ distributed PostgreSQL setup. 
Ensure the following steps are taken: For more information, please refer to the ["Distributed Topology" section for replica clusters](replica_cluster.md#distributed-topology). - -### Upgrading to 1.23 from a previous minor version - -#### User defined replication slots - -CloudNativePG now offers automated synchronization of all replication slots -defined on the primary to any standby within the High Availability (HA) -cluster. - -If you manually manage replication slots on a standby, it is essential to -exclude those replication slots from synchronization. Failure to do so may -result in CloudNativePG removing them from the standby. To implement this -exclusion, utilize the following YAML configuration. In this example, -replication slots with a name starting with 'foo' are prevented from -synchronization: - -```yaml -... - replicationSlots: - synchronizeReplicas: - enabled: true - excludePatterns: - - "^foo" -``` - -Alternatively, if you prefer to disable the synchronization mechanism entirely, -use the following configuration: - -```yaml -... - replicationSlots: - synchronizeReplicas: - enabled: false -``` - -#### Server-side apply of manifests - -To ensure compatibility with Kubernetes 1.29 and upcoming versions, -CloudNativePG now mandates the utilization of -["Server-side apply"](https://kubernetes.io/docs/reference/using-api/server-side-apply/) -when deploying the operator manifest. - -While employing this installation method poses no challenges for new -deployments, updating existing operator manifests using the `--server-side` -option may result in errors resembling the example below: - -``` text -Apply failed with 1 conflict: conflict with "kubectl-client-side-apply" using.. -``` - -If such errors arise, they can be resolved by explicitly specifying the -`--force-conflicts` option to enforce conflict resolution: - -```sh -kubectl apply --server-side --force-conflicts -f -``` - -Henceforth, `kube-apiserver` will be automatically acknowledged as a recognized -manager for the CRDs, eliminating the need for any further manual intervention -on this matter. diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md index fdb59d023b..b652171b17 100644 --- a/docs/src/release_notes/v1.24.md +++ b/docs/src/release_notes/v1.24.md @@ -6,6 +6,81 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.24) on the release branch in GitHub. +## Version 1.24.3 + +**Release Date:** February 28, 2025 + +### Enhancements + +- Introduced a startup probe for the operator to enhance reliability and + prevent premature liveness probe failures during initialization. (#7008) +- Added support for using the `-r` service with the Pooler. (#6868) +- Introduced an optional `--ttl` flag for the `pgbench` plugin, enabling + automatic deletion of completed jobs after a user-defined duration. (#6701) +- Marked known error messages from the Azure CSI Driver for volume snapshots as + retryable, improving resilience. (#6906) +- Updated the default PostgreSQL version to 17.4 for new cluster + definitions. (#6960) + +### Security + +- The operator image build process has been enhanced to strengthen + security and transparency. Images are now signed with `cosign`, and + OCI attestations are generated, incorporating the Software Bill of + Materials (SBOM) and provenance data. Additionally, OCI annotations + have been added to improve traceability and ensure the integrity of + the images. 
+ +### Bug Fixes + +- Fixed inconsistent behavior in default probe knob values when `.spec.probes` + is defined, ensuring users can override all settings, including + `failureThreshold`. If unspecified in the startup probe, `failureThreshold` is + now correctly derived from `.spec.startupDelay / periodSeconds` (default: `10`, + now overridable). The same logic applies to liveness probes via + `.spec.livenessProbeTimeout`. (#6656) +- Managed service ports now take precedence over default operator-defined + ports. (#6474) +- Fixed an issue where WAL metrics were unavailable after an instance restart + until a configuration change was applied. (#6816) +- Fixed an issue in monolithic database import where role import was skipped if + no roles were specified. (#6646) +- Added support for new metrics introduced in PgBouncer 1.24. (#6630) +- Improved handling of replication-sensitive parameter reductions by ensuring + timely reconciliation after primary server restarts. (#6440) +- Introduced a new `isWALArchiver` flag in the CNPG-I plugin configuration, + allowing users to designate a plugin as a WAL archiver. This enables seamless + migration from in-tree Barman Cloud support to the plugin while maintaining WAL + archive consistency. (#6593) +- Ensured `override.conf` is consistently included in `postgresql.conf` during + replica cluster bootstrapping, preventing replication failures due to missing + configuration settings. (#6808) +- Ensured `override.conf` is correctly initialized before invoking `pg_rewind` + to prevent failures during primary role changes. (#6670) +- Enhanced webhook responses to return both warnings and errors when + applicable, improving diagnostic accuracy. (#6579) +- Ensured the operator version is correctly reconciled. (#6496) +- Improved PostgreSQL version detection by using a more precise check of the + data directory. (#6659) +- Volume Snapshot Backups: + - Fixed an issue where unused backup connections were not properly cleaned + up. (#6882) + - Ensured the instance manager closes stale PostgreSQL connections left by + failed volume snapshot backups. (#6879) + - Prevented the operator from starting a new volume snapshot backup while + another is already in progress. (#6890) +- `cnpg` plugin: + - Restored functionality of the `promote` plugin command. (#6476) + - Enhanced `kubectl cnpg report --logs ` to collect logs from all + containers, including sidecars. (#6636) + - Ensured `pgbench` jobs can run when a `Cluster` uses an `ImageCatalog`. + (#6868) + +### Technical Enhancements + +- Added support for Kubernetes `client-gen`, enabling automated generation of + Go clients for all CloudNativePG CRDs. (#6695) + ## Version 1.24.2 **Release Date:** December 23, 2024 diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md index 0e8ca6b85a..d8201f899b 100644 --- a/docs/src/release_notes/v1.25.md +++ b/docs/src/release_notes/v1.25.md @@ -6,6 +6,85 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.25) on the release branch in GitHub. +## Version 1.25.1 + +**Release Date:** February 28, 2025 + +### Enhancements + +- Introduced a startup probe for the operator to enhance reliability and + prevent premature liveness probe failures during initialization. (#7008) +- Added support for using the `-r` service with the Pooler. (#6868) +- Introduced an optional `--ttl` flag for the `pgbench` plugin, enabling + automatic deletion of completed jobs after a user-defined duration. 
(#6701) +- Marked known error messages from the Azure CSI Driver for volume snapshots as + retryable, improving resilience. (#6906) +- Updated the default PostgreSQL version to 17.4 for new cluster + definitions. (#6960) + +### Security + +- The operator image build process has been enhanced to strengthen + security and transparency. Images are now signed with `cosign`, and + OCI attestations are generated, incorporating the Software Bill of + Materials (SBOM) and provenance data. Additionally, OCI annotations + have been added to improve traceability and ensure the integrity of + the images. + +### Bug Fixes + +- Fixed inconsistent behavior in default probe knob values when `.spec.probes` + is defined, ensuring users can override all settings, including + `failureThreshold`. If unspecified in the startup probe, `failureThreshold` is + now correctly derived from `.spec.startupDelay / periodSeconds` (default: `10`, + now overridable). The same logic applies to liveness probes via + `.spec.livenessProbeTimeout`. (#6656) +- Managed service ports now take precedence over default operator-defined + ports. (#6474) +- Fixed an issue where WAL metrics were unavailable after an instance restart + until a configuration change was applied. (#6816) +- Fixed an issue in monolithic database import where role import was skipped if + no roles were specified. (#6646) +- Added support for new metrics introduced in PgBouncer 1.24. (#6630) +- Resolved an issue where `Database`, `Publication`, and `Subscription` CRDs + became stuck in `cluster resource has been deleted, skipping reconciliation` + after cluster rehydration. This patch forces `status.observedGeneration` to + zero, ensuring proper reconciliation. (#6607) +- Improved handling of replication-sensitive parameter reductions by ensuring + timely reconciliation after primary server restarts. (#6440) +- Introduced a new `isWALArchiver` flag in the CNPG-I plugin configuration, + allowing users to designate a plugin as a WAL archiver. This enables seamless + migration from in-tree Barman Cloud support to the plugin while maintaining WAL + archive consistency. (#6593) +- Ensured `override.conf` is consistently included in `postgresql.conf` during + replica cluster bootstrapping, preventing replication failures due to missing + configuration settings. (#6808) +- Ensured `override.conf` is correctly initialized before invoking `pg_rewind` + to prevent failures during primary role changes. (#6670) +- Enhanced webhook responses to return both warnings and errors when + applicable, improving diagnostic accuracy. (#6579) +- Ensured the operator version is correctly reconciled. (#6496) +- Improved PostgreSQL version detection by using a more precise check of the + data directory. (#6659) +- Volume Snapshot Backups: + - Fixed an issue where unused backup connections were not properly cleaned + up. (#6882) + - Ensured the instance manager closes stale PostgreSQL connections left by + failed volume snapshot backups. (#6879) + - Prevented the operator from starting a new volume snapshot backup while + another is already in progress. (#6890) +- `cnpg` plugin: + - Restored functionality of the `promote` plugin command. (#6476) + - Enhanced `kubectl cnpg report --logs ` to collect logs from all + containers, including sidecars. (#6636) + - Ensured `pgbench` jobs can run when a `Cluster` uses an `ImageCatalog`. + (#6868) + +### Technical Enhancements + +- Added support for Kubernetes `client-gen`, enabling automated generation of + Go clients for all CloudNativePG CRDs. 
(#6695)
+
 ## Version 1.25.0
 
 **Release Date:** December 23, 2024
diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md
new file mode 100644
index 0000000000..e25a2b4675
--- /dev/null
+++ b/docs/src/release_notes/v1.26.md
@@ -0,0 +1,52 @@
+# Release notes for CloudNativePG 1.26
+
+History of user-visible changes in the 1.26 minor release of CloudNativePG.
+
+For a complete list of changes, please refer to the
+[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.26)
+on the release branch in GitHub.
+
+## Version 1.26.0-rc1
+
+**Release date:** Mon DD, 20YY
+
+### Important changes:
+
+- OPTIONAL
+- OPTIONAL
+
+### Features:
+
+- **MAIN FEATURE #1**: short description
+- **MAIN FEATURE #2**: short description
+
+### Enhancements:
+
+- feat: support customizable pod patches via annotations (#6323)
+
+- `cnpg` plugin updates:
+  - ...
+
+### Security:
+
+- Add ...
+- Improve ...
+
+### Fixes:
+
+- Enhance ...
+- Disable ...
+- Gracefully handle ...
+- Wait ...
+- Fix ...
+- Address ...
+- `cnpg` plugin:
+  - ...
+  - ...
+
+### Supported versions
+
+- Kubernetes 1.31, 1.30, and 1.29
+- PostgreSQL 17, 16, 15, 14, and 13
+  - PostgreSQL 17.X is the default image
+  - PostgreSQL 13 support ends on November 12, 2025

From 62b8ae9d080a63137b5df7eeebe61c568157ea39 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Fri, 28 Feb 2025 16:44:35 +0100
Subject: [PATCH 416/836] ci(release): build specific ids during release process (#7033)

GoReleaser by default builds all the ids defined in the GoReleaser file.
After we introduced the `-race` build detection, that id was also being
built during the release process, which is not desired. Now we build only
the specific ids we want for the release.

Signed-off-by: Jonathan Gonzalez V.
---
 .github/workflows/release-publish.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml
index 909e1b6d01..d4b214e324 100644
--- a/.github/workflows/release-publish.yml
+++ b/.github/workflows/release-publish.yml
@@ -153,7 +153,7 @@ jobs:
         with:
           distribution: goreleaser
           version: v2
-          args: release --clean --timeout 60m
+          args: release --clean --timeout 60m --id manager --id kubectl-cnpg
         env:
           DATE: ${{ env.DATE }}
           COMMIT: ${{ env.COMMIT }}

From fba278629a924f244cf8cac623a3b4e7241c145a Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Fri, 28 Feb 2025 17:57:41 +0100
Subject: [PATCH 417/836] ci(release): build race only when RACE env is set (#7038)

Build the manager-race id with GoReleaser only when the RACE env
variable is set

Signed-off-by: Jonathan Gonzalez V.
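As a sketch of the intended behavior (the local invocations below are assumed
for illustration, not part of this patch): CI exports `RACE=true`, so the
race-enabled manager id is built there, while the release workflow leaves
`RACE` unset and GoReleaser skips that id:

```shell
# CI path: RACE is exported, so the race-enabled manager id is built
RACE=true goreleaser build --clean --snapshot

# Release path: RACE is unset, so the race-enabled id is skipped
goreleaser build --clean --snapshot
```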
--- .github/workflows/continuous-integration.yml | 1 + .github/workflows/release-publish.yml | 2 +- .goreleaser.yml | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 6aaa3cf633..55da8e56e8 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -534,6 +534,7 @@ jobs: DATE: ${{ env.DATE }} COMMIT: ${{ env.COMMIT }} VERSION: ${{ env.VERSION }} + RACE: "true" - name: Set up QEMU uses: docker/setup-qemu-action@v3 diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index d4b214e324..909e1b6d01 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -153,7 +153,7 @@ jobs: with: distribution: goreleaser version: v2 - args: release --clean --timeout 60m --id manager --id kubectl-cnpg + args: release --clean --timeout 60m env: DATE: ${{ env.DATE }} COMMIT: ${{ env.COMMIT }} diff --git a/.goreleaser.yml b/.goreleaser.yml index 55a38dcd61..191b7fa2b0 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -37,6 +37,8 @@ builds: binary: manager/manager_{{ .Arch }} main: cmd/manager/main.go no_unique_dist_dir: true + skip: >- + {{ if and (isEnvSet "RACE") (eq .Env.RACE "true") }}false{{ else }}true{{ end }} gcflags: - all=-trimpath={{.Env.GOPATH}};{{.Env.PWD}} ldflags: From fba278629a924f244cf8cac623a3b4e7241c145a Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 28 Feb 2025 18:41:52 +0100 Subject: [PATCH 418/836] ci(release): add missing permission to the release workflow (#7040) The release-publish.yml was missing the id-token permission required by cosign to sign the container images Signed-off-by: Jonathan Gonzalez V. 
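For reference, a minimal sketch of the resulting `permissions` block, derived
from the diff that follows; cosign's keyless flow asks GitHub for an OIDC
token, which requires `id-token: write`:

```yaml
permissions:
  contents: write
  packages: write
  # Lets the job request a GitHub OIDC token, which cosign exchanges
  # for a short-lived certificate when signing images (keyless signing).
  id-token: write
```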
--- .github/workflows/release-publish.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 909e1b6d01..2ffc37c50c 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -14,6 +14,7 @@ env: permissions: contents: write packages: write + id-token: write jobs: From 5c968e066cbd1843475a5837fba563c79cca6d14 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 28 Feb 2025 20:04:17 +0100 Subject: [PATCH 419/836] ci(release): fix cosign invocation (#7042) Signed-off-by: Marco Nenciarini --- .github/workflows/release-publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 2ffc37c50c..2222cadc9d 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -217,7 +217,7 @@ jobs: - name: Sign images run: | images=$(echo '${{ steps.bake-push.outputs.metadata }}' | - jq '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' + jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' ) cosign sign --yes ${images} From db9e4cec9bdbafad3200c7b2e972d7fab6fa6d2f Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 3 Mar 2025 16:58:38 +0100 Subject: [PATCH 420/836] chore(setup-cluster): fix the integration with bake (#7047) Signed-off-by: Marco Nenciarini --- Makefile | 6 ++++-- hack/setup-cluster.sh | 23 ++++++++++++++--------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index e5f512438f..be41950037 100644 --- a/Makefile +++ b/Makefile @@ -147,8 +147,10 @@ docker-build: go-releaser ## Build the docker image. if [ -n "${BUILDER_NAME}" ]; then \ builder_name_option="--builder ${BUILDER_NAME}"; \ fi; \ - DOCKER_BUILDKIT=1 tag=${IMAGE_TAG} buildVersion=${VERSION} revision=${COMMIT} \ - docker buildx bake $${builder_name_option} --set=*.platform="linux/${ARCH}" --push + DOCKER_BUILDKIT=1 buildVersion=${VERSION} revision=${COMMIT} \ + docker buildx bake $${builder_name_option} --set=*.platform="linux/${ARCH}" \ + --set distroless.tags="$${CONTROLLER_IMG}" \ + --push distroless olm-bundle: manifests kustomize operator-sdk ## Build the bundle for OLM installation set -xeEuo pipefail ;\ diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index e254a84fc9..e6cca6d229 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -640,9 +640,14 @@ load() { echo "${bright}Building operator from current worktree${reset}" - make -C "${ROOT_DIR}" IMAGE_TAG="$(print_tag)" registry="${registry_name}:5000" insecure="true" \ + CONTROLLER_IMG="$(ENABLE_REGISTRY="${ENABLE_REGISTRY}" print_image)" + make -C "${ROOT_DIR}" CONTROLLER_IMG="${CONTROLLER_IMG}" insecure="true" \ ARCH="${ARCH}" BUILDER_NAME=${builder_name} docker-build + if [ -z "${ENABLE_REGISTRY:-}" ]; then + "load_image_${ENGINE}" "${CLUSTER_NAME}" "${CONTROLLER_IMG}" + fi + echo "${bright}Loading new operator image on cluster ${CLUSTER_NAME}${reset}" echo "${bright}Done loading new operator image on cluster ${CLUSTER_NAME}${reset}" @@ -655,12 +660,16 @@ load() { echo "${bright}Building a 'prime' operator from current worktree${reset}" + PRIME_CONTROLLER_IMG="${CONTROLLER_IMG}-prime" CURRENT_VERSION=$(make -C "${ROOT_DIR}" -s print-version) PRIME_VERSION="${CURRENT_VERSION}-prime" - PRIME_TAG="$(print_tag)-prime" - make -C "${ROOT_DIR}" IMAGE_TAG="${PRIME_TAG}" VERSION="${PRIME_VERSION}" 
registry="${registry_name}:5000" insecure="true" \
+  make -C "${ROOT_DIR}" CONTROLLER_IMG="${PRIME_CONTROLLER_IMG}" VERSION="${PRIME_VERSION}" insecure="true" \
     ARCH="${ARCH}" BUILDER_NAME="${builder_name}" docker-build
 
+  if [ -z "${ENABLE_REGISTRY:-}" ]; then
+    "load_image_${ENGINE}" "${CLUSTER_NAME}" "${PRIME_CONTROLLER_IMG}"
+  fi
+
   echo "${bright}Done loading new 'prime' operator image on cluster ${CLUSTER_NAME}${reset}"
   fi
 
@@ -681,16 +690,12 @@ deploy() {
   echo "${bright}Done deploying manifests from current worktree on cluster ${CLUSTER_NAME}${reset}"
 }
 
-print_tag() {
+print_image() {
   local tag=devel
   if [ -n "${ENABLE_REGISTRY:-}" ] || "check_registry_${ENGINE}"; then
     tag=latest
   fi
-  echo "${tag}"
-}
-
-print_image() {
-  echo "${registry_name}:5000/cloudnative-pg-testing:$(print_tag)"
+  echo "${registry_name}:5000/cloudnative-pg-testing:${tag}"
 }
 
 export_logs() {

From 0e5d2adcea61a1e4665b17c6047c41f7dc4eb6b6 Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Mon, 3 Mar 2025 18:47:06 +0100
Subject: [PATCH 421/836] test: larger eks image (#7053)

The drain E2E test on EKS can fail because the operator can become
unschedulable during a drain when the CPU it requests is not available.
Use a larger machine.

Closes #7052

Signed-off-by: Francesco Canovai
---
 hack/e2e/eks-cluster.yaml.template | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hack/e2e/eks-cluster.yaml.template b/hack/e2e/eks-cluster.yaml.template
index cf9ca18014..5cc370b85e 100644
--- a/hack/e2e/eks-cluster.yaml.template
+++ b/hack/e2e/eks-cluster.yaml.template
@@ -11,7 +11,7 @@ iam:
 
 managedNodeGroups:
   - name: default
-    instanceType: m5.large
+    instanceType: c6g.xlarge
     desiredCapacity: 3
 
 addons:

From e476d65f9bf81ce23629d7f2c37df5b97736357f Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Tue, 4 Mar 2025 11:31:46 +0100
Subject: [PATCH 422/836] test: run EKS e2e on x86_64 instead of arm64 (#7059)

In #7053 we switched to a machine with 4 CPUs instead of 2, but the
architecture changed as well. To keep the tests aligned, we switch to a
4-CPU x86_64 machine.

Signed-off-by: Francesco Canovai
---
 hack/e2e/eks-cluster.yaml.template | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hack/e2e/eks-cluster.yaml.template b/hack/e2e/eks-cluster.yaml.template
index 5cc370b85e..6d91f47958 100644
--- a/hack/e2e/eks-cluster.yaml.template
+++ b/hack/e2e/eks-cluster.yaml.template
@@ -11,7 +11,7 @@ iam:
 
 managedNodeGroups:
   - name: default
-    instanceType: c6g.xlarge
+    instanceType: c6a.xlarge
     desiredCapacity: 3
 
 addons:

From e50d6f305213d2b1af58a5391875d7dd0f5350ba Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Tue, 4 Mar 2025 11:40:27 +0100
Subject: [PATCH 423/836] docs: archive the 1.23 release notes (#7060)

1.23 release notes are already archived in the release branches but not
in `main`. This commit fixes the issue.

Signed-off-by: Jaime Silvela
---
 docs/src/release_notes.md                 | 2 +-
 docs/src/release_notes/{ => old}/v1.23.md | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename docs/src/release_notes/{ => old}/v1.23.md (100%)

diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md
index 71c503fb91..3bb0794089 100644
--- a/docs/src/release_notes.md
+++ b/docs/src/release_notes.md
@@ -11,7 +11,7 @@ refer to ["Supported releases"](supported_releases.md).
Older releases: -- [CloudNativePG 1.23](release_notes/v1.23.md) +- [CloudNativePG 1.23](release_notes/old/v1.23.md) - [CloudNativePG 1.22](release_notes/old/v1.22.md) - [CloudNativePG 1.21](release_notes/old/v1.21.md) - [CloudNativePG 1.20](release_notes/old/v1.20.md) diff --git a/docs/src/release_notes/v1.23.md b/docs/src/release_notes/old/v1.23.md similarity index 100% rename from docs/src/release_notes/v1.23.md rename to docs/src/release_notes/old/v1.23.md From a85fcd8330e2ee0990d56e644c825621618536b7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 28 Feb 2025 20:49:30 +0100 Subject: [PATCH 424/836] Version tag to 1.25.1 (#7044) Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- docs/src/installation_upgrade.md | 4 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.25.1.yaml | 17791 +++++++++++++++++++++++++++++ 4 files changed, 17811 insertions(+), 20 deletions(-) create mode 100644 releases/cnpg-1.25.1.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index a93e164596..daf435e288 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -7,12 +7,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.0.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.1.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.0.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.1.yaml ``` You can verify that with: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 1e4e101864..ed9d8535e5 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -30,11 +30,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.25.0 release of the plugin, for an Intel based +For example, let's install the 1.25.1 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0/kubectl-cnpg_1.25.0_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.1/kubectl-cnpg_1.25.1_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -45,17 +45,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.25.0) ... -Setting up cnpg (1.25.0) ... +Unpacking cnpg (1.25.1) ... +Setting up cnpg (1.25.1) ... ``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.25.0 release for an +As in the example for `.rpm` packages, let's install the 1.25.1 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. 
```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.0/kubectl-cnpg_1.25.0_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.1/kubectl-cnpg_1.25.1_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -69,7 +69,7 @@ Dependencies resolved. Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.25.0-1 @commandline 20 M + cnpg x86_64 1.25.1 @commandline 20 M Transaction Summary ==================================================================================================== @@ -293,9 +293,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.25.0 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.25.1 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.1 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.1 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -349,9 +349,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.25.0 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.0 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.25.1 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.1 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.1 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -574,12 +574,12 @@ Archive: report_operator_.zip ```output ====== Begin of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0","build":{"Version":"1.25.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.1","build":{"Version":"1.25.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.0","build":{"Version":"1.25.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.1","build":{"Version":"1.25.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index a098860e52..f6c527b9ad 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -20,13 +20,13 @@ package versions const ( // Version is the version of the operator - Version = "1.25.0" + Version = "1.25.1" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.4" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.0" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.1" ) // BuildInfo is a struct containing all the info about the build @@ -36,7 +36,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.25.0" + buildVersion = "1.25.1" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.25.1.yaml b/releases/cnpg-1.25.1.yaml new file mode 100644 index 0000000000..58439bc983 --- /dev/null +++ b/releases/cnpg-1.25.1.yaml @@ -0,0 +1,17791 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: Backup is the Schema for the backups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. 
+ `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is tho role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the
+ corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
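+ # Example (illustrative only, not part of the generated schema): a sketch of
+ # the preferred node affinity term described at this step; the workload=postgres
+ # label is hypothetical:
+ #
+ #   spec:
+ #     affinity:
+ #       nodeAffinity:
+ #         preferredDuringSchedulingIgnoredDuringExecution:
+ #         - weight: 80
+ #           preference:
+ #             matchExpressions:
+ #             - key: workload
+ #               operator: In
+ #               values:
+ #               - postgres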
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a map of key-value pairs used to define the nodes on which
+ the pods can run.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required" could lead to instances remaining pending until new kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See k8s documentation
+ for more info on that
+ type: string
+ type: object
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ providing explicitly the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
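+ # Example (illustrative only, not part of the generated schema): combining the
+ # scheduling knobs described above, required anti-affinity between instances
+ # plus a toleration for a hypothetical dedicated=postgres:NoSchedule taint:
+ #
+ #   spec:
+ #     affinity:
+ #       enablePodAntiAffinity: true
+ #       podAntiAffinityType: required
+ #       topologyKey: kubernetes.io/hostname
+ #       tolerations:
+ #       - key: dedicated
+ #         operator: Equal
+ #         value: postgres
+ #         effect: NoSchedule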
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to back up the data files.
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder)
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
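+ # Example (illustrative only, not part of the generated schema): a minimal
+ # `barmanObjectStore` configuration using the fields above; the bucket path is
+ # hypothetical, and object store credentials (e.g. `s3Credentials`, described
+ # below) would normally be set as well:
+ #
+ #   spec:
+ #     backup:
+ #       barmanObjectStore:
+ #         destinationPath: s3://backups/cluster-example
+ #         data:
+ #           compression: gzip
+ #           jobs: 2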
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ providing explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store.
Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are empty string, which will default to `prefer-standby` policy,
+ `primary` to have backups run always on primary instances, `prefer-standby`
+ to have backups run preferably on the most updated standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations are key-value pairs that will be added
+ to .metadata.annotations snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels are key-value pairs that will be added
+ to .metadata.labels snapshot resources.
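+ # Example (illustrative only, not part of the generated schema): retention and
+ # backup placement as described above, keeping 30 days of backups and WALs,
+ # preferring a standby for backups, and labelling any volume snapshots taken;
+ # the snapshot class and label are hypothetical:
+ #
+ #   spec:
+ #     backup:
+ #       retentionPolicy: "30d"
+ #       target: prefer-standby
+ #       volumeSnapshot:
+ #         className: csi-snapclass
+ #         labels:
+ #           backup-tier: standard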
+ type: object
+ online:
+ default: true
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ type: boolean
+ onlineConfiguration:
+ default:
+ immediateCheckpoint: false
+ waitForArchive: true
+ description: Configuration parameters to control the online/hot
+ backup with volume snapshots
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ snapshotOwnerReference:
+ default: none
+ description: SnapshotOwnerReference indicates the type of
+ owner reference the snapshot should have
+ enum:
+ - none
+ - cluster
+ - backup
+ type: string
+ tablespaceClassName:
+ additionalProperties:
+ type: string
+ description: |-
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+ It defaults to the PGDATA Snapshot Class, if set.
+ type: object
+ walClassName:
+ description: WalClassName specifies the Snapshot Class to
+ be used for the PG_WAL PersistentVolumeClaim.
+ type: string
+ type: object
+ type: object
+ bootstrap:
+ description: Instructions to bootstrap this cluster
+ properties:
+ initdb:
+ description: Bootstrap the cluster via initdb
+ properties:
+ builtinLocale:
+ description: |-
+ Specifies the locale name when the builtin provider is used.
+ This option requires `localeProvider` to be set to `builtin`.
+ Available from PostgreSQL 17.
+ type: string
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default:`UTF8`)
+ type: string
+ icuLocale:
+ description: |-
+ Specifies the ICU locale when the ICU provider is used.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 15.
+ type: string
+ icuRules:
+ description: |-
+ Specifies additional collation rules to customize the behavior of the default collation.
+ This option requires `localeProvider` to be set to `icu`.
+ Available from PostgreSQL 16.
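+ # Example (illustrative only, not part of the generated schema): an `initdb`
+ # bootstrap using the ICU locale provider described above (PostgreSQL 15+);
+ # the database and owner names are hypothetical:
+ #
+ #   spec:
+ #     bootstrap:
+ #       initdb:
+ #         database: app
+ #         owner: app
+ #         dataChecksums: true
+ #         localeProvider: icu
+ #         icuLocale: en-US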
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ pgDumpExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ pgRestoreExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
+ locale:
+ description: Sets the default collation order and character
+ classification in the new database.
+ type: string
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default:`C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default:`C`)
+ type: string
+ localeProvider:
+ description: |-
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations,
+ please use the explicitly provided parameters instead.
+ If defined, explicit values will be ignored.
+ items:
+ type: string
+ type: array
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ postInitApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitApplicationSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the application database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
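+ # Example (illustrative only, not part of the generated schema): seeding the
+ # application database right after initdb via the SQL-reference fields above.
+ # Note that Secrets are applied before ConfigMaps, each in array order; the
+ # Secret/ConfigMap names and keys are hypothetical:
+ #
+ #   spec:
+ #     bootstrap:
+ #       initdb:
+ #         postInitApplicationSQLRefs:
+ #           secretRefs:
+ #           - name: seed-credentials
+ #             key: grants.sql
+ #           configMapRefs:
+ #           - name: seed-schema
+ #             key: schema.sql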
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
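+ # Example (illustrative only, not part of the generated schema): a
+ # point-in-time recovery bootstrap using the `recoveryTarget` fields above.
+ # Here "origin" is a hypothetical name referring to an external cluster
+ # defined elsewhere in the spec, and the timestamp is illustrative:
+ #
+ #   spec:
+ #     bootstrap:
+ #       recovery:
+ #         source: origin
+ #         recoveryTarget:
+ #           targetTime: "2024-09-25T08:00:00Z"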
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; + this can be omitted if ReplicationTLSSecret is provided.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server SSL certificates; + this can be omitted if ServerTLSSecret is provided.
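To make the split between the two CA secrets concrete, a hedged example of user-provided certificates is sketched below; every secret name is a hypothetical assumption, and the operator generates any secret that is left out, as the descriptions above state:

```yaml
# excerpt from a Cluster .spec (illustrative)
certificates:
  clientCASecret: my-client-ca             # must contain ca.crt; ca.key too unless replicationTLSSecret is given
  replicationTLSSecret: my-replication-tls # kubernetes.io/tls secret for the streaming_replica user
  serverCASecret: my-server-ca             # must contain ca.crt; ca.key too unless serverTLSSecret is given
  serverTLSSecret: my-server-tls           # kubernetes.io/tls secret used as ssl_cert_file/ssl_key_file
```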
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
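Since `env` follows the core `EnvVar` format, a minimal sketch combining it with `enableSuperuserAccess` could look like this; the variable names and the `aws-creds` Secret are hypothetical:

```yaml
# excerpt from a Cluster .spec (illustrative)
enableSuperuserAccess: true     # the operator keeps the postgres password in sync with SuperuserSecret
env:
  - name: TZ
    value: Europe/Rome
  - name: AWS_ACCESS_KEY_ID     # hypothetical variable
    valueFrom:
      secretKeyRef:
        name: aws-creds         # hypothetical Secret
        key: ACCESS_KEY_ID
```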
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
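A minimal sketch of the `ephemeralVolumeSource` field using the `volumeClaimTemplate` described above might be the following, assuming a hypothetical `fast-local` StorageClass:

```yaml
# excerpt from a Cluster .spec (illustrative)
ephemeralVolumeSource:
  volumeClaimTemplate:
    spec:                            # required; copied unchanged into the generated PVC
      accessModes: ["ReadWriteOnce"]
      storageClassName: fast-local   # hypothetical StorageClass
      resources:
        requests:
          storage: 1Gi
```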
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
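For the simpler `ephemeralVolumesSizeLimit` alternative just defined, a hedged sketch with assumed sizes:

```yaml
# excerpt from a Cluster .spec (illustrative)
ephemeralVolumesSizeLimit:
  shm: 256Mi            # cap for the shared memory volume
  temporaryData: 2Gi    # cap for the temporary data volume
```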
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + enum: + - gzip + - bzip2 + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
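Pulling the `barmanObjectStore` fields above together, a hypothetical external cluster pointing at an S3-compatible object store might look like this; the cluster name, bucket path, endpoint, and Secret are all illustrative assumptions:

```yaml
# excerpt from a Cluster .spec (illustrative)
externalClusters:
  - name: origin                             # hypothetical source cluster name
    barmanObjectStore:
      destinationPath: s3://backups/origin   # hypothetical bucket path
      endpointURL: https://s3.example.com    # optional, for non-AWS endpoints
      s3Credentials:
        accessKeyId:
          name: aws-creds                    # hypothetical Secret
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: aws-creds
          key: SECRET_ACCESS_KEY
      wal:
        compression: gzip
        maxParallel: 4                       # fetch up to 4 WAL files in parallel
```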
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + x-kubernetes-validations: + - message: Major is immutable + rule: self == oldSelf + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
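As an alternative to spelling out `imageName`, the `imageCatalogRef` described above selects a major version from a catalog; a sketch, assuming a hypothetical catalog named `postgresql`:

```yaml
# excerpt from a Cluster .spec (illustrative)
imageCatalogRef:
  apiGroup: postgresql.cnpg.io
  kind: ImageCatalog        # or ClusterImageCatalog
  name: postgresql          # hypothetical catalog name
  major: 16                 # immutable, per the validation rule above
imagePullPolicy: IfNotPresent
```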
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
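A hedged sketch of a declaratively managed role using the attributes above; the role name and password Secret are hypothetical:

```yaml
# excerpt from a Cluster .spec (illustrative)
managed:
  roles:
    - name: app_reader                  # hypothetical role
      ensure: present
      comment: read-only application role
      login: true
      connectionLimit: 10
      inRoles:
        - pg_read_all_data
      passwordSecret:
        name: app-reader-password       # hypothetical Secret
```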
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port. 
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+                                        More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                                      type: string
+                                  type: object
+                              type: object
+                            updateStrategy:
+                              default: patch
+                              description: UpdateStrategy describes how the service
+                                differences should be reconciled
+                              enum:
+                              - patch
+                              - replace
+                              type: string
+                          required:
+                          - selectorType
+                          - serviceTemplate
+                          type: object
+                        type: array
+                      disabledDefaultServices:
+                        description: |-
+                          DisabledDefaultServices is a list of service types that are disabled by default.
+                          Valid values are "r" and "ro", representing the read and read-only services.
+                        items:
+                          description: |-
+                            ServiceSelectorType describes a valid value for generating the service selectors.
+                            It indicates which type of service the selector applies to, such as read-write, read, or read-only
+                          enum:
+                          - rw
+                          - r
+                          - ro
+                          type: string
+                        type: array
+                    type: object
+                type: object
+              maxSyncReplicas:
+                default: 0
+                description: |-
+                  The target value for the synchronous replication quorum, which can be
+                  decreased if the number of ready standbys is lower than this.
+                  Undefined or 0 disables synchronous replication.
+                minimum: 0
+                type: integer
+              minSyncReplicas:
+                default: 0
+                description: |-
+                  Minimum number of instances required in synchronous replication with the
+                  primary. Undefined or 0 allows writes to complete when no standby is
+                  available.
+                minimum: 0
+                type: integer
+              monitoring:
+                description: The configuration of the monitoring infrastructure of
+                  this cluster
+                properties:
+                  customQueriesConfigMap:
+                    description: The list of config maps containing the custom queries
+                    items:
+                      description: |-
+                        ConfigMapKeySelector contains enough information to let you locate
+                        the key of a ConfigMap
+                      properties:
+                        key:
+                          description: The key to select
+                          type: string
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                      - key
+                      - name
+                      type: object
+                    type: array
+                  customQueriesSecret:
+                    description: The list of secrets containing the custom queries
+                    items:
+                      description: |-
+                        SecretKeySelector contains enough information to let you locate
+                        the key of a Secret
+                      properties:
+                        key:
+                          description: The key to select
+                          type: string
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                      - key
+                      - name
+                      type: object
+                    type: array
+                  disableDefaultQueries:
+                    default: false
+                    description: |-
+                      Whether the default queries should be injected.
+                      Set it to `true` if you don't want to inject default queries into the cluster.
+                      Default: false.
+                    type: boolean
+                  enablePodMonitor:
+                    default: false
+                    description: Enable or disable the `PodMonitor`
+                    type: boolean
+                  podMonitorMetricRelabelings:
+                    description: The list of metric relabelings for the `PodMonitor`.
+                      Applied to samples before ingestion.
+                    items:
+                      description: |-
+                        RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+                        scraped samples and remote write samples.
+
+                        More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+                      properties:
+                        action:
+                          default: replace
+                          description: |-
+                            Action to perform based on the regex matching.
+
+                            `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+                            `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
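+                            # Editor's sketch (YAML comment, not part of the generated
+                            # schema): a hypothetical entry for
+                            # `.spec.monitoring.podMonitorMetricRelabelings` that drops a
+                            # family of metrics before ingestion; the regex is an assumption.
+                            #
+                            #   monitoring:
+                            #     enablePodMonitor: true
+                            #     podMonitorMetricRelabelings:
+                            #       - action: drop
+                            #         sourceLabels: [__name__]
+                            #         regex: pg_stat_user_tables_.*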
+                            pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+                            type: string
+                          type: array
+                        targetLabel:
+                          description: |-
+                            Label to which the resulting string is written in a replacement.
+
+                            It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+                            `KeepEqual` and `DropEqual` actions.
+
+                            Regex capture groups are available.
+                          type: string
+                      type: object
+                    type: array
+                  tls:
+                    description: |-
+                      Configure TLS communication for the metrics endpoint.
+                      Changing the tls.enabled option will force a rollout of all instances.
+                    properties:
+                      enabled:
+                        default: false
+                        description: |-
+                          Enable TLS for the monitoring endpoint.
+                          Changing this option will force a rollout of all instances.
+                        type: boolean
+                    type: object
+                type: object
+              nodeMaintenanceWindow:
+                description: Define a maintenance window for the Kubernetes nodes
+                properties:
+                  inProgress:
+                    default: false
+                    description: Is there a node maintenance activity in progress?
+                    type: boolean
+                  reusePVC:
+                    default: true
+                    description: |-
+                      Reuse the existing PVC (wait for the node to come
+                      up again) or not (recreate it elsewhere - when `instances` >1)
+                    type: boolean
+                type: object
+              plugins:
+                description: |-
+                  The plugins configuration, containing
+                  any plugin to be loaded with the corresponding configuration
+                items:
+                  description: |-
+                    PluginConfiguration specifies a plugin that needs to be loaded for this
+                    cluster to be reconciled
+                  properties:
+                    enabled:
+                      default: true
+                      description: Enabled is true if this plugin will be used
+                      type: boolean
+                    isWALArchiver:
+                      default: false
+                      description: |-
+                        Only one plugin can be declared as WALArchiver.
+                        Cannot be active if ".spec.backup.barmanObjectStore" configuration is present.
+                      type: boolean
+                    name:
+                      description: Name is the plugin name
+                      type: string
+                    parameters:
+                      additionalProperties:
+                        type: string
+                      description: Parameters is the configuration of the plugin
+                      type: object
+                  required:
+                  - name
+                  type: object
+                type: array
+              postgresGID:
+                default: 26
+                description: The GID of the `postgres` user inside the image, defaults
+                  to `26`
+                format: int64
+                type: integer
+              postgresUID:
+                default: 26
+                description: The UID of the `postgres` user inside the image, defaults
+                  to `26`
+                format: int64
+                type: integer
+              postgresql:
+                description: Configuration of the PostgreSQL server
+                properties:
+                  enableAlterSystem:
+                    description: |-
+                      If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+                      on this CloudNativePG Cluster.
+                      This should only be used for debugging and troubleshooting.
+                      Defaults to false.
+                    type: boolean
+                  ldap:
+                    description: Options to specify LDAP configuration
+                    properties:
+                      bindAsAuth:
+                        description: Bind as authentication configuration
+                        properties:
+                          prefix:
+                            description: Prefix for the bind authentication option
+                            type: string
+                          suffix:
+                            description: Suffix for the bind authentication option
+                            type: string
+                        type: object
+                      bindSearchAuth:
+                        description: Bind+Search authentication configuration
+                        properties:
+                          baseDN:
+                            description: Root DN to begin the user search
+                            type: string
+                          bindDN:
+                            description: DN of the user to bind to the directory
+                            type: string
+                          bindPassword:
+                            description: Secret with the password for the user to
+                              bind to the directory
+                            properties:
+                              key:
+                                description: The key of the secret to select from. Must
+                                  be a valid secret key.
+                                type: string
+                              name:
+                                default: ""
+                                description: |-
+                                  Name of the referent.
+                                  This field is effectively required, but due to backwards compatibility is
+                                  allowed to be empty. 
Instances of this type with an empty value here are
+                                  almost certainly wrong.
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                type: string
+                              optional:
+                                description: Specify whether the Secret or its key
+                                  must be defined
+                                type: boolean
+                            required:
+                            - key
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          searchAttribute:
+                            description: Attribute to match against the username
+                            type: string
+                          searchFilter:
+                            description: Search filter to use when doing the search+bind
+                              authentication
+                            type: string
+                        type: object
+                      port:
+                        description: LDAP server port
+                        type: integer
+                      scheme:
+                        description: LDAP scheme to be used, possible options are
+                          `ldap` and `ldaps`
+                        enum:
+                        - ldap
+                        - ldaps
+                        type: string
+                      server:
+                        description: LDAP hostname or IP address
+                        type: string
+                      tls:
+                        description: Set to 'true' to enable LDAP over TLS. 'false'
+                          is the default
+                        type: boolean
+                    type: object
+                  parameters:
+                    additionalProperties:
+                      type: string
+                    description: PostgreSQL configuration options (postgresql.conf)
+                    type: object
+                  pg_hba:
+                    description: |-
+                      PostgreSQL Host Based Authentication rules (lines to be appended
+                      to the pg_hba.conf file)
+                    items:
+                      type: string
+                    type: array
+                  pg_ident:
+                    description: |-
+                      PostgreSQL User Name Maps rules (lines to be appended
+                      to the pg_ident.conf file)
+                    items:
+                      type: string
+                    type: array
+                  promotionTimeout:
+                    description: |-
+                      Specifies the maximum number of seconds to wait when promoting an instance to primary.
+                      Default value is 40000000, greater than one year in seconds,
+                      big enough to simulate an infinite timeout
+                    format: int32
+                    type: integer
+                  shared_preload_libraries:
+                    description: List of shared preload libraries to add to the default
+                      ones
+                    items:
+                      type: string
+                    type: array
+                  syncReplicaElectionConstraint:
+                    description: |-
+                      Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+                      set up.
+                    properties:
+                      enabled:
+                        description: This flag enables the constraints for sync replicas
+                        type: boolean
+                      nodeLabelsAntiAffinity:
+                        description: A list of node label values to extract and compare
+                          to evaluate if the pods reside in the same topology or not
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - enabled
+                    type: object
+                  synchronous:
+                    description: Configuration of the PostgreSQL synchronous replication
+                      feature
+                    properties:
+                      dataDurability:
+                        default: required
+                        description: |-
+                          If set to "required", data durability is strictly enforced. Write operations
+                          with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+                          block if there are insufficient healthy replicas, ensuring data persistence.
+                          If set to "preferred", data durability is maintained when healthy replicas
+                          are available, but the required number of instances will adjust dynamically
+                          if replicas become unavailable. This setting relaxes strict durability enforcement
+                          to allow for operational continuity. This setting is only applicable if both
+                          `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+                        enum:
+                        - required
+                        - preferred
+                        type: string
+                      maxStandbyNamesFromCluster:
+                        description: |-
+                          Specifies the maximum number of local cluster pods that can be
+                          automatically included in the `synchronous_standby_names` option in
+                          PostgreSQL.
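+                        # Editor's sketch (YAML comment, not part of the generated
+                        # schema): hypothetical quorum-based settings for
+                        # `.spec.postgresql.synchronous`, using only fields defined in
+                        # this schema; the values are assumptions.
+                        #
+                        #   postgresql:
+                        #     synchronous:
+                        #       method: any
+                        #       number: 1
+                        #       dataDurability: required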
+                        type: integer
+                      method:
+                        description: |-
+                          Method to select synchronous replication standbys from the listed
+                          servers, accepting 'any' (quorum-based synchronous replication) or
+                          'first' (priority-based synchronous replication) as values.
+                        enum:
+                        - any
+                        - first
+                        type: string
+                      number:
+                        description: |-
+                          Specifies the number of synchronous standby servers that
+                          transactions must wait for responses from.
+                        type: integer
+                        x-kubernetes-validations:
+                        - message: The number of synchronous replicas should be greater
+                            than zero
+                          rule: self > 0
+                      standbyNamesPost:
+                        description: |-
+                          A user-defined list of application names to be added to
+                          `synchronous_standby_names` after local cluster pods (the order is
+                          only useful for priority-based synchronous replication).
+                        items:
+                          type: string
+                        type: array
+                      standbyNamesPre:
+                        description: |-
+                          A user-defined list of application names to be added to
+                          `synchronous_standby_names` before local cluster pods (the order is
+                          only useful for priority-based synchronous replication).
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - method
+                    - number
+                    type: object
+                    x-kubernetes-validations:
+                    - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre'
+                        and empty 'standbyNamesPost'
+                      rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre)
+                        || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost)
+                        || self.standbyNamesPost.size()==0))
+                type: object
+              primaryUpdateMethod:
+                default: restart
+                description: |-
+                  Method to follow to upgrade the primary server during a rolling
+                  update procedure, after all replicas have been successfully updated:
+                  it can be with a switchover (`switchover`) or in-place (`restart` - default)
+                enum:
+                - switchover
+                - restart
+                type: string
+              primaryUpdateStrategy:
+                default: unsupervised
+                description: |-
+                  Deployment strategy to follow to upgrade the primary server during a rolling
+                  update procedure, after all replicas have been successfully updated:
+                  it can be automated (`unsupervised` - default) or manual (`supervised`)
+                enum:
+                - unsupervised
+                - supervised
+                type: string
+              priorityClassName:
+                description: |-
+                  Name of the priority class which will be used in every generated Pod. If the PriorityClass
+                  specified does not exist, the pod will not be able to schedule. Please refer to
+                  https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+                  for more information
+                type: string
+              probes:
+                description: |-
+                  The configuration of the probes to be injected
+                  in the PostgreSQL Pods.
+                properties:
+                  liveness:
+                    description: The liveness probe configuration
+                    properties:
+                      failureThreshold:
+                        description: |-
+                          Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                          Defaults to 3. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      initialDelaySeconds:
+                        description: |-
+                          Number of seconds after the container has started before liveness probes are initiated.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                      periodSeconds:
+                        description: |-
+                          How often (in seconds) to perform the probe.
+                          Defaults to 10 seconds. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      successThreshold:
+                        description: |-
+                          Minimum consecutive successes for the probe to be considered successful after having failed.
+                          Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      terminationGracePeriodSeconds:
+                        description: |-
+                          Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                          The grace period is the duration in seconds after the processes running in the pod are sent
+                          a termination signal and the time when the processes are forcibly halted with a kill signal.
+                          Set this value longer than the expected cleanup time for your process.
+                          If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                          value overrides the value provided by the pod spec.
+                          Value must be non-negative integer. The value zero indicates stop immediately via
+                          the kill signal (no opportunity to shut down).
+                          This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                          Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                        format: int64
+                        type: integer
+                      timeoutSeconds:
+                        description: |-
+                          Number of seconds after which the probe times out.
+                          Defaults to 1 second. Minimum value is 1.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                    type: object
+                  readiness:
+                    description: The readiness probe configuration
+                    properties:
+                      failureThreshold:
+                        description: |-
+                          Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                          Defaults to 3. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      initialDelaySeconds:
+                        description: |-
+                          Number of seconds after the container has started before liveness probes are initiated.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                      periodSeconds:
+                        description: |-
+                          How often (in seconds) to perform the probe.
+                          Defaults to 10 seconds. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      successThreshold:
+                        description: |-
+                          Minimum consecutive successes for the probe to be considered successful after having failed.
+                          Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      terminationGracePeriodSeconds:
+                        description: |-
+                          Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                          The grace period is the duration in seconds after the processes running in the pod are sent
+                          a termination signal and the time when the processes are forcibly halted with a kill signal.
+                          Set this value longer than the expected cleanup time for your process.
+                          If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                          value overrides the value provided by the pod spec.
+                          Value must be non-negative integer. The value zero indicates stop immediately via
+                          the kill signal (no opportunity to shut down).
+                          This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                          Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                        format: int64
+                        type: integer
+                      timeoutSeconds:
+                        description: |-
+                          Number of seconds after which the probe times out.
+                          Defaults to 1 second. Minimum value is 1.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                    type: object
+                  startup:
+                    description: The startup probe configuration
+                    properties:
+                      failureThreshold:
+                        description: |-
+                          Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                          Defaults to 3. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      initialDelaySeconds:
+                        description: |-
+                          Number of seconds after the container has started before liveness probes are initiated.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                      periodSeconds:
+                        description: |-
+                          How often (in seconds) to perform the probe.
+                          Defaults to 10 seconds. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      successThreshold:
+                        description: |-
+                          Minimum consecutive successes for the probe to be considered successful after having failed.
+                          Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                        format: int32
+                        type: integer
+                      terminationGracePeriodSeconds:
+                        description: |-
+                          Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                          The grace period is the duration in seconds after the processes running in the pod are sent
+                          a termination signal and the time when the processes are forcibly halted with a kill signal.
+                          Set this value longer than the expected cleanup time for your process.
+                          If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                          value overrides the value provided by the pod spec.
+                          Value must be non-negative integer. The value zero indicates stop immediately via
+                          the kill signal (no opportunity to shut down).
+                          This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                          Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                        format: int64
+                        type: integer
+                      timeoutSeconds:
+                        description: |-
+                          Number of seconds after which the probe times out.
+                          Defaults to 1 second. Minimum value is 1.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                    type: object
+                type: object
+              projectedVolumeTemplate:
+                description: |-
+                  Template to be used to define projected volumes; projected volumes will be mounted
+                  under the `/projected` base folder
+                properties:
+                  defaultMode:
+                    description: |-
+                      defaultMode are the mode bits used to set permissions on created files by default.
+                      Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                      YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                      Directories within the path are not affected by this setting.
+                      This might be in conflict with other options that affect the file
+                      mode, like fsGroup, and the result can be other mode bits set.
+                    format: int32
+                    type: integer
+                  sources:
+                    description: |-
+                      sources is the list of volume projections. Each entry in this list
+                      handles one source.
+                    items:
+                      description: |-
+                        Projection that may be projected along with other supported volume types.
+                        Exactly one of these fields must be set.
+                      properties:
+                        clusterTrustBundle:
+                          description: |-
+                            ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+                            of ClusterTrustBundle objects in an auto-updating file.
+
+                            Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+                            ClusterTrustBundle objects can either be selected by name, or by the
+                            combination of signer name and a label selector.
+
+                            Kubelet performs aggressive normalization of the PEM contents written
+                            into the pod filesystem. Esoteric PEM features such as inter-block
+                            comments and block headers are stripped. Certificates are deduplicated.
+                            The ordering of certificates within the file is arbitrary, and Kubelet
+                            may change the order over time.
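+                          # Editor's sketch (YAML comment, not part of the generated
+                          # schema): a hypothetical `.spec.projectedVolumeTemplate` that
+                          # projects a ConfigMap under the `/projected` base folder; the
+                          # ConfigMap name and key are assumptions.
+                          #
+                          #   projectedVolumeTemplate:
+                          #     sources:
+                          #       - configMap:
+                          #           name: app-settings
+                          #           items:
+                          #             - key: app.properties
+                          #               path: app.properties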
+ properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
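+                                  # Editor's sketch (YAML comment, not part of the
+                                  # generated schema): a hypothetical downwardAPI
+                                  # projection exposing the container's memory request
+                                  # as a file; the container name and path are
+                                  # assumptions.
+                                  #
+                                  #   - downwardAPI:
+                                  #       items:
+                                  #         - path: memory_request
+                                  #           resourceFieldRef:
+                                  #             containerName: postgres
+                                  #             resource: requests.memory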
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. 
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+                          If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                          set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                          exists.
+                          More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                          (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+                        type: string
+                      volumeMode:
+                        description: |-
+                          volumeMode defines what type of volume is required by the claim.
+                          Value of Filesystem is implied when not included in claim spec.
+                        type: string
+                      volumeName:
+                        description: volumeName is the binding reference to the PersistentVolume
+                          backing this claim.
+                        type: string
+                    type: object
+                  resizeInUseVolumes:
+                    default: true
+                    description: Resize existing PVCs, defaults to true
+                    type: boolean
+                  size:
+                    description: |-
+                      Size of the storage. Required if not already specified in the PVC template.
+                      Changes to this field are automatically reapplied to the created PVCs.
+                      Size cannot be decreased.
+                    type: string
+                  storageClass:
+                    description: |-
+                      StorageClass to use for PVCs. Applied after
+                      evaluating the PVC template, if available.
+                      If not specified, the generated PVCs will use the
+                      default storage class
+                    type: string
+                type: object
+              superuserSecret:
+                description: |-
+                  The secret containing the superuser password. If not defined, a new
+                  secret will be created with a randomly generated password
+                properties:
+                  name:
+                    description: Name of the referent.
+                    type: string
+                required:
+                - name
+                type: object
+              switchoverDelay:
+                default: 3600
+                description: |-
+                  The time in seconds that is allowed for a primary PostgreSQL instance
+                  to gracefully shutdown during a switchover.
+                  Default value is 3600 seconds (1 hour).
+                format: int32
+                type: integer
+              tablespaces:
+                description: The tablespaces configuration
+                items:
+                  description: |-
+                    TablespaceConfiguration is the configuration of a tablespace, and includes
+                    the storage specification for the tablespace
+                  properties:
+                    name:
+                      description: The name of the tablespace
+                      type: string
+                    owner:
+                      description: Owner is the PostgreSQL user owning the tablespace
+                      properties:
+                        name:
+                          type: string
+                      type: object
+                    storage:
+                      description: The storage configuration for the tablespace
+                      properties:
+                        pvcTemplate:
+                          description: Template to be used to generate the Persistent
+                            Volume Claim
+                          properties:
+                            accessModes:
+                              description: |-
+                                accessModes contains the desired access modes the volume should have.
+                                More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            dataSource:
+                              description: |-
+                                dataSource field can be used to specify either:
+                                * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                                * An existing PVC (PersistentVolumeClaim)
+                                If the provisioner or an external controller can support the specified data source,
+                                it will create a new volume based on the contents of the specified data source.
+                                When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+                                and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+                                If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+                              properties:
+                                apiGroup:
+                                  description: |-
+                                    APIGroup is the group for the resource being referenced.
+                                    If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
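+ # A minimal sketch of how such a constraint could look in a Cluster spec (illustrative
+ # values; `cnpg.io/cluster` is assumed to be the instance label applied by the operator):
+ #   topologySpreadConstraints:
+ #   - maxSkew: 1
+ #     topologyKey: topology.kubernetes.io/zone
+ #     whenUnsatisfiable: DoNotSchedule
+ #     labelSelector:
+ #       matchLabels:
+ #         cnpg.io/cluster: cluster-example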
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + azurePVCUpdateEnabled: + description: AzurePVCUpdateEnabled shows if the PVC online upgrade + is enabled for this cluster + type: boolean + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. 
+ properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
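+ # A minimal sketch of a user-provided client CA (hypothetical names; angle brackets are
+ # placeholders), referenced from `.spec.certificates.clientCASecret`:
+ #   apiVersion: v1
+ #   kind: Secret
+ #   metadata:
+ #     name: cluster-example-client-ca
+ #   data:
+ #     ca.crt: <base64-encoded CA certificate>
+ #     ca.key: <base64-encoded CA private key>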
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
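+ # A minimal sketch (hypothetical secret names) of the matching spec-side configuration
+ # for a user-provided server CA and certificate:
+ #   certificates:
+ #     serverCASecret: cluster-example-server-ca
+ #     serverTLSSecret: cluster-example-server-tls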
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler 
+ items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. 
+ type: boolean + type: object + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance, this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extract. It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. 
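+ # A minimal Database sketch (hypothetical names; assumes a Cluster named
+ # `cluster-example` and a PostgreSQL role `app` already exist):
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Database
+ #   metadata:
+ #     name: app-database
+ #   spec:
+ #     name: app
+ #     owner: app
+ #     cluster:
+ #       name: cluster-example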
+ type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
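+ # A minimal ImageCatalog sketch (image references are illustrative, not pinned digests):
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql
+ #   spec:
+ #     images:
+ #     - major: 16
+ #       image: ghcr.io/cloudnative-pg/postgresql:16
+ #     - major: 17
+ #       image: ghcr.io/cloudnative-pg/postgresql:17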
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
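+ # A hedged sketch of entries for this field, following the RelabelConfig
+ # schema above (label names and the regex are illustrative, not defaults):
+ #
+ #   podMonitorRelabelings:
+ #     - action: replace
+ #       sourceLabels: ["__meta_kubernetes_pod_node_name"]
+ #       targetLabel: node
+ #     - action: labeldrop
+ #       regex: "pod_template_hash"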
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
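+ # A minimal sketch of the pgbouncer section in use (the parameter names
+ # are standard PgBouncer settings; the values are illustrative, not
+ # operator defaults):
+ #
+ #   pgbouncer:
+ #     poolMode: transaction
+ #     parameters:
+ #       max_client_conn: "1000"
+ #       default_pool_size: "10"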
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. 
If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local. 
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g. 
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. 
A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred. 
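+ # A hedged sketch of one weighted term under this field (the weight,
+ # labels, and topologyKey are illustrative):
+ #
+ #   preferredDuringSchedulingIgnoredDuringExecution:
+ #     - weight: 50
+ #       podAffinityTerm:
+ #         topologyKey: kubernetes.io/hostname
+ #         labelSelector:
+ #           matchLabels:
+ #             app: pgbouncer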
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
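+ # A hedged sketch of one required term under this field (the key,
+ # values, and topologyKey are illustrative):
+ #
+ #   requiredDuringSchedulingIgnoredDuringExecution:
+ #     - topologyKey: topology.kubernetes.io/zone
+ #       labelSelector:
+ #         matchExpressions:
+ #           - key: app
+ #             operator: In
+ #             values: ["pgbouncer"]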
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". 
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+ mitigating container breakout vulnerabilities while still allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max
+ of that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
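+          # Illustrative preStop sketch (hypothetical values). The grace
+          # period countdown starts before the hook runs, so any sleep here
+          # must stay well under terminationGracePeriodSeconds.
+          #   lifecycle:
+          #     preStop:
+          #       sleep:
+          #         seconds: 5    # give endpoints time to drain first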
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
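+          # Illustrative livenessProbe wiring the fields above together;
+          # path and port are hypothetical.
+          #   livenessProbe:
+          #     httpGet:
+          #       path: /healthz
+          #       port: 8080
+          #     initialDelaySeconds: 10
+          #     periodSeconds: 10     # default
+          #     failureThreshold: 3   # default: ~30s of failures => restart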
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
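+          # Illustrative readiness sketch (hypothetical port). Unlike the
+          # liveness probe, a failing readiness probe only removes the pod
+          # from Service endpoints; it does not restart the container.
+          #   readinessProbe:
+          #     tcpSocket:
+          #       port: 5432
+          #     periodSeconds: 10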
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
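+          # Illustrative volumeMounts sketch; the mount name is hypothetical
+          # and must match a Volume declared at the pod level, e.g.:
+          #   volumeMounts:
+          #     - name: scratch
+          #       mountPath: /var/lib/app/tmp
+          #   volumes:                 # pod-level counterpart
+          #     - name: scratch
+          #       emptyDir: {}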
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. 
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
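+        # Illustrative restrictive pod securityContext using the fields
+        # above; the UID/GID values are hypothetical.
+        #   securityContext:
+        #     runAsNonRoot: true
+        #     runAsUser: 10001
+        #     fsGroup: 10001            # volumes group-owned by this GID
+        #     seccompProfile:
+        #       type: RuntimeDefault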
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
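+        # Illustrative sysctls entry (hypothetical tuning); only namespaced
+        # sysctls are accepted, and unsupported ones can keep the pod from
+        # launching.
+        #   securityContext:
+        #     sysctls:
+        #       - name: net.ipv4.ip_local_port_range
+        #         value: "32768 60999"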
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects.
Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal.
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
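+                          # A small, hypothetical sketch combining the projected
+                          # volume sources described above (configMap and
+                          # downwardAPI); names and paths are placeholders.
+                          #
+                          #   volumes:
+                          #   - name: combined-config
+                          #     projected:
+                          #       defaultMode: 0444           # read-only files for every source
+                          #       sources:
+                          #       - configMap:
+                          #           name: app-settings      # assumed ConfigMap
+                          #           items:
+                          #           - key: settings.ini
+                          #             path: app/settings.ini
+                          #       - downwardAPI:
+                          #           items:
+                          #           - path: labels
+                          #             fieldRef:
+                          #               fieldPath: metadata.labels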
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
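+                          # A brief, hypothetical sketch of the secret volume source
+                          # described above; the secret name and key are placeholders.
+                          #
+                          #   volumes:
+                          #   - name: tls-material
+                          #     secret:
+                          #       secretName: app-tls       # secret in the pod's namespace
+                          #       defaultMode: 0400         # octal in YAML, 256 in JSON
+                          #       optional: false           # fail pod setup if the secret is missing
+                          #       items:
+                          #       - key: tls.crt
+                          #         path: cert/tls.crt      # relative path; '..' is not allowed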
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
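+          # A minimal, hypothetical Pooler tying together the spec fields defined
+          # above; the cluster name is a placeholder and the pgbouncer stanza is
+          # left empty (its settings are documented earlier in this CRD).
+          #
+          #   apiVersion: postgresql.cnpg.io/v1
+          #   kind: Pooler
+          #   metadata:
+          #     name: pooler-example-rw
+          #   spec:
+          #     cluster:
+          #       name: cluster-example       # required: the target Cluster
+          #     type: rw                      # rw (default), ro, or r
+          #     instances: 3                  # adjustable via the scale subresource
+          #     pgbouncer: {}                 # required stanza; settings omitted here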
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
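+          # A hypothetical Publication exercising the target schema above; the
+          # cluster, database, schema, and table names are placeholders.
+          #
+          #   apiVersion: postgresql.cnpg.io/v1
+          #   kind: Publication
+          #   metadata:
+          #     name: pub-app
+          #   spec:
+          #     cluster:
+          #       name: cluster-example       # the "publisher" cluster
+          #     dbname: app                   # immutable once set
+          #     name: pub_app                 # name inside PostgreSQL; immutable
+          #     publicationReclaimPolicy: delete
+          #     target:
+          #       objects:                    # mutually exclusive with allTables
+          #       - table:
+          #           schema: public
+          #           name: orders
+          #           only: true              # do not include descendant tables
+          #       - tablesInSchema: audit     # every table in this schema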
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+                  - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+                  - self: sets the ScheduledBackup object as owner of the backup
+                  - cluster: sets the cluster as owner of the backup
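+        # Since ownerReferences drive Kubernetes garbage collection, the value
+        # chosen above decides what cascade-deletes the created Backup objects;
+        # a hypothetical fragment:
+        #
+        #   spec:
+        #     backupOwnerReference: self   # Backups go away with this ScheduledBackup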
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to be immediately start after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup. 
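+          # A hypothetical ScheduledBackup using the spec fields above; note the
+          # six-field cron expression with its leading seconds specifier, unlike
+          # Kubernetes CronJobs.
+          #
+          #   apiVersion: postgresql.cnpg.io/v1
+          #   kind: ScheduledBackup
+          #   metadata:
+          #     name: nightly-backup
+          #   spec:
+          #     schedule: "0 0 2 * * *"       # every day at 02:00:00
+          #     cluster:
+          #       name: cluster-example       # required: the cluster to back up
+          #     method: barmanObjectStore     # the default method
+          #     immediate: true               # run one backup right after creation
+          #     target: prefer-standby        # prefer the most updated standby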
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule + format: date-time + type: string + lastScheduleTime: + description: Information when was the last time that backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
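+  # A hypothetical Subscription based on the CRD defined above; the cluster,
+  # external cluster, and publication names are placeholders.
+  #
+  #   apiVersion: postgresql.cnpg.io/v1
+  #   kind: Subscription
+  #   metadata:
+  #     name: sub-app
+  #   spec:
+  #     cluster:
+  #       name: cluster-replica             # the "subscriber" cluster
+  #     dbname: app                         # immutable once set
+  #     name: sub_app                       # subscription name inside PostgreSQL
+  #     externalClusterName: origin         # externalClusters entry of the publisher
+  #     publicationName: pub_app            # publication on the publisher side
+  #     subscriptionReclaimPolicy: retain   # keep the subscription on deletion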
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
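+# A hypothetical RoleBinding showing how the helper roles in this manifest can
+# be granted; the user name and namespace are invented.
+#
+#   apiVersion: rbac.authorization.k8s.io/v1
+#   kind: RoleBinding
+#   metadata:
+#     name: subscription-editor-jane
+#     namespace: default
+#   roleRef:
+#     apiGroup: rbac.authorization.k8s.io
+#     kind: ClusterRole
+#     name: cnpg-subscription-editor-role
+#   subjects:
+#   - apiGroup: rbac.authorization.k8s.io
+#     kind: User
+#     name: jane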
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
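+      # User-defined metrics follow this same layout: a query plus a metrics
+      # list mapping output columns to LABEL/GAUGE/COUNTER usage. A hypothetical
+      # custom ConfigMap is sketched below; wiring it to a Cluster through its
+      # monitoring stanza is assumed from the operator documentation, not from
+      # this manifest.
+      #
+      #   apiVersion: v1
+      #   kind: ConfigMap
+      #   metadata:
+      #     name: custom-monitoring
+      #     labels:
+      #       cnpg.io/reload: ""          # same reload label as the default ConfigMap
+      #   data:
+      #     custom-queries: |
+      #       table_sizes:
+      #         query: |
+      #           SELECT relname, pg_catalog.pg_total_relation_size(oid) AS bytes
+      #           FROM pg_catalog.pg_class WHERE relkind = 'r'
+      #         metrics:
+      #           - relname:
+      #               usage: "LABEL"
+      #               description: "Table name"
+      #           - bytes:
+      #               usage: "GAUGE"
+      #               description: "Total size of the table in bytes"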
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
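+      # As the two pg_stat_bgwriter variants above show, the optional
+      # runonserver field gates a query by PostgreSQL version, while name lets
+      # differently-keyed entries emit under a single metric family; a
+      # hypothetical sketch:
+      #
+      #   my_stats_v17:
+      #     runonserver: ">=17.0.0"       # only run on PostgreSQL 17 and later
+      #     name: my_stats                # metrics are exposed as my_stats_*
+      #     query: |
+      #       SELECT 1 AS example_value
+      #     metrics:
+      #       - example_value:
+      #           usage: "GAUGE"
+      #           description: "Constant example metric"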
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: 
ghcr.io/cloudnative-pg/cloudnative-pg:1.25.1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.1 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + 
- v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From abc76e1a37f846301f37531ca9195b023f2e5d2b Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 4 Mar 2025 19:38:50 +0100 Subject: [PATCH 425/836] chore(setup-cluster.sh): remove `k3d` support (#7056) Remove k3s support from setup-cluster.sh to simplify the script. The -e option will also be rendered ineffective as a result of this change. Closes #7049 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- .devcontainer/devcontainer.json | 3 +- .github/renovate.json5 | 14 -- Makefile | 3 - contribute/e2e_testing_environment/README.md | 74 +++------- hack/e2e/run-e2e-k3d.sh | 99 ------------- hack/setup-cluster.sh | 141 +++---------------- tests/utils/cloudvendors/cloud_vendor.go | 2 +- 7 files changed, 43 insertions(+), 293 deletions(-) delete mode 100755 hack/e2e/run-e2e-k3d.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 6728ff8914..b90b872945 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,7 +2,6 @@ "image": "mcr.microsoft.com/devcontainers/go:1-bookworm", "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {}, - "ghcr.io/rio/features/k3d:1": {}, "ghcr.io/mpriscella/features/kind:1": {}, "ghcr.io/rjfmachado/devcontainer-features/cloud-native:1": { "kubectl": "latest", @@ -17,7 +16,7 @@ "ghcr.io/dhoeric/features/stern:1": {} }, - // Needed by kind and k3s to enable kube-proxy's ipvs mode + // Needed by kind to enable kube-proxy's ipvs mode "mounts":["type=bind,source=/lib/modules,target=/lib/modules"], // Enable kubectl short alias with completion diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 4906f5adaf..e707b55b1d 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -178,20 +178,6 @@ versioningTemplate: 'loose', depNameTemplate: 'kindest/node', }, - { - customType: 'regex', - fileMatch: [ - '^hack/setup-cluster.sh$', - '^hack/e2e/run-e2e-k3d.sh$', - ], - matchStrings: [ - 'K3D_NODE_DEFAULT_VERSION=(?.*?)\\n', - ], - versioningTemplate: 'regex:^v(?\\d+)(\\.(?\\d+))?(\\.(?\\d+))(\\+k3s?(?\\d+))?$', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', - datasourceTemplate: 'github-releases', - depNameTemplate: 'k3s-io/k3s', - }, { customType: 'regex', fileMatch: [ diff --git a/Makefile b/Makefile index be41950037..ae8d59115a 100644 --- a/Makefile +++ b/Makefile @@ -113,9 +113,6 @@ test-race: generate fmt vet manifests envtest ## Run tests enabling race detecti e2e-test-kind: ## Run e2e tests locally using kind. hack/e2e/run-e2e-kind.sh -e2e-test-k3d: ## Run e2e tests locally using k3d. - hack/e2e/run-e2e-k3d.sh - e2e-test-local: ## Run e2e tests locally using the default kubernetes context. 
hack/e2e/run-e2e-local.sh diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md index dd956ab464..2eb7936908 100644 --- a/contribute/e2e_testing_environment/README.md +++ b/contribute/e2e_testing_environment/README.md @@ -12,8 +12,8 @@ PostgreSQL** on **all supported versions of Kubernetes**. This framework is made up by two important components: -- a local and disposable Kubernetes cluster built with `kind` (default) or - `k3d` on which to run the E2E tests +- a local and disposable Kubernetes cluster built with `kind` on which to run + the E2E tests - a set of E2E tests to be run on an existing Kubernetes cluster (including the one above) @@ -57,7 +57,6 @@ All flags have corresponding environment variables labeled `(Env:...` in the tab | Flags | Usage | |-------|-------------------------------------------------------------------------------------------------------------------------------| | -r |--registry | Enable local registry. (Env: `ENABLE_REGISTRY`) | -| -e |--engine | Use the provided ENGINE to run the cluster. Available options are 'kind' and 'k3d'. Default 'kind'. (Env: `CLUSTER_ENGINE`) | | -k |--k8s-version | Use the specified Kubernetes full version number (e.g., `-k v1.30.0`). (Env: `K8S_VERSION`) | | -n |--nodes | Create a cluster with the required number of nodes. Used only during "create" command. Default: 3 (Env: `NODES`) | @@ -66,12 +65,12 @@ All flags have corresponding environment variables labeled `(Env:...` in the tab > sure that they are consistent through all invocations either via command line > options or by defining the respective environment variables -> **NOTE:** on ARM64 architecture like Apple M1/M2/M3, `kind` and `k3d` provide different -> images for AMD64 and ARM64 nodes. If the **x86/amd64 emulation** is not enabled, +> **NOTE:** on ARM64 architecture like Apple M1/M2/M3, `kind` provides different +> images for AMD64 and ARM64 nodes. If the **x86/amd64 emulation** is not enabled, > the `./hack/setup-cluster.sh` script will correctly detect the architecture > and pass the `DOCKER_DEFAULT_PLATFORM=linux/arm64` environment variable to Docker > to use the ARM64 node image. -> If you want to explicitly use the **x86/amd64 emulation**, you need to set +> If you want to explicitly use the **x86/amd64 emulation**, you need to set > the `DOCKER_DEFAULT_PLATFORM=linux/amd64` environment variable before > calling the `./hack/setup-cluster.sh` script. @@ -91,7 +90,7 @@ will create a deployment, and add two services on ports 6060 and 4040 respectively, in the same namespace as the operator: ``` console -kubectl get svc -n cnpg-system +kubectl get svc -n cnpg-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE cnpg-pprof ClusterIP 10.96.17.58 6060/TCP 9m41s @@ -158,23 +157,7 @@ specifying the following variable * `DOCKER_REGISTRY_MIRROR`: DockerHub mirror URL (i.e. https://mirror.gcr.io) -To run E2E testing you can also use: - -| kind | k3d | -|------------------------------------------------|-------------------------------------------------| -| `TEST_UPGRADE_TO_V1=false make e2e-test-kind` | `TEST_UPGRADE_TO_V1=false make e2e-test-k3d` | - - -### Wrapper scripts for E2E testing - -There are currently two available scripts that wrap cluster setup and -execution of tests. One is for `kind` and one is for `k3d`. They simply embed -`hack/setup-cluster.sh` and `hack/e2e/run-e2e.sh` to create a local Kubernetes -cluster and then run E2E tests on it. 
- -There is also a script to run E2E tests on an existing `local` Kubernetes cluster. -It tries to detect the appropriate defaults for storage class and volume snapshot class environment variables -by looking at the annotation of the default storage class and the volume snapshot class. +To run E2E testing you can also use `TEST_UPGRADE_TO_V1=false make e2e-test-kind`. ### Using feature type test selection/filter @@ -216,6 +199,19 @@ export FEATURE_TYPE=smoke,basic,service-connectivity This will run smoke, basic and service connectivity e2e. One or many can be passed as value with comma separation without spaces. +### Wrapper scripts for E2E testing + +There is a script available that wraps cluster setup and execution of +tests for `kind`. It embeds `hack/setup-cluster.sh` and +`hack/e2e/run-e2e.sh` to create a local Kubernetes cluster and then +run E2E tests on it. + +There is also a script to run E2E tests on an existing `local` +Kubernetes cluster. It tries to detect the appropriate defaults for +storage class and volume snapshot class environment variables by +looking at the annotation of the default storage class and the volume +snapshot class. + #### On kind You can test the operator locally on `kind` with: @@ -232,29 +228,6 @@ We have also provided a shortcut to this script in the main `Makefile`: make e2e-test-kind ``` -#### On k3d - -You can test the operator locally on `k3d` with: - -``` bash -run-e2e-k3d.sh -``` - -> **NOTE:** error messages, like the example below, that will be shown during -> cluster creation are **NOT** an issue: - -``` -Error response from daemon: manifest for rancher/k3s:v1.20.0-k3s5 not found: manifest unknown: manifest unknown -``` - -The script will take care of creating a K3d cluster and then run the tests on it. - -We have also provided a shortcut to this script in the main `Makefile`: - -```shell -make e2e-test-k3d -``` - #### On existing local cluster You can test the operator locally on `local` with: @@ -298,11 +271,10 @@ the following ones can be defined: * `LOG_DIR`: the directory where the container logs are exported. Default: `_logs/` directory in the project root -`run-e2e-kind.sh` forces `E2E_DEFAULT_STORAGE_CLASS=standard` while -`run-e2e-k3d.sh` forces `E2E_DEFAULT_STORAGE_CLASS=local-path` +`run-e2e-kind.sh` forces `E2E_DEFAULT_STORAGE_CLASS=standard`. -Both scripts use the `setup-cluster.sh` script, in order to initialize the cluster -choosing between `kind` or K3d engine. +The script uses the `setup-cluster.sh` script to initialize the cluster using +the `kind` engine. ### Running E2E tests on a fork of the repository diff --git a/hack/e2e/run-e2e-k3d.sh b/hack/e2e/run-e2e-k3d.sh deleted file mode 100755 index f01b50ecf8..0000000000 --- a/hack/e2e/run-e2e-k3d.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env bash - -## -## Copyright The CloudNativePG Contributors -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. 
-## -# shellcheck disable=SC2317 -# standard bash error handling -set -eEuo pipefail - -if [ "${DEBUG-}" = true ]; then - set -x -fi - -ROOT_DIR=$(realpath "$(dirname "$0")/../../") -HACK_DIR="${ROOT_DIR}/hack" -E2E_DIR="${HACK_DIR}/e2e" - -export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} -export BUILD_IMAGE=${BUILD_IMAGE:-false} -K3D_NODE_DEFAULT_VERSION=v1.30.3 -export K8S_VERSION=${K8S_VERSION:-$K3D_NODE_DEFAULT_VERSION} -export CLUSTER_ENGINE=k3d -export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} -export LOG_DIR=${LOG_DIR:-$ROOT_DIR/_logs/} - -export POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")} -export E2E_PRE_ROLLING_UPDATE_IMG=${E2E_PRE_ROLLING_UPDATE_IMG:-${POSTGRES_IMG%.*}} -export E2E_DEFAULT_STORAGE_CLASS=${E2E_DEFAULT_STORAGE_CLASS:-local-path} -export E2E_CSI_STORAGE_CLASS=${E2E_CSI_STORAGE_CLASS:-csi-hostpath-sc} -export E2E_DEFAULT_VOLUMESNAPSHOT_CLASS=${E2E_DEFAULT_VOLUMESNAPSHOT_CLASS:-csi-hostpath-snapclass} - -export DOCKER_REGISTRY_MIRROR=${DOCKER_REGISTRY_MIRROR:-} -export TEST_CLOUD_VENDOR="local" - -cleanup() { - if [ "${PRESERVE_CLUSTER}" = false ]; then - "${HACK_DIR}/setup-cluster.sh" destroy || true - else - set +x - echo "You've chosen to preserve the Kubernetes cluster." - echo "You can delete it manually later running:" - echo "'${HACK_DIR}/setup-cluster.sh' destroy" - fi -} - -main() { - # Call to setup-cluster.sh script - "${HACK_DIR}/setup-cluster.sh" -r create - - trap cleanup EXIT - - # In case image building is forced it will use a default - # controller image name: cloudnative-pg:e2e. - # Otherwise it will download the image from docker - # registry using below credentials. - if [ "${BUILD_IMAGE}" == false ]; then - # Prevent e2e tests to proceed with empty tag which - # will be considered as "latest". - # This will fail in case heuristic IMAGE_TAG will - # be empty, and will continue if CONTROLLER_IMG - # is manually specified during execution, i.e.: - # - # BUILD_IMAGE=false CONTROLLER_IMG=cloudnative-pg:e2e ./hack/e2e/run-e2e-k3d.sh - # - if [ -z "${CONTROLLER_IMG:-}" ]; then - IMAGE_TAG="$( (git symbolic-ref -q --short HEAD || git describe --tags --exact-match) | tr / -)" - export CONTROLLER_IMG="ghcr.io/cloudnative-pg/cloudnative-pg-testing:${IMAGE_TAG}" - fi - else - unset CONTROLLER_IMG - "${HACK_DIR}/setup-cluster.sh" load - fi - - "${HACK_DIR}/setup-cluster.sh" load-helper-images - - RC=0 - - # Run E2E tests - "${E2E_DIR}/run-e2e.sh" || RC=$? 
- - ## Export logs - "${HACK_DIR}/setup-cluster.sh" export-logs - - exit $RC -} - -main diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index e6cca6d229..d663e26f88 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -25,7 +25,6 @@ fi # Defaults KIND_NODE_DEFAULT_VERSION=v1.32.2 -K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 EXTERNAL_PROVISIONER_VERSION=v5.2.0 @@ -34,7 +33,6 @@ EXTERNAL_ATTACHER_VERSION=v4.8.0 K8S_VERSION=${K8S_VERSION-} KUBECTL_VERSION=${KUBECTL_VERSION-} CSI_DRIVER_HOST_PATH_VERSION=${CSI_DRIVER_HOST_PATH_VERSION:-$CSI_DRIVER_HOST_PATH_DEFAULT_VERSION} -ENGINE=${CLUSTER_ENGINE:-kind} ENABLE_REGISTRY=${ENABLE_REGISTRY:-} ENABLE_PYROSCOPE=${ENABLE_PYROSCOPE:-} ENABLE_CSI_DRIVER=${ENABLE_CSI_DRIVER:-} @@ -243,91 +241,6 @@ check_registry_kind() { [ -n "$(check_registry "kind")" ] } -## -## K3D SUPPORT -## - -install_k3d() { - local bindir=$1 - - curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | K3D_INSTALL_DIR=$bindir bash -s -- --no-sudo -} - -create_cluster_k3d() { - local k8s_version=$1 - local cluster_name=$2 - - local latest_k3s_tag - latest_k3s_tag=$(k3d version list k3s | grep -- "^${k8s_version//./\\.}"'\+-k3s[0-9]$' | tail -n 1) - - local options=() - if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ] || [ -n "${ENABLE_REGISTRY:-}" ]; then - config_file="${TEMP_DIR}/k3d-registries.yaml" - cat >"${config_file}" <<-EOF -mirrors: -EOF - - if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ]; then - cat >>"${config_file}" <<-EOF - "docker.io": - endpoint: - - "${DOCKER_REGISTRY_MIRROR}" -EOF - fi - - if [ -n "${ENABLE_REGISTRY:-}" ]; then - cat >>"${config_file}" <<-EOF - "${registry_name}:5000": - endpoint: - - http://${registry_name}:5000 -EOF - fi - - options+=(--registry-config "${config_file}") - fi - - local agents=() - if [ "$NODES" -gt 1 ]; then - agents=(-a "${NODES}") - fi - - K3D_FIX_MOUNTS=1 k3d cluster create "${options[@]}" "${agents[@]}" -i "rancher/k3s:${latest_k3s_tag}" --no-lb "${cluster_name}" \ - --k3s-arg "--disable=traefik@server:0" --k3s-arg "--disable=metrics-server@server:0" \ - --k3s-arg "--node-taint=node-role.kubernetes.io/master:NoSchedule@server:0" #wokeignore:rule=master - - if [ -n "${ENABLE_REGISTRY:-}" ]; then - docker network connect "k3d-${cluster_name}" "${registry_name}" &>/dev/null || true - fi -} - -load_image_k3d() { - local cluster_name=$1 - local image=$2 - k3d image import "${image}" -c "${cluster_name}" -} - -export_logs_k3d() { - local cluster_name=$1 - while IFS= read -r line; do - NODES_LIST+=("$line") - done < <(k3d node list | awk "/${cluster_name}/{print \$1}") - for i in "${NODES_LIST[@]}"; do - mkdir -p "${LOG_DIR}/${i}" - docker cp -L "${i}:/var/log/." 
"${LOG_DIR}/${i}" - done -} - -destroy_k3d() { - local cluster_name=$1 - docker network disconnect "k3d-${cluster_name}" "${registry_name}" &>/dev/null || true - k3d cluster delete "${cluster_name}" || true - docker network rm "k3d-${cluster_name}" &>/dev/null || true -} - -check_registry_k3d() { - [ -n "$(check_registry "k3d-${CLUSTER_NAME}")" ] -} - ## ## GENERIC ROUTINES ## @@ -530,7 +443,7 @@ load_image() { local cluster_name=$1 local image=$2 if [ -z "${ENABLE_REGISTRY:-}" ]; then - "load_image_${ENGINE}" "${cluster_name}" "${image}" + "load_image_kind" "${cluster_name}" "${image}" else load_image_registry "${image}" fi @@ -544,7 +457,7 @@ deploy_operator() { usage() { cat >&2 <] [-r] +Usage: $0 [-k ] [-r] Commands: prepare Downloads the prerequisite into @@ -560,11 +473,6 @@ Commands: pyroscope Deploy Pyroscope inside operator namespace Options: - -e|--engine - Use the provided ENGINE to run the cluster. - Available options are 'kind' and 'k3d'. Default 'kind'. - Env: CLUSTER_ENGINE - -k|--k8s-version Use the specified kubernetes full version number (e.g., v1.27.0). Env: K8S_VERSION @@ -590,14 +498,14 @@ prepare() { local bindir=$1 echo "${bright}Installing cluster prerequisites in ${bindir}${reset}" install_kubectl "${bindir}" - "install_${ENGINE}" "${bindir}" + "install_kind" "${bindir}" echo "${bright}Done installing cluster prerequisites in ${bindir}${reset}" } create() { - echo "${bright}Creating ${ENGINE} cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" + echo "${bright}Creating kind cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" - "create_cluster_${ENGINE}" "${K8S_VERSION}" "${CLUSTER_NAME}" + "create_cluster_kind" "${K8S_VERSION}" "${CLUSTER_NAME}" # Support for docker:dind service if [ "${DOCKER_HOST:-}" == "tcp://docker:2376" ]; then @@ -608,7 +516,7 @@ create() { deploy_csi_host_path deploy_prometheus_crds - echo "${bright}Done creating ${ENGINE} cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" + echo "${bright}Done creating kind cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" } load_helper_images() { @@ -618,7 +526,7 @@ load_helper_images() { # with the goal to speed up the runs. 
for IMG in "${HELPER_IMGS[@]}"; do docker pull "${IMG}" - "load_image_${ENGINE}" "${CLUSTER_NAME}" "${IMG}" + "load_image_kind" "${CLUSTER_NAME}" "${IMG}" done echo "${bright}Done loading helper images on cluster ${CLUSTER_NAME}${reset}" @@ -632,7 +540,7 @@ load() { # This code will NEVER run in the cloud CI/CD workflows, as there we do # the build and push (into GH test registry) once in `builds`, before # the strategy matrix blows up the number of executables - if [ -z "${ENABLE_REGISTRY}" ] && "check_registry_${ENGINE}"; then + if [ -z "${ENABLE_REGISTRY}" ] && "check_registry_kind"; then ENABLE_REGISTRY=true fi @@ -645,7 +553,7 @@ load() { ARCH="${ARCH}" BUILDER_NAME=${builder_name} docker-build if [ -z "${ENABLE_REGISTRY:-}" ]; then - "load_image_${ENGINE}" "${CLUSTER_NAME}" "${CONTROLLER_IMG}" + "load_image_kind" "${CLUSTER_NAME}" "${CONTROLLER_IMG}" fi echo "${bright}Loading new operator image on cluster ${CLUSTER_NAME}${reset}" @@ -667,7 +575,7 @@ load() { ARCH="${ARCH}" BUILDER_NAME="${builder_name}" docker-build if [ -z "${ENABLE_REGISTRY:-}" ]; then - "load_image_${ENGINE}" "${CLUSTER_NAME}" "${CONTROLLER_IMG}" + "load_image_kind" "${CLUSTER_NAME}" "${CONTROLLER_IMG}" fi echo "${bright}Done loading new 'prime' operator image on cluster ${CLUSTER_NAME}${reset}" @@ -677,7 +585,7 @@ load() { } deploy() { - if [ -z "${ENABLE_REGISTRY}" ] && "check_registry_${ENGINE}"; then + if [ -z "${ENABLE_REGISTRY}" ] && "check_registry_kind"; then ENABLE_REGISTRY=true fi @@ -692,7 +600,7 @@ deploy() { print_image() { local tag=devel - if [ -n "${ENABLE_REGISTRY:-}" ] || "check_registry_${ENGINE}"; then + if [ -n "${ENABLE_REGISTRY:-}" ] || "check_registry_kind"; then tag=latest fi echo "${registry_name}:5000/cloudnative-pg-testing:${tag}" @@ -701,17 +609,17 @@ print_image() { export_logs() { echo "${bright}Exporting logs from cluster ${CLUSTER_NAME} to ${LOG_DIR}${reset}" - "export_logs_${ENGINE}" "${CLUSTER_NAME}" + "export_logs_kind" "${CLUSTER_NAME}" echo "${bright}Done exporting logs from cluster ${CLUSTER_NAME} to ${LOG_DIR}${reset}" } destroy() { - echo "${bright}Destroying ${ENGINE} cluster ${CLUSTER_NAME}${reset}" + echo "${bright}Destroying kind cluster ${CLUSTER_NAME}${reset}" - "destroy_${ENGINE}" "${CLUSTER_NAME}" + "destroy_kind" "${CLUSTER_NAME}" - echo "${bright}Done destroying ${ENGINE} cluster ${CLUSTER_NAME}${reset}" + echo "${bright}Done destroying kind cluster ${CLUSTER_NAME}${reset}" } pyroscope() { @@ -737,13 +645,7 @@ main() { case "${o}" in -e | --engine) shift - ENGINE=$1 - shift - if [ "${ENGINE}" != "kind" ] && [ "${ENGINE}" != "k3d" ]; then - echo "ERROR: ${ENGINE} is not a valid engine! 
[kind, k3d]" >&2 - echo >&2 - usage - fi + # no-op, kept for compatibility ;; -k | --k8s-version) shift @@ -784,14 +686,7 @@ main() { fi if [ -z "${K8S_VERSION}" ]; then - case "${ENGINE}" in - kind) - K8S_VERSION=${KIND_NODE_DEFAULT_VERSION} - ;; - k3d) - K8S_VERSION=${K3D_NODE_DEFAULT_VERSION} - ;; - esac + K8S_VERSION=${KIND_NODE_DEFAULT_VERSION} fi KUBECTL_VERSION=${KUBECTL_VERSION:-$K8S_VERSION} diff --git a/tests/utils/cloudvendors/cloud_vendor.go b/tests/utils/cloudvendors/cloud_vendor.go index be50b780db..2fcc54ea1a 100644 --- a/tests/utils/cloudvendors/cloud_vendor.go +++ b/tests/utils/cloudvendors/cloud_vendor.go @@ -38,7 +38,7 @@ var EKS = TestEnvVendor("eks") // GKE google cloud cluster var GKE = TestEnvVendor("gke") -// LOCAL kind or k3d cluster running locally +// LOCAL kind cluster running locally var LOCAL = TestEnvVendor("local") // OCP openshift cloud cluster From f3bbc449314f60151f0ea5bf0c6dbefabb0673df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Rodr=C3=ADguez=20Hern=C3=A1ndez?= Date: Wed, 5 Mar 2025 12:05:04 +0100 Subject: [PATCH 426/836] docs: add Bitnami to ADOPTERS.md (#7070) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add Bitnami to the adopters list Signed-off-by: Carlos Rodríguez Hernández --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 2936cc88b3..a9a556254c 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -61,3 +61,4 @@ This list is sorted in chronological order, based on the submission date. | [Docaposte](https://docaposte.fr) | @albundy83 | 2024-11-20 | Docaposte is the digital trust leader in France. We use CloudNativePG because it is the most elegant and efficient solution for running PostgreSQL in production. | | [Obmondo](https://obmondo.com) | @Obmondo | 2024-11-25 | At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called [KubeAid](https://kubeaid.io/) to easily manage all PostgreSQL databases across clusters from a centralized interface. | | [Mirakl](https://www.mirakl.com/) | @ThomasBoussekey | 2025-02-03 | CloudNativePG is our default hosting solution for marketplace instances. With over 300 CloudNativePG clusters managing 8 TB of data, we have developed highly customizable Helm charts that support connection pooling, logical replication, and many other advanced features. | +| [Bitnami](https://bitnami.com) | [@carrodher](https://github.com/carrodher) | 2025-03-04 | Bitnami provides CloudNativePG as part of its open-source [Helm charts catalog](https://github.com/bitnami/charts), enabling users to easily deploy PostgreSQL clusters on Kubernetes. Additionally, CloudNativePG is available through [Tanzu Application Catalog](https://www.vmware.com/products/app-platform/tanzu-application-catalog) and [Bitnami Premium](https://www.arrow.com/globalecs/na/vendors/bitnami-premium/), where customers can benefit from advanced security and compliance features such as VEX, SBOM, SLSA3, and CVE scanning. | From 321a4b9805e0b401f7a1ea5dbf402fa7e86b62c9 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 5 Mar 2025 12:08:02 +0100 Subject: [PATCH 427/836] chore(setup-cluster.sh): remove support for running without a registry (#7054) Running `hack/setup-cluster.sh` without the `-r` option has been broken for several months, and the introduction of the Bake build system has further complicated fixing it. 
To improve maintainability, this commit removes support for running `setup-cluster.sh` without a registry, while keeping the `-r` option as a no-op. Closes #7048 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- Makefile | 6 +- contribute/e2e_testing_environment/README.md | 6 -- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 78 ++++---------------- 4 files changed, 19 insertions(+), 73 deletions(-) diff --git a/Makefile b/Makefile index ae8d59115a..c379f1aa9f 100644 --- a/Makefile +++ b/Makefile @@ -229,7 +229,7 @@ generate: controller-gen ## Generate code. deploy-locally: kind-cluster ## Build and deploy operator in local cluster set -e ;\ - hack/setup-cluster.sh -n1 -r load deploy + hack/setup-cluster.sh -n1 load deploy olm-scorecard: operator-sdk ## Run the Scorecard test from operator-sdk $(OPERATOR_SDK) scorecard ${BUNDLE_IMG} --wait-time 60s --verbose @@ -351,11 +351,11 @@ endef kind-cluster: ## Create KinD cluster to run operator locally set -e ;\ - hack/setup-cluster.sh -n1 -r create + hack/setup-cluster.sh -n1 create kind-cluster-destroy: ## Destroy KinD cluster created using kind-cluster command set -e ;\ - hack/setup-cluster.sh -n1 -r destroy + hack/setup-cluster.sh -n1 destroy .PHONY: operator-sdk OPERATOR_SDK = $(LOCALBIN)/operator-sdk diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md index 2eb7936908..53d0ee747d 100644 --- a/contribute/e2e_testing_environment/README.md +++ b/contribute/e2e_testing_environment/README.md @@ -56,15 +56,9 @@ All flags have corresponding environment variables labeled `(Env:...` in the tab | Flags | Usage | |-------|-------------------------------------------------------------------------------------------------------------------------------| -| -r |--registry | Enable local registry. (Env: `ENABLE_REGISTRY`) | | -k |--k8s-version | Use the specified Kubernetes full version number (e.g., `-k v1.30.0`). (Env: `K8S_VERSION`) | | -n |--nodes | Create a cluster with the required number of nodes. Used only during "create" command. Default: 3 (Env: `NODES`) | - -> **NOTE:** if you want to use custom engine and registry settings, please make -> sure that they are consistent through all invocations either via command line -> options or by defining the respective environment variables - > **NOTE:** on ARM64 architecture like Apple M1/M2/M3, `kind` provides different > images for AMD64 and ARM64 nodes. 
If the **x86/amd64 emulation** is not enabled, > the `./hack/setup-cluster.sh` script will correctly detect the architecture diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index ef8e8879da..b33fb60550 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -58,7 +58,7 @@ cleanup() { main() { # Call to setup-cluster.sh script - "${HACK_DIR}/setup-cluster.sh" -r create + "${HACK_DIR}/setup-cluster.sh" create trap cleanup EXIT diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index d663e26f88..1cc7900a21 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -33,7 +33,6 @@ EXTERNAL_ATTACHER_VERSION=v4.8.0 K8S_VERSION=${K8S_VERSION-} KUBECTL_VERSION=${KUBECTL_VERSION-} CSI_DRIVER_HOST_PATH_VERSION=${CSI_DRIVER_HOST_PATH_VERSION:-$CSI_DRIVER_HOST_PATH_DEFAULT_VERSION} -ENABLE_REGISTRY=${ENABLE_REGISTRY:-} ENABLE_PYROSCOPE=${ENABLE_PYROSCOPE:-} ENABLE_CSI_DRIVER=${ENABLE_CSI_DRIVER:-} ENABLE_APISERVER_AUDIT=${ENABLE_APISERVER_AUDIT:-} @@ -189,35 +188,30 @@ EOF done fi - if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ] || [ -n "${ENABLE_REGISTRY:-}" ]; then - # Add containerdConfigPatches section - cat >>"${config_file}" <<-EOF + # Add containerdConfigPatches section + cat >>"${config_file}" <<-EOF containerdConfigPatches: EOF - if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ]; then - cat >>"${config_file}" <<-EOF + if [ -n "${DOCKER_REGISTRY_MIRROR:-}" ]; then + cat >>"${config_file}" <<-EOF - |- [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] endpoint = ["${DOCKER_REGISTRY_MIRROR}"] EOF - fi + fi - if [ -n "${ENABLE_REGISTRY:-}" ]; then - cat >>"${config_file}" <<-EOF + cat >>"${config_file}" <<-EOF - |- [plugins."io.containerd.grpc.v1.cri".registry.mirrors."${registry_name}:5000"] endpoint = ["http://${registry_name}:5000"] EOF - fi - fi + # Create the cluster kind create cluster --name "${cluster_name}" --image "kindest/node:${k8s_version}" --config "${config_file}" - if [ -n "${ENABLE_REGISTRY:-}" ]; then - docker network connect "kind" "${registry_name}" &>/dev/null || true - fi + docker network connect "kind" "${registry_name}" &>/dev/null || true # Workaround for https://kind.sigs.k8s.io/docs/user/known-issues/#pod-errors-due-to-too-many-open-files for node in $(kind get nodes --name "${cluster_name}"); do @@ -237,10 +231,6 @@ destroy_kind() { docker network rm "kind" &>/dev/null || true } -check_registry_kind() { - [ -n "$(check_registry "kind")" ] -} - ## ## GENERIC ROUTINES ## @@ -259,8 +249,6 @@ install_kubectl() { # to have an easy way to refresh the operator version that is running # on the temporary cluster. ensure_registry() { - [ -z "${ENABLE_REGISTRY:-}" ] && return - if ! docker volume inspect "${registry_volume}" &>/dev/null; then docker volume create "${registry_volume}" fi @@ -274,24 +262,13 @@ ensure_registry() { fi } -check_registry() { - local network=$1 - docker network inspect "${network}" | \ - jq -r ".[].Containers | .[] | select(.Name==\"${registry_name}\") | .Name" -} - # An existing builder will not have any knowledge of the local registry or the # any host outside the builder, but when having the builder inside Kubernetes # this is fixed since we already solved the issue of the kubernetes cluster reaching # out the local registry. 
The following functions will handle that builder create_builder() { docker buildx rm "${builder_name}" &>/dev/null || true - # If ENABLE_REGISTRY is not set, we don't need to define driver-opt network - if [ -n "${ENABLE_REGISTRY:-}" ]; then - docker buildx create --name "${builder_name}" --driver-opt "network=${registry_net}" - else - docker buildx create --name "${builder_name}" - fi + docker buildx create --name "${builder_name}" --driver-opt "network=${registry_net}" } deploy_fluentd() { @@ -442,11 +419,7 @@ load_image_registry() { load_image() { local cluster_name=$1 local image=$2 - if [ -z "${ENABLE_REGISTRY:-}" ]; then - "load_image_kind" "${cluster_name}" "${image}" - else - load_image_registry "${image}" - fi + load_image_registry "${image}" } deploy_operator() { @@ -461,7 +434,7 @@ Usage: $0 [-k ] [-r] Commands: prepare Downloads the prerequisite into - create Create the test cluster + create Create the test cluster and a local registry load Build and load the operator image in the cluster load-helper-images Load the catalog of HELPER_IMGS into the local registry deploy Deploy the operator manifests in the cluster @@ -482,8 +455,6 @@ Options: Used only during "create" command. Default: 3 Env: NODES - -r|--registry Enable local registry. Env: ENABLE_REGISTRY - To use long options you need to have GNU enhanced getopt available, otherwise you can only use the short version of the options. EOF @@ -540,22 +511,15 @@ load() { # This code will NEVER run in the cloud CI/CD workflows, as there we do # the build and push (into GH test registry) once in `builds`, before # the strategy matrix blows up the number of executables - if [ -z "${ENABLE_REGISTRY}" ] && "check_registry_kind"; then - ENABLE_REGISTRY=true - fi create_builder echo "${bright}Building operator from current worktree${reset}" - CONTROLLER_IMG="$(ENABLE_REGISTRY="${ENABLE_REGISTRY}" print_image)" + CONTROLLER_IMG="$(print_image)" make -C "${ROOT_DIR}" CONTROLLER_IMG="${CONTROLLER_IMG}" insecure="true" \ ARCH="${ARCH}" BUILDER_NAME=${builder_name} docker-build - if [ -z "${ENABLE_REGISTRY:-}" ]; then - "load_image_kind" "${CLUSTER_NAME}" "${CONTROLLER_IMG}" - fi - echo "${bright}Loading new operator image on cluster ${CLUSTER_NAME}${reset}" echo "${bright}Done loading new operator image on cluster ${CLUSTER_NAME}${reset}" @@ -574,10 +538,6 @@ load() { make -C "${ROOT_DIR}" CONTROLLER_IMG="${PRIME_CONTROLLER_IMG}" VERSION="${PRIME_VERSION}" insecure="true" \ ARCH="${ARCH}" BUILDER_NAME="${builder_name}" docker-build - if [ -z "${ENABLE_REGISTRY:-}" ]; then - "load_image_kind" "${CLUSTER_NAME}" "${CONTROLLER_IMG}" - fi - echo "${bright}Done loading new 'prime' operator image on cluster ${CLUSTER_NAME}${reset}" fi @@ -585,11 +545,7 @@ load() { } deploy() { - if [ -z "${ENABLE_REGISTRY}" ] && "check_registry_kind"; then - ENABLE_REGISTRY=true - fi - - CONTROLLER_IMG="$(ENABLE_REGISTRY="${ENABLE_REGISTRY}" print_image)" + CONTROLLER_IMG="$(print_image)" echo "${bright}Deploying manifests from current worktree on cluster ${CLUSTER_NAME}${reset}" @@ -599,11 +555,7 @@ deploy() { } print_image() { - local tag=devel - if [ -n "${ENABLE_REGISTRY:-}" ] || "check_registry_kind"; then - tag=latest - fi - echo "${registry_name}:5000/cloudnative-pg-testing:${tag}" + echo "${registry_name}:5000/cloudnative-pg-testing:latest" } export_logs() { @@ -669,7 +621,7 @@ main() { ;; -r | --registry) shift - ENABLE_REGISTRY=true + # no-op, kept for compatibility ;; --) shift From 7e6af25bf9cd4f6045643dd37ca7599724b7b58a Mon Sep 17 00:00:00 2001 From: 
Armando Ruocco Date: Wed, 5 Mar 2025 15:38:18 +0100 Subject: [PATCH 428/836] chore(setup-cluster.sh): remove `prepare` command (#7055) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `hack/setup-cluster.sh` prepare subcommand was once essential for E2E environment setup. Now, this is fully handled by the GitHub workflow, which automatically installs the right versions of kind and kubectl. Closes #7050 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Niccolò Fei Co-authored-by: Marco Nenciarini Co-authored-by: Niccolò Fei --- .github/workflows/continuous-delivery.yml | 17 +++------ hack/setup-cluster.sh | 46 +---------------------- 2 files changed, 6 insertions(+), 57 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index d11129650e..f296d9fe6b 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -654,19 +654,12 @@ jobs: username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - - # 'Retry' preparing the E2E test ENV - name: Prepare the environment - uses: nick-fields/retry@v3 + name: Install Kind + uses: helm/kind-action@v1.12.0 with: - timeout_seconds: 300 - max_attempts: 3 - on_retry_command: | - # Clear-ups before retries - sudo rm -rf /usr/local/bin/kind /usr/local/bin/kubectl - command: | - sudo apt-get update - sudo apt-get install -y gettext-base - sudo hack/setup-cluster.sh prepare /usr/local/bin + install_only: true + version: ${{ env.KIND_VERSION }} + kubectl_version: ${{ env.K8S_VERSION }} - name: Prepare patch for customization env: diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 1cc7900a21..3d739065d6 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -54,8 +54,7 @@ TEMP_DIR="$(mktemp -d)" LOG_DIR=${LOG_DIR:-$ROOT_DIR/_logs/} trap 'rm -fr ${TEMP_DIR}' EXIT -# Operating System and Architecture -OS=$(uname | tr '[:upper:]' '[:lower:]') +# Architecture ARCH=$(uname -m) case $ARCH in x86_64) ARCH="amd64" ;; @@ -100,21 +99,6 @@ fi ## KIND SUPPORT ## -install_kind() { - local bindir=$1 - local binary="${bindir}/kind" - local version - - # Get the latest release of kind unless specified in the environment - version=${KIND_VERSION:-$( - curl -s -LH "Accept:application/json" https://github.com/kubernetes-sigs/kind/releases/latest | - sed 's/.*"tag_name":"\([^"]\+\)".*/\1/' - )} - - curl -s -L "https://kind.sigs.k8s.io/dl/${version}/kind-${OS}-${ARCH}" -o "${binary}" - chmod +x "${binary}" -} - load_image_kind() { local cluster_name=$1 local image=$2 @@ -235,15 +219,6 @@ destroy_kind() { ## GENERIC ROUTINES ## -install_kubectl() { - local bindir=$1 - - local binary="${bindir}/kubectl" - - curl -sL "https://dl.k8s.io/release/v${KUBECTL_VERSION#v}/bin/${OS}/${ARCH}/kubectl" -o "${binary}" - chmod +x "${binary}" -} - # The following function makes sure we already have a Docker container # with a bound volume to act as local registry. 
This is really needed # to have an easy way to refresh the operator version that is running @@ -433,7 +408,6 @@ usage() { Usage: $0 [-k ] [-r] Commands: - prepare Downloads the prerequisite into create Create the test cluster and a local registry load Build and load the operator image in the cluster load-helper-images Load the catalog of HELPER_IMGS into the local registry @@ -465,14 +439,6 @@ EOF ## COMMANDS ## -prepare() { - local bindir=$1 - echo "${bright}Installing cluster prerequisites in ${bindir}${reset}" - install_kubectl "${bindir}" - "install_kind" "${bindir}" - echo "${bright}Done installing cluster prerequisites in ${bindir}${reset}" -} - create() { echo "${bright}Creating kind cluster ${CLUSTER_NAME} with version ${K8S_VERSION}${reset}" @@ -652,16 +618,6 @@ main() { # Invoke the command case "$command" in - prepare) - if [ "$#" -eq 0 ]; then - echo "ERROR: prepare requires a destination directory" >&2 - echo >&2 - usage - fi - dest_dir=$1 - shift - prepare "${dest_dir}" - ;; create | load | load-helper-images | deploy | print-image | export-logs | destroy | pyroscope) ensure_registry From f5b85b9e655d0c193c8fb1a9d6c3e5a3347be9fc Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 6 Mar 2025 11:18:24 +0100 Subject: [PATCH 429/836] ci(deps): remove 4.13 from the list of version for OpenShift (#7078) Stop testing OpenShift 13, as it is no longer supported by Red Hat. Signed-off-by: Jonathan Gonzalez V. --- .github/openshift_versions.json | 1 - .github/workflows/k8s-versions-check.yml | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/openshift_versions.json b/.github/openshift_versions.json index 49cf2ac65d..e41221f17a 100644 --- a/.github/openshift_versions.json +++ b/.github/openshift_versions.json @@ -4,6 +4,5 @@ "4.16", "4.15", "4.14", - "4.13", "4.12" ] diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index d966d8e2c1..43da7705b6 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -109,8 +109,10 @@ jobs: - name: Get updated OpenShift versions run: | + # We limit the range starting on 4 to 9 to skip the 13 version + # this needs to be updated when the 15 version is also EOL curl -s https://mirror.openshift.com/pub/openshift-v4/clients/ocp/ | \ - grep -e 'href.*"4\.1[2-9]\.[0-9].*"' | \ + grep -e 'href.*"4\.1[24-9]\.[0-9].*"' | \ sed -e 's/\(.*\)href="\(4\.1[2-9]\)\(.*\)/\2/' | \ sort -Vru | \ awk -vv="$MINIMAL_OCP" '$0>=v {print $0}' | \ From 868c0389133fd428015b2776229bd34b08da28e8 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Thu, 6 Mar 2025 11:28:55 +0100 Subject: [PATCH 430/836] fix: reload plugin certificates when secret changes (#7029) This patch uses the existing operator watch infrastructure to reload the plugin TLS secrets when they change. 
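In essence, the change is the following controller-runtime wiring (a
condensed sketch of the `SetupWithManager` change in the diff below;
`setupWatches` is an illustrative name, and the namespace filtering is
omitted here):

    // The reconciler keeps watching plugin Services, and additionally
    // watches Secrets: every changed Secret is mapped back to the plugin
    // Services whose annotations reference it, so their TLS configuration
    // is reloaded at the next reconciliation.
    func (r *PluginReconciler) setupWatches(mgr ctrl.Manager) error {
    	return ctrl.NewControllerManagedBy(mgr).
    		For(&corev1.Service{}).
    		Watches(
    			&corev1.Secret{},
    			handler.EnqueueRequestsFromMapFunc(r.mapSecretToPlugin),
    		).
    		Complete(r)
    }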
Fixes #7024 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- internal/cmd/manager/controller/controller.go | 4 +- internal/cnpi/plugin/repository/setup.go | 128 +++++++++++------- internal/cnpi/plugin/repository/setup_test.go | 62 +++++++++ internal/cnpi/plugin/repository/suite_test.go | 119 ++++++++++++++++ internal/controller/plugin_controller.go | 86 ++++++++---- internal/controller/plugin_predicates.go | 13 +- 6 files changed, 333 insertions(+), 79 deletions(-) create mode 100644 internal/cnpi/plugin/repository/setup_test.go create mode 100644 internal/cnpi/plugin/repository/suite_test.go diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index 01ccec5111..93d8315c97 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ -236,8 +236,8 @@ func RunController( return err } - if err = controller.NewPluginReconciler(mgr, pluginRepository). - SetupWithManager(mgr, configuration.Current.OperatorNamespace, maxConcurrentReconciles); err != nil { + if err = controller.NewPluginReconciler(mgr, conf.OperatorNamespace, pluginRepository). + SetupWithManager(mgr, maxConcurrentReconciles); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Plugin") return err } diff --git a/internal/cnpi/plugin/repository/setup.go b/internal/cnpi/plugin/repository/setup.go index ca06824075..b47db04283 100644 --- a/internal/cnpi/plugin/repository/setup.go +++ b/internal/cnpi/plugin/repository/setup.go @@ -34,11 +34,12 @@ import ( type Interface interface { // ForgetPlugin closes every connection to the plugin with the passed name // and forgets its discovery info. - // If the plug in was not available in the repository, this is a no-op + // This operation is synchronous and blocks until every connection is closed. + // If the plugin was not available in the repository, this is a no-op. ForgetPlugin(name string) // RegisterRemotePlugin registers a plugin available on a remote - // TCP entrypoint + // TCP entrypoint. RegisterRemotePlugin(name string, address string, tlsConfig *tls.Config) error // RegisterUnixSocketPluginsInPath scans the passed directory @@ -60,71 +61,84 @@ type data struct { pluginConnectionPool map[string]*puddle.Pool[connection.Interface] } +// pluginSetupOptions are the options to be used when setting up +// a plugin connection +type pluginSetupOptions struct { + // forceRegistration forces the creation of a new plugin connection + // even if one already exists. The existing connection will be closed. + forceRegistration bool +} + // maxPoolSize is the maximum number of connections in a plugin's connection // pool const maxPoolSize = 5 -func (r *data) setPluginProtocol(name string, protocol connection.Protocol) error { - r.mux.Lock() - defer r.mux.Unlock() - - if r.pluginConnectionPool == nil { - r.pluginConnectionPool = make(map[string]*puddle.Pool[connection.Interface]) - } - - _, ok := r.pluginConnectionPool[name] - if ok { - return &ErrPluginAlreadyRegistered{ - Name: name, - } - } - - constructor := func(ctx context.Context) (res connection.Interface, err error) { - var handler connection.Handler - - defer func() { - if err != nil && handler != nil { - _ = handler.Close() - } - }() - - constructorLogger := log. 
+func pluginConnectionConstructor(name string, protocol connection.Protocol) puddle.Constructor[connection.Interface] { + return func(ctx context.Context) (connection.Interface, error) { + logger := log. FromContext(ctx). WithName("setPluginProtocol"). WithValues("pluginName", name) - ctx = log.IntoContext(ctx, constructorLogger) + ctx = log.IntoContext(ctx, logger) - constructorLogger.Trace("Acquired physical plugin connection") + logger.Trace("Connecting to plugin") + var ( + result connection.Interface + handler connection.Handler + err error + ) if handler, err = protocol.Dial(ctx); err != nil { - constructorLogger.Error(err, "Got error while connecting to plugin") + logger.Error(err, "Error while connecting to plugin (physical)") + return nil, err + } + + if result, err = connection.LoadPlugin(ctx, handler); err != nil { + logger.Error(err, "Error while connecting to plugin (logical)") + _ = handler.Close() return nil, err } - return connection.LoadPlugin(ctx, handler) + return result, err } +} - destructor := func(res connection.Interface) { - constructorLogger := log. - FromContext(context.Background()). - WithName("setPluginProtocol"). - WithValues("pluginName", name) - constructorLogger.Trace("Released physical plugin connection") - - err := res.Close() - if err != nil { - destructorLogger := log.FromContext(context.Background()). - WithName("setPluginProtocol"). - WithValues("pluginName", res.Name()) - destructorLogger.Warning("Error while closing plugin connection", "err", err) +func pluginConnectionDestructor(res connection.Interface) { + logger := log.FromContext(context.Background()). + WithName("pluginConnectionDestructor"). + WithValues("pluginName", res.Name()) + + logger.Trace("Released physical plugin connection") + + err := res.Close() + if err != nil { + logger.Warning("Error while closing plugin connection", "err", err) + } +} + +func (r *data) setPluginProtocol(name string, protocol connection.Protocol, opts pluginSetupOptions) error { + r.mux.Lock() + defer r.mux.Unlock() + + if r.pluginConnectionPool == nil { + r.pluginConnectionPool = make(map[string]*puddle.Pool[connection.Interface]) + } + + if oldPool, alreadyRegistered := r.pluginConnectionPool[name]; alreadyRegistered { + if opts.forceRegistration { + oldPool.Close() + } else { + return &ErrPluginAlreadyRegistered{ + Name: name, + } } } var err error r.pluginConnectionPool[name], err = puddle.NewPool( &puddle.Config[connection.Interface]{ - Constructor: constructor, - Destructor: destructor, + Constructor: pluginConnectionConstructor(name, protocol), + Destructor: pluginConnectionDestructor, MaxSize: maxPoolSize, }, ) @@ -143,22 +157,34 @@ func (r *data) ForgetPlugin(name string) { return } - // TODO(leonardoce): should we really wait for all the plugin connections - // to be closed? pool.Close() + delete(r.pluginConnectionPool, name) } // registerUnixSocketPlugin registers a plugin available at the passed // unix socket path func (r *data) registerUnixSocketPlugin(name, path string) error { - return r.setPluginProtocol(name, connection.ProtocolUnix(path)) + return r.setPluginProtocol(name, connection.ProtocolUnix(path), pluginSetupOptions{ + // Forcing the registration of a Unix socket plugin has no meaning + // because they can be installed and started only when the Pod is created. 
+ forceRegistration: false, + }) } func (r *data) RegisterRemotePlugin(name string, address string, tlsConfig *tls.Config) error { - return r.setPluginProtocol(name, &connection.ProtocolTCP{ + protocol := &connection.ProtocolTCP{ TLSConfig: tlsConfig, Address: address, - }) + } + + // The RegisterRemotePlugin function is called when the plugin is registered for + // the first time and when the certificates of an existing plugin get refreshed. + // In the second case, the plugin loading will be forced and all existing + // connections will be dropped and recreated. + opts := pluginSetupOptions{ + forceRegistration: true, + } + return r.setPluginProtocol(name, protocol, opts) } func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error) { diff --git a/internal/cnpi/plugin/repository/setup_test.go b/internal/cnpi/plugin/repository/setup_test.go new file mode 100644 index 0000000000..d3030a5754 --- /dev/null +++ b/internal/cnpi/plugin/repository/setup_test.go @@ -0,0 +1,62 @@ +package repository + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Set Plugin Protocol", func() { + var repository *data + + BeforeEach(func() { + repository = &data{} + }) + + It("creates connection pool for new plugin", func() { + err := repository.setPluginProtocol("plugin1", newUnitTestProtocol("test"), pluginSetupOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(repository.pluginConnectionPool).To(HaveKey("plugin1")) + }) + + It("fails when adding same plugin name without forceRegistration", func() { + err := repository.setPluginProtocol("plugin1", newUnitTestProtocol("/tmp/socket"), pluginSetupOptions{}) + Expect(err).NotTo(HaveOccurred()) + + err = repository.setPluginProtocol("plugin1", newUnitTestProtocol("/tmp/socket2"), pluginSetupOptions{}) + Expect(err).To(BeEquivalentTo(&ErrPluginAlreadyRegistered{Name: "plugin1"})) + }) + + It("overwrites existing plugin when forceRegistration is true", func() { + first := newUnitTestProtocol("/tmp/socket") + err := repository.setPluginProtocol("plugin1", first, pluginSetupOptions{}) + Expect(err).NotTo(HaveOccurred()) + pool1 := repository.pluginConnectionPool["plugin1"] + + ctx1, cancel := context.WithCancel(context.Background()) + conn1, err := pool1.Acquire(ctx1) + Expect(err).NotTo(HaveOccurred()) + Expect(conn1).NotTo(BeNil()) + cancel() + conn1.Release() + + second := newUnitTestProtocol("/tmp/socket2") + err = repository.setPluginProtocol("plugin1", second, pluginSetupOptions{forceRegistration: true}) + Expect(err).NotTo(HaveOccurred()) + pool2 := repository.pluginConnectionPool["plugin1"] + + ctx2, cancel := context.WithCancel(context.Background()) + conn2, err := pool2.Acquire(ctx2) + Expect(err).NotTo(HaveOccurred()) + Expect(conn2).NotTo(BeNil()) + cancel() + conn2.Release() + + Expect(pool1).NotTo(Equal(pool2)) + Expect(first.mockHandlers).To(HaveLen(1)) + Expect(first.mockHandlers[0].closed).To(BeTrue()) + Expect(second.mockHandlers).To(HaveLen(1)) + Expect(second.mockHandlers[0].closed).To(BeFalse()) + }) +}) diff --git a/internal/cnpi/plugin/repository/suite_test.go b/internal/cnpi/plugin/repository/suite_test.go new file mode 100644 index 0000000000..4421aecdda --- /dev/null +++ b/internal/cnpi/plugin/repository/suite_test.go @@ -0,0 +1,119 @@ +package repository + +import ( + "context" + "net" + "testing" + + "github.com/cloudnative-pg/cnpg-i/pkg/identity" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + 
"google.golang.org/grpc/test/bufconn" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestRepository(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Repository Suite") +} + +type identityImplementation struct { + identity.UnimplementedIdentityServer +} + +// GetPluginMetadata implements Identity +func (i identityImplementation) GetPluginMetadata( + _ context.Context, + _ *identity.GetPluginMetadataRequest, +) (*identity.GetPluginMetadataResponse, error) { + return &identity.GetPluginMetadataResponse{ + Name: "testing-service", + Version: "0.0.1", + DisplayName: "testing-service", + ProjectUrl: "https://github.com/cloudnative-pg/cloudnative-pg", + RepositoryUrl: "https://github.com/cloudnative-pg/cloudnative-pg", + License: "APACHE 2.0", + Maturity: "alpha", + }, nil +} + +// GetPluginCapabilities implements identity +func (i identityImplementation) GetPluginCapabilities( + _ context.Context, + _ *identity.GetPluginCapabilitiesRequest, +) (*identity.GetPluginCapabilitiesResponse, error) { + return &identity.GetPluginCapabilitiesResponse{ + Capabilities: []*identity.PluginCapability{}, + }, nil +} + +// Probe implements Identity +func (i identityImplementation) Probe( + _ context.Context, + _ *identity.ProbeRequest, +) (*identity.ProbeResponse, error) { + return &identity.ProbeResponse{ + Ready: true, + }, nil +} + +type unitTestProtocol struct { + name string + mockHandlers []*mockHandler + server *grpc.Server +} + +type mockHandler struct { + *grpc.ClientConn + closed bool +} + +func newUnitTestProtocol(name string) *unitTestProtocol { + return &unitTestProtocol{name: name} +} + +func (h *mockHandler) Close() error { + _ = h.ClientConn.Close() + h.closed = true + return nil +} + +func (p *unitTestProtocol) Dial(ctx context.Context) (connection.Handler, error) { + listener := bufconn.Listen(1024 * 1024) + + if len(p.mockHandlers) == 0 { + p.server = grpc.NewServer() + + identity.RegisterIdentityServer(p.server, &identityImplementation{}) + + go func() { + <-ctx.Done() + p.server.Stop() + }() + + go func() { + _ = p.server.Serve(listener) + }() + } + + dialer := func(_ context.Context, _ string) (net.Conn, error) { + return listener.Dial() + } + + conn, err := grpc.NewClient("passthrough://bufnet", + grpc.WithContextDialer(dialer), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + return nil, err + } + mh := &mockHandler{ + ClientConn: conn, + } + p.mockHandlers = append(p.mockHandlers, mh) + return mh, nil +} diff --git a/internal/controller/plugin_controller.go b/internal/controller/plugin_controller.go index 0e393f1525..83efdef4eb 100644 --- a/internal/controller/plugin_controller.go +++ b/internal/controller/plugin_controller.go @@ -32,9 +32,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -46,14 +46,21 @@ type PluginReconciler struct { Scheme *runtime.Scheme Plugins repository.Interface + + OperatorNamespace string } // NewPluginReconciler creates a new PluginReconciler initializing it 
-func NewPluginReconciler(mgr manager.Manager, plugins repository.Interface) *PluginReconciler { +func NewPluginReconciler( + mgr manager.Manager, + operatorNamespace string, + plugins repository.Interface, +) *PluginReconciler { return &PluginReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Plugins: plugins, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Plugins: plugins, + OperatorNamespace: operatorNamespace, } } @@ -61,9 +68,9 @@ func NewPluginReconciler(mgr manager.Manager, plugins repository.Interface) *Plu func (r *PluginReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { contextLogger, ctx := log.SetupLogger(ctx) - contextLogger.Debug("Plugin reconciliation loop start") + contextLogger.Trace("Plugin reconciliation loop start") defer func() { - contextLogger.Debug("Plugin reconciliation loop end") + contextLogger.Trace("Plugin reconciliation loop end") }() var service corev1.Service @@ -80,6 +87,11 @@ func (r *PluginReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, fmt.Errorf("cannot get the resource: %w", err) } + if !isPluginService(&service, r.OperatorNamespace) { + contextLogger.Trace("Skipping reconciliation for a non-cnpg-i service") + return ctrl.Result{}, nil + } + // Process label and annotations pluginName := service.Labels[utils.PluginNameLabelName] if len(pluginName) == 0 { @@ -207,31 +219,55 @@ func (r *PluginReconciler) getSecret( return &secret, nil } +func (r *PluginReconciler) mapSecretToPlugin(ctx context.Context, obj client.Object) []reconcile.Request { + // We only consider the secrets that are installed in the + // operator namespace because plugins need to be deployed + // in the same namespace as the operator. + if obj.GetNamespace() != r.OperatorNamespace { + return nil + } + + logger := log.FromContext(ctx) + + var services corev1.ServiceList + if err := r.Client.List( + ctx, + &services, + client.HasLabels{utils.PluginNameLabelName}, + client.InNamespace(r.OperatorNamespace), + ); err != nil { + logger.Error( + err, + "Error while listing CNPG-I services in the operator namespace", + ) + return nil + } + + var result []reconcile.Request + for i := range services.Items { + service := &services.Items[i] + if isSecretUsedByPluginService(service, obj.GetName()) { + result = append(result, reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(service), + }) + } + } + + return result +} + // SetupWithManager adds this PluginReconciler to the passed controller manager func (r *PluginReconciler) SetupWithManager( mgr ctrl.Manager, - operatorNamespace string, maxConcurrentReconciles int, ) error { - pluginServicesPredicate := predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return isPluginService(e.Object, operatorNamespace) - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return isPluginService(e.Object, operatorNamespace) - }, - GenericFunc: func(e event.GenericEvent) bool { - return isPluginService(e.Object, operatorNamespace) - }, - UpdateFunc: func(e event.UpdateEvent) bool { - return isPluginService(e.ObjectNew, operatorNamespace) - }, - } - return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). For(&corev1.Service{}). Named("plugin"). - WithEventFilter(pluginServicesPredicate). + Watches( + &corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(r.mapSecretToPlugin), + ). 
	Complete(r)
}
diff --git a/internal/controller/plugin_predicates.go b/internal/controller/plugin_predicates.go
index 7f6e381a0d..119eb2d2d3 100644
--- a/internal/controller/plugin_predicates.go
+++ b/internal/controller/plugin_predicates.go
@@ -22,7 +22,7 @@ import (
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
 )
 
-var isPluginService = func(object client.Object, operatorNamespace string) bool {
+func isPluginService(object client.Object, operatorNamespace string) bool {
 	if object.GetNamespace() != operatorNamespace {
 		// Only consider the services that are in the same
 		// namespace where the operator is installed
@@ -50,3 +50,14 @@ var isPluginService = func(object client.Object, operatorNamespace string) bool
 
 	return true
 }
+
+// isSecretUsedByPluginService returns true when the passed service
+// uses the secret with the passed name
+func isSecretUsedByPluginService(service client.Object, secretName string) bool {
+	annotations := service.GetAnnotations()
+
+	clientSecretName := annotations[utils.PluginClientSecretAnnotationName]
+	serverSecretName := annotations[utils.PluginServerSecretAnnotationName]
+
+	return clientSecretName == secretName || serverSecretName == secretName
+}

From 52393d62c57605dfb84dc2972a9dc78b8ff02890 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 6 Mar 2025 14:12:02 +0100
Subject: [PATCH 431/836] chore(deps): update operator framework (main) (#7001)

This PR contains the following updates:
https://github.com/operator-framework/operator-registry `v1.50.0` -> `v1.51.0`
https://github.com/redhat-openshift-ecosystem/openshift-preflight `1.12.0` -> `1.12.1`

---
 Makefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index c379f1aa9f..d6ccdb2dc0 100644
--- a/Makefile
+++ b/Makefile
@@ -48,8 +48,8 @@ GORELEASER_VERSION ?= v2.7.0
 SPELLCHECK_VERSION ?= 0.47.0
 WOKE_VERSION ?= 0.19.0
 OPERATOR_SDK_VERSION ?= v1.39.1
-OPM_VERSION ?= v1.50.0
-PREFLIGHT_VERSION ?= 1.12.0
+OPM_VERSION ?= v1.51.0
+PREFLIGHT_VERSION ?= 1.12.1
 OPENSHIFT_VERSIONS ?= v4.12-v4.18
 ARCH ?= amd64

From 8669af54f02f43446df264080f0ce3337e5e627d Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi 
Date: Thu, 6 Mar 2025 16:58:25 +0100
Subject: [PATCH 432/836] fix: delay rollout until every Pod has received the
 same configuration (#6991)

This patch delays the rollout of Pods until every instance manager has
received the latest PostgreSQL configuration and applied it.

This prevents the operator from switching over twice to apply the same
configuration when the primary has received the configuration and,
immediately afterwards, a reconciliation loop is triggered on the cluster.
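A minimal, self-contained sketch of the gating rule introduced here, using
simplified stand-ins for the `ConfigurationReport` and `IsUniform` helpers
added to `pkg/postgres/status.go` below (the actual implementation relies on
`stringset` and `ptr`, as shown in the diff):

```go
package main

import "fmt"

// Simplified stand-ins for the types this patch adds to pkg/postgres/status.go.
type ConfigurationReportEntry struct {
	PodName    string
	ConfigHash string
}

type ConfigurationReport []ConfigurationReportEntry

// IsUniform mirrors the tri-state semantics from the patch:
// nil   -> at least one pod does not report a hash (older instance manager);
// false -> pods report different configurations;
// true  -> every pod runs the same configuration.
func (r ConfigurationReport) IsUniform() *bool {
	hashes := map[string]struct{}{}
	for _, entry := range r {
		if entry.ConfigHash == "" {
			return nil
		}
		hashes[entry.ConfigHash] = struct{}{}
	}
	uniform := len(hashes) == 1
	return &uniform
}

func main() {
	report := ConfigurationReport{
		{PodName: "cluster-example-1", ConfigHash: "abc"},
		{PodName: "cluster-example-2", ConfigHash: "def"},
	}
	// The reconciler requeues (instead of rolling out) only in the
	// "reported but not uniform" case.
	if uniform := report.IsUniform(); uniform != nil && !*uniform {
		fmt.Println("configuration not yet uniform: requeue and wait")
	}
}
```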
Fixes #6956 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Jaime Silvela Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Jaime Silvela Co-authored-by: Marco Nenciarini --- internal/controller/cluster_controller.go | 19 ++++++- pkg/management/postgres/probes.go | 16 ++++++ pkg/postgres/status.go | 55 +++++++++++++++++- pkg/postgres/status_test.go | 69 +++++++++++++++++++++++ 4 files changed, 156 insertions(+), 3 deletions(-) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 1c3f0c4d68..8149bd1d24 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -860,7 +860,8 @@ func (r *ClusterReconciler) processUnschedulableInstances( func (r *ClusterReconciler) reconcilePods( ctx context.Context, cluster *apiv1.Cluster, - resources *managedResources, instancesStatus postgres.PostgresqlStatusList, + resources *managedResources, + instancesStatus postgres.PostgresqlStatusList, ) (ctrl.Result, error) { contextLogger := log.FromContext(ctx) @@ -917,7 +918,7 @@ func (r *ClusterReconciler) reconcilePods( } } - // Stop acting here if there are non-ready Pods + // Requeue here if there are non-ready Pods. // In the rest of the function we are sure that // cluster.Status.Instances == cluster.Spec.Instances and // we don't need to modify the cluster topology @@ -928,6 +929,20 @@ func (r *ClusterReconciler) reconcilePods( return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop } + report := instancesStatus.GetConfigurationReport() + + // If any pod is not reporting its configuration (i.e., uniform == nil), + // proceed with a rolling update to upgrade the instance manager + // to a version that reports the configuration status. + // If all pods report their configuration, wait until all instances + // report the same configuration. + if uniform := report.IsUniform(); uniform != nil && !*uniform { + contextLogger.Debug( + "Waiting for all Pods to have the same PostgreSQL configuration", + "configurationReport", report) + return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop + } + return r.handleRollingUpdate(ctx, cluster, instancesStatus) } diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go index e087e1111e..1b62e1819b 100644 --- a/pkg/management/postgres/probes.go +++ b/pkg/management/postgres/probes.go @@ -86,6 +86,22 @@ func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err er return result, err } + // Get the latest configuration hash from the PostgreSQL settings + rowConfigHash := superUserDB.QueryRow( + "SELECT setting FROM pg_catalog.pg_show_all_file_settings() WHERE name = $1", + postgres.CNPGConfigSha256) + if err := rowConfigHash.Scan(&result.LoadedConfigurationHash); err != nil { + if errors.Is(err, sql.ErrNoRows) { + // The applied configuration doesn't contain a CNPGConfigSha256 so probably it is not + // generated by CloudNativePG. This can occur if PostgreSQL starts with an old + // configuration before it is updated by the instance manager. This is not an issue as + // the correct configuration will be written soon. 
+ result.LoadedConfigurationHash = "" + } else { + return result, err + } + } + row := superUserDB.QueryRow( `SELECT (pg_catalog.pg_control_system()).system_identifier, diff --git a/pkg/postgres/status.go b/pkg/postgres/status.go index d3d731168a..3afafcdb6b 100644 --- a/pkg/postgres/status.go +++ b/pkg/postgres/status.go @@ -21,8 +21,10 @@ import ( "fmt" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/cloudnative-pg/machinery/pkg/types" corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -46,8 +48,10 @@ type PostgresqlStatus struct { // populated when MightBeUnavailable reported a healthy status even if it found an error MightBeUnavailableMaskedError string `json:"mightBeUnavailableMaskedError,omitempty"` - // Archiver status + // Hash of the current PostgreSQL configuration + LoadedConfigurationHash string `json:"loadedConfigurationHash,omitempty"` + // Archiver status LastArchivedWAL string `json:"lastArchivedWAL,omitempty"` LastArchivedWALTime string `json:"lastArchivedWALTime,omitempty"` LastFailedWAL string `json:"lastFailedWAL,omitempty"` @@ -419,3 +423,52 @@ func (list PostgresqlStatusList) PrimaryNames() []string { return result } + +// GetConfigurationReport generates a report on the PostgreSQL configuration +// status of each Pod in the list. +func (list PostgresqlStatusList) GetConfigurationReport() ConfigurationReport { + result := make([]ConfigurationReportEntry, len(list.Items)) + for i := range list.Items { + result[i].PodName = list.Items[i].Pod.Name + result[i].ConfigHash = list.Items[i].LoadedConfigurationHash + } + + return result +} + +// ConfigurationReportEntry contains information about the current +// PostgreSQL configuration of a Pod. +type ConfigurationReportEntry struct { + // PodName is the name of the Pod. + PodName string `json:"podName"` + + // ConfigHash is the hash of the currently loaded configuration or empty + // if the instance manager didn't report it. + ConfigHash string `json:"configHash"` +} + +// ConfigurationReport contains information about the current +// PostgreSQL configuration of each Pod. +type ConfigurationReport []ConfigurationReportEntry + +// IsUniform checks if every Pod has loaded the same PostgreSQL +// configuration. Returns: +// +// - true if every Pod reports the configuration, and the same +// configuration is used across all Pods. +// - false if every Pod reports the configuration and there +// are two Pods using different configurations. +// - nil if any Pod doesn't report the configuration. +func (report ConfigurationReport) IsUniform() *bool { + detectedConfigurationHash := stringset.New() + for _, item := range report { + if item.ConfigHash == "" { + // a Pod that isn't reporting its configuration, + // and we can't tell whether the configurations are uniform or not. + return nil + } + detectedConfigurationHash.Put(item.ConfigHash) + } + + return ptr.To(detectedConfigurationHash.Len() == 1) +} diff --git a/pkg/postgres/status_test.go b/pkg/postgres/status_test.go index f0390670b7..d3a06f762d 100644 --- a/pkg/postgres/status_test.go +++ b/pkg/postgres/status_test.go @@ -24,6 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -234,3 +235,71 @@ var _ = Describe("PostgreSQL status real", func() { }) }) }) + +var _ = Describe("Configuration report", func() { + DescribeTable( + "Configuration report", + func(report ConfigurationReport, result *bool) { + if result == nil { + Expect(report.IsUniform()).To(BeNil()) + } + Expect(report.IsUniform()).To(Equal(result)) + }, + Entry( + "with older and newer instance managers at the same time", + ConfigurationReport{ + { + PodName: "cluster-example-1", + ConfigHash: "", + }, + { + PodName: "cluster-example-2", + ConfigHash: "abc", + }, + }, + nil, + ), + Entry( + "with old instance managers", + ConfigurationReport{ + { + PodName: "cluster-example-1", + ConfigHash: "", + }, + { + PodName: "cluster-example-2", + ConfigHash: "", + }, + }, + nil, + ), + Entry( + "with instance managers that are reporting different configurations", + ConfigurationReport{ + { + PodName: "cluster-example-1", + ConfigHash: "abc", + }, + { + PodName: "cluster-example-2", + ConfigHash: "def", + }, + }, + ptr.To(false), + ), + Entry( + "with instance manager that are reporting the same configuration", + ConfigurationReport{ + { + PodName: "cluster-example-1", + ConfigHash: "abc", + }, + { + PodName: "cluster-example-2", + ConfigHash: "abc", + }, + }, + ptr.To(true), + ), + ) +}) From 4ad57e10ba8d1c3ade52658381e0f56293111ca6 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 6 Mar 2025 17:40:41 +0100 Subject: [PATCH 433/836] chore(deps): update Go version in go.mod (#7082) Update Go version from 1.23.5 to Go 1.24.1 Signed-off-by: Jonathan Gonzalez V. --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 2056e6d258..f176a47732 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/cloudnative-pg/cloudnative-pg -go 1.23.5 +go 1.24.1 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 From fa2cbd2c42387e010c86f935b73f4c8462ce45e0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 7 Mar 2025 10:38:37 +0100 Subject: [PATCH 434/836] chore(deps): update dependency golangci/golangci-lint to v1.64.6 (main) (#7084) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 55da8e56e8..4be5b7285c 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -17,7 +17,7 @@ on: # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.24.x" - GOLANGCI_LINT_VERSION: "v1.64.5" + GOLANGCI_LINT_VERSION: "v1.64.6" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.27.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" From 436ed422a8f15c762dfa50ddbbfa55c22bac7f75 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 7 Mar 2025 12:34:44 +0100 Subject: [PATCH 435/836] chore(webserver,remote): rename `ongoingRequest` parameter (#7072) This clarifies the name ambiguity, as other endpoints might access the parameter. 
Signed-off-by: Armando Ruocco --- pkg/management/postgres/webserver/remote.go | 22 ++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index d0c26c702d..a3c43eb548 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -59,11 +59,11 @@ func IsRetryableError(err *Error) bool { } type remoteWebserverEndpoints struct { - typedClient client.Client - instance *postgres.Instance - currentBackup *backupConnection - readinessChecker *readiness.Data - ongoingRequest sync.Mutex + typedClient client.Client + instance *postgres.Instance + currentBackup *backupConnection + readinessChecker *readiness.Data + ongoingBackupRequest sync.Mutex } // StartBackupRequest the required data to execute the pg_start_backup @@ -156,8 +156,8 @@ func (ws *remoteWebserverEndpoints) cleanupStaleCollections(ctx context.Context) return } - ws.ongoingRequest.Lock() - defer ws.ongoingRequest.Unlock() + ws.ongoingBackupRequest.Lock() + defer ws.ongoingBackupRequest.Unlock() if bc.data.Phase == Completed || bc.data.BackupName == "" { return @@ -331,11 +331,11 @@ func (ws *remoteWebserverEndpoints) updateInstanceManager( // nolint: gocognit func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Request) { log.Trace("request method", "method", req.Method) - if !ws.ongoingRequest.TryLock() { + if !ws.ongoingBackupRequest.TryLock() { sendUnprocessableEntityJSONResponse(w, errCodeAnotherRequestInProgress, "") return } - defer ws.ongoingRequest.Unlock() + defer ws.ongoingBackupRequest.Unlock() switch req.Method { case http.MethodGet: @@ -390,7 +390,7 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ sendUnprocessableEntityJSONResponse(w, "CANNOT_INITIALIZE_CONNECTION", err.Error()) return } - go ws.currentBackup.startBackup(context.Background(), &ws.ongoingRequest) + go ws.currentBackup.startBackup(context.Background(), &ws.ongoingBackupRequest) res := Response[BackupResultData]{ Data: &ws.currentBackup.data, @@ -449,7 +449,7 @@ func (ws *remoteWebserverEndpoints) backup(w http.ResponseWriter, req *http.Requ ws.currentBackup.data.Phase = Closing - go ws.currentBackup.stopBackup(context.Background(), &ws.ongoingRequest) + go ws.currentBackup.stopBackup(context.Background(), &ws.ongoingBackupRequest) sendJSONResponseWithData(w, 200, res) return } From d1c63c6a72778a9bf4ebd5880433f6977d88a07d Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 7 Mar 2025 13:41:07 +0100 Subject: [PATCH 436/836] fix(plugin,backup): close plugin connection (#7095) The operator now correctly closes the plugin connection when initiating a backup using the plugin. 
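A brief sketch of the leak pattern addressed by the one-line fix below; the
`closer` interface and `pluginClient` type are illustrative stand-ins, not
the actual plugin client API:

```go
package main

import (
	"context"
	"fmt"
)

// closer stands in for the plugin client; only Close matters here.
type closer interface {
	Close(ctx context.Context) error
}

type pluginClient struct{}

func (pluginClient) Close(context.Context) error {
	fmt.Println("plugin connection closed")
	return nil
}

func invokeStart(ctx context.Context, cli closer) {
	// Deferring Close right after a successful dial guarantees the
	// connection is released on every return path, including errors.
	defer func() { _ = cli.Close(ctx) }()
	// ... invoke the plugin's backup start here ...
}

func main() {
	invokeStart(context.Background(), pluginClient{})
}
```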
Signed-off-by: Armando Ruocco --- pkg/management/postgres/webserver/plugin_backup.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 44cd2afeba..f91df3f8f7 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -116,6 +116,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { b.markBackupAsFailed(ctx, err) return } + defer cli.Close(ctx) // record the backup beginning contextLogger.Info("Plugin backup started") From 8e78db292073934b70ba00e684e57c6e0c4687f0 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 7 Mar 2025 14:05:30 +0100 Subject: [PATCH 437/836] fix(repository,plugin): ensure that `repository` is closed (#7096) This patch ensures that we only open the plugin repository when necessary, and that we close it when done. Signed-off-by: Armando Ruocco --- internal/cmd/manager/controller/controller.go | 1 + pkg/management/postgres/webserver/plugin_backup.go | 10 ---------- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index 93d8315c97..38d93a07df 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ -217,6 +217,7 @@ func RunController( ); err != nil { setupLog.Error(err, "Unable to load sidecar CNPG-i plugins, skipping") } + defer pluginRepository.Close() if err = controller.NewClusterReconciler( mgr, diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index f91df3f8f7..3fe1292d80 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -57,16 +57,6 @@ func NewPluginBackupCommand( ) *PluginBackupCommand { backup.EnsureGVKIsPresent() - logger := log.WithValues( - "pluginConfiguration", backup.Spec.PluginConfiguration, - "backupName", backup.Name, - "backupNamespace", backup.Name) - - plugins := repository.New() - if _, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { - logger.Error(err, "Error while discovering plugins") - } - return &PluginBackupCommand{ Cluster: cluster, Backup: backup, From 2998d8fd50948e9b7ba0f87b314bc4f45ddd0ba9 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 7 Mar 2025 14:43:20 +0100 Subject: [PATCH 438/836] docs: clarify labels and annotations for backups (#7085) Closes #7083 Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- api/v1/backup_types.go | 2 +- .../crd/bases/postgresql.cnpg.io_backups.yaml | 3 +- docs/src/cloudnative-pg.v1.md | 2 +- docs/src/labels_annotations.md | 40 +++++++++++-------- 4 files changed, 28 insertions(+), 19 deletions(-) diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go index f8a01fb1d3..2c7ad3d5f8 100644 --- a/api/v1/backup_types.go +++ b/api/v1/backup_types.go @@ -314,7 +314,7 @@ type InstanceID struct { // +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase" // +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error" -// Backup is the Schema for the backups API +// A Backup resource is a request for a PostgreSQL backup by the user. 
type Backup struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`
diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml
index 2d352fbf40..aecdbd49eb 100644
--- a/config/crd/bases/postgresql.cnpg.io_backups.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml
@@ -33,7 +33,8 @@ spec:
     name: v1
     schema:
       openAPIV3Schema:
-        description: Backup is the Schema for the backups API
+        description: A Backup resource is a request for a PostgreSQL backup by the
+          user.
         properties:
           apiVersion:
             description: |-
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 236213281c..74e43468ad 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -20,7 +20,7 @@
-<p>Backup is the Schema for the backups API</p>
+<p>A Backup resource is a request for a PostgreSQL backup by the user.</p>
diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 9acdbb13a6..677ee5636e 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -28,50 +28,55 @@ they're inherited by all resources created by it (including pods). ## Predefined labels -These predefined labels are managed by CloudNativePG. +CloudNativePG manages the following predefined labels: `cnpg.io/backupDate` -: The date of the backup in ISO 8601 format (`YYYYMMDD`) +: The date of the backup in ISO 8601 format (`YYYYMMDD`). + This label is available only on `VolumeSnapshot` resources. `cnpg.io/backupName` -: Backup identifier, available only on `Backup` and `VolumeSnapshot` - resources +: Backup identifier. + This label is available only on `VolumeSnapshot` resources. `cnpg.io/backupMonth` -: The year/month when a backup was taken +: The year/month when a backup was taken. + This label is available only on `VolumeSnapshot` resources. `cnpg.io/backupTimeline` -: The timeline of the instance when a backup was taken +: The timeline of the instance when a backup was taken. + This label is available only on `VolumeSnapshot` resources. `cnpg.io/backupYear` -: The year a backup was taken +: The year a backup was taken. + This label is available only on `VolumeSnapshot` resources. `cnpg.io/cluster` -: Name of the cluster +: Name of the cluster. `cnpg.io/immediateBackup` : Applied to a `Backup` resource if the backup is the first one created from - a `ScheduledBackup` object having `immediate` set to `true` + a `ScheduledBackup` object having `immediate` set to `true`. `cnpg.io/instanceName` : Name of the PostgreSQL instance (replaces the old and - deprecated `postgresql` label) + deprecated `postgresql` label). `cnpg.io/jobRole` : Role of the job (that is, `import`, `initdb`, `join`, ...) `cnpg.io/onlineBackup` -: Whether the backup is online (hot) or taken when Postgres is down (cold) +: Whether the backup is online (hot) or taken when Postgres is down (cold). + This label is available only on `VolumeSnapshot` resources. `cnpg.io/podRole` : Distinguishes pods dedicated to pooler deployment from those used for - database instances + database instances. `cnpg.io/poolerName` -: Name of the PgBouncer pooler +: Name of the PgBouncer pooler. `cnpg.io/pvcRole` -: Purpose of the PVC, such as `PG_DATA` or `PG_WAL` +: Purpose of the PVC, such as `PG_DATA` or `PG_WAL`. `cnpg.io/reload` : Available on `ConfigMap` and `Secret` resources. When set to `true`, @@ -89,7 +94,7 @@ These predefined labels are managed by CloudNativePG. `cnpg.io/scheduled-backup` : When available, name of the `ScheduledBackup` resource that created a given - `Backup` object + `Backup` object. `cnpg.io/instanceRole` : Whether the instance running in a pod is a `primary` or a `replica`. @@ -97,7 +102,7 @@ These predefined labels are managed by CloudNativePG. ## Predefined annotations -These predefined annotations are managed by CloudNativePG. +CloudNativePG manages the following predefined annotations: `container.apparmor.security.beta.kubernetes.io/*` : Name of the AppArmor profile to apply to the named container. @@ -106,15 +111,18 @@ These predefined annotations are managed by CloudNativePG. `cnpg.io/backupEndTime` : The time a backup ended. + This annotation is available only on `VolumeSnapshot` resources. `cnpg.io/backupEndWAL` : The WAL at the conclusion of a backup. + This annotation is available only on `VolumeSnapshot` resources. `cnpg.io/backupStartTime` : The time a backup started. 
`cnpg.io/backupStartWAL` : The WAL at the start of a backup. + This annotation is available only on `VolumeSnapshot` resources. `cnpg.io/coredumpFilter` : Filter to control the coredump of Postgres processes, expressed with a From 47e065cd72dc40f0b404f4fb002b382366afd78b Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 7 Mar 2025 15:10:59 +0100 Subject: [PATCH 439/836] refactor(backup-client): move the backup client to the remote client (#6902) Previously, the backup client was incorrectly included in the local client, as the backup endpoints are exposed on the remote web server. Signed-off-by: Armando Ruocco --- .../postgres/webserver/client/local/local.go | 7 ------- .../client/{local => remote}/backup.go | 9 ++++----- .../webserver/client/remote/instance.go | 9 --------- .../postgres/webserver/client/remote/remote.go | 18 +++++++++++++++++- .../client/{local => remote}/request.go | 3 ++- pkg/reconciler/backup/volumesnapshot/online.go | 6 +++--- 6 files changed, 26 insertions(+), 26 deletions(-) rename pkg/management/postgres/webserver/client/{local => remote}/backup.go (93%) rename pkg/management/postgres/webserver/client/{local => remote}/request.go (93%) diff --git a/pkg/management/postgres/webserver/client/local/local.go b/pkg/management/postgres/webserver/client/local/local.go index 5b91d3e41a..3e8df28922 100644 --- a/pkg/management/postgres/webserver/client/local/local.go +++ b/pkg/management/postgres/webserver/client/local/local.go @@ -24,13 +24,11 @@ import ( // Client is an entity capable of interacting with the local webserver endpoints type Client interface { - Backup() BackupClient Cache() CacheClient Cluster() ClusterClient } type localClient struct { - backup BackupClient cache CacheClient cluster ClusterClient } @@ -43,16 +41,11 @@ func NewClient() Client { standardClient := common.NewHTTPClient(connectionTimeout, requestTimeout) return &localClient{ - backup: &backupClientImpl{cli: standardClient}, cache: &cacheClientImpl{cli: standardClient}, cluster: &clusterClientImpl{cli: standardClient}, } } -func (c *localClient) Backup() BackupClient { - return c.backup -} - func (c *localClient) Cache() CacheClient { return c.cache } diff --git a/pkg/management/postgres/webserver/client/local/backup.go b/pkg/management/postgres/webserver/client/remote/backup.go similarity index 93% rename from pkg/management/postgres/webserver/client/local/backup.go rename to pkg/management/postgres/webserver/client/remote/backup.go index 0e30140738..0740eed40b 100644 --- a/pkg/management/postgres/webserver/client/local/backup.go +++ b/pkg/management/postgres/webserver/client/remote/backup.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package local +package remote import ( "bytes" @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" ) @@ -56,7 +55,7 @@ func (c *backupClientImpl) StatusWithErrors( ctx context.Context, pod *corev1.Pod, ) (*webserver.Response[webserver.BackupResultData], error) { - scheme := remote.GetStatusSchemeFromPod(pod) + scheme := GetStatusSchemeFromPod(pod) httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil) if err != nil { @@ -72,7 +71,7 @@ func (c *backupClientImpl) Start( pod *corev1.Pod, sbq webserver.StartBackupRequest, ) (*webserver.Response[webserver.BackupResultData], error) { - scheme := remote.GetStatusSchemeFromPod(pod) + scheme := GetStatusSchemeFromPod(pod) httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) // Marshalling the payload to JSON @@ -96,7 +95,7 @@ func (c *backupClientImpl) Stop( pod *corev1.Pod, sbq webserver.StopBackupRequest, ) (*webserver.Response[webserver.BackupResultData], error) { - scheme := remote.GetStatusSchemeFromPod(pod) + scheme := GetStatusSchemeFromPod(pod) httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort) // Marshalling the payload to JSON jsonBody, err := json.Marshal(sbq) diff --git a/pkg/management/postgres/webserver/client/remote/instance.go b/pkg/management/postgres/webserver/client/remote/instance.go index 6c158402fe..0f526251f9 100644 --- a/pkg/management/postgres/webserver/client/remote/instance.go +++ b/pkg/management/postgres/webserver/client/remote/instance.go @@ -34,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" @@ -97,14 +96,6 @@ func (i StatusError) Error() string { return fmt.Sprintf("error status code: %v, body: %v", i.StatusCode, i.Body) } -// newInstanceClient returns a client capable of querying the instance HTTP endpoints -func newInstanceClient() InstanceClient { - const connectionTimeout = 2 * time.Second - const requestTimeout = 10 * time.Second - - return &instanceClientImpl{Client: common.NewHTTPClient(connectionTimeout, requestTimeout)} -} - // extractInstancesStatus extracts the status of the underlying PostgreSQL instance from // the requested Pod, via the instance manager. In case of failure, errors are passed // in the result list diff --git a/pkg/management/postgres/webserver/client/remote/remote.go b/pkg/management/postgres/webserver/client/remote/remote.go index 2b6a375e0e..a70524a3fc 100644 --- a/pkg/management/postgres/webserver/client/remote/remote.go +++ b/pkg/management/postgres/webserver/client/remote/remote.go @@ -16,13 +16,25 @@ limitations under the License. 
package remote +import ( + "time" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/common" +) + // Client is the interface to interact with the remote webserver type Client interface { Instance() InstanceClient + Backup() BackupClient } type remoteClientImpl struct { instance InstanceClient + backup *backupClientImpl +} + +func (r *remoteClientImpl) Backup() BackupClient { + return r.backup } func (r *remoteClientImpl) Instance() InstanceClient { @@ -31,7 +43,11 @@ func (r *remoteClientImpl) Instance() InstanceClient { // NewClient creates a new remote client func NewClient() Client { + const connectionTimeout = 2 * time.Second + const requestTimeout = 10 * time.Second + return &remoteClientImpl{ - instance: newInstanceClient(), + instance: &instanceClientImpl{Client: common.NewHTTPClient(connectionTimeout, requestTimeout)}, + backup: &backupClientImpl{cli: common.NewHTTPClient(connectionTimeout, requestTimeout)}, } } diff --git a/pkg/management/postgres/webserver/client/local/request.go b/pkg/management/postgres/webserver/client/remote/request.go similarity index 93% rename from pkg/management/postgres/webserver/client/local/request.go rename to pkg/management/postgres/webserver/client/remote/request.go index efc3a2c7c5..a08e767e50 100644 --- a/pkg/management/postgres/webserver/client/local/request.go +++ b/pkg/management/postgres/webserver/client/remote/request.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package local +package remote import ( "context" @@ -28,6 +28,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" ) +// executeRequestWithError executes an http request and returns a webserver.response and any error encountered func executeRequestWithError[T any]( ctx context.Context, cli *http.Client, diff --git a/pkg/reconciler/backup/volumesnapshot/online.go b/pkg/reconciler/backup/volumesnapshot/online.go index 99dba6b6ec..f57c012909 100644 --- a/pkg/reconciler/backup/volumesnapshot/online.go +++ b/pkg/reconciler/backup/volumesnapshot/online.go @@ -26,15 +26,15 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/remote" ) type onlineExecutor struct { - backupClient local.BackupClient + backupClient remote.BackupClient } func newOnlineExecutor() *onlineExecutor { - return &onlineExecutor{backupClient: local.NewClient().Backup()} + return &onlineExecutor{backupClient: remote.NewClient().Backup()} } func (o *onlineExecutor) finalize( From 962d4041f7b081e6c4d80ed54445a12f1e2cf1b6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 7 Mar 2025 15:54:54 +0100 Subject: [PATCH 440/836] fix(deps): update all non-major go dependencies (main) (#7091) This PR contains the following updates: https://github.com/grpc-ecosystem/go-grpc-middleware `v2.2.0` -> `v2.3.1` https://github.com/onsi/ginkgo `v2.22.2` -> `v2.23.0` https://github.com/prometheus/client_golang `v1.21.0` -> `v1.21.1` golang.org/x/term `v0.29.0` -> `v0.30.0` https://github.com/grpc/grpc-go `v1.70.0` -> `v1.71.0` --- go.mod | 16 ++++++++-------- go.sum | 54 ++++++++++++++++++++++++++++-------------------------- 2 files changed, 36 insertions(+), 34 
deletions(-) diff --git a/go.mod b/go.mod index f176a47732..74dba44d6b 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.11 github.com/go-logr/logr v1.4.2 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 github.com/jackc/pgx/v5 v5.7.2 github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 @@ -23,10 +23,10 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/ginkgo/v2 v2.23.0 github.com/onsi/gomega v1.36.2 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 - github.com/prometheus/client_golang v1.21.0 + github.com/prometheus/client_golang v1.21.1 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 github.com/spf13/cobra v1.9.1 @@ -35,8 +35,8 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/term v0.29.0 - google.golang.org/grpc v1.70.0 + golang.org/x/term v0.30.0 + google.golang.org/grpc v1.71.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.2 k8s.io/apiextensions-apiserver v0.32.2 @@ -102,12 +102,12 @@ require ( golang.org/x/net v0.35.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sync v0.11.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/sys v0.31.0 // indirect golang.org/x/text v0.22.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.28.0 // indirect + golang.org/x/tools v0.30.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 276e5926bc..6d12cc0a85 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 h1:KcFzXwzM/kGhIRHvc8jdixfIJjVzuUJdnv+5xsPutog= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -143,8 +143,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= -github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ= +github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -156,8 +156,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 h1:DP+PUNVOc+Bkft8a4QunLzaZ0RspWuD3tBbcPHr2PeE= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M= -github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA= -github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= +github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= @@ -195,16 +195,18 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod 
h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -238,10 +240,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= @@ -252,18 +254,18 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= 
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1:hgh8P4EuoxpsuKMXX/To36nOFD7vixReXgn8lPGnt+o= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= -google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= -google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 70d8664e03e7b6b12f93e47b6e90d22ab97d9b68 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 7 Mar 2025 16:39:01 +0100 Subject: [PATCH 441/836] fix(PodMonitor): add proper `matchLabels` constraints (#7063) The generated `PodMonitors` now have an appropriate `matchLabels` scope for the targeted pooler and cluster pods. Previously, the `matchLabels` were too broad and wrongly inherited labels from the cluster, leading to data collection from unwanted targets. The following `matchLabels` will be generated: For PodMonitors targeting cluster instance pods ``` "cnpg.io/cluster": , "cnpg.io/podRole": "instance" ``` For PodMonitors targeting pooler pods ``` "cnpg.io/poolerName": "cnpg.io/podRole": "pooler" ``` Closes #7006 Signed-off-by: Armando Ruocco --- pkg/specs/pgbouncer/podmonitor.go | 5 ++++- pkg/specs/pgbouncer/podmonitor_test.go | 1 + pkg/specs/podmonitor.go | 6 +++++- pkg/specs/podmonitor_test.go | 10 ++++++++-- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/pkg/specs/pgbouncer/podmonitor.go b/pkg/specs/pgbouncer/podmonitor.go index ff7962e3b9..4ea6abb33b 100644 --- a/pkg/specs/pgbouncer/podmonitor.go +++ b/pkg/specs/pgbouncer/podmonitor.go @@ -63,7 +63,10 @@ func (c PoolerPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor { spec := monitoringv1.PodMonitorSpec{ Selector: metav1.LabelSelector{ - MatchLabels: meta.Labels, + MatchLabels: map[string]string{ + utils.PgbouncerNameLabel: c.pooler.Name, + utils.PodRoleLabelName: string(utils.PodRolePooler), + }, }, PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{endpoint}, } diff --git a/pkg/specs/pgbouncer/podmonitor_test.go b/pkg/specs/pgbouncer/podmonitor_test.go index 66a16fd644..49db8ab4e0 100644 --- a/pkg/specs/pgbouncer/podmonitor_test.go +++ b/pkg/specs/pgbouncer/podmonitor_test.go @@ -76,6 +76,7 @@ var _ = Describe("PoolerPodMonitorManager", func() { Expect(podMonitor.Spec.Selector.MatchLabels).To(Equal(map[string]string{ utils.PgbouncerNameLabel: pooler.Name, + utils.PodRoleLabelName: string(utils.PodRolePooler), })) Expect(podMonitor.Spec.PodMetricsEndpoints).To(HaveLen(1)) diff --git a/pkg/specs/podmonitor.go b/pkg/specs/podmonitor.go index 4a7ab863ef..7d1e00bfcb 100644 --- a/pkg/specs/podmonitor.go +++ b/pkg/specs/podmonitor.go @@ -24,6 +24,7 @@ import ( apiv1 
"github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // ClusterPodMonitorManager builds the PodMonitor for the cluster resource @@ -74,7 +75,10 @@ func (c ClusterPodMonitorManager) BuildPodMonitor() *monitoringv1.PodMonitor { spec := monitoringv1.PodMonitorSpec{ Selector: metav1.LabelSelector{ - MatchLabels: meta.Labels, + MatchLabels: map[string]string{ + utils.ClusterLabelName: c.cluster.Name, + utils.PodRoleLabelName: string(utils.PodRoleInstance), + }, }, PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{endpoint}, } diff --git a/pkg/specs/podmonitor_test.go b/pkg/specs/podmonitor_test.go index b043eb5fe5..1673d6f28a 100644 --- a/pkg/specs/podmonitor_test.go +++ b/pkg/specs/podmonitor_test.go @@ -62,8 +62,14 @@ var _ = Describe("PodMonitor test", func() { It("should create a valid monitoringv1.PodMonitor object", func() { mgr := NewClusterPodMonitorManager(cluster.DeepCopy()) monitor := mgr.BuildPodMonitor() - Expect(monitor.Labels[utils.ClusterLabelName]).To(Equal(cluster.Name)) - Expect(monitor.Spec.Selector.MatchLabels[utils.ClusterLabelName]).To(Equal(cluster.Name)) + Expect(monitor.Labels).To(BeEquivalentTo(map[string]string{ + utils.ClusterLabelName: cluster.Name, + })) + Expect(monitor.Spec.Selector.MatchLabels).To(BeEquivalentTo(map[string]string{ + utils.ClusterLabelName: cluster.Name, + utils.PodRoleLabelName: string(utils.PodRoleInstance), + })) + Expect(monitor.Spec.PodMetricsEndpoints).To(ContainElement(expectedEndpoint)) }) From af65773138c1754761e7e95a47e255f61815e949 Mon Sep 17 00:00:00 2001 From: Jeff Mealo Date: Fri, 7 Mar 2025 11:44:23 -0500 Subject: [PATCH 442/836] chore(webserver,update): retrieve instance architecture using Go runtime (#6884) The instance architecture is now determined using the Go runtime's `runtime.GOARCH` variable, eliminating the need for an SQL connection. This enhancement improves efficiency by reducing unnecessary database interactions and simplifies the codebase by leveraging Go's built-in capabilities. This change is internal and does not affect the existing functionality of the system. Relates to #6599 #6761 Signed-off-by: Jeff Mealo Signed-off-by: Jeff Mealo Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- pkg/management/postgres/instance.go | 6 ++++++ pkg/management/postgres/probes.go | 3 +-- pkg/management/upgrade/upgrade.go | 7 +------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 02906ac396..362172bdb0 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -28,6 +28,7 @@ import ( "os/exec" "path" "path/filepath" + "runtime" "strconv" "time" @@ -1151,6 +1152,11 @@ func (instance *Instance) GetNamespaceName() string { return instance.namespace } +// GetArchitecture returns the runtime architecture +func (instance *Instance) GetArchitecture() string { + return runtime.GOARCH +} + // RequestFastImmediateShutdown request the lifecycle manager to shut down // PostgreSQL using the fast strategy and then the immediate strategy. 
func (instance *Instance) RequestFastImmediateShutdown() { diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go index 1b62e1819b..bebc986d9a 100644 --- a/pkg/management/postgres/probes.go +++ b/pkg/management/postgres/probes.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "path/filepath" - "runtime" "strings" "github.com/cloudnative-pg/machinery/pkg/fileutils" @@ -126,7 +125,7 @@ func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err er return result, err } - result.InstanceArch = runtime.GOARCH + result.InstanceArch = instance.GetArchitecture() result.ExecutableHash, err = executablehash.Get() if err != nil { diff --git a/pkg/management/upgrade/upgrade.go b/pkg/management/upgrade/upgrade.go index 7c08c0bb24..3c21d4a727 100644 --- a/pkg/management/upgrade/upgrade.go +++ b/pkg/management/upgrade/upgrade.go @@ -69,11 +69,6 @@ func FromReader( "name", updatedInstanceManager.Name(), "err", err) } }() - // Gather the status of the instance - instanceStatus, err := instance.GetStatus() - if err != nil { - return fmt.Errorf("while retrieving instance's status: %w", err) - } // Read the new instance manager version newHash, err := downloadAndCloseInstanceManagerBinary(updatedInstanceManager, r) @@ -84,7 +79,7 @@ func FromReader( // Validate the hash of this instance manager if err := validateInstanceManagerHash(typedClient, instance.GetClusterName(), instance.GetNamespaceName(), - instanceStatus.InstanceArch, newHash); err != nil { + instance.GetArchitecture(), newHash); err != nil { return fmt.Errorf("while validating instance manager binary: %w", err) } From 7eb255111379ffd7c5a69061997d57106b888973 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Fri, 7 Mar 2025 18:43:08 +0100 Subject: [PATCH 443/836] fix(metrics): adapt pg_stat_wal metrics collection for PostgreSQL 18 compatibility (#7005) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adjust the metrics collected from the `pg_stat_wal` system view to align with the changes introduced in PostgreSQL 18. Closes #7004 Signed-off-by: Niccolò Fei Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Gabriele Bartolini --- pkg/management/postgres/probes.go | 65 +++++++++++++------ .../webserver/metricserver/pg_collector.go | 41 +++++++----- .../postgres/webserver/metricserver/wal.go | 16 +++-- tests/e2e/asserts_test.go | 14 ++-- 4 files changed, 86 insertions(+), 50 deletions(-) diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go index bebc986d9a..832d57a158 100644 --- a/pkg/management/postgres/probes.go +++ b/pkg/management/postgres/probes.go @@ -583,7 +583,7 @@ func (instance *Instance) IsWALReceiverActive() (bool, error) { return result, nil } -// PgStatWal is a representation of the pg_stat_wal table +// PgStatWal is a representation of the pg_stat_wal table, introduced in PostgreSQL 14. 
type PgStatWal struct { WalRecords int64 WalFpi int64 @@ -596,7 +596,7 @@ type PgStatWal struct { StatsReset string } -// TryGetPgStatWAL retrieves pg_wal_stat on pg version 14 and further +// TryGetPgStatWAL retrieves pg_stat_wal on pg version 14 and further func (instance *Instance) TryGetPgStatWAL() (*PgStatWal, error) { version, err := instance.GetPgVersion() if err != nil || version.Major < 14 { @@ -608,31 +608,56 @@ func (instance *Instance) TryGetPgStatWAL() (*PgStatWal, error) { return nil, err } + // Since PostgreSQL 18, `wal_write`, `wal_sync`, `wal_write_time` and + // `wal_sync_time` have been removed. + // See https://github.com/postgres/postgres/commit/2421e9a51d20bb83154e54a16ce628f9249fa907 var pgWalStat PgStatWal - row := superUserDB.QueryRow( - `SELECT + if version.Major < 18 { + row := superUserDB.QueryRow( + `SELECT + wal_records, + wal_fpi, + wal_bytes, + wal_buffers_full, + wal_write, + wal_sync, + wal_write_time, + wal_sync_time, + stats_reset + FROM pg_catalog.pg_stat_wal`) + if err := row.Scan( + &pgWalStat.WalRecords, + &pgWalStat.WalFpi, + &pgWalStat.WalBytes, + &pgWalStat.WALBuffersFull, + &pgWalStat.WalWrite, + &pgWalStat.WalSync, + &pgWalStat.WalWriteTime, + &pgWalStat.WalSyncTime, + &pgWalStat.StatsReset, + ); err != nil { + return nil, err + } + } + + if version.Major >= 18 { + row := superUserDB.QueryRow( + `SELECT wal_records, wal_fpi, wal_bytes, wal_buffers_full, - wal_write, - wal_sync, - wal_write_time, - wal_sync_time, stats_reset FROM pg_catalog.pg_stat_wal`) - if err := row.Scan( - &pgWalStat.WalRecords, - &pgWalStat.WalFpi, - &pgWalStat.WalBytes, - &pgWalStat.WALBuffersFull, - &pgWalStat.WalWrite, - &pgWalStat.WalSync, - &pgWalStat.WalWriteTime, - &pgWalStat.WalSyncTime, - &pgWalStat.StatsReset, - ); err != nil { - return nil, err + if err := row.Scan( + &pgWalStat.WalRecords, + &pgWalStat.WalFpi, + &pgWalStat.WalBytes, + &pgWalStat.WALBuffersFull, + &pgWalStat.StatsReset, + ); err != nil { + return nil, err + } } return &pgWalStat, nil diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index 2fd2cc7ad0..392ce31c8a 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -232,7 +232,8 @@ func newMetrics() *metrics { Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "wal_write", - Help: "Number of times WAL buffers were written out to disk via XLogWrite request. Only available on PG 14+", + Help: "Number of times WAL buffers were written out to disk via XLogWrite request." + + " Only available on PG 14 to 17.", }, []string{"stats_reset"}), WalSync: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, @@ -240,7 +241,7 @@ func newMetrics() *metrics { Name: "wal_sync", Help: "Number of times WAL files were synced to disk via issue_xlog_fsync request " + "(if fsync is on and wal_sync_method is either fdatasync, fsync or fsync_writethrough, otherwise zero)." + - " Only available on PG 14+", + " Only available on PG 14 to 17.", }, []string{"stats_reset"}), WalWriteTime: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, @@ -249,7 +250,7 @@ func newMetrics() *metrics { Help: "Total amount of time spent writing WAL buffers to disk via XLogWrite request, in milliseconds " + "(if track_wal_io_timing is enabled, otherwise zero). This includes the sync time when wal_sync_method " + "is either open_datasync or open_sync." 
+ - " Only available on PG 14+", + " Only available on PG 14 to 17.", }, []string{"stats_reset"}), WalSyncTime: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, @@ -257,7 +258,7 @@ func newMetrics() *metrics { Name: "wal_sync_time", Help: "Total amount of time spent syncing WAL files to disk via issue_xlog_fsync request, in milliseconds " + "(if track_wal_io_timing is enabled, fsync is on, and wal_sync_method is either fdatasync, fsync or " + - "fsync_writethrough, otherwise zero). Only available on PG 14+", + "fsync_writethrough, otherwise zero). Only available on PG 14 to 17.", }, []string{"stats_reset"}), }, } @@ -287,14 +288,16 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { } if version, _ := e.instance.GetPgVersion(); version.Major >= 14 { - e.Metrics.PgStatWalMetrics.WalSync.Describe(ch) - e.Metrics.PgStatWalMetrics.WalWriteTime.Describe(ch) - e.Metrics.PgStatWalMetrics.WalFpi.Describe(ch) - e.Metrics.PgStatWalMetrics.WalWrite.Describe(ch) - e.Metrics.PgStatWalMetrics.WalSyncTime.Describe(ch) e.Metrics.PgStatWalMetrics.WalRecords.Describe(ch) - e.Metrics.PgStatWalMetrics.WALBuffersFull.Describe(ch) + e.Metrics.PgStatWalMetrics.WalFpi.Describe(ch) e.Metrics.PgStatWalMetrics.WalBytes.Describe(ch) + e.Metrics.PgStatWalMetrics.WALBuffersFull.Describe(ch) + if version.Major < 18 { + e.Metrics.PgStatWalMetrics.WalWrite.Describe(ch) + e.Metrics.PgStatWalMetrics.WalSync.Describe(ch) + e.Metrics.PgStatWalMetrics.WalWriteTime.Describe(ch) + e.Metrics.PgStatWalMetrics.WalSyncTime.Describe(ch) + } } } @@ -321,14 +324,16 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { e.Metrics.NodesUsed.Collect(ch) if version, _ := e.instance.GetPgVersion(); version.Major >= 14 { - e.Metrics.PgStatWalMetrics.WalSync.Collect(ch) - e.Metrics.PgStatWalMetrics.WalWriteTime.Collect(ch) - e.Metrics.PgStatWalMetrics.WalFpi.Collect(ch) - e.Metrics.PgStatWalMetrics.WalWrite.Collect(ch) - e.Metrics.PgStatWalMetrics.WalSyncTime.Collect(ch) e.Metrics.PgStatWalMetrics.WalRecords.Collect(ch) - e.Metrics.PgStatWalMetrics.WALBuffersFull.Collect(ch) + e.Metrics.PgStatWalMetrics.WalFpi.Collect(ch) e.Metrics.PgStatWalMetrics.WalBytes.Collect(ch) + e.Metrics.PgStatWalMetrics.WALBuffersFull.Collect(ch) + if version.Major < 18 { + e.Metrics.PgStatWalMetrics.WalWrite.Collect(ch) + e.Metrics.PgStatWalMetrics.WalSync.Collect(ch) + e.Metrics.PgStatWalMetrics.WalWriteTime.Collect(ch) + e.Metrics.PgStatWalMetrics.WalSyncTime.Collect(ch) + } } } @@ -423,8 +428,8 @@ func (e *Exporter) collectPgMetrics(ch chan<- prometheus.Metric) { } if version, _ := e.instance.GetPgVersion(); version.Major >= 14 { - if err := collectPGWALStat(e); err != nil { - log.Error(err, "while collecting pg_wal_stat") + if err := collectPGStatWAL(e); err != nil { + log.Error(err, "while collecting pg_stat_wal") e.Metrics.Error.Set(1) e.Metrics.PgCollectionErrors.WithLabelValues("Collect.PGWALStat").Inc() } diff --git a/pkg/management/postgres/webserver/metricserver/wal.go b/pkg/management/postgres/webserver/metricserver/wal.go index 6d0aa73245..3f855f12a6 100644 --- a/pkg/management/postgres/webserver/metricserver/wal.go +++ b/pkg/management/postgres/webserver/metricserver/wal.go @@ -41,20 +41,22 @@ func collectPGWalArchiveMetric(exporter *Exporter) error { return nil } -func collectPGWALStat(e *Exporter) error { +func collectPGStatWAL(e *Exporter) error { walStat, err := e.instance.TryGetPgStatWAL() if walStat == nil || err != nil { return err } walMetrics := e.Metrics.PgStatWalMetrics - 
walMetrics.WalSync.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalSync)) - walMetrics.WalSyncTime.WithLabelValues(walStat.StatsReset).Set(walStat.WalSyncTime) - walMetrics.WALBuffersFull.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WALBuffersFull)) + walMetrics.WalRecords.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalRecords)) walMetrics.WalFpi.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalFpi)) - walMetrics.WalWrite.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalWrite)) walMetrics.WalBytes.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalBytes)) - walMetrics.WalWriteTime.WithLabelValues(walStat.StatsReset).Set(walStat.WalWriteTime) - walMetrics.WalRecords.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalRecords)) + walMetrics.WALBuffersFull.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WALBuffersFull)) + if version, _ := e.instance.GetPgVersion(); version.Major < 18 { + walMetrics.WalWrite.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalWrite)) + walMetrics.WalSync.WithLabelValues(walStat.StatsReset).Set(float64(walStat.WalSync)) + walMetrics.WalWriteTime.WithLabelValues(walStat.StatsReset).Set(walStat.WalWriteTime) + walMetrics.WalSyncTime.WithLabelValues(walStat.StatsReset).Set(walStat.WalSyncTime) + } return nil } diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 5e4ef696dc..7a302b5b2e 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -2542,17 +2542,21 @@ func collectAndAssertCollectorMetricsPresentOnEachPod(cluster *apiv1.Cluster) { "cnpg_collector_replica_mode", } - if env.PostgresVersion > 14 { + if env.PostgresVersion >= 14 { cnpgCollectorMetrics = append(cnpgCollectorMetrics, "cnpg_collector_wal_records", "cnpg_collector_wal_fpi", "cnpg_collector_wal_bytes", "cnpg_collector_wal_buffers_full", - "cnpg_collector_wal_write", - "cnpg_collector_wal_sync", - "cnpg_collector_wal_write_time", - "cnpg_collector_wal_sync_time", ) + if env.PostgresVersion < 18 { + cnpgCollectorMetrics = append(cnpgCollectorMetrics, + "cnpg_collector_wal_write", + "cnpg_collector_wal_sync", + "cnpg_collector_wal_write_time", + "cnpg_collector_wal_sync_time", + ) + } } By("collecting and verify set of collector metrics on each pod", func() { podList, err := clusterutils.ListPods(env.Ctx, env.Client, cluster.Namespace, cluster.Name) From 82d09bff756a8b28e9483a4f3a925163c8807fac Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 09:54:59 +0100 Subject: [PATCH 444/836] fix(deps): update module sigs.k8s.io/controller-runtime to v0.20.3 (main) (#7106) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 74dba44d6b..12edc1f47e 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( k8s.io/cli-runtime v0.32.2 k8s.io/client-go v0.32.2 k8s.io/utils v0.0.0-20241210054802-24370beab758 - sigs.k8s.io/controller-runtime v0.20.2 + sigs.k8s.io/controller-runtime v0.20.3 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 6d12cc0a85..bb64e43baf 100644 --- a/go.sum +++ b/go.sum @@ -295,8 +295,8 @@ k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8X k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.20.2 h1:/439OZVxoEc02psi1h4QO3bHzTgu49bb347Xp4gW1pc= -sigs.k8s.io/controller-runtime v0.20.2/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-runtime v0.20.3 h1:I6Ln8JfQjHH7JbtCD2HCYHoIzajoRxPNuvhvcDbZgkI= +sigs.k8s.io/controller-runtime v0.20.3/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= From 3bafb17497f388f19d1227d2263b9de306b242eb Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 10 Mar 2025 17:31:32 +0100 Subject: [PATCH 445/836] feat: startup and readiness probes for replicas (#6623) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend the startup and readiness probes configured through the `.spec.probes.startup` and `.spec.probes.readiness` sections by adding two additional parameters: - `type`: Defines the criteria for considering the probe successful. Accepted values include: - `pg_isready`: This setting marks the probe as successful when the `pg_isready` command exits with a status of `0`. This is the default for both primary instances and replicas. - `query`: This setting marks the probe as successful when a basic query is executed locally on the `postgres` database. - `streaming`: This setting marks the probe successful when the replica starts streaming from its source and meets the specified lag requirements (details below). - `lag`: Specifies the maximum acceptable replication lag, measured in bytes (expressed using Kubernetes quantities). This parameter is applicable only when `type` is set to `streaming`. If the `lag` parameter is not specified, the replica is considered successfully started/ready as soon as it begins streaming. Consequently, the liveness probe has been streamlined to verify solely that the instance manager is operational, without monitoring the underlying PostgreSQL instance. Closes: #6621 ## Release Notes **Improved Startup and Readiness Probes for Replicas**: Enhanced support for Kubernetes startup and readiness probes in PostgreSQL instances, providing greater control over replicas based on the streaming lag. 
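To illustrate the parameters described above, a minimal cluster manifest enabling the streaming readiness strategy could look like the following sketch. Note that the CRD added by this patch exposes the lag threshold as `maximumLag`, and `32Mi` is an arbitrary illustrative value:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  probes:
    readiness:
      type: streaming
      maximumLag: 32Mi
  storage:
    size: 1Gi
```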
Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Niccolò Fei Signed-off-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Niccolò Fei Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 6 + api/v1/cluster_funcs.go | 10 + api/v1/cluster_types.go | 38 +++- api/v1/zz_generated.deepcopy.go | 25 ++- .../bases/postgresql.cnpg.io_clusters.yaml | 28 +++ docs/src/cloudnative-pg.v1.md | 59 ++++- docs/src/instance_manager.md | 154 ++++++++++--- docs/src/release_notes/v1.26.md | 4 + docs/src/replication.md | 4 +- .../samples/cluster-example-with-probes.yaml | 16 ++ .../controller/instance_controller.go | 2 +- .../management/controller/roles/runnable.go | 4 +- .../controller/tablespaces/controller_test.go | 16 +- .../controller/tablespaces/manager.go | 1 + .../controller/tablespaces/reconciler.go | 8 +- pkg/management/postgres/instance.go | 5 + pkg/management/postgres/probes.go | 14 -- .../postgres/readiness/readiness.go | 101 --------- .../postgres/webserver/probes/checker.go | 152 +++++++++++++ .../{readiness => webserver/probes}/doc.go | 6 +- .../postgres/webserver/probes/isready.go | 31 +++ .../postgres/webserver/probes/query.go | 41 ++++ .../postgres/webserver/probes/streaming.go | 123 +++++++++++ pkg/management/postgres/webserver/remote.go | 39 ++-- pkg/management/url/url.go | 5 +- pkg/specs/pods.go | 2 +- ...-pg-data-corruption-no-slots.yaml.template | 4 + ...ter-pg-data-corruption-roles.yaml.template | 4 + .../cluster-pg-data-corruption.yaml.template | 4 + .../readiness-probe-lag-control.yaml.template | 31 +++ .../startup-probe-lag-control.yaml.template | 31 +++ tests/e2e/probes_test.go | 12 +- tests/e2e/syncreplicas_test.go | 209 +++++++++++++++++- 33 files changed, 986 insertions(+), 203 deletions(-) create mode 100644 docs/src/samples/cluster-example-with-probes.yaml delete mode 100644 pkg/management/postgres/readiness/readiness.go create mode 100644 pkg/management/postgres/webserver/probes/checker.go rename pkg/management/postgres/{readiness => webserver/probes}/doc.go (81%) create mode 100644 pkg/management/postgres/webserver/probes/isready.go create mode 100644 pkg/management/postgres/webserver/probes/query.go create mode 100644 pkg/management/postgres/webserver/probes/streaming.go create mode 100644 tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template create mode 100644 tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index a0f5490eb9..fe66ba48e2 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -343,7 +343,9 @@ PrimaryUpdateMethod PrimaryUpdateStrategy PriorityClass PriorityClassName +ProbeStrategyType ProbeTerminationGracePeriod +ProbeWithStrategy ProbesConfiguration ProjectedVolumeSource PublicationReclaimPolicy @@ -442,6 +444,8 @@ SnapshotType Snapshotting Snyk Stackgres +StartupProbe +StartupStrategyType StatefulSets StorageClass StorageConfiguration @@ -957,6 +961,7 @@ maxClientConnections maxParallel maxStandbyNamesFromCluster maxSyncReplicas +maximumLag maxwait mcache md @@ -1348,6 +1353,7 @@ tx ubi ui uid +uint ul un uncordon diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 3fd8e8aacc..340ce9e740 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -1482,6 +1482,16 @@ func (p *Probe) ApplyInto(k8sProbe *corev1.Probe) { } } +// ApplyInto applies the content of the 
probe configuration in a Kubernetes +// probe +func (p *ProbeWithStrategy) ApplyInto(k8sProbe *corev1.Probe) { + if p == nil { + return + } + + p.Probe.ApplyInto(k8sProbe) +} + // GetEnabledWALArchivePluginName returns the name of the enabled backup plugin or an empty string // if no backup plugin is enabled func (cluster *Cluster) GetEnabledWALArchivePluginName() string { diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 97b9ed3b4c..70b5c2ed3c 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -487,15 +487,49 @@ type ClusterSpec struct { // to be injected in the PostgreSQL Pods type ProbesConfiguration struct { // The startup probe configuration - Startup *Probe `json:"startup,omitempty"` + Startup *ProbeWithStrategy `json:"startup,omitempty"` // The liveness probe configuration Liveness *Probe `json:"liveness,omitempty"` // The readiness probe configuration - Readiness *Probe `json:"readiness,omitempty"` + Readiness *ProbeWithStrategy `json:"readiness,omitempty"` } +// ProbeWithStrategy is the configuration of the startup probe +type ProbeWithStrategy struct { + // Probe is the standard probe configuration + Probe `json:",inline"` + + // The probe strategy + // +kubebuilder:validation:Enum=pg_isready;streaming;query + // +optional + Type ProbeStrategyType `json:"type,omitempty"` + + // Lag limit. Used only for `streaming` strategy + // +optional + MaximumLag *resource.Quantity `json:"maximumLag,omitempty"` +} + +// ProbeStrategyType is the type of the strategy used to declare a PostgreSQL instance +// ready +type ProbeStrategyType string + +const ( + // ProbeStrategyPgIsReady means that the pg_isready tool is used to determine + // whether PostgreSQL is started up + ProbeStrategyPgIsReady ProbeStrategyType = "pg_isready" + + // ProbeStrategyStreaming means that pg_isready is positive and the replica is + // connected via streaming replication to the current primary and the lag is, if specified, + // within the limit. + ProbeStrategyStreaming ProbeStrategyType = "streaming" + + // ProbeStrategyQuery means that the server is able to connect to the superuser database + // and able to execute a simple query like "-- ping" + ProbeStrategyQuery ProbeStrategyType = "query" +) + // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index e6b7801fa0..a2d4e4c166 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -2202,12 +2202,33 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeWithStrategy) DeepCopyInto(out *ProbeWithStrategy) { + *out = *in + in.Probe.DeepCopyInto(&out.Probe) + if in.MaximumLag != nil { + in, out := &in.MaximumLag, &out.MaximumLag + x := (*in).DeepCopy() + *out = &x + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeWithStrategy. +func (in *ProbeWithStrategy) DeepCopy() *ProbeWithStrategy { + if in == nil { + return nil + } + out := new(ProbeWithStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ProbesConfiguration) DeepCopyInto(out *ProbesConfiguration) { *out = *in if in.Startup != nil { in, out := &in.Startup, &out.Startup - *out = new(Probe) + *out = new(ProbeWithStrategy) (*in).DeepCopyInto(*out) } if in.Liveness != nil { @@ -2217,7 +2238,7 @@ func (in *ProbesConfiguration) DeepCopyInto(out *ProbesConfiguration) { } if in.Readiness != nil { in, out := &in.Readiness, &out.Readiness - *out = new(Probe) + *out = new(ProbeWithStrategy) (*in).DeepCopyInto(*out) } } diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index ed7ece5b06..7fcdabd306 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -4302,6 +4302,13 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true periodSeconds: description: |- How often (in seconds) to perform the probe. @@ -4335,6 +4342,13 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string type: object startup: description: The startup probe configuration @@ -4351,6 +4365,13 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true periodSeconds: description: |- How often (in seconds) to perform the probe. @@ -4384,6 +4405,13 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string type: object type: object projectedVolumeTemplate: diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 74e43468ad..eb29b4453f 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -4216,6 +4216,8 @@ the primary server of the cluster as part of rolling updates

**Appears in:**

+- [ProbeWithStrategy](#postgresql-cnpg-io-v1-ProbeWithStrategy)
 - [ProbesConfiguration](#postgresql-cnpg-io-v1-ProbesConfiguration)

@@ -4286,6 +4288,59 @@ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.

+## ProbeStrategyType {#postgresql-cnpg-io-v1-ProbeStrategyType}
+
+(Alias of `string`)
+
+**Appears in:**
+
+- [ProbeWithStrategy](#postgresql-cnpg-io-v1-ProbeWithStrategy)
+
+ProbeStrategyType is the type of the strategy used to declare a PostgreSQL instance
+ready
+
+## ProbeWithStrategy {#postgresql-cnpg-io-v1-ProbeWithStrategy}
+
+**Appears in:**
+
+- [ProbesConfiguration](#postgresql-cnpg-io-v1-ProbesConfiguration)
+
+ProbeWithStrategy is the configuration of the startup probe
+
+| Field | Description |
+| ----- | ----------- |
+| `Probe` ([Probe](#postgresql-cnpg-io-v1-Probe)) | (Members of `Probe` are embedded into this type.) Probe is the standard probe configuration |
+| `type` ([ProbeStrategyType](#postgresql-cnpg-io-v1-ProbeStrategyType)) | The probe strategy |
+| `maximumLag` (k8s.io/apimachinery/pkg/api/resource.Quantity) | Lag limit. Used only for `streaming` strategy |

## ProbesConfiguration {#postgresql-cnpg-io-v1-ProbesConfiguration}

@@ -4302,7 +4357,7 @@ to be injected in the PostgreSQL Pods

 `startup` [Required]
-Probe
+ProbeWithStrategy
 The startup probe configuration

@@ -4316,7 +4371,7 @@ to be injected in the PostgreSQL Pods

 `readiness` [Required]
-Probe
+ProbeWithStrategy
 The readiness probe configuration
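In manifest form, these fields sit alongside the standard probe settings under `.spec.probes`. The following fragment is illustrative, mirroring the `cluster-example-with-probes.yaml` sample shipped with this patch:

```yaml
probes:
  startup:
    # standard Probe fields (embedded)
    failureThreshold: 30
    periodSeconds: 10
    # strategy fields introduced by ProbeWithStrategy
    type: streaming
    maximumLag: 16Mi
```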

diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index 30dceab78d..69032b9923 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -15,23 +15,23 @@ main container, which in turn runs the PostgreSQL instance. During the lifetime of the Pod, the instance manager acts as a backend to handle the [startup, liveness and readiness probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). -## Startup, Liveness, and Readiness Probes +## Startup Probe -CloudNativePG leverages [PostgreSQL's `pg_isready`](https://www.postgresql.org/docs/current/app-pg-isready.html) -to implement Kubernetes startup, liveness, and readiness probes. +The startup probe ensures that a PostgreSQL instance, whether a primary or +standby, has fully started. -### Startup Probe +!!! Info + By default, the startup probe uses + [`pg_isready`](https://www.postgresql.org/docs/current/app-pg-isready.html). + However, the behavior can be customized by specifying a different startup + strategy. -The startup probe ensures that a PostgreSQL instance, whether a primary or -standby, has fully started according to `pg_isready`. While the startup probe is running, the liveness and readiness probes remain disabled. Following Kubernetes standards, if the startup probe fails, the kubelet will terminate the container, which will then be restarted. -The startup probe provided by CloudNativePG is configurable via the -parameter `.spec.startDelay`, which specifies the maximum time, in seconds, -allowed for the startup probe to succeed. At a minimum, the probe requires -`pg_isready` to return `0` or `1`. +The `.spec.startDelay` parameter specifies the maximum time, in seconds, +allowed for the startup probe to succeed. By default, the `startDelay` is set to `3600` seconds. It is recommended to adjust this setting based on the time PostgreSQL needs to fully initialize in @@ -63,7 +63,7 @@ section of your configuration. !!! Info For more details on probe configuration, refer to the - [probe API documentation](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). + [probe API documentation](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ProbeWithStrategy). If you manually specify `.spec.probes.startup.failureThreshold`, it will override the default behavior and disable the automatic use of `startDelay`. @@ -81,16 +81,63 @@ spec: failureThreshold: 10 ``` -### Liveness Probe +### Startup Probe Strategy + +In certain scenarios, you may need to customize the startup strategy for your +PostgreSQL cluster. For example, you might delay marking a replica as started +until it begins streaming from the primary or define a replication lag +threshold that must be met before considering the replica ready. + +To accommodate these requirements, CloudNativePG extends the +`.spec.probes.startup` stanza with two optional parameters: + +- `type`: specifies the criteria for considering the probe successful. Accepted + values, in increasing order of complexity/depth, include: + + - `pg_isready`: marks the probe as successful when the `pg_isready` command + exits with `0`. This is the default for primary instances and replicas. + - `query`: marks the probe as successful when a basic query is executed on + the `postgres` database locally. + - `streaming`: marks the probe as successful when the replica begins + streaming from its source and meets the specified lag requirements (details + below). 
+ +- `maximumLag`: defines the maximum acceptable replication lag, measured in bytes + (expressed as Kubernetes quantities). This parameter is only applicable when + `type` is set to `streaming`. If `maximumLag` is not specified, the replica is + considered successfully started as soon as it begins streaming. + +!!! Important + The `.spec.probes.startup.maximumLag` option is validated and enforced only + during the startup phase of the pod, meaning it applies exclusively when the + replica is starting. + +!!! Warning + Incorrect configuration of the `maximumLag` option can cause continuous + failures of the startup probe, leading to repeated replica restarts. Ensure + you understand how this option works and configure appropriate values for + `failureThreshold` and `periodSeconds` to give the replica enough time to + catch up with its source. + +The following example requires a replica to have a maximum lag of 16Mi from the +source to be considered started: + +```yaml +# +probes: + startup: + type: streaming + maximumLag: 16Mi +``` + +## Liveness Probe The liveness probe begins after the startup probe successfully completes. Its -primary role is to ensure the PostgreSQL instance—whether primary or standby—is -operating correctly. This is achieved using the `pg_isready` utility. Both exit -codes `0` (indicating the server is accepting connections) and `1` (indicating -the server is rejecting connections, such as during startup or a smart -shutdown) are treated as valid outcomes. -Following Kubernetes standards, if the liveness probe fails, the -kubelet will terminate the container, which will then be restarted. +primary role is to ensure the PostgreSQL instance manager is operating +correctly. + +Following Kubernetes standards, if the liveness probe fails, the kubelet will +terminate the container, which will then be restarted. The amount of time before a Pod is classified as not alive is configurable via the `.spec.livenessProbeTimeout` parameter. @@ -140,14 +187,21 @@ spec: failureThreshold: 10 ``` -### Readiness Probe +## Readiness Probe + +The readiness probe starts once the startup probe has successfully completed. +Its primary purpose is to check whether the PostgreSQL instance is ready to +accept traffic and serve requests at any point during the pod's lifecycle. + +!!! Info + By default, the readiness probe uses + [`pg_isready`](https://www.postgresql.org/docs/current/app-pg-isready.html). + However, the behavior can be customized by specifying a different readiness + strategy. -The readiness probe begins once the startup probe has successfully completed. -Its purpose is to check whether the PostgreSQL instance is ready to accept -traffic and serve requests. -For streaming replicas, it also requires that they have connected to the source -at least once. Following Kubernetes standards, if the readiness probe fails, -the pod will be marked unready and will not receive traffic from any services. +Following Kubernetes standards, if the readiness probe fails, the pod will be +marked unready and will not receive traffic from any services. An unready pod +is also ineligible for promotion during automated failover scenarios. CloudNativePG uses the following default configuration for the readiness probe: @@ -178,7 +232,53 @@ spec: !!! Info For more information on configuring probes, see the - [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-Probe). + [probe API](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ProbeWithStrategy). 
+ +### Readiness Probe Strategy + +In certain scenarios, you may need to customize the readiness strategy for your +cluster. For example, you might delay marking a replica as ready until it +begins streaming from the primary or define a maximum replication lag threshold +before considering the replica ready. + +To accommodate these requirements, CloudNativePG extends the +`.spec.probes.readiness` stanza with two optional parameters: `type` and +`maximumLag`. Please refer to the [Startup Probe Strategy](#startup-probe-strategy) +section for detailed information on these options. + +!!! Important + Unlike the startup probe, the `.spec.probes.readiness.maximumLag` option is + continuously monitored. A lagging replica may become unready if this setting is + not appropriately tuned. + +!!! Warning + Incorrect configuration of the `maximumLag` option can lead to repeated + readiness probe failures, causing serious consequences, such as: + + - Exclusion of the replica from key operator features, such as promotion + during failover or participation in synchronous replication quorum. + - Disruptions in read/read-only services. + - In longer failover times scenarios, replicas might be declared unready, + leading to a cluster stall requiring manual intervention. + +!!! Recommendation + Use the `streaming` and `maximumLag` options with extreme caution. If + you're unfamiliar with PostgreSQL replication, rely on the default + strategy. Seek professional advice if unsure. + +The following example requires a replica to have a maximum lag of 64Mi from the +source to be considered ready. It also provides approximately 300 seconds (30 +failures × 10 seconds) for the startup probe to succeed: + +```yaml +# +probes: + readiness: + type: streaming + maximumLag: 64Mi + failureThreshold: 30 + periodSeconds: 10 +``` ## Shutdown control diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index e25a2b4675..f7749bb84d 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -17,6 +17,10 @@ on the release branch in GitHub. ### Features: +- **Improved Startup and Readiness Probes for Replicas**: Enhanced support for + Kubernetes startup and readiness probes in PostgreSQL instances, providing + greater control over replicas based on the streaming lag. (#6623) + - **MAIN FEATURE #1**: short description - **MAIN FEATURE #2**: short description diff --git a/docs/src/replication.md b/docs/src/replication.md index fbe01b2d58..3091333692 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -394,7 +394,9 @@ number of standbys are available. Make sure you have a clear understanding of what *ready/available* means for a replica and set your expectations accordingly. By default, a replica is considered ready when it has successfully connected to the source at least - once. + once. However, CloudNativePG allows you to configure startup probes for + replicas. For more details, please refer to the + ["Advanced Startup Probe" section](instance_manager.md#advanced-startup-probe). 
This setting balances data safety with availability, enabling applications to continue writing during temporary standby unavailability—hence, it’s also known diff --git a/docs/src/samples/cluster-example-with-probes.yaml b/docs/src/samples/cluster-example-with-probes.yaml new file mode 100644 index 0000000000..dc2c1bb94c --- /dev/null +++ b/docs/src/samples/cluster-example-with-probes.yaml @@ -0,0 +1,16 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 3 + + probes: + startup: + type: streaming + maximumLag: 16Mi + failureThreshold: 30 + periodSeconds: 10 + + storage: + size: 1Gi diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index c1386796c2..31d29b6e5c 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -186,7 +186,7 @@ func (r *InstanceReconciler) Reconcile( return reconcile.Result{}, nil } - if r.instance.IsServerHealthy() != nil { + if err := r.instance.IsReady(); err != nil { contextLogger.Info("Instance is still down, will retry in 1 second") return reconcile.Result{RequeueAfter: time.Second}, nil } diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go index 1d97e6bfaf..585dea7ae4 100644 --- a/internal/management/controller/roles/runnable.go +++ b/internal/management/controller/roles/runnable.go @@ -53,7 +53,7 @@ type instanceInterface interface { GetSuperUserDB() (*sql.DB, error) IsPrimary() (bool, error) RoleSynchronizerChan() <-chan *apiv1.ManagedConfiguration - IsServerHealthy() error + IsReady() error GetClusterName() string GetNamespaceName() string } @@ -134,7 +134,7 @@ func (sr *RoleSynchronizer) reconcile(ctx context.Context, config *apiv1.Managed contextLog := log.FromContext(ctx).WithName("roles_reconciler") contextLog.Debug("reconciling managed roles") - if sr.instance.IsServerHealthy() != nil { + if err := sr.instance.IsReady(); err != nil { contextLog.Debug("database not ready, skipping roles reconciling") return nil } diff --git a/internal/management/controller/tablespaces/controller_test.go b/internal/management/controller/tablespaces/controller_test.go index 5d62d6da72..000bd19aad 100644 --- a/internal/management/controller/tablespaces/controller_test.go +++ b/internal/management/controller/tablespaces/controller_test.go @@ -71,6 +71,10 @@ func (f fakeInstance) IsPrimary() (bool, error) { return true, nil } +func (f fakeInstance) IsReady() error { + return nil +} + const ( expectedListStmt = ` SELECT @@ -84,13 +88,6 @@ const ( "LOCATION '%s'" expectedUpdateStmt = "ALTER TABLESPACE \"%s\" OWNER TO \"%s\"" - - expectedReadinessCheck = ` - SELECT - NOT pg_catalog.pg_is_in_recovery() - OR (SELECT coalesce(setting, '') = '' FROM pg_catalog.pg_settings WHERE name = 'primary_conninfo') - OR pg_catalog.pg_last_wal_replay_lsn() IS NOT NULL - ` ) func getCluster(ctx context.Context, c client.Client, cluster *apiv1.Cluster) (*apiv1.Cluster, error) { @@ -150,11 +147,6 @@ func assertTablespaceReconciled(ctx context.Context, tt tablespaceTest) { storageManager: tt.storageManager, } - // these bits happen because the reconciler checks for instance readiness - dbMock.ExpectPing() - expectedReadiness := sqlmock.NewRows([]string{""}).AddRow("t") - dbMock.ExpectQuery(expectedReadinessCheck).WillReturnRows(expectedReadiness) - tt.postgresExpectations(dbMock) results, err := tablespaceReconciler.Reconcile(ctx, 
reconcile.Request{}) diff --git a/internal/management/controller/tablespaces/manager.go b/internal/management/controller/tablespaces/manager.go index 6ba7490a6f..0ea317061f 100644 --- a/internal/management/controller/tablespaces/manager.go +++ b/internal/management/controller/tablespaces/manager.go @@ -35,6 +35,7 @@ type instanceInterface interface { GetClusterName() string GetSuperUserDB() (*sql.DB, error) IsPrimary() (bool, error) + IsReady() error CanCheckReadiness() bool } diff --git a/internal/management/controller/tablespaces/reconciler.go b/internal/management/controller/tablespaces/reconciler.go index 031ea0bfeb..c8f10295e1 100644 --- a/internal/management/controller/tablespaces/reconciler.go +++ b/internal/management/controller/tablespaces/reconciler.go @@ -29,7 +29,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/tablespaces/infrastructure" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/readiness" ) // Reconcile is the main reconciliation loop for the instance @@ -71,9 +70,10 @@ func (r *TablespaceReconciler) Reconcile( return reconcile.Result{}, nil } - checker := readiness.ForInstance(r.instance) - if checker.IsServerReady(ctx) != nil { - contextLogger.Debug("database not ready, skipping tablespace reconciling") + if err := r.instance.IsReady(); err != nil { + contextLogger.Debug( + "database not ready, skipping tablespace reconciling", + "err", err) return reconcile.Result{RequeueAfter: time.Second}, nil } diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 362172bdb0..5bd3ae3cd3 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -225,6 +225,11 @@ func (instance *Instance) SetPostgreSQLAutoConfWritable(writeable bool) error { return os.Chmod(autoConfFileName, mode) } +// IsReady runs PgIsReady +func (instance *Instance) IsReady() error { + return PgIsReady() +} + // IsFenced checks whether the instance is marked as fenced func (instance *Instance) IsFenced() bool { return instance.fenced.Load() diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go index 832d57a158..84144f1645 100644 --- a/pkg/management/postgres/probes.go +++ b/pkg/management/postgres/probes.go @@ -35,20 +35,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) -// IsServerHealthy check if the instance is healthy -func (instance *Instance) IsServerHealthy() error { - err := PgIsReady() - - // A healthy server can also be actively rejecting connections. - // That's not a problem: it's only the server starting up or shutting - // down. - if errors.Is(err, ErrPgRejectingConnection) { - return nil - } - - return err -} - // GetStatus Extract the status of this PostgreSQL database func (instance *Instance) GetStatus() (result *postgres.PostgresqlStatus, err error) { result = &postgres.PostgresqlStatus{ diff --git a/pkg/management/postgres/readiness/readiness.go b/pkg/management/postgres/readiness/readiness.go deleted file mode 100644 index c359853801..0000000000 --- a/pkg/management/postgres/readiness/readiness.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package readiness - -import ( - "context" - "database/sql" - "errors" -) - -// ErrStreamingReplicaNotConnected is raised for streaming replicas that never connected to its primary -var ErrStreamingReplicaNotConnected = errors.New("streaming replica was never connected to the primary node") - -// instanceInterface represents the required behavior for use in the readiness probe -type instanceInterface interface { - CanCheckReadiness() bool - GetSuperUserDB() (*sql.DB, error) -} - -// Data is the readiness checker structure -type Data struct { - instance instanceInterface - - streamingReplicaValidated bool -} - -// ForInstance creates a readiness checker for a certain instance -func ForInstance(instance instanceInterface) *Data { - return &Data{ - instance: instance, - } -} - -// IsServerReady check if the instance is healthy and can really accept connections -func (data *Data) IsServerReady(ctx context.Context) error { - if !data.instance.CanCheckReadiness() { - return errors.New("instance is not ready yet") - } - superUserDB, err := data.instance.GetSuperUserDB() - if err != nil { - return err - } - - // We now check if the database is ready to accept - // connections - if err := superUserDB.PingContext(ctx); err != nil { - return err - } - - // If we already validated this streaming replica, everything - // is fine - if data.streamingReplicaValidated { - return nil - } - - // If this is a streaming replica, meaning that - // primary_conninfo is not empty, we won't declare it ready - // unless it connected one time successfully to its primary. - // - // We check this because a streaming replica that was - // never connected to the primary could be incoherent, - // and we want users to notice this as soon as possible - row := superUserDB.QueryRowContext( - ctx, - ` - SELECT - NOT pg_catalog.pg_is_in_recovery() - OR (SELECT coalesce(setting, '') = '' FROM pg_catalog.pg_settings WHERE name = 'primary_conninfo') - OR pg_catalog.pg_last_wal_replay_lsn() IS NOT NULL - `, - ) - if err := row.Err(); err != nil { - return err - } - - var status bool - if err := row.Scan(&status); err != nil { - return err - } - - if !status { - return ErrStreamingReplicaNotConnected - } - - data.streamingReplicaValidated = true - return nil -} diff --git a/pkg/management/postgres/webserver/probes/checker.go b/pkg/management/postgres/webserver/probes/checker.go new file mode 100644 index 0000000000..c85bba8b6b --- /dev/null +++ b/pkg/management/postgres/webserver/probes/checker.go @@ -0,0 +1,152 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package probes + +import ( + "context" + "fmt" + "net/http" + + "github.com/cloudnative-pg/machinery/pkg/log" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +// probeType is the type of the probe +type probeType string + +const ( + // probeTypeReadiness is the readiness probe + probeTypeReadiness probeType = "readiness" + // probeTypeStartup is the startup probe + probeTypeStartup probeType = "startup" +) + +type runner interface { + // IsHealthy evaluates the status of PostgreSQL. If the probe is positive, + // it returns a nil error, otherwise the error status describes why + // the probe is failing + IsHealthy(ctx context.Context, instance *postgres.Instance) error +} + +// Checker executes the probe and writes the response to the request +type Checker interface { + IsHealthy(ctx context.Context, w http.ResponseWriter) +} + +type executor struct { + cli client.Client + instance *postgres.Instance + probeType probeType +} + +// NewReadinessChecker creates a new instance of the readiness probe checker +func NewReadinessChecker( + cli client.Client, + instance *postgres.Instance, +) Checker { + return &executor{ + cli: cli, + instance: instance, + probeType: probeTypeReadiness, + } +} + +// NewStartupChecker creates a new instance of the startup probe checker +func NewStartupChecker( + cli client.Client, + instance *postgres.Instance, +) Checker { + return &executor{ + cli: cli, + instance: instance, + probeType: probeTypeStartup, + } +} + +// IsHealthy executes the underlying probe logic and writes a response to the request accordingly to the result obtained +func (e *executor) IsHealthy( + ctx context.Context, + w http.ResponseWriter, +) { + contextLogger := log.FromContext(ctx) + + var cluster apiv1.Cluster + if err := e.cli.Get( + ctx, + client.ObjectKey{Namespace: e.instance.GetNamespaceName(), Name: e.instance.GetClusterName()}, + &cluster, + ); err != nil { + contextLogger.Warning( + fmt.Sprintf("%s check failed, cannot check Cluster definition", e.probeType), + "err", err.Error(), + ) + http.Error( + w, + fmt.Sprintf("%s check failed cannot get Cluster definition: %s", e.probeType, err.Error()), + http.StatusInternalServerError, + ) + return + } + + probeRunner := getProbeRunnerFromCluster(e.probeType, cluster) + if err := probeRunner.IsHealthy(ctx, e.instance); err != nil { + contextLogger.Warning(fmt.Sprintf("%s probe failing", e.probeType), "err", err.Error()) + http.Error( + w, + fmt.Sprintf("%s check failed: %s", e.probeType, err.Error()), + http.StatusInternalServerError, + ) + return + } + + contextLogger.Trace(fmt.Sprintf("%s probe succeeding", e.probeType)) + _, _ = fmt.Fprint(w, "OK") +} + +func getProbeRunnerFromCluster(probeType probeType, cluster apiv1.Cluster) runner { + var probe *apiv1.ProbeWithStrategy + if cluster.Spec.Probes != nil { + switch probeType { + case probeTypeStartup: + probe = cluster.Spec.Probes.Startup + + case probeTypeReadiness: + probe = cluster.Spec.Probes.Readiness + } + } + + switch { + case probe == nil: + return pgIsReadyChecker{} + case probe.Type == apiv1.ProbeStrategyPgIsReady: + return pgIsReadyChecker{} + case probe.Type == apiv1.ProbeStrategyQuery: + return pgQueryChecker{} + case probe.Type == apiv1.ProbeStrategyStreaming: + result := pgStreamingChecker{} + if probe.MaximumLag != nil { + result.maximumLag = ptr.To(probe.MaximumLag.AsDec().UnscaledBig().Uint64()) + } + return 
result + } + + return pgIsReadyChecker{} +} diff --git a/pkg/management/postgres/readiness/doc.go b/pkg/management/postgres/webserver/probes/doc.go similarity index 81% rename from pkg/management/postgres/readiness/doc.go rename to pkg/management/postgres/webserver/probes/doc.go index b9af9c89f2..2350a83307 100644 --- a/pkg/management/postgres/readiness/doc.go +++ b/pkg/management/postgres/webserver/probes/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package readiness contains the code needed to check -// if PostgreSQL is ready to accept client connections. -package readiness +// Package probes contains the implementation of startup, liveness and +// readiness probes +package probes diff --git a/pkg/management/postgres/webserver/probes/isready.go b/pkg/management/postgres/webserver/probes/isready.go new file mode 100644 index 0000000000..35c8deb370 --- /dev/null +++ b/pkg/management/postgres/webserver/probes/isready.go @@ -0,0 +1,31 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package probes + +import ( + "context" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +// pgIsReadyChecker checks is PostgreSQL is ready +type pgIsReadyChecker struct{} + +// IsHealthy implements the runner interface +func (pgIsReadyChecker) IsHealthy(_ context.Context, instance *postgres.Instance) error { + return instance.IsReady() +} diff --git a/pkg/management/postgres/webserver/probes/query.go b/pkg/management/postgres/webserver/probes/query.go new file mode 100644 index 0000000000..f61a433163 --- /dev/null +++ b/pkg/management/postgres/webserver/probes/query.go @@ -0,0 +1,41 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package probes + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +// pgQueryChecker checks if the PostgreSQL server can execute a simple query +type pgQueryChecker struct{} + +// IsHealthy implements the runner interface +func (c pgQueryChecker) IsHealthy(ctx context.Context, instance *postgres.Instance) error { + superUserDB, err := instance.GetSuperUserDB() + if err != nil { + return fmt.Errorf("while getting superuser connection pool: %w", err) + } + + if err := superUserDB.PingContext(ctx); err != nil { + return fmt.Errorf("while pinging database: %w", err) + } + + return nil +} diff --git a/pkg/management/postgres/webserver/probes/streaming.go b/pkg/management/postgres/webserver/probes/streaming.go new file mode 100644 index 0000000000..99bd5cd8aa --- /dev/null +++ b/pkg/management/postgres/webserver/probes/streaming.go @@ -0,0 +1,123 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package probes + +import ( + "context" + "fmt" + "math" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +// pgStreamingChecker checks if the replica is connected via streaming +// replication and, optionally, if the lag is within the specified maximum +type pgStreamingChecker struct { + maximumLag *uint64 +} + +// IsHealthy implements the runner interface +func (c pgStreamingChecker) IsHealthy(ctx context.Context, instance *postgres.Instance) error { + superUserDB, err := instance.GetSuperUserDB() + if err != nil { + return fmt.Errorf("while getting superuser connection pool: %w", err) + } + + var configuredLag uint64 = math.MaxUint64 + if c.maximumLag != nil { + configuredLag = *c.maximumLag + } + + // At this point, the instance is already running. + // The startup probe succeeds if the instance satisfies any of the following conditions: + // - It is a primary instance. + // - It is a log shipping replica (including a designated primary). + // - It is a streaming replica with replication lag below the specified threshold. + // If no lag threshold is specified, the startup probe succeeds if the replica has successfully connected + // to its source at least once. 
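+	//
+	// Note on the query below: the lag is computed from
+	// pg_catalog.pg_stat_wal_receiver as the difference between the latest
+	// LSN received from the source (latest_end_lsn) and the last LSN
+	// replayed locally (pg_last_wal_replay_lsn()), then compared with the
+	// configured threshold passed as $1.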
+ row := superUserDB.QueryRowContext( + ctx, + ` + WITH + lag AS ( + SELECT + (latest_end_lsn - pg_last_wal_replay_lsn()) AS value, + latest_end_time + FROM pg_catalog.pg_stat_wal_receiver + ) + SELECT + CASE + WHEN NOT pg_is_in_recovery() + THEN true + WHEN (SELECT coalesce(setting, '') = '' FROM pg_catalog.pg_settings WHERE name = 'primary_conninfo') + THEN true + WHEN (SELECT value FROM lag) < $1 + THEN true + ELSE false + END AS ready_to_start, + COALESCE((SELECT value FROM lag), 0) AS lag, + COALESCE((SELECT latest_end_time FROM lag), '-infinity') AS latest_end_time + `, + configuredLag, + ) + if err := row.Err(); err != nil { + return fmt.Errorf("streaming replication check failed: %w", err) + } + + var status bool + var detectedLag uint64 + var latestEndTime string + if err := row.Scan(&status, &detectedLag, &latestEndTime); err != nil { + return fmt.Errorf("streaming replication check failed (scan): %w", err) + } + + if !status { + if detectedLag > configuredLag { + return &ReplicaLaggingError{ + DetectedLag: detectedLag, + ConfiguredLag: configuredLag, + LatestEndTime: latestEndTime, + } + } + return fmt.Errorf("replica not connected via streaming replication") + } + + return nil +} + +// ReplicaLaggingError is raised when a replica is lagging more +// than the configured cap +type ReplicaLaggingError struct { + // DetectedLag is the lag that was detected + DetectedLag uint64 + + // ConfiguredLag is the lag as configured in the probe + ConfiguredLag uint64 + + // LatestEndTime is the time of last write-ahead log location reported to + // origin WAL sender + LatestEndTime string +} + +func (e *ReplicaLaggingError) Error() string { + return fmt.Sprintf( + "streaming replica lagging; detectedLag=%v configuredLag=%v latestEndTime=%s", + e.DetectedLag, + e.ConfiguredLag, + e.LatestEndTime, + ) +} diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index a3c43eb548..4aeb7da9a1 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -42,7 +42,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/readiness" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/probes" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/upgrade" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -62,7 +62,6 @@ type remoteWebserverEndpoints struct { typedClient client.Client instance *postgres.Instance currentBackup *backupConnection - readinessChecker *readiness.Data ongoingBackupRequest sync.Mutex } @@ -95,15 +94,15 @@ func NewRemoteWebServer( } endpoints := remoteWebserverEndpoints{ - typedClient: typedClient, - instance: instance, - readinessChecker: readiness.ForInstance(instance), + typedClient: typedClient, + instance: instance, } serveMux := http.NewServeMux() serveMux.HandleFunc(url.PathPgModeBackup, endpoints.backup) serveMux.HandleFunc(url.PathHealth, endpoints.isServerHealthy) serveMux.HandleFunc(url.PathReady, endpoints.isServerReady) + serveMux.HandleFunc(url.PathStartup, endpoints.isServerStartedUp) serveMux.HandleFunc(url.PathPgStatus, endpoints.pgStatus) serveMux.HandleFunc(url.PathPgArchivePartial, endpoints.pgArchivePartial) 
serveMux.HandleFunc(url.PathPGControlData, endpoints.pgControlData) @@ -206,37 +205,33 @@ func (ws *remoteWebserverEndpoints) cleanupStaleCollections(ctx context.Context) } } -func (ws *remoteWebserverEndpoints) isServerHealthy(w http.ResponseWriter, _ *http.Request) { - // If `pg_rewind` is running the Pod is starting up. +// isServerStartedUp evaluates the liveness probe +func (ws *remoteWebserverEndpoints) isServerStartedUp(w http.ResponseWriter, req *http.Request) { + // If `pg_rewind` is running, it means that the Pod is starting up. // We need to report it healthy to avoid being killed by the kubelet. - // Same goes for instances with fencing on. if ws.instance.PgRewindIsRunning || ws.instance.MightBeUnavailable() { - log.Trace("Liveness probe skipped") + log.Trace("Startup probe skipped") _, _ = fmt.Fprint(w, "Skipped") return } - err := ws.instance.IsServerHealthy() - if err != nil { - log.Debug("Liveness probe failing", "err", err.Error()) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } + checker := probes.NewStartupChecker(ws.typedClient, ws.instance) + checker.IsHealthy(req.Context(), w) +} - log.Trace("Liveness probe succeeding") +func (ws *remoteWebserverEndpoints) isServerHealthy(w http.ResponseWriter, _ *http.Request) { _, _ = fmt.Fprint(w, "OK") } // This is the readiness probe -func (ws *remoteWebserverEndpoints) isServerReady(w http.ResponseWriter, r *http.Request) { - if err := ws.readinessChecker.IsServerReady(r.Context()); err != nil { - log.Debug("Readiness probe failing", "err", err.Error()) - http.Error(w, err.Error(), http.StatusInternalServerError) +func (ws *remoteWebserverEndpoints) isServerReady(w http.ResponseWriter, req *http.Request) { + if !ws.instance.CanCheckReadiness() { + http.Error(w, "instance is not ready yet", http.StatusInternalServerError) return } - log.Trace("Readiness probe succeeding") - _, _ = fmt.Fprint(w, "OK") + checker := probes.NewReadinessChecker(ws.typedClient, ws.instance) + checker.IsHealthy(req.Context(), w) } // This probe is for the instance status, including replication diff --git a/pkg/management/url/url.go b/pkg/management/url/url.go index 31e3b4cead..d18d94cc58 100644 --- a/pkg/management/url/url.go +++ b/pkg/management/url/url.go @@ -34,9 +34,12 @@ const ( // PathHealth is the URL path for Health State PathHealth string = "/healthz" - // PathReady is the URL oath for Ready State + // PathReady is the URL path for Ready State PathReady string = "/readyz" + // PathStartup is the URL path for the Startup probe + PathStartup string = "/startupz" + // PathPGControlData is the URL path for PostgreSQL pg_controldata output PathPGControlData string = "/pg/controldata" diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index 41482b5dea..d059818f4a 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -206,7 +206,7 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable TimeoutSeconds: 5, ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ - Path: url.PathHealth, + Path: url.PathStartup, Port: intstr.FromInt32(url.StatusPort), }, }, diff --git a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-no-slots.yaml.template b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-no-slots.yaml.template index c38a36ff0d..d42e607616 100644 --- a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-no-slots.yaml.template +++ b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-no-slots.yaml.template @@ -27,6 +27,10 @@ spec: 
enabled: false slotPrefix: _cnpg_ + probes: + readiness: + type: query + # Persistent storage configuration storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-roles.yaml.template b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-roles.yaml.template index 29c869a33b..d2cffe85f4 100644 --- a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-roles.yaml.template +++ b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption-roles.yaml.template @@ -36,6 +36,10 @@ spec: database: app owner: app + probes: + readiness: + type: query + # Persistent storage configuration storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption.yaml.template b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption.yaml.template index f46f729c85..9c419300a0 100644 --- a/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption.yaml.template +++ b/tests/e2e/fixtures/pg_data_corruption/cluster-pg-data-corruption.yaml.template @@ -21,6 +21,10 @@ spec: database: app owner: app + probes: + readiness: + type: query + # Persistent storage configuration storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template b/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template new file mode 100644 index 0000000000..af46bd5192 --- /dev/null +++ b/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template @@ -0,0 +1,31 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-readiness-probe-replica-lag-control +spec: + instances: 3 + + postgresql: + synchronous: + method: any + number: 2 + dataDurability: preferred + parameters: + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + probes: + readiness: + type: streaming + maximumLag: 16Mi + failureThreshold: 30 + periodSeconds: 1 + + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1G diff --git a/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template b/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template new file mode 100644 index 0000000000..adbefa3874 --- /dev/null +++ b/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template @@ -0,0 +1,31 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-startup-probe-replica-lag-control +spec: + instances: 3 + + postgresql: + synchronous: + method: any + number: 2 + dataDurability: preferred + parameters: + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + probes: + startup: + type: streaming + maximumLag: 16Mi + failureThreshold: 30 + periodSeconds: 1 + + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1G diff --git a/tests/e2e/probes_test.go b/tests/e2e/probes_test.go index c3858d9210..8bf817c6cc 100644 --- a/tests/e2e/probes_test.go +++ b/tests/e2e/probes_test.go @@ -56,9 +56,13 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() { TimeoutSeconds: 8, } probesConfiguration := apiv1.ProbesConfiguration{ - Startup: probeConfiguration.DeepCopy(), - Liveness: 
probeConfiguration.DeepCopy(), - Readiness: probeConfiguration.DeepCopy(), + Startup: &apiv1.ProbeWithStrategy{ + Probe: probeConfiguration, + }, + Liveness: probeConfiguration.DeepCopy(), + Readiness: &apiv1.ProbeWithStrategy{ + Probe: probeConfiguration, + }, } assertProbeCoherentWithConfiguration := func(probe *corev1.Probe) { @@ -70,7 +74,7 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() { assertProbesCoherentWithConfiguration := func(container *corev1.Container) { assertProbeCoherentWithConfiguration(container.LivenessProbe) assertProbeCoherentWithConfiguration(container.ReadinessProbe) - assertProbeCoherentWithConfiguration(container.LivenessProbe) + assertProbeCoherentWithConfiguration(container.StartupProbe) } var defaultReadinessProbe *corev1.Probe diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go index 9731ae3f73..80982e6d80 100644 --- a/tests/e2e/syncreplicas_test.go +++ b/tests/e2e/syncreplicas_test.go @@ -22,14 +22,18 @@ import ( "strings" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/fencing" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/logs" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" @@ -83,7 +87,71 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { Expect(err).ShouldNot(HaveOccurred()) return strings.Trim(out, "\n") - }, 30).Should(ContainSubstring(element)) + }, 60).Should(ContainSubstring(element)) + } + + assertProbeRespectsReplicaLag := func(namespace, replicaName, probeType string) { + By(fmt.Sprintf( + "checking that %s probe of replica %s is waiting for lag to decrease before marking the pod ready", + probeType, replicaName), func() { + timeout := 2 * time.Minute + + // This "Eventually" block is needed because we may grab only a portion + // of the replica logs, and the "ParseJSONLogs" function may fail on the latest + // log record when this happens + Eventually(func(g Gomega) { + data, err := logs.ParseJSONLogs(env.Ctx, env.Interface, namespace, replicaName) + g.Expect(err).ToNot(HaveOccurred()) + + recordWasFound := false + for _, record := range data { + err, ok := record["err"].(string) + if !ok { + continue + } + msg, ok := record["msg"].(string) + if !ok { + continue + } + + if msg == fmt.Sprintf("%s probe failing", probeType) && + strings.Contains(err, "streaming replica lagging") { + recordWasFound = true + break + } + } + + g.Expect(recordWasFound).To( + BeTrue(), + fmt.Sprintf("The %s probe is preventing the replica from being marked ready", probeType), + ) + }, timeout).Should(Succeed()) + }) + } + + generateDataLoad := func(namespace, clusterName string) { + By("adding data to the primary", func() { + commandTimeout := time.Second * 600 + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + // This will generate 1Gi of data in the primary node and, since the replica we fenced + // is not aligned, will generate lag. 
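+			// For reference: each "insert into numbers (select * from numbers)"
+			// statement below doubles the table, so the initial 1,000,000 rows
+			// grow to 8,000,000 rows overall.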
+ _, _, err = exec.Command( + env.Ctx, env.Interface, env.RestClientConfig, + *primary, specs.PostgresContainerName, &commandTimeout, + "psql", + "-U", + "postgres", + "-c", + "create table numbers (i integer); "+ + "insert into numbers (select generate_series(1,1000000)); "+ + "insert into numbers (select * from numbers); "+ + "insert into numbers (select * from numbers); "+ + "insert into numbers (select * from numbers); ", + ) + Expect(err).ToNot(HaveOccurred()) + }) } Context("Legacy synchronous replication", func() { @@ -179,9 +247,8 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) Context("Synchronous replication", func() { - var namespace string - It("can manage quorum/priority based synchronous replication", func() { + var namespace string const ( namespacePrefix = "sync-replicas-e2e" sampleFile = fixturesDir + "/sync_replicas/cluster-sync-replica.yaml.template" @@ -240,6 +307,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { Context("data durability is preferred", func() { It("will decrease the number of sync replicas to the number of available replicas", func() { + var namespace string const ( namespacePrefix = "sync-replicas-preferred" sampleFile = fixturesDir + "/sync_replicas/preferred.yaml.template" @@ -279,7 +347,7 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { return strings.Trim(stdout, "\n") }, 160).Should(BeEmpty()) }) - By("unfenicing the replicas and verifying we have 2 quorum-based replicas", func() { + By("unfencing the replicas and verifying we have 2 quorum-based replicas", func() { Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-3", clusterName), namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName), @@ -289,5 +357,138 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) }) }) + + Context("Lag-control in startup & readiness probes", func() { + var ( + namespace string + namespacePrefix string + sampleFile string + clusterName string + fencedReplicaName string + err error + ) + + setupClusterWithLaggingReplica := func() { + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) + Expect(err).ToNot(HaveOccurred()) + + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, sampleFile, env) + + // Set our target fencedReplica + fencedReplicaName = fmt.Sprintf("%s-2", clusterName) + + By("verifying we have 2 quorum-based replicas", func() { + getSyncReplicationCount(namespace, clusterName, "quorum", 2) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 2") + }) + + By("fencing a replica and verifying we have only 1 quorum-based replica", func() { + Expect(fencing.On(env.Ctx, env.Client, fencedReplicaName, + namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) + getSyncReplicationCount(namespace, clusterName, "quorum", 1) + compareSynchronousStandbyNames(namespace, clusterName, "ANY 1") + }) + + By("waiting for the fenced pod to be not ready", func() { + Eventually(func(g Gomega) bool { + var pod corev1.Pod + err := env.Client.Get(env.Ctx, client.ObjectKey{ + Namespace: namespace, + Name: fencedReplicaName, + }, &pod) + g.Expect(err).ToNot(HaveOccurred()) + + return utils.IsPodReady(pod) + }, 160).Should(BeFalse()) + }) + + generateDataLoad(namespace, clusterName) + } + + It("lag 
control in startup probe will delay the readiness of replicas", func() {
+			namespacePrefix = "startup-probe-lag"
+			sampleFile = fixturesDir + "/sync_replicas/startup-probe-lag-control.yaml.template"
+
+			setupClusterWithLaggingReplica()
+
+			By("stopping the reconciliation loop on the cluster", func() {
+				// This is needed to prevent the operator from recreating the Pod
+				// when we delete it.
+				// We want the Pod to start without being fenced, so that the
+				// lag-checking startup probe is engaged
+				cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
+				Expect(err).ToNot(HaveOccurred())
+
+				origCluster := cluster.DeepCopy()
+				if cluster.Annotations == nil {
+					cluster.Annotations = make(map[string]string)
+				}
+				cluster.Annotations[utils.ReconciliationLoopAnnotationName] = "disabled"
+
+				err = env.Client.Patch(env.Ctx, cluster, client.MergeFrom(origCluster))
+				Expect(err).ToNot(HaveOccurred())
+			})
+
+			By("deleting the test replica and disabling fencing", func() {
+				var pod corev1.Pod
+				err := env.Client.Get(env.Ctx, client.ObjectKey{
+					Namespace: namespace,
+					Name:      fencedReplicaName,
+				}, &pod)
+				Expect(err).ToNot(HaveOccurred())
+
+				err = env.Client.Delete(env.Ctx, &pod)
+				Expect(err).ToNot(HaveOccurred())
+
+				Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName),
+					namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed())
+			})
+
+			By("enabling the reconciliation loop on the cluster", func() {
+				cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
+				Expect(err).ToNot(HaveOccurred())
+
+				origCluster := cluster.DeepCopy()
+				if cluster.Annotations == nil {
+					cluster.Annotations = make(map[string]string)
+				}
+				delete(cluster.Annotations, utils.ReconciliationLoopAnnotationName)
+
+				err = env.Client.Patch(env.Ctx, cluster, client.MergeFrom(origCluster))
+				Expect(err).ToNot(HaveOccurred())
+			})
+
+			By("waiting for the replica to be back again and ready", func() {
+				Eventually(func(g Gomega) bool {
+					var pod corev1.Pod
+					err := env.Client.Get(env.Ctx, client.ObjectKey{
+						Namespace: namespace,
+						Name:      fencedReplicaName,
+					}, &pod)
+					g.Expect(err).ToNot(HaveOccurred())
+
+					return utils.IsPodReady(pod)
+				}, 160).Should(BeTrue())
+			})
+
+			assertProbeRespectsReplicaLag(namespace, fencedReplicaName, "startup")
+		})
+
+		It("lag control in readiness probe will delay the readiness of replicas", func() {
+			namespacePrefix = "readiness-probe-lag"
+			sampleFile = fixturesDir + "/sync_replicas/readiness-probe-lag-control.yaml.template"
+
+			setupClusterWithLaggingReplica()
+
+			By("disabling fencing", func() {
+				Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName),
+					namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed())
+			})
+
+			assertProbeRespectsReplicaLag(namespace, fencedReplicaName, "readiness")
+		})
+	})
 })
 })

From 444bfe1df097bd2feb54e5f2e88669abf58778a7 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Tue, 11 Mar 2025 14:10:35 +0100
Subject: [PATCH 446/836] chore: add the release notes type of issue (#7121)

Signed-off-by: Gabriele Bartolini
---
 .github/ISSUE_TEMPLATE/release-notes.yml | 48 ++++++++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/release-notes.yml

diff --git a/.github/ISSUE_TEMPLATE/release-notes.yml b/.github/ISSUE_TEMPLATE/release-notes.yml
new file mode 100644
index 0000000000..8b40b9e731
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/release-notes.yml
@@ -0,0 +1,48 @@
+name: Release Notes
+description: Release notes for a new version of 
CloudNativePG +title: "[Release Notes]: CloudNativePG 1.XX.Y and 1.XX-1.Z" +labels: ["triage", "documentation"] +projects: ["cloudnative-pg/cloudnative-pg"] +assignees: + - gbartolini +body: + - type: markdown + attributes: + value: | + Make sure that the correct versions are reported in the title of the ticket. + - type: checkboxes + id: search + attributes: + label: Is there an existing issue already for this task? + description: Before you submit a new issue, make sure you have searched if a similar one already exists + options: + - label: I have searched for an existing issue, and could not find anything. I believe this is a new request. + required: true + - type: dropdown + id: minor + attributes: + label: Is this a new minor release? + description: Is this a new minor release for CloudNativePG? If so, make sure you check the `contribute/release-notes-template.md` file. + options: + - "No" + - "Yes" + validations: + required: true + - type: dropdown + id: preview + attributes: + label: Is this a preview release? + description: Is this a preview release for CloudNativePG? If so, make sure you add `-RC1` to the version and update the `preview_version.md` file. + options: + - "No" + - "Yes" + validations: + required: true + - type: checkboxes + id: terms + attributes: + label: Code of Conduct + description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/cloudnative-pg/governance/blob/main/CODE_OF_CONDUCT.md) + options: + - label: I agree to follow this project's Code of Conduct + required: true From 4ce21b89ef7e8dda8c1f2e671ba732de49b1c42a Mon Sep 17 00:00:00 2001 From: David Luo Date: Tue, 11 Mar 2025 12:49:26 -0400 Subject: [PATCH 447/836] docs(k9s): do not use bashisms in k9s integration sample (#6959) Replace bash-specific syntax with POSIX-compatible commands in the k9s integration sample, improving portability across different shell environments. 
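
For example, the two pipelines below are equivalent, but only the second
one is valid in a strictly POSIX shell such as dash (`somecmd` stands in
for any of the `kubectl cnpg` invocations touched by this patch):

    somecmd |& less -R        # bash-only shorthand that also pipes stderr
    somecmd 2>&1 | less -R    # POSIX-compatible equivalent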
Fixes #6958 Signed-off-by: dsluo --- docs/src/samples/k9s/plugins.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/src/samples/k9s/plugins.yml b/docs/src/samples/k9s/plugins.yml index ca242960b5..04b6850e0c 100644 --- a/docs/src/samples/k9s/plugins.yml +++ b/docs/src/samples/k9s/plugins.yml @@ -26,7 +26,7 @@ plugins: background: false args: - -c - - "kubectl cnpg backup $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg backup $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-hibernate-status: shortCut: h description: Hibernate status @@ -36,7 +36,7 @@ plugins: background: false args: - -c - - "kubectl cnpg hibernate status $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg hibernate status $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-hibernate: shortCut: Shift-H description: Hibernate @@ -47,7 +47,7 @@ plugins: background: false args: - -c - - "kubectl cnpg hibernate on $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg hibernate on $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-hibernate-off: shortCut: Shift-H description: Wake up hibernated cluster in this namespace @@ -58,7 +58,7 @@ plugins: background: false args: - -c - - "kubectl cnpg hibernate off $NAME -n $NAME --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg hibernate off $NAME -n $NAME --context \"$CONTEXT\" 2>&1 | less -R" cnpg-logs: shortCut: l description: Logs @@ -89,7 +89,7 @@ plugins: background: false args: - -c - - "kubectl cnpg reload $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg reload $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-restart: shortCut: Shift-R description: Restart @@ -100,7 +100,7 @@ plugins: background: false args: - -c - - "kubectl cnpg restart $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg restart $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-status: shortCut: s description: Status @@ -110,7 +110,7 @@ plugins: background: false args: - -c - - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" |& less -R" + - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" 2>&1 | less -R" cnpg-status-verbose: shortCut: Shift-S description: Status (verbose) @@ -120,4 +120,4 @@ plugins: background: false args: - -c - - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" --verbose |& less -R" + - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" --verbose 2>&1 | less -R" From c112c8ea9b5cb93d117b87ada49eda6735b2d3df Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Tue, 11 Mar 2025 18:39:42 +0100 Subject: [PATCH 448/836] refactor: move the multi-container logging to a dedicated package (#6809) This pull request refactors the code that iterates over containers when logging instance pods for the `kubectl cnpg report` plugin command. The refactoring involves moving the multi-container logging logic to a dedicated package, which improves the codebase's organization and maintainability. Additionally, new tests have been added to ensure the functionality is thoroughly covered. 
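
As a rough usage sketch of the new package (the `kubeClient`, `zipper`,
and `ctx` variables here are hypothetical; the actual definitions are in
pkg/podlogs/writer.go below):

    w := podlogs.NewPodLogsWriter(pod, kubeClient)

    // concatenate the streams of all the containers into one writer
    err := w.Single(ctx, os.Stdout, &corev1.PodLogOptions{})

    // or send each container's stream to its own writer, e.g. one
    // file per container inside a zip archive
    err = w.Multiple(ctx, &corev1.PodLogOptions{}, zipper,
        func(container string) string {
            return fmt.Sprintf("%s-%s.jsonl", pod.Name, container)
        })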
Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- docs/src/kubectl-plugin.md | 2 +- internal/cmd/plugin/logs/cluster_logs.go | 6 +- internal/cmd/plugin/logs/cluster_logs_test.go | 2 +- internal/cmd/plugin/logs/cluster_test.go | 2 +- internal/cmd/plugin/report/logs.go | 105 +++----- .../cluster_writer.go} | 22 +- .../cluster_writer_test.go} | 12 +- pkg/{utils/logs => podlogs}/suite_test.go | 2 +- pkg/podlogs/writer.go | 132 +++++++++ pkg/podlogs/writer_test.go | 251 ++++++++++++++++++ pkg/utils/discovery_test.go | 2 +- pkg/utils/logs/logs.go | 205 -------------- pkg/utils/logs/logs_test.go | 147 ---------- 13 files changed, 441 insertions(+), 449 deletions(-) rename pkg/{utils/logs/cluster_logs.go => podlogs/cluster_writer.go} (89%) rename pkg/{utils/logs/cluster_logs_test.go => podlogs/cluster_writer_test.go} (94%) rename pkg/{utils/logs => podlogs}/suite_test.go (97%) create mode 100644 pkg/podlogs/writer.go create mode 100644 pkg/podlogs/writer_test.go delete mode 100644 pkg/utils/logs/logs.go delete mode 100644 pkg/utils/logs/logs_test.go diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index ed9d8535e5..4048b9b749 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -573,7 +573,7 @@ Archive: report_operator_.zip and previous logs are available, it will show them both. ```output -====== Begin of Previous Log ===== +====== Beginning of Previous Log ===== 2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.1","build":{"Version":"1.25.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} diff --git a/internal/cmd/plugin/logs/cluster_logs.go b/internal/cmd/plugin/logs/cluster_logs.go index 09d5bd32d6..23de7650cc 100644 --- a/internal/cmd/plugin/logs/cluster_logs.go +++ b/internal/cmd/plugin/logs/cluster_logs.go @@ -31,7 +31,7 @@ import ( cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/podlogs" ) // clusterLogs contains the options and context to retrieve cluster logs @@ -55,7 +55,7 @@ func getCluster(cl clusterLogs) (*cnpgv1.Cluster, error) { return &cluster, err } -func getStreamClusterLogs(cluster *cnpgv1.Cluster, cl clusterLogs) logs.ClusterStreamingRequest { +func getStreamClusterLogs(cluster *cnpgv1.Cluster, cl clusterLogs) podlogs.ClusterWriter { var sinceTime *metav1.Time var tail *int64 if cl.timestamp { @@ -64,7 +64,7 @@ func getStreamClusterLogs(cluster *cnpgv1.Cluster, cl clusterLogs) logs.ClusterS if cl.tailLines >= 0 { tail = &cl.tailLines } - return logs.ClusterStreamingRequest{ + return podlogs.ClusterWriter{ Cluster: cluster, Options: &corev1.PodLogOptions{ Timestamps: cl.timestamp, diff --git a/internal/cmd/plugin/logs/cluster_logs_test.go b/internal/cmd/plugin/logs/cluster_logs_test.go index bcd7a87a1f..ffb9e14c0c 100644 --- a/internal/cmd/plugin/logs/cluster_logs_test.go +++ b/internal/cmd/plugin/logs/cluster_logs_test.go @@ -42,7 +42,7 @@ var _ = Describe("Get the logs", Ordered, func() { Name: clusterName + "-1", }, } - client := fakeClient.NewSimpleClientset(pod) + client := fakeClient.NewClientset(pod) 
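+		// (NewClientset supersedes the deprecated NewSimpleClientset constructor)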
cluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, diff --git a/internal/cmd/plugin/logs/cluster_test.go b/internal/cmd/plugin/logs/cluster_test.go index d66206731a..356d4a3ee3 100644 --- a/internal/cmd/plugin/logs/cluster_test.go +++ b/internal/cmd/plugin/logs/cluster_test.go @@ -53,7 +53,7 @@ var _ = Describe("Test the command", func() { } plugin.Namespace = namespace - plugin.ClientInterface = fakeClient.NewSimpleClientset(pod) + plugin.ClientInterface = fakeClient.NewClientset(pod) plugin.Client = fake.NewClientBuilder(). WithScheme(scheme.BuildWithAllKnownScheme()). WithObjects(cluster). diff --git a/internal/cmd/plugin/report/logs.go b/internal/cmd/plugin/report/logs.go index e6858696d9..4c28a3a2e8 100644 --- a/internal/cmd/plugin/report/logs.go +++ b/internal/cmd/plugin/report/logs.go @@ -24,11 +24,13 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/pkg/podlogs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs" ) const jobMatcherLabel = "job-name" @@ -46,9 +48,7 @@ func streamOperatorLogsToZip( if _, err := zipper.Create(logsDir + "/"); err != nil { return fmt.Errorf("could not add '%s' to zip: %w", logsDir, err) } - podLogOptions := &corev1.PodLogOptions{ - Timestamps: logTimeStamp, // NOTE: when activated, lines are no longer JSON - } + for i := range pods { pod := pods[i] path := filepath.Join(logsDir, fmt.Sprintf("%s-logs.jsonl", pod.Name)) @@ -57,21 +57,12 @@ func streamOperatorLogsToZip( return fmt.Errorf("could not add '%s' to zip: %w", path, zipperErr) } - streamPodLogs := &logs.StreamingRequest{ - Pod: &pod, - Options: podLogOptions, - Previous: true, - } - if _, err := fmt.Fprint(writer, "\n\"====== Begin of Previous Log =====\"\n"); err != nil { - return err + streamPodLogs := podlogs.NewPodLogsWriter(pod, kubernetes.NewForConfigOrDie(ctrl.GetConfigOrDie())) + opts := &corev1.PodLogOptions{ + Timestamps: logTimeStamp, + Previous: true, } - _ = streamPodLogs.Stream(ctx, writer) - if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil { - return err - } - - streamPodLogs.Previous = false - if err := streamPodLogs.Stream(ctx, writer); err != nil { + if err := streamPodLogs.Single(ctx, writer, opts); err != nil { return err } } @@ -99,45 +90,26 @@ func streamClusterLogsToZip( utils.ClusterLabelName: clusterName, } - podLogOptions := &corev1.PodLogOptions{ - Timestamps: logTimeStamp, // NOTE: when activated, lines are no longer JSON - } - var podList corev1.PodList err = plugin.Client.List(ctx, &podList, matchClusterName, client.InNamespace(namespace)) if err != nil { return fmt.Errorf("could not get cluster pods: %w", err) } - streamPodLogs := &logs.StreamingRequest{ - Options: podLogOptions, - Previous: true, - } + + cli := kubernetes.NewForConfigOrDie(ctrl.GetConfigOrDie()) for idx := range podList.Items { pod := podList.Items[idx] - for _, container := range pod.Spec.Containers { - path := filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, container.Name)) - writer, err := zipper.Create(path) - if err != nil { - return fmt.Errorf("could not add '%s' to zip: %w", path, err) - } - streamPodLogs.Options.Container = container.Name - streamPodLogs.Pod = &pod - - if _, err := fmt.Fprint(writer, "\n\"====== 
Begin of Previous Log =====\"\n"); err != nil { - return err - } - // We ignore the error because it will error if there are no previous logs - _ = streamPodLogs.Stream(ctx, writer) - if _, err := fmt.Fprint(writer, "\n\"====== End of Previous Log =====\"\n"); err != nil { - return err - } - - streamPodLogs.Previous = false - - if err := streamPodLogs.Stream(ctx, writer); err != nil { - return err - } + streamPodLogs := podlogs.NewPodLogsWriter(pod, cli) + fileNamer := func(containerName string) string { + return filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, containerName)) + } + opts := &corev1.PodLogOptions{ + Timestamps: logTimeStamp, + Previous: true, + } + if err := streamPodLogs.Multiple(ctx, opts, zipper, fileNamer); err != nil { + return err } } @@ -159,13 +131,8 @@ func streamClusterJobLogsToZip(ctx context.Context, clusterName, namespace strin utils.ClusterLabelName: clusterName, } - podLogOptions := &corev1.PodLogOptions{ - Timestamps: logTimeStamp, // NOTE: when activated, lines are no longer JSON - } - var jobList batchv1.JobList - err = plugin.Client.List(ctx, &jobList, matchClusterName, client.InNamespace(namespace)) - if err != nil { + if err := plugin.Client.List(ctx, &jobList, matchClusterName, client.InNamespace(namespace)); err != nil { return fmt.Errorf("could not get cluster jobs: %w", err) } @@ -174,28 +141,22 @@ func streamClusterJobLogsToZip(ctx context.Context, clusterName, namespace strin jobMatcherLabel: job.Name, } var podList corev1.PodList - err = plugin.Client.List(ctx, &podList, matchJobName, client.InNamespace(namespace)) - if err != nil { + if err := plugin.Client.List(ctx, &podList, matchJobName, client.InNamespace(namespace)); err != nil { return fmt.Errorf("could not get pods for job '%s': %w", job.Name, err) } - streamPodLogs := &logs.StreamingRequest{ - Options: podLogOptions, - Previous: false, - } for idx := range podList.Items { pod := podList.Items[idx] - for _, container := range pod.Spec.Containers { - path := filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, container.Name)) - writer, err := zipper.Create(path) - if err != nil { - return fmt.Errorf("could not add '%s' to zip: %w", path, err) - } - streamPodLogs.Options.Container = container.Name - streamPodLogs.Pod = &pod - if err = streamPodLogs.Stream(ctx, writer); err != nil { - return err - } + streamPodLogs := podlogs.NewPodLogsWriter(pod, kubernetes.NewForConfigOrDie(ctrl.GetConfigOrDie())) + + fileNamer := func(containerName string) string { + return filepath.Join(logsdir, fmt.Sprintf("%s-%s.jsonl", pod.Name, containerName)) + } + opts := corev1.PodLogOptions{ + Timestamps: logTimeStamp, + } + if err := streamPodLogs.Multiple(ctx, &opts, zipper, fileNamer); err != nil { + return err } } } diff --git a/pkg/utils/logs/cluster_logs.go b/pkg/podlogs/cluster_writer.go similarity index 89% rename from pkg/utils/logs/cluster_logs.go rename to pkg/podlogs/cluster_writer.go index 3e5b85e6c9..ffabfb227c 100644 --- a/pkg/utils/logs/cluster_logs.go +++ b/pkg/podlogs/cluster_writer.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package logs +package podlogs import ( "bufio" @@ -39,12 +39,12 @@ import ( // wait before searching again for new cluster pods const DefaultFollowWaiting time.Duration = 1 * time.Second -// ClusterStreamingRequest represents a request to stream a cluster's pod logs +// ClusterWriter represents a request to stream a cluster's pod logs. 
// // If the Follow Option is set to true, streaming will sit in a loop looking -// for any new / regenerated pods, and will only exit when there are no pods +// for any new / regenerated pods and will only exit when there are no pods // streaming -type ClusterStreamingRequest struct { +type ClusterWriter struct { Cluster *apiv1.Cluster Options *corev1.PodLogOptions Previous bool `json:"previous,omitempty"` @@ -54,15 +54,15 @@ type ClusterStreamingRequest struct { Client kubernetes.Interface } -func (csr *ClusterStreamingRequest) getClusterName() string { +func (csr *ClusterWriter) getClusterName() string { return csr.Cluster.Name } -func (csr *ClusterStreamingRequest) getClusterNamespace() string { +func (csr *ClusterWriter) getClusterNamespace() string { return csr.Cluster.Namespace } -func (csr *ClusterStreamingRequest) getLogOptions(containerName string) *corev1.PodLogOptions { +func (csr *ClusterWriter) getLogOptions(containerName string) *corev1.PodLogOptions { if csr.Options == nil { return &corev1.PodLogOptions{ Container: containerName, @@ -75,7 +75,7 @@ func (csr *ClusterStreamingRequest) getLogOptions(containerName string) *corev1. return options } -func (csr *ClusterStreamingRequest) getKubernetesClient() kubernetes.Interface { +func (csr *ClusterWriter) getKubernetesClient() kubernetes.Interface { if csr.Client != nil { return csr.Client } @@ -86,7 +86,7 @@ func (csr *ClusterStreamingRequest) getKubernetesClient() kubernetes.Interface { return csr.Client } -func (csr *ClusterStreamingRequest) getFollowWaitingTime() time.Duration { +func (csr *ClusterWriter) getFollowWaitingTime() time.Duration { if csr.FollowWaiting > 0 { return csr.FollowWaiting } @@ -165,7 +165,7 @@ func (as *activeSet) wait() { } // SingleStream streams the cluster's pod logs and shunts them to a single io.Writer -func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io.Writer) error { +func (csr *ClusterWriter) SingleStream(ctx context.Context, writer io.Writer) error { client := csr.getKubernetesClient() streamSet := newActiveSet() defer func() { @@ -230,7 +230,7 @@ func (csr *ClusterStreamingRequest) SingleStream(ctx context.Context, writer io. // // IMPORTANT: the output writer should be goroutine-safe // NOTE: the default Go `log` package is used for logging because it's goroutine-safe -func (csr *ClusterStreamingRequest) streamInGoroutine( +func (csr *ClusterWriter) streamInGoroutine( ctx context.Context, podName string, containerName string, diff --git a/pkg/utils/logs/cluster_logs_test.go b/pkg/podlogs/cluster_writer_test.go similarity index 94% rename from pkg/utils/logs/cluster_logs_test.go rename to pkg/podlogs/cluster_writer_test.go index b0561d8a22..96fea0859b 100644 --- a/pkg/utils/logs/cluster_logs_test.go +++ b/pkg/podlogs/cluster_writer_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package logs +package podlogs import ( "bytes" @@ -104,14 +104,14 @@ var _ = Describe("Cluster logging tests", func() { }, } It("should exit on ended pod logs with the non-follow option", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) + client := fake.NewClientset(pod) var logBuffer bytes.Buffer var wait sync.WaitGroup wait.Add(1) go func() { defer GinkgoRecover() defer wait.Done() - streamClusterLogs := ClusterStreamingRequest{ + streamClusterLogs := ClusterWriter{ Cluster: cluster, Options: &corev1.PodLogOptions{ Follow: false, @@ -134,7 +134,7 @@ var _ = Describe("Cluster logging tests", func() { go func() { defer GinkgoRecover() defer wait.Done() - streamClusterLogs := ClusterStreamingRequest{ + streamClusterLogs := ClusterWriter{ Cluster: cluster, Options: &corev1.PodLogOptions{ Follow: false, @@ -150,7 +150,7 @@ var _ = Describe("Cluster logging tests", func() { }) It("should catch extra logs if given the follow option", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) + client := fake.NewClientset(pod) var logBuffer syncBuffer // let's set a short follow-wait, and keep the cluster streaming for two // cycles @@ -158,7 +158,7 @@ var _ = Describe("Cluster logging tests", func() { ctx2, cancel := context.WithTimeout(ctx, 300*time.Millisecond) go func() { defer GinkgoRecover() - streamClusterLogs := ClusterStreamingRequest{ + streamClusterLogs := ClusterWriter{ Cluster: cluster, Options: &corev1.PodLogOptions{ Follow: true, diff --git a/pkg/utils/logs/suite_test.go b/pkg/podlogs/suite_test.go similarity index 97% rename from pkg/utils/logs/suite_test.go rename to pkg/podlogs/suite_test.go index cfa5b0aeae..793e424c49 100644 --- a/pkg/utils/logs/suite_test.go +++ b/pkg/podlogs/suite_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package logs +package podlogs import ( "testing" diff --git a/pkg/podlogs/writer.go b/pkg/podlogs/writer.go new file mode 100644 index 0000000000..96d306a269 --- /dev/null +++ b/pkg/podlogs/writer.go @@ -0,0 +1,132 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package podlogs contains code to fetch logs from Kubernetes pods +package podlogs + +import ( + "context" + "encoding/json" + "fmt" + "io" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// Writer represents a request to stream a pod's logs and send them to an io.Writer +type Writer struct { + Pod corev1.Pod + Client kubernetes.Interface +} + +// NewPodLogsWriter initializes the struct +func NewPodLogsWriter(pod corev1.Pod, cli kubernetes.Interface) *Writer { + return &Writer{Pod: pod, Client: cli} +} + +// Single streams the pod logs and shunts them to the `writer`. 
+// If there are multiple containers, it will concatenate all the container streams into the writer
+func (spl *Writer) Single(ctx context.Context, writer io.Writer, opts *corev1.PodLogOptions) (err error) {
+	if opts.Container != "" {
+		return spl.sendLogsToWriter(ctx, writer, opts)
+	}
+
+	for _, container := range spl.Pod.Spec.Containers {
+		containerOpts := opts.DeepCopy()
+		containerOpts.Container = container.Name
+		if err := spl.sendLogsToWriter(ctx, writer, containerOpts); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// writerConstructor is the interface representing an object that can spawn writers
+type writerConstructor interface {
+	Create(name string) (io.Writer, error)
+}
+
+func (spl *Writer) sendLogsToWriter(
+	ctx context.Context,
+	writer io.Writer,
+	options *corev1.PodLogOptions,
+) error {
+	pods := spl.Client.CoreV1().Pods(spl.Pod.Namespace)
+
+	if options.Previous {
+		jsWriter := json.NewEncoder(writer)
+		if err := jsWriter.Encode("====== Beginning of Previous Log ====="); err != nil {
+			return err
+		}
+		// getting the Previous logs can fail (as with `kubectl logs -p`). Don't error out
+		if err := executeGetLogRequest(ctx, pods.GetLogs(spl.Pod.Name, options), writer); err != nil {
+			// we try to print the json-safe error message. We don't exit on error
+			_ = json.NewEncoder(writer).Encode("Error fetching previous logs: " + err.Error())
+		}
+		if err := jsWriter.Encode("====== End of Previous Log ====="); err != nil {
+			return err
+		}
+		// the current logs are fetched with a fresh request, with Previous turned off
+		options = options.DeepCopy()
+		options.Previous = false
+	}
+	return executeGetLogRequest(ctx, pods.GetLogs(spl.Pod.Name, options), writer)
+}
+
+// Multiple streams the pod logs, sending each container's stream to a separate writer
+func (spl *Writer) Multiple(
+	ctx context.Context,
+	opts *corev1.PodLogOptions,
+	writerConstructor writerConstructor,
+	filePathGenerator func(string) string,
+) error {
+	if opts.Container != "" {
+		return fmt.Errorf("use Single method to handle a single container output")
+	}
+
+	for _, container := range spl.Pod.Spec.Containers {
+		writer, err := writerConstructor.Create(filePathGenerator(container.Name))
+		if err != nil {
+			return err
+		}
+		containerOpts := opts.DeepCopy()
+		containerOpts.Container = container.Name
+
+		if err := spl.sendLogsToWriter(ctx, writer, containerOpts); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func executeGetLogRequest(ctx context.Context, logRequest *rest.Request, writer io.Writer) error {
+	logStream, err := logRequest.Stream(ctx)
+	if err != nil {
+		return fmt.Errorf("when opening the log stream: %w", err)
+	}
+	defer func() {
+		innerErr := logStream.Close()
+		if err == nil && innerErr != nil {
+			err = fmt.Errorf("when closing the log stream: %w", innerErr)
+		}
+	}()
+
+	_, err = io.Copy(writer, logStream)
+	if err != nil {
+		return fmt.Errorf("when copying the log stream to the writer: %w", err)
+	}
+	_, _ = writer.Write([]byte("\n"))
+	return nil
+}
diff --git a/pkg/podlogs/writer_test.go b/pkg/podlogs/writer_test.go
new file mode 100644
index 0000000000..ff296e06e7
--- /dev/null
+++ b/pkg/podlogs/writer_test.go
@@ -0,0 +1,251 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podlogs + +import ( + "bytes" + "context" + "fmt" + "io" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type multiWriter struct { + writers map[string]*bytes.Buffer +} + +func newMultiWriter() *multiWriter { + newMw := &multiWriter{ + writers: make(map[string]*bytes.Buffer), + } + return newMw +} + +func (mw *multiWriter) Create(name string) (io.Writer, error) { + var buffer bytes.Buffer + mw.writers[name] = &buffer + return &buffer, nil +} + +var _ = Describe("Pod logging tests", func() { + podNamespace := "pod-test" + podName := "pod-name-test" + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: podNamespace, + Name: podName, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "postgres", + }, + }, + }, + } + + podWithSidecar := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: podNamespace, + Name: podName, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "postgres", + }, + { + Name: "sidecar", + }, + }, + }, + Status: corev1.PodStatus{ + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "postgres", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: time.Now()}, + }, + }, + }, + { + Name: "sidecar", + State: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: time.Now()}, + }, + }, + }, + }, + }, + } + + When("using the Stream function", func() { + It("should return the proper podName", func() { + streamPodLog := Writer{ + Pod: pod, + } + Expect(streamPodLog.Pod.Name).To(BeEquivalentTo(podName)) + Expect(streamPodLog.Pod.Namespace).To(BeEquivalentTo(podNamespace)) + }) + + It("should be able to handle the empty Pod", func(ctx context.Context) { + client := fake.NewClientset() + streamPodLog := Writer{ + Pod: corev1.Pod{}, + Client: client, + } + var logBuffer bytes.Buffer + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(logBuffer.String()).To(BeEquivalentTo("")) + }) + + It("should read the logs of a pod with one container", func(ctx context.Context) { + client := fake.NewClientset(&pod) + streamPodLog := Writer{ + Pod: pod, + Client: client, + } + + var logBuffer bytes.Buffer + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) + }) + + It("should read the logs of a pod with multiple containers", func(ctx context.Context) { + client := fake.NewClientset(&podWithSidecar) + streamPodLog := Writer{ + Pod: podWithSidecar, + Client: client, + } + + var logBuffer bytes.Buffer + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\nfake logs\n")) + }) + + It("should read only the specified container logs in a pod with multiple containers", func(ctx context.Context) { + client := fake.NewClientset(&podWithSidecar) + streamPodLog := Writer{ + Pod: podWithSidecar, + Client: client, + } + + var logBuffer bytes.Buffer + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{ + Container: "postgres", + Previous: false, + }) + 
Expect(err).ToNot(HaveOccurred()) + + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) + }) + + It("can follow pod logs", func(ctx SpecContext) { + client := fake.NewClientset(&pod) + var logBuffer bytes.Buffer + var wait sync.WaitGroup + wait.Add(1) + go func() { + defer GinkgoRecover() + defer wait.Done() + now := metav1.Now() + streamPodLog := Writer{ + Pod: pod, + Client: client, + } + err := streamPodLog.Single(ctx, &logBuffer, &corev1.PodLogOptions{ + Timestamps: false, + Follow: true, + SinceTime: &now, + }) + Expect(err).NotTo(HaveOccurred()) + }() + // Calling ctx.Done is not strictly necessary because the fake Client + // will terminate the pod stream anyway, ending Stream. + // But in "production", Stream will follow + // the pod logs until the context, or the logs, are over + ctx.Done() + wait.Wait() + Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\n")) + }) + }) + When("using the StreamMultiple function", func() { + It("should log each container into a separate writer", func(ctx context.Context) { + client := fake.NewClientset(&podWithSidecar) + streamPodLog := Writer{ + Pod: podWithSidecar, + Client: client, + } + + namer := func(container string) string { + return fmt.Sprintf("%s-%s.log", streamPodLog.Pod.Name, container) + } + mw := newMultiWriter() + err := streamPodLog.Multiple(ctx, &corev1.PodLogOptions{}, mw, namer) + Expect(err).ToNot(HaveOccurred()) + Expect(mw.writers).To(HaveLen(2)) + + Expect(mw.writers["pod-name-test-postgres.log"].String()).To(BeEquivalentTo("fake logs\n")) + Expect(mw.writers["pod-name-test-sidecar.log"].String()).To(BeEquivalentTo("fake logs\n")) + }) + + It("can fetch the previous logs for each container", func(ctx context.Context) { + client := fake.NewClientset(&podWithSidecar) + streamPodLog := Writer{ + Pod: podWithSidecar, + Client: client, + } + + namer := func(container string) string { + return fmt.Sprintf("%s-%s.log", streamPodLog.Pod.Name, container) + } + mw := newMultiWriter() + err := streamPodLog.Multiple(ctx, &corev1.PodLogOptions{Previous: true}, mw, namer) + Expect(err).ToNot(HaveOccurred()) + Expect(mw.writers).To(HaveLen(2)) + + Expect(mw.writers["pod-name-test-postgres.log"].String()).To(BeEquivalentTo( + `"====== Beginning of Previous Log =====" +fake logs +"====== End of Previous Log =====" +fake logs +`)) + + Expect(mw.writers["pod-name-test-sidecar.log"].String()).To(BeEquivalentTo( + `"====== Beginning of Previous Log =====" +fake logs +"====== End of Previous Log =====" +fake logs +`)) + }) + }) +}) diff --git a/pkg/utils/discovery_test.go b/pkg/utils/discovery_test.go index 8652c3a445..efd23e0c2c 100644 --- a/pkg/utils/discovery_test.go +++ b/pkg/utils/discovery_test.go @@ -47,7 +47,7 @@ var _ = Describe("Detect resources properly when", func() { var fakeDiscovery *discoveryFake.FakeDiscovery BeforeEach(func() { - client = fakeClient.NewSimpleClientset() + client = fakeClient.NewClientset() fakeDiscovery = client.Discovery().(*discoveryFake.FakeDiscovery) }) diff --git a/pkg/utils/logs/logs.go b/pkg/utils/logs/logs.go deleted file mode 100644 index 8cf9aeda70..0000000000 --- a/pkg/utils/logs/logs.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package logs contains code to fetch logs from Kubernetes pods -package logs - -import ( - "bufio" - "context" - "fmt" - "io" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" -) - -// StreamingRequest represents a request to stream a pod's logs -type StreamingRequest struct { - Pod *v1.Pod - Options *v1.PodLogOptions - Previous bool `json:"previous,omitempty"` - // NOTE: the Client argument may be omitted, but it is good practice to pass it - // Importantly, it makes the logging functions testable - Client kubernetes.Interface -} - -func (spl *StreamingRequest) getPodName() string { - if spl.Pod != nil { - return spl.Pod.Name - } - return "" -} - -func (spl *StreamingRequest) getPodNamespace() string { - if spl.Pod != nil { - return spl.Pod.Namespace - } - return "" -} - -func (spl *StreamingRequest) getLogOptions() *v1.PodLogOptions { - if spl.Options == nil { - spl.Options = &v1.PodLogOptions{} - } - spl.Options.Previous = spl.Previous - return spl.Options -} - -func (spl *StreamingRequest) getKubernetesClient() kubernetes.Interface { - if spl.Client != nil { - return spl.Client - } - conf := ctrl.GetConfigOrDie() - - spl.Client = kubernetes.NewForConfigOrDie(conf) - - return spl.Client -} - -// getStreamToPod opens the REST request to the pod -func (spl *StreamingRequest) getStreamToPod() *rest.Request { - client := spl.getKubernetesClient() - pods := client.CoreV1().Pods(spl.getPodNamespace()) - - return pods.GetLogs( - spl.getPodName(), - spl.getLogOptions()) -} - -// Stream streams the pod logs and shunts them to the `writer`. -func (spl *StreamingRequest) Stream(ctx context.Context, writer io.Writer) (err error) { - wrapErr := func(err error) error { return fmt.Errorf("in Stream: %w", err) } - - logsRequest := spl.getStreamToPod() - logStream, err := logsRequest.Stream(ctx) - if err != nil { - return wrapErr(err) - } - defer func() { - innerErr := logStream.Close() - if err == nil && innerErr != nil { - err = innerErr - } - }() - - _, err = io.Copy(writer, logStream) - if err != nil { - err = wrapErr(err) - } - return err -} - -// TailPodLogs streams the pod logs starting from the current time, and keeps -// waiting for any new logs, until the context is cancelled by the calling process -// If `parseTimestamps` is true, the log line will have the timestamp in -// human-readable prepended. NOTE: this will make log-lines NON-JSON -func TailPodLogs( - ctx context.Context, - client kubernetes.Interface, - pod v1.Pod, - writer io.Writer, - parseTimestamps bool, -) error { - now := metav1.Now() - streamPodLog := StreamingRequest{ - Pod: &pod, - Options: &v1.PodLogOptions{ - Timestamps: parseTimestamps, - Follow: true, - SinceTime: &now, - }, - Client: client, - } - return streamPodLog.Stream(ctx, writer) -} - -// GetPodLogs streams the pod logs and shunts them to the `writer`, as well as -// returning the last `requestedLineLength` of lines of logs in a slice. 
-// If `getPrevious` was activated, it will get the previous logs -// -// TODO: this function is a bit hacky. The K8s PodLogOptions have a field -// called `TailLines` that seems to be just what we would like. -// HOWEVER: we want the full logs too, so we can write them to a file, in addition to -// the `TailLines` we want to pass along for display -func GetPodLogs( - ctx context.Context, - client kubernetes.Interface, - pod v1.Pod, - getPrevious bool, - writer io.Writer, - requestedLineLength int, -) ( - []string, error, -) { - wrapErr := func(err error) error { return fmt.Errorf("in GetPodLogs: %w", err) } - - streamPodLog := StreamingRequest{ - Pod: &pod, - Previous: getPrevious, - Options: &v1.PodLogOptions{}, - Client: client, - } - logsRequest := streamPodLog.getStreamToPod() - - logStream, err := logsRequest.Stream(ctx) - if err != nil { - return nil, wrapErr(err) - } - defer func() { - innerErr := logStream.Close() - if err == nil && innerErr != nil { - err = innerErr - } - }() - - rd := bufio.NewReader(logStream) - teedReader := io.TeeReader(rd, writer) - scanner := bufio.NewScanner(teedReader) - scanner.Buffer(make([]byte, 0, 4096), 1024*1024) - - if requestedLineLength <= 0 { - requestedLineLength = 10 - } - - // slice to hold the last `requestedLineLength` lines of log - lines := make([]string, requestedLineLength) - // index of the current line of the log (starting from zero) - i := 0 - // index in the slice that holds the current line of log - curIdx := 0 - - for scanner.Scan() { - lines[curIdx] = scanner.Text() - i++ - // `curIdx` walks from `0` to `requestedLineLength-1` and then to `0` in a cycle - curIdx = i % requestedLineLength - } - - if err := scanner.Err(); err != nil { - return nil, wrapErr(err) - } - // if `curIdx` walks to in the middle of 0 and `requestedLineLength-1`, assemble the last `requestedLineLength` - // lines of logs - if i > requestedLineLength && curIdx < (requestedLineLength-1) { - return append(lines[curIdx+1:], lines[:curIdx+1]...), nil - } - - return lines, nil -} diff --git a/pkg/utils/logs/logs_test.go b/pkg/utils/logs/logs_test.go deleted file mode 100644 index 5d93c21cbc..0000000000 --- a/pkg/utils/logs/logs_test.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logs - -import ( - "bytes" - "context" - "sync" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Pod logging tests", func() { - podNamespace := "pod-test" - podName := "pod-name-test" - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: podNamespace, - Name: podName, - }, - } - - podLogOptions := &v1.PodLogOptions{} - - It("should return the proper podName", func() { - streamPodLog := StreamingRequest{ - Pod: pod, - Options: podLogOptions, - } - Expect(streamPodLog.getPodName()).To(BeEquivalentTo(podName)) - Expect(streamPodLog.getPodNamespace()).To(BeEquivalentTo(podNamespace)) - }) - - It("should be able to handle the nil Pod", func(ctx context.Context) { - // the nil pod passed will still default to the empty pod name - client := fake.NewSimpleClientset() - streamPodLog := StreamingRequest{ - Pod: nil, - Options: podLogOptions, - Client: client, - } - var logBuffer bytes.Buffer - err := streamPodLog.Stream(ctx, &logBuffer) - Expect(err).NotTo(HaveOccurred()) - // The fake Client will be given a pod name of "", but it will still - // go on along. In production, we'd have an error when pod not found - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - Expect(streamPodLog.getPodName()).To(BeEquivalentTo("")) - Expect(streamPodLog.getPodNamespace()).To(BeEquivalentTo("")) - }) - - It("previous option must be false by default", func() { - streamPodLog := StreamingRequest{ - Pod: pod, - Options: podLogOptions, - } - Expect(streamPodLog.getLogOptions().Previous).To(BeFalse()) - }) - - It("getLogOptions respects the Previous field setting", func() { - streamPodLog := StreamingRequest{ - Pod: pod, - Options: podLogOptions, - } - options := streamPodLog.getLogOptions() - Expect(options.Previous).To(BeFalse()) - - streamPodLog.Previous = true - options = streamPodLog.getLogOptions() - Expect(options.Previous).To(BeTrue()) - }) - - It("should read the logs with the provided k8s Client", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) - streamPodLog := StreamingRequest{ - Pod: pod, - Options: podLogOptions, - Previous: false, - Client: client, - } - - var logBuffer bytes.Buffer - err := streamPodLog.Stream(ctx, &logBuffer) - Expect(err).ToNot(HaveOccurred()) - - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) - - It("GetPodLogs correctly streams and provides output lines", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer - lines, err := GetPodLogs(ctx, client, *pod, false, &logBuffer, 2) - Expect(err).ToNot(HaveOccurred()) - Expect(lines).To(HaveLen(2)) - Expect(lines[0]).To(BeEquivalentTo("fake logs")) - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) - - It("GetPodLogs defaults to non-zero lines shown if set to zero", func(ctx context.Context) { - client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer - lines, err := GetPodLogs(ctx, client, *pod, false, &logBuffer, 0) - Expect(err).ToNot(HaveOccurred()) - Expect(lines).To(HaveLen(10)) - Expect(lines[0]).To(BeEquivalentTo("fake logs")) - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) - - It("can follow pod logs", func(ctx SpecContext) { - client := fake.NewSimpleClientset(pod) - var logBuffer bytes.Buffer - var wait sync.WaitGroup - wait.Add(1) - go func() { - defer GinkgoRecover() - defer wait.Done() - err := TailPodLogs(ctx, client, *pod, &logBuffer, true) - Expect(err).NotTo(HaveOccurred()) - }() - // calling ctx.Done is not strictly necessary because the fake Client - // will terminate the pod stream anyway, ending TailPodLogs. 
- // But in "production", TailPodLogs will follow - // the pod logs until the context, or the logs, are over - ctx.Done() - wait.Wait() - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs")) - }) -}) From 163dbafbac0a68f69d0ed0ed76f9e2210a0acbe2 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 11 Mar 2025 22:51:22 +0100 Subject: [PATCH 449/836] fix: imperative restart should rollout the primary pod (#7122) This patch updates the operator to create a new Pod for the primary instance when an imperative restart is requested. Previously, if `primaryUpdateMethod` was set to `restart`, the operator would recreate the replica Pods but only perform an in-place restart of the primary Pod. This approach could lead to inconsistencies between the definitions of the primary and the replica Pods, as only the replicas were fully recreated. Closes #7120 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- internal/controller/cluster_upgrade.go | 20 ++++++++------------ internal/controller/cluster_upgrade_test.go | 2 +- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go index dfcc54dc8f..b34ad064ed 100644 --- a/internal/controller/cluster_upgrade.go +++ b/internal/controller/cluster_upgrade.go @@ -352,11 +352,11 @@ func isPodNeedingRollout( } checkers := map[string]rolloutChecker{ - "pod has missing PVCs": checkHasMissingPVCs, - "pod has PVC requiring resizing": checkHasResizingPVC, - "pod projected volume is outdated": checkProjectedVolumeIsOutdated, - "pod image is outdated": checkPodImageIsOutdated, - "cluster has newer restart annotation": checkClusterHasNewerRestartAnnotation, + "pod has missing PVCs": checkHasMissingPVCs, + "pod has PVC requiring resizing": checkHasResizingPVC, + "pod projected volume is outdated": checkProjectedVolumeIsOutdated, + "pod image is outdated": checkPodImageIsOutdated, + "cluster has different restart annotation": checkClusterHasDifferentRestartAnnotation, } podRollout := applyCheckers(checkers) @@ -544,19 +544,15 @@ func checkHasMissingPVCs(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, erro return rollout{}, nil } -func checkClusterHasNewerRestartAnnotation(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { - // check if pod needs to be restarted because of some config requiring it - // or if the cluster have been explicitly restarted - // If the cluster has been restarted and we are working with a Pod - // which has not been restarted yet, or restarted at a different - // time, let's restart it. +func checkClusterHasDifferentRestartAnnotation(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { + // If the pod restart value doesn't match with the one contained in the cluster, restart the pod. 
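+	// Note that canBeInPlace is now false: a restart requested via the
+	// annotation triggers a full Pod recreation instead of an in-place restart.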
if clusterRestart, ok := cluster.Annotations[utils.ClusterRestartAnnotationName]; ok { podRestart := pod.Annotations[utils.ClusterRestartAnnotationName] if clusterRestart != podRestart { return rollout{ required: true, reason: "cluster has been explicitly restarted via annotation", - canBeInPlace: true, + canBeInPlace: false, }, nil } } diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index a1540166e6..106cadca2e 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -104,7 +104,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { rollout := isInstanceNeedingRollout(ctx, status, &clusterRestart) Expect(rollout.required).To(BeTrue()) Expect(rollout.reason).To(Equal("cluster has been explicitly restarted via annotation")) - Expect(rollout.canBeInPlace).To(BeTrue()) + Expect(rollout.canBeInPlace).To(BeFalse()) Expect(rollout.needsChangeOperandImage).To(BeFalse()) Expect(rollout.needsChangeOperatorImage).To(BeFalse()) From 82a016c81cd2e44a3beeac68702d6bc3d910b0a7 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 12 Mar 2025 16:17:17 +0100 Subject: [PATCH 450/836] feat(databases): declarative management of extensions and schemas (#7062) This patch introduces the `extensions` and `schemas` stanzas in the Database resource to declaratively create, modify, and drop PostgreSQL extensions and schemas within a database. Closes: #6292 ## Release Notes **Declarative management of extensions and schemas**: Introduced the `extensions` and `schemas` stanzas in the Database resource to declaratively create, modify, and drop PostgreSQL extensions and schemas within a database. Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Signed-off-by: Francesco Canovai Signed-off-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini Co-authored-by: Francesco Canovai Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Quaresima --- .wordlist-en-custom.txt | 5 + api/v1/database_funcs.go | 10 + api/v1/database_types.go | 75 +++++ api/v1/zz_generated.deepcopy.go | 82 +++++ .../bases/postgresql.cnpg.io_databases.yaml | 110 +++++++ config/webhook/manifests.yaml | 40 +++ docs/src/cloudnative-pg.v1.md | 183 +++++++++++ docs/src/declarative_database_management.md | 106 ++++++- docs/src/index.md | 2 +- docs/src/operator_capability_levels.md | 2 +- docs/src/postgresql_conf.md | 6 + docs/src/release_notes/v1.26.md | 4 + internal/cmd/manager/controller/controller.go | 5 + internal/management/controller/common.go | 12 +- .../controller/database_controller.go | 71 ++++- .../controller/database_controller_sql.go | 194 +++++++++++- .../database_controller_sql_test.go | 294 +++++++++++++++++- .../management/controller/database_objects.go | 145 +++++++++ internal/webhook/v1/database_webhook.go | 211 +++++++++++++ internal/webhook/v1/database_webhook_test.go | 105 +++++++ tests/e2e/asserts_test.go | 10 +- .../declarative_database_management_test.go | 20 ++ ...e-with-delete-reclaim-policy.yaml.template | 6 + .../database.yaml.template | 6 + 24 files changed, 1681 insertions(+), 23 deletions(-) create mode 100644 internal/management/controller/database_objects.go create mode 100644 internal/webhook/v1/database_webhook.go create mode 100644 internal/webhook/v1/database_webhook_test.go diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index fe66ba48e2..a59c43aeab 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -122,6 +122,8 @@ DataBackupConfiguration DataBase 
DataDurabilityLevel DataSource +DatabaseObjectSpec +DatabaseObjectStatus DatabaseReclaimPolicy DatabaseRoleRef DatabaseSpec @@ -155,6 +157,8 @@ EnvVar EphemeralVolumeSource EphemeralVolumesSizeLimit EphemeralVolumesSizeLimitConfiguration +ExtensionSpec +ExtensionStatus ExternalCluster FQDN Fei @@ -414,6 +418,7 @@ ScheduledBackupList ScheduledBackupSpec ScheduledBackupStatus ScheduledBackups +SchemaSpec Scorsolini Seccomp SeccompProfile diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go index 91511c411c..3059538e90 100644 --- a/api/v1/database_funcs.go +++ b/api/v1/database_funcs.go @@ -75,3 +75,13 @@ func (dbList *DatabaseList) MustHaveManagedResourceExclusivity(reference *Databa pointers := toSliceWithPointers(dbList.Items) return ensureManagedResourceExclusivity(reference, pointers) } + +// GetEnsure gets the ensure status of the resource +func (dbObject DatabaseObjectSpec) GetEnsure() EnsureOption { + return dbObject.Ensure +} + +// GetName gets the name of the resource +func (dbObject DatabaseObjectSpec) GetName() string { + return dbObject.Name +} diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 3c759ab767..ba5e2fd31d 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -162,6 +162,59 @@ type DatabaseSpec struct { // +kubebuilder:default:=retain // +optional ReclaimPolicy DatabaseReclaimPolicy `json:"databaseReclaimPolicy,omitempty"` + + // The list of schemas to be managed in the database + // +optional + Schemas []SchemaSpec `json:"schemas,omitempty"` + + // The list of extensions to be managed in the database + // +optional + Extensions []ExtensionSpec `json:"extensions,omitempty"` +} + +// DatabaseObjectSpec contains the fields which are common to every +// database object +type DatabaseObjectSpec struct { + // Name of the extension/schema + Name string `json:"name"` + + // Specifies whether an extension/schema should be present or absent in + // the database. If set to `present`, the extension/schema will be + // created if it does not exist. If set to `absent`, the + // extension/schema will be removed if it exists. + // +kubebuilder:default:="present" + // +kubebuilder:validation:Enum=present;absent + // +optional + Ensure EnsureOption `json:"ensure"` +} + +// SchemaSpec configures a schema in a database +type SchemaSpec struct { + // Common fields + DatabaseObjectSpec `json:",inline"` + + // The role name of the user who owns the schema inside PostgreSQL. + // It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + // `OWNER TO` command of `ALTER SCHEMA`. + Owner string `json:"owner,omitempty"` +} + +// ExtensionSpec configures an extension in a database +type ExtensionSpec struct { + // Common fields + DatabaseObjectSpec `json:",inline"` + + // The version of the extension to install. If empty, the operator will + // install the default version (whatever is specified in the + // extension's control file) + Version string `json:"version,omitempty"` + + // The name of the schema in which to install the extension's objects, + // in case the extension allows its contents to be relocated. If not + // specified (default), and the extension's control file does not + // specify a schema either, the current default object creation schema + // is used. 
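+	// This field maps to the `SCHEMA` option of `CREATE EXTENSION` and
+	// to the `SET SCHEMA` clause of `ALTER EXTENSION`.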
+ Schema string `json:"schema,omitempty"` } // DatabaseStatus defines the observed state of Database @@ -178,6 +231,28 @@ type DatabaseStatus struct { // Message is the reconciliation output message // +optional Message string `json:"message,omitempty"` + + // Schemas is the status of the managed schemas + // +optional + Schemas []DatabaseObjectStatus `json:"schemas,omitempty"` + + // Extensions is the status of the managed extensions + // +optional + Extensions []DatabaseObjectStatus `json:"extensions,omitempty"` +} + +// DatabaseObjectStatus is the status of the managed database objects +type DatabaseObjectStatus struct { + // The name of the object + Name string `json:"name"` + + // True of the object has been installed successfully in + // the database + Applied bool `json:"applied"` + + // Message is the object reconciliation message + // +optional + Message string `json:"message,omitempty"` } // +genclient diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index a2d4e4c166..1171af8553 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1084,6 +1084,36 @@ func (in *DatabaseList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObjectSpec) DeepCopyInto(out *DatabaseObjectSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObjectSpec. +func (in *DatabaseObjectSpec) DeepCopy() *DatabaseObjectSpec { + if in == nil { + return nil + } + out := new(DatabaseObjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObjectStatus) DeepCopyInto(out *DatabaseObjectStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObjectStatus. +func (in *DatabaseObjectStatus) DeepCopy() *DatabaseObjectStatus { + if in == nil { + return nil + } + out := new(DatabaseObjectStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DatabaseRoleRef) DeepCopyInto(out *DatabaseRoleRef) { *out = *in @@ -1118,6 +1148,16 @@ func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) { *out = new(int) **out = **in } + if in.Schemas != nil { + in, out := &in.Schemas, &out.Schemas + *out = make([]SchemaSpec, len(*in)) + copy(*out, *in) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]ExtensionSpec, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec. @@ -1138,6 +1178,16 @@ func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) { *out = new(bool) **out = **in } + if in.Schemas != nil { + in, out := &in.Schemas, &out.Schemas + *out = make([]DatabaseObjectStatus, len(*in)) + copy(*out, *in) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]DatabaseObjectStatus, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus. 
@@ -1204,6 +1254,22 @@ func (in *EphemeralVolumesSizeLimitConfiguration) DeepCopy() *EphemeralVolumesSi return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionSpec) DeepCopyInto(out *ExtensionSpec) { + *out = *in + out.DatabaseObjectSpec = in.DatabaseObjectSpec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionSpec. +func (in *ExtensionSpec) DeepCopy() *ExtensionSpec { + if in == nil { + return nil + } + out := new(ExtensionSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalCluster) DeepCopyInto(out *ExternalCluster) { *out = *in @@ -2699,6 +2765,22 @@ func (in *ScheduledBackupStatus) DeepCopy() *ScheduledBackupStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaSpec) DeepCopyInto(out *SchemaSpec) { + *out = *in + out.DatabaseObjectSpec = in.DatabaseObjectSpec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaSpec. +func (in *SchemaSpec) DeepCopy() *SchemaSpec { + if in == nil { + return nil + } + out := new(SchemaSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretVersion) DeepCopyInto(out *SecretVersion) { *out = *in diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index d8ae251677..04841bc36a 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -124,6 +124,43 @@ spec: - present - absent type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array icuLocale: description: |- Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This @@ -203,6 +240,35 @@ spec: Maps to the `OWNER TO` command of `ALTER DATABASE`. The role name of the user who owns the database inside PostgreSQL. 
type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array tablespace: description: |- Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. @@ -242,6 +308,28 @@ spec: applied: description: Applied is true if the database was reconciled correctly type: boolean + extensions: + description: Extensions is the status of the managed extensions + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array message: description: Message is the reconciliation output message type: string @@ -251,6 +339,28 @@ spec: desired state that was synchronized format: int64 type: integer + schemas: + description: Schemas is the status of the managed schemas + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array type: object required: - metadata diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 33aafeddb4..595d06293b 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -44,6 +44,26 @@ webhooks: resources: - clusters sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None - admissionReviewVersions: - v1 clientConfig: @@ -110,6 +130,26 @@ webhooks: resources: - clusters sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None - admissionReviewVersions: - v1 clientConfig: diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index eb29b4453f..02dd9a27c1 100644 --- a/docs/src/cloudnative-pg.v1.md +++ 
b/docs/src/cloudnative-pg.v1.md @@ -2371,6 +2371,82 @@ PostgreSQL cluster from an existing storage

+## DatabaseObjectSpec {#postgresql-cnpg-io-v1-DatabaseObjectSpec} + + +**Appears in:** + +- [ExtensionSpec](#postgresql-cnpg-io-v1-ExtensionSpec) + +- [SchemaSpec](#postgresql-cnpg-io-v1-SchemaSpec) + + +

DatabaseObjectSpec contains the fields which are common to every +database object

+| Field | Description |
+|-------|-------------|
+| `name` [Required] *string* | Name of the extension/schema |
+| `ensure` *[EnsureOption](#postgresql-cnpg-io-v1-EnsureOption)* | Specifies whether an extension/schema should be present or absent in the database. If set to `present`, the extension/schema will be created if it does not exist. If set to `absent`, the extension/schema will be removed if it exists. |
+
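+As an illustrative sketch (the extension name is an example), these
+common fields surface in a `Database` manifest as:
+
+```yaml
+spec:
+  extensions:
+  - name: bloom      # maps to DatabaseObjectSpec.name
+    ensure: present  # maps to DatabaseObjectSpec.ensure
+```
+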
+ +## DatabaseObjectStatus {#postgresql-cnpg-io-v1-DatabaseObjectStatus} + + +**Appears in:** + +- [DatabaseStatus](#postgresql-cnpg-io-v1-DatabaseStatus) + + +

DatabaseObjectStatus is the status of the managed database objects

+| Field | Description |
+|-------|-------------|
+| `name` [Required] *string* | The name of the object |
+| `applied` [Required] *bool* | True if the object has been installed successfully in the database |
+| `message` *string* | Message is the object reconciliation message |
+
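+For illustration, a reconciled `Database` might report a status like
+the following (the failure message is hypothetical):
+
+```yaml
+status:
+  schemas:
+  - name: app
+    applied: true
+  extensions:
+  - name: bloom
+    applied: false
+    message: 'extension "bloom" is not available'
+```
+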
+ ## DatabaseReclaimPolicy {#postgresql-cnpg-io-v1-DatabaseReclaimPolicy} (Alias of `string`) @@ -2586,6 +2662,20 @@ tablespace used for objects created in this database.

The policy for end-of-life maintenance of this database.

+| `schemas` *[[]SchemaSpec](#postgresql-cnpg-io-v1-SchemaSpec)* | The list of schemas to be managed in the database |
+| `extensions` *[[]ExtensionSpec](#postgresql-cnpg-io-v1-ExtensionSpec)* | The list of extensions to be managed in the database |
@@ -2625,6 +2715,20 @@ desired state that was synchronized

Message is the reconciliation output message

+| `schemas` *[[]DatabaseObjectStatus](#postgresql-cnpg-io-v1-DatabaseObjectStatus)* | Schemas is the status of the managed schemas |
+| `extensions` *[[]DatabaseObjectStatus](#postgresql-cnpg-io-v1-DatabaseObjectStatus)* | Extensions is the status of the managed extensions |
@@ -2663,6 +2767,8 @@ desired state that was synchronized

**Appears in:** +- [DatabaseObjectSpec](#postgresql-cnpg-io-v1-DatabaseObjectSpec) + - [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) - [RoleConfiguration](#postgresql-cnpg-io-v1-RoleConfiguration) @@ -2706,6 +2812,50 @@ storage

+## ExtensionSpec {#postgresql-cnpg-io-v1-ExtensionSpec} + + +**Appears in:** + +- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) + + +

ExtensionSpec configures an extension in a database

+| Field | Description |
+|-------|-------------|
+| `DatabaseObjectSpec` *[DatabaseObjectSpec](#postgresql-cnpg-io-v1-DatabaseObjectSpec)* (Members of `DatabaseObjectSpec` are embedded into this type.) | Common fields |
+| `version` [Required] *string* | The version of the extension to install. If empty, the operator will install the default version (whatever is specified in the extension's control file) |
+| `schema` [Required] *string* | The name of the schema in which to install the extension's objects, in case the extension allows its contents to be relocated. If not specified (default), and the extension's control file does not specify a schema either, the current default object creation schema is used. |
+
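+An illustrative entry exercising every field (the extension, version,
+and schema names are examples only):
+
+```yaml
+spec:
+  extensions:
+  - name: hstore
+    ensure: present
+    version: "1.8"
+    schema: app
+```
+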
+ ## ExternalCluster {#postgresql-cnpg-io-v1-ExternalCluster} @@ -5165,6 +5315,39 @@ Overrides the default settings specified in the cluster '.backup.volumeSnapshot. +## SchemaSpec {#postgresql-cnpg-io-v1-SchemaSpec} + + +**Appears in:** + +- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) + + +

SchemaSpec configures a schema in a database

+| Field | Description |
+|-------|-------------|
+| `DatabaseObjectSpec` *[DatabaseObjectSpec](#postgresql-cnpg-io-v1-DatabaseObjectSpec)* (Members of `DatabaseObjectSpec` are embedded into this type.) | Common fields |
+| `owner` [Required] *string* | The role name of the user who owns the schema inside PostgreSQL. It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the `OWNER TO` command of `ALTER SCHEMA`. |
+
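+For example (names are illustrative):
+
+```yaml
+spec:
+  schemas:
+  - name: app
+    owner: app
+    ensure: present
+```
+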
+ ## SecretVersion {#postgresql-cnpg-io-v1-SecretVersion} diff --git a/docs/src/declarative_database_management.md b/docs/src/declarative_database_management.md index 52e9b2c76e..98f5a6e4b5 100644 --- a/docs/src/declarative_database_management.md +++ b/docs/src/declarative_database_management.md @@ -18,10 +18,10 @@ automated, and consistent approach to managing PostgreSQL databases. ### Scope of Management !!! Important - CloudNativePG manages **global objects** in PostgreSQL clusters, such as - databases, roles, and tablespaces. However, it does **not** manage the content - of databases (e.g., schemas and tables). For database content, specialized - tools or the applications themselves should be used. + CloudNativePG manages **global objects** in PostgreSQL clusters, including + databases, roles, and tablespaces. However, it does **not** manage database content + beyond extensions and schemas (e.g., tables). To manage database content, use specialized + tools or rely on the applications themselves. ### Declarative `Database` Manifest @@ -38,6 +38,9 @@ spec: owner: app cluster: name: cluster-example + extensions: + - name: bloom + ensure: present ``` When applied, this manifest creates a `Database` object called @@ -157,6 +160,101 @@ spec: This manifest ensures that the `database-to-drop` database is removed from the `cluster-example` cluster. +## Managing Extensions in a Database + +!!! Info + While extensions are database-scoped rather than global objects, + CloudNativePG provides a declarative interface for managing them. This approach + is necessary because installing certain extensions may require superuser + privileges, which CloudNativePG recommends disabling by default. By leveraging + this API, users can efficiently manage extensions in a scalable and controlled + manner without requiring elevated privileges. + +CloudNativePG simplifies and automates the management of PostgreSQL extensions within the +target database. + +To enable this feature, define the `spec.extensions` field +with a list of extension specifications, as shown in the following example: + +```yaml +# ... +spec: + extensions: + - name: bloom + ensure: present +# ... +``` + +Each extension entry supports the following properties: + +- `name` *(mandatory)*: The name of the extension. +- `ensure`: Specifies whether the extension should be present or absent in the + database: + - `present`: Ensures that the extension is installed (default). + - `absent`: Ensures that the extension is removed. +- `version`: The specific version of the extension to install or + upgrade to. +- `schema`: The schema in which the extension should be installed. + +!!! Info + CloudNativePG manages extensions using the following PostgreSQL’s SQL commands: + [`CREATE EXTENSION`](https://www.postgresql.org/docs/current/sql-createextension.html), + [`DROP EXTENSION`](https://www.postgresql.org/docs/current/sql-dropextension.html), + [`ALTER EXTENSION`](https://www.postgresql.org/docs/current/sql-alterextension.html) + (limited to `UPDATE TO` and `SET SCHEMA`). + +The operator reconciles only the extensions explicitly listed in +`spec.extensions`. Any existing extensions not specified in this list remain +unchanged. + +!!! Warning + Before the introduction of declarative extension management, CloudNativePG + did not offer a straightforward way to create extensions through configuration. 
+ To address this, the ["managed extensions"](postgresql_conf.md#managed-extensions) + feature was introduced, enabling the automated and transparent management + of key extensions like `pg_stat_statements`. Currently, it is your + responsibility to ensure there are no conflicts between extension support in + the `Database` CRD and the managed extensions feature. + +## Managing Schemas in a Database + +!!! Info + Schema management in PostgreSQL is an exception to CloudNativePG's primary + focus on managing global objects. Since schemas exist within a database, they + are typically managed as part of the application development process. However, + CloudNativePG provides a declarative interface for schema management, primarily + to complete the support of extensions deployment within schemas. + +CloudNativePG simplifies and automates the management of PostgreSQL schemas within the +target database. + +To enable this feature, define the `spec.schemas` field +with a list of schema specifications, as shown in the following example: + +```yaml +# ... +spec: + schemas: + - name: app + owner: app +# ... +``` + +Each schema entry supports the following properties: + +- `name` *(mandatory)*: The name of the schema. +- `owner`: The owner of the schema. +- `ensure`: Specifies whether the schema should be present or absent in the + database: + - `present`: Ensures that the schema is installed (default). + - `absent`: Ensures that the schema is removed. + +!!! Info + CloudNativePG manages schemas using the following PostgreSQL’s SQL commands: + [`CREATE SCHEMA`](https://www.postgresql.org/docs/current/sql-createschema.html), + [`DROP SCHEMA`](https://www.postgresql.org/docs/current/sql-dropschema.html), + [`ALTER SCHEMA`](https://www.postgresql.org/docs/current/sql-alterschema.html). + ## Limitations and Caveats ### Renaming a database diff --git a/docs/src/index.md b/docs/src/index.md index 02afa896d7..acf3bc01fc 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -86,7 +86,7 @@ Additionally, the community provides images for the [PostGIS extension](postgis. Postgres extensions through the cluster `spec`: `pgaudit`, `auto_explain`, `pg_stat_statements`, and `pg_failover_slots` * Declarative management of Postgres roles, users and groups -* Declarative management of Postgres databases +* Declarative management of Postgres databases, including extensions and schemas * Support for Local Persistent Volumes with PVC templates * Reuse of Persistent Volumes storage in Pods * Separate volumes for WAL files and tablespaces diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index 0aaaf5772a..76a1249a80 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -147,7 +147,7 @@ required, as part of the bootstrap. Additional databases can be created or managed via [declarative database management](declarative_database_management.md) using -the `Database` CRD. +the `Database` CRD, also supporting extensions and schemas. Although no configuration is required to run the cluster, you can customize both PostgreSQL runtime configuration and PostgreSQL host-based diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index fdd3fc4fe3..4bc740ef11 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -212,6 +212,12 @@ SELECT datname FROM pg_database WHERE datallowconn !!! Note The above query also includes template databases like `template1`. +!!! 
Important + With the introduction of [declarative extensions](declarative_database_management.md#managing-extensions-in-a-database) + in the `Database` CRD, you can now manage extensions directly. As a result, + the managed extensions feature may undergo significant changes in future + versions of CloudNativePG, and some functionalities might be deprecated. + #### Enabling `auto_explain` The [`auto_explain`](https://www.postgresql.org/docs/current/auto-explain.html) diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index f7749bb84d..926cb21cb4 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -21,6 +21,10 @@ on the release branch in GitHub. Kubernetes startup and readiness probes in PostgreSQL instances, providing greater control over replicas based on the streaming lag. (#6623) +- **Declarative management of extensions and schemas**: Introduced the + `extensions` and `schemas` stanzas in the Database resource to declaratively + create, modify, and drop PostgreSQL extensions and schemas within a database. (#7062) + - **MAIN FEATURE #1**: short description - **MAIN FEATURE #2**: short description diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index 38d93a07df..81755ca6fb 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ -282,6 +282,11 @@ func RunController( return err } + if err = webhookv1.SetupDatabaseWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "Database", "version", "v1") + return err + } + // Setup the handler used by the readiness and liveliness probe. // // Unfortunately the readiness of the probe is not sufficient for the operator to be diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go index d0fe51dc68..27d54efce6 100644 --- a/internal/management/controller/common.go +++ b/internal/management/controller/common.go @@ -58,9 +58,8 @@ func markAsFailed( resource markableAsFailed, err error, ) error { - oldResource := resource.DeepCopyObject().(markableAsFailed) resource.SetAsFailed(err) - return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource)) + return cli.Status().Update(ctx, resource) } type markableAsUnknown interface { @@ -68,16 +67,15 @@ type markableAsUnknown interface { SetAsUnknown(err error) } -// markAsFailed marks the reconciliation as failed and logs the corresponding error +// markAsUnknown marks the reconciliation as failed and logs the corresponding error func markAsUnknown( ctx context.Context, cli client.Client, resource markableAsUnknown, err error, ) error { - oldResource := resource.DeepCopyObject().(markableAsUnknown) resource.SetAsUnknown(err) - return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource)) + return cli.Status().Update(ctx, resource) } type markableAsReady interface { @@ -91,10 +89,8 @@ func markAsReady( cli client.Client, resource markableAsReady, ) error { - oldResource := resource.DeepCopyObject().(markableAsReady) resource.SetAsReady() - - return cli.Status().Patch(ctx, resource, client.MergeFrom(oldResource)) + return cli.Status().Update(ctx, resource) } func getClusterFromInstance( diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index ad9ed1b14e..55b19aeb04 100644 --- a/internal/management/controller/database_controller.go +++ 
b/internal/management/controller/database_controller.go @@ -40,7 +40,28 @@ type DatabaseReconciler struct { instance instanceInterface finalizerReconciler *finalizerReconciler[*apiv1.Database] - getSuperUserDB func() (*sql.DB, error) + + getSuperUserDB func() (*sql.DB, error) + getTargetDB func(dbname string) (*sql.DB, error) +} + +// ErrFailedDatabaseObjectReconciliation is raised when a database object failed to reconcile +var ErrFailedDatabaseObjectReconciliation = fmt.Errorf("database object reconciliation failed") + +// schemaObjectManager is the manager of schema objects +var schemaObjectManager = databaseObjectManager[apiv1.SchemaSpec, schemaInfo]{ + get: getDatabaseSchemaInfo, + create: createDatabaseSchema, + update: updateDatabaseSchema, + drop: dropDatabaseSchema, +} + +// extensionObjectManager is the manager of the extension objects +var extensionObjectManager = databaseObjectManager[apiv1.ExtensionSpec, extInfo]{ + get: getDatabaseExtensionInfo, + create: createDatabaseExtension, + update: updateDatabaseExtension, + drop: dropDatabaseExtension, } // databaseReconciliationInterval is the time between the @@ -121,7 +142,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return res, err } - if err := r.reconcileDatabase(ctx, &database); err != nil { + if err := r.reconcileDatabaseResource(ctx, &database); err != nil { if markErr := markAsFailed(ctx, r.Client, &database, err); markErr != nil { contextLogger.Error(err, "while marking as failed the database resource", "error", err, @@ -164,6 +185,9 @@ func NewDatabaseReconciler( getSuperUserDB: func() (*sql.DB, error) { return instance.GetSuperUserDB() }, + getTargetDB: func(dbname string) (*sql.DB, error) { + return instance.ConnectionPool().Connection(dbname) + }, } dr.finalizerReconciler = newFinalizerReconciler( @@ -188,7 +212,7 @@ func (r *DatabaseReconciler) GetCluster(ctx context.Context) (*apiv1.Cluster, er return getClusterFromInstance(ctx, r.Client, r.instance) } -func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.Database) error { +func (r *DatabaseReconciler) reconcileDatabaseResource(ctx context.Context, obj *apiv1.Database) error { db, err := r.getSuperUserDB() if err != nil { return fmt.Errorf("while connecting to the database %q: %w", obj.Spec.Name, err) @@ -198,6 +222,47 @@ func (r *DatabaseReconciler) reconcileDatabase(ctx context.Context, obj *apiv1.D return dropDatabase(ctx, db, obj) } + if err := r.reconcilePostgresDatabase(ctx, db, obj); err != nil { + return err + } + + if err := r.reconcileDatabaseObjects(ctx, obj); err != nil { + return err + } + + for _, status := range obj.Status.Schemas { + if !status.Applied { + return ErrFailedDatabaseObjectReconciliation + } + } + for _, status := range obj.Status.Extensions { + if !status.Applied { + return ErrFailedDatabaseObjectReconciliation + } + } + + return nil +} + +func (r *DatabaseReconciler) reconcileDatabaseObjects( + ctx context.Context, + obj *apiv1.Database, +) error { + if len(obj.Spec.Schemas) == 0 && len(obj.Spec.Extensions) == 0 { + return nil + } + + db, err := r.getTargetDB(obj.Spec.Name) + if err != nil { + return fmt.Errorf("while connecting to the database %q: %v", obj.Spec.Name, err) + } + + obj.Status.Schemas = schemaObjectManager.reconcileList(ctx, db, obj.Spec.Schemas) + obj.Status.Extensions = extensionObjectManager.reconcileList(ctx, db, obj.Spec.Extensions) + return nil +} + +func (r *DatabaseReconciler) reconcilePostgresDatabase(ctx context.Context, db *sql.DB, obj 
*apiv1.Database) error { dbExists, err := detectDatabase(ctx, db, obj) if err != nil { return fmt.Errorf("while detecting the database %q: %w", obj.Spec.Name, err) diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index fc6ccce66a..504633aecf 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -19,6 +19,7 @@ package controller import ( "context" "database/sql" + "errors" "fmt" "strings" @@ -28,6 +29,17 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" ) +type extInfo struct { + Name string `json:"name"` + Version string `json:"version"` + Schema string `json:"schema"` +} + +type schemaInfo struct { + Name string `json:"name"` + Owner string `json:"owner"` +} + func detectDatabase( ctx context.Context, db *sql.DB, @@ -38,7 +50,7 @@ func detectDatabase( ` SELECT count(*) FROM pg_catalog.pg_database - WHERE datname = $1 + WHERE datname = $1 `, obj.Spec.Name) if row.Err() != nil { @@ -87,7 +99,8 @@ func createDatabase( sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE %s", pgx.Identifier{obj.Spec.Locale}.Sanitize())) } if obj.Spec.LocaleProvider != "" { - sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE_PROVIDER %s", pgx.Identifier{obj.Spec.LocaleProvider}.Sanitize())) + sqlCreateDatabase.WriteString(fmt.Sprintf(" LOCALE_PROVIDER %s", + pgx.Identifier{obj.Spec.LocaleProvider}.Sanitize())) } if obj.Spec.LcCollate != "" { sqlCreateDatabase.WriteString(fmt.Sprintf(" LC_COLLATE %s", pgx.Identifier{obj.Spec.LcCollate}.Sanitize())) @@ -102,7 +115,8 @@ func createDatabase( sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize())) } if obj.Spec.BuiltinLocale != "" { - sqlCreateDatabase.WriteString(fmt.Sprintf(" BUILTIN_LOCALE %s", pgx.Identifier{obj.Spec.BuiltinLocale}.Sanitize())) + sqlCreateDatabase.WriteString(fmt.Sprintf(" BUILTIN_LOCALE %s", + pgx.Identifier{obj.Spec.BuiltinLocale}.Sanitize())) } if obj.Spec.CollationVersion != "" { sqlCreateDatabase.WriteString(fmt.Sprintf(" COLLATION_VERSION %s", @@ -213,3 +227,177 @@ func dropDatabase( return nil } + +const detectDatabaseExtensionSQL = ` +SELECT e.extname, e.extversion, n.nspname +FROM pg_catalog.pg_extension e +JOIN pg_catalog.pg_namespace n ON e.extnamespace=n.oid +WHERE e.extname = $1 +` + +func getDatabaseExtensionInfo(ctx context.Context, db *sql.DB, ext apiv1.ExtensionSpec) (*extInfo, error) { + row := db.QueryRowContext( + ctx, detectDatabaseExtensionSQL, + ext.Name) + if row.Err() != nil { + return nil, fmt.Errorf("while checking if extension %q exists: %w", ext.Name, row.Err()) + } + + var result extInfo + if err := row.Scan(&result.Name, &result.Version, &result.Schema); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, fmt.Errorf("while scanning if extension %q exists: %w", ext.Name, err) + } + + return &result, nil +} + +func createDatabaseExtension(ctx context.Context, db *sql.DB, ext apiv1.ExtensionSpec) error { + contextLogger := log.FromContext(ctx) + + var sqlCreateExtension strings.Builder + sqlCreateExtension.WriteString(fmt.Sprintf("CREATE EXTENSION %s ", pgx.Identifier{ext.Name}.Sanitize())) + if len(ext.Version) > 0 { + sqlCreateExtension.WriteString(fmt.Sprintf(" VERSION %s", pgx.Identifier{ext.Version}.Sanitize())) + } + if len(ext.Schema) > 0 { + sqlCreateExtension.WriteString(fmt.Sprintf(" SCHEMA %s", pgx.Identifier{ext.Schema}.Sanitize())) + } + + _, 
err := db.ExecContext(ctx, sqlCreateExtension.String()) + if err != nil { + contextLogger.Error(err, "while creating extension", "query", sqlCreateExtension.String()) + return err + } + contextLogger.Info("created extension", "name", ext.Name) + + return nil +} + +func dropDatabaseExtension(ctx context.Context, db *sql.DB, ext apiv1.ExtensionSpec) error { + contextLogger := log.FromContext(ctx) + query := fmt.Sprintf("DROP EXTENSION IF EXISTS %s", pgx.Identifier{ext.Name}.Sanitize()) + _, err := db.ExecContext( + ctx, + query) + if err != nil { + contextLogger.Error(err, "while dropping extension", "query", query) + return err + } + contextLogger.Info("dropped extension", "name", ext.Name) + return nil +} + +func updateDatabaseExtension(ctx context.Context, db *sql.DB, spec apiv1.ExtensionSpec, info *extInfo) error { + contextLogger := log.FromContext(ctx) + if len(spec.Schema) > 0 && spec.Schema != info.Schema { + changeSchemaSQL := fmt.Sprintf( + "ALTER EXTENSION %s SET SCHEMA %v", + pgx.Identifier{spec.Name}.Sanitize(), + pgx.Identifier{spec.Schema}.Sanitize(), + ) + + if _, err := db.ExecContext(ctx, changeSchemaSQL); err != nil { + return fmt.Errorf("altering schema: %w", err) + } + + contextLogger.Info("altered extension schema", "name", spec.Name, "schema", spec.Schema) + } + + if len(spec.Version) > 0 && spec.Version != info.Version { + //nolint:gosec + changeVersionSQL := fmt.Sprintf( + "ALTER EXTENSION %s UPDATE TO %v", + pgx.Identifier{spec.Name}.Sanitize(), + pgx.Identifier{spec.Version}.Sanitize(), + ) + + if _, err := db.ExecContext(ctx, changeVersionSQL); err != nil { + return fmt.Errorf("altering version: %w", err) + } + + contextLogger.Info("altered extension version", "name", spec.Name, "version", spec.Version) + } + + return nil +} + +const detectDatabaseSchemaSQL = ` +SELECT n.nspname, a.rolname +FROM pg_catalog.pg_namespace n +JOIN pg_catalog.pg_authid a ON n.nspowner = a.oid +WHERE n.nspname = $1 +` + +func getDatabaseSchemaInfo(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec) (*schemaInfo, error) { + row := db.QueryRowContext( + ctx, detectDatabaseSchemaSQL, + schema.Name) + if row.Err() != nil { + return nil, fmt.Errorf("while checking if schema %q exists: %w", schema.Name, row.Err()) + } + + var result schemaInfo + if err := row.Scan(&result.Name, &result.Owner); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, fmt.Errorf("while scanning if schema %q exists: %w", schema.Name, err) + } + + return &result, nil +} + +func createDatabaseSchema(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec) error { + contextLogger := log.FromContext(ctx) + + var sqlCreateExtension strings.Builder + sqlCreateExtension.WriteString(fmt.Sprintf("CREATE SCHEMA %s ", pgx.Identifier{schema.Name}.Sanitize())) + if len(schema.Owner) > 0 { + sqlCreateExtension.WriteString(fmt.Sprintf(" AUTHORIZATION %s", pgx.Identifier{schema.Owner}.Sanitize())) + } + + _, err := db.ExecContext(ctx, sqlCreateExtension.String()) + if err != nil { + contextLogger.Error(err, "while creating schema", "query", sqlCreateExtension.String()) + return err + } + contextLogger.Info("created schema", "name", schema.Name) + + return nil +} + +func updateDatabaseSchema(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec, info *schemaInfo) error { + contextLogger := log.FromContext(ctx) + if len(schema.Owner) > 0 && schema.Owner != info.Owner { + changeSchemaSQL := fmt.Sprintf( + "ALTER SCHEMA %s OWNER TO %v", + pgx.Identifier{schema.Name}.Sanitize(), + 
pgx.Identifier{schema.Owner}.Sanitize(), + ) + + if _, err := db.ExecContext(ctx, changeSchemaSQL); err != nil { + return fmt.Errorf("altering schema: %w", err) + } + + contextLogger.Info("altered schema owner", "name", schema.Name, "owner", schema.Owner) + } + + return nil +} + +func dropDatabaseSchema(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec) error { + contextLogger := log.FromContext(ctx) + query := fmt.Sprintf("DROP SCHEMA IF EXISTS %s", pgx.Identifier{schema.Name}.Sanitize()) + _, err := db.ExecContext( + ctx, + query) + if err != nil { + contextLogger.Error(err, "while dropping schema", "query", query) + return err + } + contextLogger.Info("dropped schema", "name", schema.Name) + return nil +} diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go index cc5fe74ced..aa9b3aef0e 100644 --- a/internal/management/controller/database_controller_sql_test.go +++ b/internal/management/controller/database_controller_sql_test.go @@ -124,7 +124,8 @@ var _ = Describe("Managed Database SQL", func() { "ICU_LOCALE %s ICU_RULES %s", pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), pgx.Identifier{database.Spec.Encoding}.Sanitize(), pgx.Identifier{database.Spec.Locale}.Sanitize(), - pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), pgx.Identifier{database.Spec.LcCollate}.Sanitize(), + pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), + pgx.Identifier{database.Spec.LcCollate}.Sanitize(), pgx.Identifier{database.Spec.LcCtype}.Sanitize(), pgx.Identifier{database.Spec.IcuLocale}.Sanitize(), pgx.Identifier{database.Spec.IcuRules}.Sanitize(), ) @@ -144,7 +145,8 @@ var _ = Describe("Managed Database SQL", func() { "CREATE DATABASE %s OWNER %s "+ "LOCALE_PROVIDER %s BUILTIN_LOCALE %s COLLATION_VERSION %s", pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(), - pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), pgx.Identifier{database.Spec.BuiltinLocale}.Sanitize(), + pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), + pgx.Identifier{database.Spec.BuiltinLocale}.Sanitize(), pgx.Identifier{database.Spec.CollationVersion}.Sanitize(), ) dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue) @@ -223,3 +225,291 @@ var _ = Describe("Managed Database SQL", func() { }) }) }) + +var _ = Describe("Managed Extensions SQL", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + ext apiv1.ExtensionSpec + err error + + testError error + ) + + BeforeEach(func() { + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + ext = apiv1.ExtensionSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "testext", + Ensure: "present", + }, + Version: "1.0", + Schema: "default", + } + + testError = fmt.Errorf("test error") + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + Context("getDatabaseExtensionInfo", func() { + It("returns info when the extension exists", func(ctx SpecContext) { + dbMock. + ExpectQuery(detectDatabaseExtensionSQL). + WithArgs(ext.Name). + WillReturnRows( + sqlmock.NewRows([]string{"extname", "extversion", "nspname"}). 
+						AddRow("testext", "1.0", "default"),
+				)
+			extInfo, err := getDatabaseExtensionInfo(ctx, db, ext)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(extInfo).ToNot(BeNil())
+			Expect(extInfo.Name).To(Equal("testext"))
+			Expect(extInfo.Schema).To(Equal("default"))
+			Expect(extInfo.Version).To(Equal("1.0"))
+		})
+
+		It("returns nil info when the extension does not exist", func(ctx SpecContext) {
+			dbMock.
+				ExpectQuery(detectDatabaseExtensionSQL).
+				WithArgs(ext.Name).
+				WillReturnRows(
+					sqlmock.NewRows([]string{"extname", "extversion", "nspname"}),
+				)
+			extInfo, err := getDatabaseExtensionInfo(ctx, db, ext)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(extInfo).To(BeNil())
+		})
+	})
+
+	Context("createDatabaseExtension", func() {
+		createExtensionSQL := "CREATE EXTENSION \"testext\" VERSION \"1.0\" SCHEMA \"default\""
+
+		It("returns success when the extension has been created", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(createExtensionSQL).
+				WillReturnResult(sqlmock.NewResult(0, 1))
+			Expect(createDatabaseExtension(ctx, db, ext)).Error().NotTo(HaveOccurred())
+		})
+
+		It("fails when the extension could not be created", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(createExtensionSQL).
+				WillReturnError(testError)
+			Expect(createDatabaseExtension(ctx, db, ext)).Error().To(Equal(testError))
+		})
+	})
+
+	Context("dropDatabaseExtension", func() {
+		dropExtensionSQL := "DROP EXTENSION IF EXISTS \"testext\""
+
+		It("returns success when the extension has been dropped", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(dropExtensionSQL).
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(dropDatabaseExtension(ctx, db, ext)).Error().NotTo(HaveOccurred())
+		})
+
+		It("returns an error when the DROP statement failed", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(dropExtensionSQL).
+				WillReturnError(testError)
+
+			Expect(dropDatabaseExtension(ctx, db, ext)).Error().To(Equal(testError))
+		})
+	})
+
+	Context("updateDatabaseExtension", func() {
+		It("does nothing when the extension is already at the correct version", func(ctx SpecContext) {
+			Expect(updateDatabaseExtension(ctx, db, ext, &extInfo{
+				Name:    ext.Name,
+				Version: ext.Version,
+				Schema:  ext.Schema,
+			})).Error().NotTo(HaveOccurred())
+		})
+
+		It("updates the extension version", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" UPDATE TO \"1.0\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(updateDatabaseExtension(ctx, db, ext,
+				&extInfo{Name: ext.Name, Version: "0.9", Schema: ext.Schema})).Error().NotTo(HaveOccurred())
+		})
+
+		It("updates the schema", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" SET SCHEMA \"default\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(updateDatabaseExtension(ctx, db, ext,
+				&extInfo{Name: ext.Name, Version: ext.Version, Schema: "old"})).Error().NotTo(HaveOccurred())
+		})
+
+		It("sets the schema and the extension version", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" SET SCHEMA \"default\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" UPDATE TO \"1.0\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(updateDatabaseExtension(ctx, db, ext, &extInfo{
+				Name: ext.Name, Version: "0.9",
+				Schema: "old",
+			})).Error().NotTo(HaveOccurred())
+		})
+
+		It("fails when setting the schema fails", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" SET SCHEMA \"default\"").
+				WillReturnError(testError)
+
+			Expect(updateDatabaseExtension(ctx, db, ext,
+				&extInfo{Name: ext.Name, Version: ext.Version, Schema: "old"})).Error().To(MatchError(testError))
+		})
+
+		It("fails when setting the version fails", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" SET SCHEMA \"default\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+			dbMock.
+				ExpectExec("ALTER EXTENSION \"testext\" UPDATE TO \"1.0\"").
+				WillReturnError(testError)
+
+			Expect(updateDatabaseExtension(ctx, db, ext, &extInfo{
+				Name: ext.Name, Version: "0.9",
+				Schema: "old",
+			})).Error().To(MatchError(testError))
+		})
+	})
+})
+
+var _ = Describe("Managed schema SQL", func() {
+	var (
+		dbMock sqlmock.Sqlmock
+		db     *sql.DB
+		schema apiv1.SchemaSpec
+		err    error
+
+		testError error
+	)
+
+	BeforeEach(func() {
+		db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+		Expect(err).ToNot(HaveOccurred())
+
+		schema = apiv1.SchemaSpec{
+			DatabaseObjectSpec: apiv1.DatabaseObjectSpec{
+				Name:   "testschema",
+				Ensure: "present",
+			},
+			Owner: "owner",
+		}
+
+		testError = fmt.Errorf("test error")
+	})
+
+	AfterEach(func() {
+		Expect(dbMock.ExpectationsWereMet()).To(Succeed())
+	})
+
+	Context("getDatabaseSchemaInfo", func() {
+		It("returns info when the schema exists", func(ctx SpecContext) {
+			dbMock.
+				ExpectQuery(detectDatabaseSchemaSQL).
+				WithArgs(schema.Name).
+				WillReturnRows(
+					sqlmock.NewRows([]string{"name", "owner"}).
+						AddRow("name", "owner"),
+				)
+			schemaInfo, err := getDatabaseSchemaInfo(ctx, db, schema)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(schemaInfo).ToNot(BeNil())
+			Expect(schemaInfo.Name).To(Equal("name"))
+			Expect(schemaInfo.Owner).To(Equal("owner"))
+		})
+
+		It("returns nil info when the schema does not exist", func(ctx SpecContext) {
+			dbMock.
+				ExpectQuery(detectDatabaseSchemaSQL).
+				WithArgs(schema.Name).
+				WillReturnRows(
+					sqlmock.NewRows([]string{"name", "owner"}),
+				)
+			schemaInfo, err := getDatabaseSchemaInfo(ctx, db, schema)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(schemaInfo).To(BeNil())
+		})
+	})
+
+	Context("createDatabaseSchema", func() {
+		createSchemaSQL := "CREATE SCHEMA \"testschema\" AUTHORIZATION \"owner\""
+
+		It("returns success when the schema has been created", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(createSchemaSQL).
+				WillReturnResult(sqlmock.NewResult(0, 1))
+			Expect(createDatabaseSchema(ctx, db, schema)).Error().NotTo(HaveOccurred())
+		})
+
+		It("fails when the schema has not been created", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec(createSchemaSQL).
+				WillReturnError(testError)
+			Expect(createDatabaseSchema(ctx, db, schema)).Error().To(Equal(testError))
+		})
+	})
+
+	Context("updateDatabaseSchema", func() {
+		It("does nothing when the schema has been correctly reconciled", func(ctx SpecContext) {
+			Expect(updateDatabaseSchema(ctx, db, schema, &schemaInfo{
+				Name:  schema.Name,
+				Owner: schema.Owner,
+			})).Error().NotTo(HaveOccurred())
+		})
+
+		It("updates the schema owner", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER SCHEMA \"testschema\" OWNER TO \"owner\"").
+				WillReturnResult(sqlmock.NewResult(0, 1))
+
+			Expect(updateDatabaseSchema(ctx, db, schema,
+				&schemaInfo{Name: schema.Name, Owner: "old"})).Error().NotTo(HaveOccurred())
+		})
+
+		It("fails when setting the owner fails", func(ctx SpecContext) {
+			dbMock.
+				ExpectExec("ALTER SCHEMA \"testschema\" OWNER TO \"owner\"").
+ WillReturnError(testError) + + Expect(updateDatabaseSchema(ctx, db, schema, + &schemaInfo{Name: schema.Name, Owner: "old"})).Error().To(MatchError(testError)) + }) + }) + + Context("dropDatabaseSchema", func() { + dropSchemaSQL := "DROP SCHEMA IF EXISTS \"testschema\"" + + It("returns success when the extension has been dropped", func(ctx SpecContext) { + dbMock. + ExpectExec(dropSchemaSQL). + WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(dropDatabaseSchema(ctx, db, schema)).Error().NotTo(HaveOccurred()) + }) + + It("returns an error when the DROP statement failed", func(ctx SpecContext) { + dbMock. + ExpectExec(dropSchemaSQL). + WillReturnError(testError) + + Expect(dropDatabaseSchema(ctx, db, schema)).Error().To(Equal(testError)) + }) + }) +}) diff --git a/internal/management/controller/database_objects.go b/internal/management/controller/database_objects.go new file mode 100644 index 0000000000..a0fb2e7e3c --- /dev/null +++ b/internal/management/controller/database_objects.go @@ -0,0 +1,145 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "database/sql" + "fmt" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +type databaseObjectSpec interface { + GetName() string + GetEnsure() apiv1.EnsureOption +} + +type databaseObjectManager[Spec databaseObjectSpec, Info any] struct { + get func(ctx context.Context, db *sql.DB, spec Spec) (*Info, error) + create func(ctx context.Context, db *sql.DB, spec Spec) error + update func(ctx context.Context, db *sql.DB, spec Spec, info *Info) error + drop func(ctx context.Context, db *sql.DB, spec Spec) error +} + +func createFailedStatus(name, message string) apiv1.DatabaseObjectStatus { + return apiv1.DatabaseObjectStatus{ + Name: name, + Applied: false, + Message: message, + } +} + +func createSuccessStatus(name string) apiv1.DatabaseObjectStatus { + return apiv1.DatabaseObjectStatus{ + Name: name, + Applied: true, + } +} + +func (r *databaseObjectManager[Spec, Info]) reconcileList( + ctx context.Context, + db *sql.DB, + specs []Spec, +) []apiv1.DatabaseObjectStatus { + result := make([]apiv1.DatabaseObjectStatus, len(specs)) + for i := range specs { + spec := specs[i] + result[i] = r.reconcile(ctx, db, spec) + } + return result +} + +func (r *databaseObjectManager[Spec, Info]) reconcile( + ctx context.Context, + db *sql.DB, + spec Spec, +) apiv1.DatabaseObjectStatus { + info, err := r.get(ctx, db, spec) + if err != nil { + return createFailedStatus( + spec.GetName(), + fmt.Sprintf("while reading the object %#v: %v", spec, err), + ) + } + + exists := info != nil + ensureOption := spec.GetEnsure() + + switch { + case !exists && ensureOption == apiv1.EnsurePresent: + return r.reconcileCreate(ctx, db, spec) + + case !exists && ensureOption == apiv1.EnsureAbsent: + return createSuccessStatus(spec.GetName()) + + case exists && ensureOption == apiv1.EnsurePresent: + return r.reconcileUpdate(ctx, db, spec, info) + + case exists && ensureOption == 
apiv1.EnsureAbsent: + return r.reconcileDrop(ctx, db, spec) + + default: + // If this happens, the CRD and/or the validating webhook + // are not working properly. In this case, let's do nothing: + // better to be safe than sorry. + return createSuccessStatus(spec.GetName()) + } +} + +func (r *databaseObjectManager[Spec, Info]) reconcileCreate( + ctx context.Context, + db *sql.DB, + spec Spec, +) apiv1.DatabaseObjectStatus { + if err := r.create(ctx, db, spec); err != nil { + return createFailedStatus( + spec.GetName(), + err.Error(), + ) + } + + return createSuccessStatus(spec.GetName()) +} + +func (r *databaseObjectManager[Spec, Info]) reconcileUpdate( + ctx context.Context, db *sql.DB, spec Spec, info *Info, +) apiv1.DatabaseObjectStatus { + if err := r.update(ctx, db, spec, info); err != nil { + return createFailedStatus( + spec.GetName(), + err.Error(), + ) + } + + return createSuccessStatus(spec.GetName()) +} + +func (r *databaseObjectManager[Spec, Info]) reconcileDrop( + ctx context.Context, + db *sql.DB, + spec Spec, +) apiv1.DatabaseObjectStatus { + if err := r.drop(ctx, db, spec); err != nil { + return createFailedStatus( + spec.GetName(), + err.Error(), + ) + } + + return createSuccessStatus(spec.GetName()) +} diff --git a/internal/webhook/v1/database_webhook.go b/internal/webhook/v1/database_webhook.go new file mode 100644 index 0000000000..797042495e --- /dev/null +++ b/internal/webhook/v1/database_webhook.go @@ -0,0 +1,211 @@ +/* +Copyright The CloudNativePG Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// databaseLog is for logging in this package. +var databaseLog = log.WithName("database-resource").WithValues("version", "v1") + +// SetupDatabaseWebhookWithManager registers the webhook for Database in the manager. +func SetupDatabaseWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Database{}). + WithValidator(&DatabaseCustomValidator{}). + WithDefaulter(&DatabaseCustomDefaulter{}). + Complete() +} + +// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here. +// Modifying the path for an invalid path can cause API server errors; failing to locate the webhook. 
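+// The two +kubebuilder:webhook markers below are consumed by
+// controller-gen, which generates the matching entries in
+// config/webhook/manifests.yaml (see the hunk for that file earlier in
+// this patch).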
+// +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},path=/mutate-postgresql-cnpg-io-v1-database,mutating=true,failurePolicy=fail,groups=postgresql.cnpg.io,resources=databases,verbs=create;update,versions=v1,name=mdatabase.cnpg.io,sideEffects=None +// +kubebuilder:webhook:webhookVersions={v1},admissionReviewVersions={v1},verbs=create;update,path=/validate-postgresql-cnpg-io-v1-database,mutating=false,failurePolicy=fail,groups=postgresql.cnpg.io,resources=databases,versions=v1,name=vdatabase.cnpg.io,sideEffects=None + +// DatabaseCustomDefaulter struct is responsible for setting default values on the custom resource of the +// Kind Database when those are created or updated. +type DatabaseCustomDefaulter struct{} + +// Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Database. +func (d *DatabaseCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { + database, ok := obj.(*apiv1.Database) + if !ok { + return fmt.Errorf("expected a database object but got %T", obj) + } + databaseLog.Info("Defaulting for database", "name", database.GetName(), "namespace", database.GetNamespace()) + + // database.Default() + + return nil +} + +// DatabaseCustomValidator is responsible for validating the Database +// resource when it is created, updated, or deleted. +type DatabaseCustomValidator struct{} + +// ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type Database . +func (v *DatabaseCustomValidator) ValidateCreate(_ context.Context, obj runtime.Object) (admission.Warnings, error) { + database, ok := obj.(*apiv1.Database) + if !ok { + return nil, fmt.Errorf("expected a Database object but got %T", obj) + } + databaseLog.Info( + "Validation for Database upon creation", + "name", database.GetName(), "namespace", database.GetNamespace()) + + allErrs := v.validate(database) + allWarnings := v.getAdmissionWarnings(database) + + if len(allErrs) == 0 { + return allWarnings, nil + } + + return allWarnings, apierrors.NewInvalid( + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Database "}, + database.Name, allErrs) +} + +// ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type Database . +func (v *DatabaseCustomValidator) ValidateUpdate( + _ context.Context, + oldObj, newObj runtime.Object, +) (admission.Warnings, error) { + database, ok := newObj.(*apiv1.Database) + if !ok { + return nil, fmt.Errorf("expected a Database object for the newObj but got %T", newObj) + } + + oldDatabase, ok := oldObj.(*apiv1.Database) + if !ok { + return nil, fmt.Errorf("expected a Database object for the oldObj but got %T", oldObj) + } + + databaseLog.Info( + "Validation for Database upon update", + "name", database.GetName(), "namespace", database.GetNamespace()) + + allErrs := append( + v.validate(database), + v.validateDatabaseChanges(database, oldDatabase)..., + ) + allWarnings := v.getAdmissionWarnings(database) + + if len(allErrs) == 0 { + return allWarnings, nil + } + + return allWarnings, apierrors.NewInvalid( + schema.GroupKind{Group: "database.cnpg.io", Kind: "Database "}, + database.Name, allErrs) +} + +func (v *DatabaseCustomValidator) validateDatabaseChanges(_ *apiv1.Database, _ *apiv1.Database) field.ErrorList { + return nil +} + +// ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type Database . 
+func (v *DatabaseCustomValidator) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) {
+	database, ok := obj.(*apiv1.Database)
+	if !ok {
+		return nil, fmt.Errorf("expected a Database object but got %T", obj)
+	}
+	databaseLog.Info(
+		"Validation for Database upon deletion",
+		"name", database.GetName(), "namespace", database.GetNamespace())
+
+	// TODO(user): fill in your validation logic upon object deletion.
+
+	return nil, nil
+}
+
+// validate groups the validation logic for databases, returning a list of all the errors encountered
+func (v *DatabaseCustomValidator) validate(d *apiv1.Database) (allErrs field.ErrorList) {
+	type validationFunc func(*apiv1.Database) field.ErrorList
+	validations := []validationFunc{
+		v.validateExtensions,
+		v.validateSchemas,
+	}
+
+	for _, validate := range validations {
+		allErrs = append(allErrs, validate(d)...)
+	}
+
+	return allErrs
+}
+
+func (v *DatabaseCustomValidator) getAdmissionWarnings(_ *apiv1.Database) admission.Warnings {
+	return nil
+}
+
+// validateExtensions validates the database extensions
+func (v *DatabaseCustomValidator) validateExtensions(d *apiv1.Database) field.ErrorList {
+	var result field.ErrorList
+
+	extensionNames := stringset.New()
+	for i, ext := range d.Spec.Extensions {
+		name := ext.Name
+		if extensionNames.Has(name) {
+			result = append(
+				result,
+				field.Duplicate(
+					field.NewPath("spec", "extensions").Index(i).Child("name"),
+					name,
+				),
+			)
+		}
+
+		extensionNames.Put(name)
+	}
+
+	return result
+}
+
+// validateSchemas validates the database schemas
+func (v *DatabaseCustomValidator) validateSchemas(d *apiv1.Database) field.ErrorList {
+	var result field.ErrorList
+
+	schemaNames := stringset.New()
+	for i, schema := range d.Spec.Schemas {
+		name := schema.Name
+		if schemaNames.Has(name) {
+			result = append(
+				result,
+				field.Duplicate(
+					field.NewPath("spec", "schemas").Index(i).Child("name"),
+					name,
+				),
+			)
+		}
+
+		schemaNames.Put(name)
+	}
+
+	return result
+}
diff --git a/internal/webhook/v1/database_webhook_test.go b/internal/webhook/v1/database_webhook_test.go
new file mode 100644
index 0000000000..c50f3d681d
--- /dev/null
+++ b/internal/webhook/v1/database_webhook_test.go
@@ -0,0 +1,105 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+
+	. "github.com/onsi/ginkgo/v2"
+	. 
"github.com/onsi/gomega" +) + +var _ = Describe("Database validation", func() { + var v *DatabaseCustomValidator + + createExtensionSpec := func(name string) apiv1.ExtensionSpec { + return apiv1.ExtensionSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: name, + Ensure: apiv1.EnsurePresent, + }, + } + } + createSchemaSpec := func(name string) apiv1.SchemaSpec { + return apiv1.SchemaSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: name, + Ensure: apiv1.EnsurePresent, + }, + } + } + + BeforeEach(func() { + v = &DatabaseCustomValidator{} + }) + + DescribeTable( + "Database validation", + func(db *apiv1.Database, errorCount int) { + foundErrors := v.validate(db) + Expect(foundErrors).To(HaveLen(errorCount)) + }, + Entry( + "doesn't complain when extensions and schemas are null", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{}, + }, + 0, + ), + Entry( + "doesn't complain if there are no duplicate extensions and no duplicate schemas", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + Extensions: []apiv1.ExtensionSpec{ + createExtensionSpec("postgis"), + }, + Schemas: []apiv1.SchemaSpec{ + createSchemaSpec("test_schema"), + }, + }, + }, + 0, + ), + Entry( + "complain if there are duplicate extensions", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + Extensions: []apiv1.ExtensionSpec{ + createExtensionSpec("postgis"), + createExtensionSpec("postgis"), + createExtensionSpec("cube"), + }, + }, + }, + 1, + ), + + Entry( + "complain if there are duplicate schemas", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + Schemas: []apiv1.SchemaSpec{ + createSchemaSpec("test_one"), + createSchemaSpec("test_two"), + createSchemaSpec("test_two"), + }, + }, + }, + 1, + ), + ) +}) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 7a302b5b2e..941dc034b5 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -549,7 +549,7 @@ func QueryMatchExpectationPredicate( } g.Expect(err).ToNot(HaveOccurred()) g.Expect(strings.Trim(stdout, "\n")).To(BeEquivalentTo(expectedOutput), - fmt.Sprintf("expected query %q to return %q", query, expectedOutput)) + fmt.Sprintf("expected query %q to return %q (in database %q)", query, expectedOutput, dbname)) } } @@ -561,6 +561,14 @@ func databaseExistsQuery(dbName string) string { return fmt.Sprintf("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_database WHERE datname='%v')", dbName) } +func extensionExistsQuery(extName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT FROM pg_catalog.pg_extension WHERE extname='%v')", extName) +} + +func schemaExistsQuery(namespaceName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT FROM pg_catalog.pg_namespace WHERE nspname='%v')", namespaceName) +} + // AssertDataExpectedCount verifies that an expected amount of rows exists on the table func AssertDataExpectedCount( env *environment.TestingEnvironment, diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index ab6afe1131..07f87d94b4 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -131,6 +131,26 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test assertDatabaseHasExpectedFields(namespace, primaryPodInfo.Name, database) }) + By("verifying the extension presence in the target database", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + for _, extSpec := range 
database.Spec.Extensions { + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, exec.DatabaseName(database.Spec.Name), + extensionExistsQuery(extSpec.Name), boolPGOutput(true)), 30).Should(Succeed()) + } + }) + + By("verifying the schema presence in the target database", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + for _, schemaSpec := range database.Spec.Schemas { + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, exec.DatabaseName(database.Spec.Name), + schemaExistsQuery(schemaSpec.Name), boolPGOutput(true)), 30).Should(Succeed()) + } + }) + By("removing the Database object", func() { Expect(objects.Delete(env.Ctx, env.Client, &database)).To(Succeed()) }) diff --git a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template index be0f6c7e23..40f7cfe7e9 100644 --- a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template @@ -11,3 +11,9 @@ spec: databaseReclaimPolicy: delete cluster: name: cluster-with-declarative-databases + extensions: + - name: bloom + ensure: present + schemas: + - name: test_schema + ensure: present diff --git a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template index a3ae25d8b3..1b2cfc8fdc 100644 --- a/tests/e2e/fixtures/declarative_databases/database.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template @@ -11,3 +11,9 @@ spec: template: template0 cluster: name: cluster-with-declarative-databases + extensions: + - name: bloom + ensure: present + schemas: + - name: test_schema + ensure: present From aca872499ff53461830d3f4dd9d5b9898f87cfd1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 13 Mar 2025 13:48:08 +0100 Subject: [PATCH 451/836] chore(deps): update dependency golangci/golangci-lint to v1.64.7 (main) (#7137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Update | Change | |---|---|---| | [golangci/golangci-lint](https://redirect.github.com/golangci/golangci-lint) | patch | `1.64.6` -> `1.64.7` | --- ### Release Notes
golangci/golangci-lint (golangci/golangci-lint) ### [`v1.64.7`](https://redirect.github.com/golangci/golangci-lint/blob/HEAD/CHANGELOG.md#v1647) [Compare Source](https://redirect.github.com/golangci/golangci-lint/compare/v1.64.6...v1.64.7) 1. Linters bug fixes - `depguard`: from 2.2.0 to 2.2.1 - `dupl`: from [`3e9179a`](https://redirect.github.com/golangci/golangci-lint/commit/3e9179ac440a) to [`f665c8d`](https://redirect.github.com/golangci/golangci-lint/commit/f665c8d69b32) - `gosec`: from 2.22.1 to 2.22.2 - `staticcheck`: from 0.6.0 to 0.6.1 2. Documentation - Add GitLab documentation
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 4be5b7285c..36cd7ebb67 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -17,7 +17,7 @@ on: # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.24.x" - GOLANGCI_LINT_VERSION: "v1.64.6" + GOLANGCI_LINT_VERSION: "v1.64.7" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.27.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" From e744bf352c9eaf398dda6e78bb20b00977709fb4 Mon Sep 17 00:00:00 2001 From: Floor Drees Date: Fri, 14 Mar 2025 12:08:41 +0100 Subject: [PATCH 452/836] docs: update README.md with Mastodon & Bluesky links (#7141) Signed-off-by: Floor Drees --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 748708e7af..4290915a59 100644 --- a/README.md +++ b/README.md @@ -117,6 +117,8 @@ MariaDB cluster). - [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-30a6l6bp3-u1lNAmh~N02Cfiv2utKTFg) - [Github Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions) - [Twitter](https://twitter.com/CloudNativePg) +- [Mastodon](https://mastodon.social/@CloudNativePG) +- [Bluesky](https://bsky.app/profile/cloudnativepg.bsky.social) ## Resources From 10a87e1d0b9a3106c25bac0bda9ca212bf3fa1d7 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 17 Mar 2025 18:18:39 +0100 Subject: [PATCH 453/836] feat: add support for configuring `tcp_user_timeout` on replicas (#7036) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces the `STANDBY_TCP_USER_TIMEOUT` operator configuration setting, which, if specified, sets the `tcp_user_timeout` parameter on all standby instances managed by the operator. The default value is `0`, which means the system’s default is used. The `tcp_user_timeout` parameter controls how long transmitted data can remain unacknowledged before the TCP connection is forcibly closed. This allows fine-tuning of standby responsiveness to network disruptions. 
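As a minimal sketch (the ConfigMap and namespace names below are the defaults
used by the deployment manifests; adjust them to your installation), the new
option can be supplied through the operator configuration:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cnpg-controller-manager-config  # assumed default name
  namespace: cnpg-system                # assumed default namespace
data:
  STANDBY_TCP_USER_TIMEOUT: "5000"
```
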
Closes #7119 Signed-off-by: Leonardo Cecchi Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Signed-off-by: Francesco Canovai Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini Co-authored-by: Francesco Canovai --- .wordlist-en-custom.txt | 1 + Makefile | 3 +- docs/src/operator_conf.md | 1 + docs/src/postgresql_conf.md | 22 +++-- docs/src/release_notes/v1.26.md | 4 + docs/src/ssl_connections.md | 3 +- docs/src/troubleshooting.md | 6 ++ hack/e2e/run-e2e-ocp.sh | 3 + hack/setup-cluster.sh | 1 - internal/configuration/configuration.go | 9 +++ internal/webhook/v1/cluster_webhook.go | 3 + pkg/management/postgres/instance.go | 11 ++- pkg/specs/pods.go | 10 +++ tests/e2e/asserts_test.go | 81 ++++++++----------- tests/e2e/backup_restore_azure_test.go | 8 +- tests/e2e/backup_restore_azurite_test.go | 16 ++-- tests/e2e/backup_restore_minio_test.go | 8 +- tests/e2e/certificates_test.go | 2 +- tests/e2e/config_support_test.go | 45 ++++++++++- tests/e2e/eviction_test.go | 10 ++- tests/e2e/failover_test.go | 19 ++--- tests/e2e/fastfailover_test.go | 7 -- tests/e2e/fastswitchover_test.go | 17 ++-- .../apache-benchmark-webtest.yaml | 4 + .../fastfailover/webtest-syncreplicas.yaml | 8 +- tests/e2e/fixtures/fastfailover/webtest.yaml | 8 +- .../apache-benchmark-webtest.yaml | 16 ++-- .../e2e/fixtures/fastswitchover/webtest.yaml | 8 +- .../cluster-pgstatstatements.yaml.template | 4 + .../cluster-sync-replica-legacy.yaml.template | 4 + .../cluster-sync-replica.yaml.template | 4 + .../sync_replicas/preferred.yaml.template | 4 + .../readiness-probe-lag-control.yaml.template | 2 +- .../startup-probe-lag-control.yaml.template | 2 +- tests/e2e/managed_roles_test.go | 53 ++++++------ tests/e2e/metrics_test.go | 2 +- tests/e2e/nodeselector_test.go | 8 +- tests/e2e/operator_unavailable_test.go | 16 ++-- tests/e2e/publication_subscription_test.go | 6 +- tests/e2e/syncreplicas_test.go | 49 +++++------ tests/e2e/tablespaces_test.go | 2 +- tests/e2e/update_user_test.go | 26 +++--- tests/e2e/upgrade_test.go | 10 +-- 43 files changed, 324 insertions(+), 202 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index a59c43aeab..35211d21eb 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -903,6 +903,7 @@ json jsonpath kb kbytes +keepalive kms kube kubebuilder diff --git a/Makefile b/Makefile index d6ccdb2dc0..8410a26ccf 100644 --- a/Makefile +++ b/Makefile @@ -215,7 +215,8 @@ generate-manifest: manifests kustomize ## Generate manifest used for deployment. 
$(KUSTOMIZE) edit set image controller="${CONTROLLER_IMG}" ;\ $(KUSTOMIZE) edit add patch --path env_override.yaml ;\ $(KUSTOMIZE) edit add configmap controller-manager-env \ - --from-literal="POSTGRES_IMAGE_NAME=${POSTGRES_IMAGE_NAME}" ;\ + --from-literal="POSTGRES_IMAGE_NAME=${POSTGRES_IMAGE_NAME}" \ + --from-literal="STANDBY_TCP_USER_TIMEOUT=5000" ;\ } ;\ mkdir -p ${DIST_PATH} ;\ $(KUSTOMIZE) build $$CONFIG_TMP_DIR/default > ${OPERATOR_MANIFEST_PATH} ;\ diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index 2e6dadb7fc..70c47c845f 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -48,6 +48,7 @@ Name | Description `MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters `MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters `PULL_SECRET_NAME` | Name of an additional pull secret to be defined in the operator's namespace and to be used to download images +`STANDBY_TCP_USER_TIMEOUT` | Defines the [`TCP_USER_TIMEOUT` socket option](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-TCP-USER-TIMEOUT) for replication connections from standby instances to the primary. Default is 0 (system's default). Values in `INHERITED_ANNOTATIONS` and `INHERITED_LABELS` support path-like wildcards. For example, the value `example.com/*` will match both the value `example.com/one` and `example.com/two`. diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index 4bc740ef11..a62dfd13a7 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -127,17 +127,28 @@ Since the fixed parameters are added at the end, they can't be overridden by the user via the YAML configuration. Those parameters are required for correct WAL archiving and replication. -### Replication settings +### Replication Settings -The `primary_conninfo`, `restore_command`, and `recovery_target_timeline` -parameters are managed automatically by the operator according to the state of -the instance in the cluster. +The `primary_conninfo`, `restore_command`, and `recovery_target_timeline` +parameters are automatically managed by the operator based on the instance's +role within the cluster. These parameters are effectively applied only when the +instance is operating as a replica. ```text -primary_conninfo = 'host=cluster-example-rw user=postgres dbname=postgres' +primary_conninfo = 'host= user=postgres dbname=postgres' recovery_target_timeline = 'latest' ``` +The [`STANDBY_TCP_USER_TIMEOUT` operator configuration setting](operator_conf.md#available-options), +if specified, sets the `tcp_user_timeout` parameter on all standby instances +managed by the operator. + +The `tcp_user_timeout` parameter determines how long transmitted data can +remain unacknowledged before the TCP connection is forcibly closed. Adjusting +this value allows you to fine-tune the responsiveness of standby instances to +network disruptions. For more details, refer to the +[PostgreSQL documentation](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-TCP-USER-TIMEOUT). 
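+
+As an illustrative sketch (the exact quoting is handled by the operator), a
+standby of the example cluster above, running with
+`STANDBY_TCP_USER_TIMEOUT=5000`, is expected to use a `primary_conninfo`
+along these lines:
+
+```text
+primary_conninfo = 'host=cluster-example-rw user=postgres dbname=postgres tcp_user_timeout=5000'
+recovery_target_timeline = 'latest'
+```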
+ ### Log control settings The operator requires PostgreSQL to output its log in CSV format, and the @@ -648,4 +659,3 @@ Users are not allowed to set the following configuration parameters in the - `unix_socket_directories` - `unix_socket_group` - `unix_socket_permissions` - diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 926cb21cb4..ca52d5e202 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -30,6 +30,10 @@ on the release branch in GitHub. ### Enhancements: +- Introduced the `STANDBY_TCP_USER_TIMEOUT` operator configuration setting, + which, if specified, sets the `tcp_user_timeout` parameter on all standby + instances managed by the operator. + - feat: support customizable pod patches via annotations (#6323) - `cnpg` plugin updates: diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index 1fde48eeda..c7324443ea 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -90,7 +90,7 @@ spec: app: webtest spec: containers: - - image: ghcr.io/cloudnative-pg/webtest:1.6.0 + - image: ghcr.io/cloudnative-pg/webtest:1.7.0 name: cert-test volumeMounts: - name: secret-volume-root-ca @@ -188,4 +188,3 @@ This assumes that the PostgreSQL operand images include an OpenSSL library that supports the `TLSv1.3` version. If not, or if your client applications need a lower version number, you need to manually configure it in the PostgreSQL configuration as any other Postgres GUC. - diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index 664bdc0272..67cfe25ce8 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -812,3 +812,9 @@ establish the connection for approximately 127 seconds before giving up. This prolonged retry period can significantly delay the reconnection process. For more details, consult the [tcp_syn_retries documentation](https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt). + +You can work around this issue by setting `STANDBY_TCP_USER_TIMEOUT` in the +[operator configuration](operator_conf.md#available-options). This will cause +the standby instances to close the TCP connection if the initial `SYN` packet +is not acknowledged within the specified timeout, allowing them to retry the +connection more quickly. 
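+
+As a sketch, assuming the operator runs in the `cnpg-system` namespace with
+the default `cnpg-controller-manager-config` ConfigMap and
+`cnpg-controller-manager` deployment names:
+
+```sh
+# Both names below are the defaults of the standard deployment manifests;
+# adjust them to your installation.
+kubectl create configmap -n cnpg-system cnpg-controller-manager-config \
+  --from-literal=STANDBY_TCP_USER_TIMEOUT=5000
+# Restart the operator so it picks up the new configuration
+kubectl rollout restart deployment -n cnpg-system cnpg-controller-manager
+```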
diff --git a/hack/e2e/run-e2e-ocp.sh b/hack/e2e/run-e2e-ocp.sh index d6a8a0d814..bddb940447 100755 --- a/hack/e2e/run-e2e-ocp.sh +++ b/hack/e2e/run-e2e-ocp.sh @@ -50,6 +50,9 @@ oc apply -f cloudnative-pg-catalog.yaml # create the secret for the index to be pulled in the marketplace oc create secret docker-registry -n openshift-marketplace --docker-server="${REGISTRY}" --docker-username="${REGISTRY_USER}" --docker-password="${REGISTRY_PASSWORD}" cnpg-pull-secret || true +# Create the default configmap to set global keepalives on all the tests +oc create configmap -n openshift-operators --from-literal=STANDBY_TCP_USER_TIMEOUT=5000 cnpg-controller-manager-config + # Install the operator oc apply -f - < 0 { + result = fmt.Sprintf("%s tcp_user_timeout='%s'", result, + strings.ReplaceAll(strings.ReplaceAll(standbyTCPUserTimeout, `\`, `\\`), `'`, `\'`)) + } + + return result } // HandleInstanceCommandRequests execute a command requested by the reconciliation diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index d059818f4a..ada3575d56 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -154,6 +154,16 @@ func CreatePodEnvConfig(cluster apiv1.Cluster, podName string) EnvConfig { } config.EnvVars = append(config.EnvVars, cluster.Spec.Env...) + if configuration.Current.StandbyTCPUserTimeout != 0 { + config.EnvVars = append( + config.EnvVars, + corev1.EnvVar{ + Name: "CNPG_STANDBY_TCP_USER_TIMEOUT", + Value: strconv.Itoa(configuration.Current.StandbyTCPUserTimeout), + }, + ) + } + hashValue, _ := hash.ComputeHash(config) config.Hash = hashValue return config diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 941dc034b5..45847935f3 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -134,11 +134,11 @@ func AssertSwitchoverWithHistory( }) By("waiting that the TargetPrimary become also CurrentPrimary", func() { - Eventually(func() (string, error) { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.CurrentPrimary, err - }, testTimeouts[timeouts.NewPrimaryAfterSwitchover]).Should(BeEquivalentTo(targetPrimary)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.CurrentPrimary).To(BeEquivalentTo(targetPrimary)) + }, testTimeouts[timeouts.NewPrimaryAfterSwitchover]).Should(Succeed()) }) By("waiting that the old primary become ready", func() { @@ -331,7 +331,6 @@ func AssertClusterDefault( Eventually(func(g Gomega) { var err error cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) @@ -767,12 +766,15 @@ func AssertNewPrimary(namespace string, clusterName string, oldPrimary string) { timeout := 120 // Wait for the operator to set a new TargetPrimary var cluster *apiv1.Cluster - Eventually(func() (string, error) { + Eventually(func(g Gomega) { var err error cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.TargetPrimary, err - }, timeout).ShouldNot(Or(BeEquivalentTo(oldPrimary), BeEquivalentTo(apiv1.PendingFailoverMarker))) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.TargetPrimary).ToNot(Or( + BeEquivalentTo(oldPrimary), + BeEquivalentTo(apiv1.PendingFailoverMarker), + )) + }, timeout).Should(Succeed()) newPrimary := cluster.Status.TargetPrimary // Expect the chosen pod to eventually become a primary @@ -1292,6 
+1294,9 @@ func AssertFastFailOver( " -f " + webTestFile) Expect(err).ToNot(HaveOccurred()) + webtestDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "webtest", Namespace: namespace}} + Expect(deployments.WaitForReady(env.Ctx, env.Client, webtestDeploy, 60)).To(Succeed()) + _, _, err = run.Run("kubectl create -n " + namespace + " -f " + webTestJob) Expect(err).ToNot(HaveOccurred()) @@ -2040,15 +2045,15 @@ func switchWalAndGetLatestArchive(namespace, podName string) string { func createAndAssertPgBouncerPoolerIsSetUp(namespace, poolerYamlFilePath string, expectedInstanceCount int) { CreateResourceFromFile(namespace, poolerYamlFilePath) - Eventually(func() (int32, error) { + Eventually(func(g Gomega) { poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Wait for the deployment to be ready deployment := &appsv1.Deployment{} err = env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: poolerName}, deployment) - - return deployment.Status.ReadyReplicas, err - }, 300).Should(BeEquivalentTo(expectedInstanceCount)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(deployment.Status.ReadyReplicas).To(BeEquivalentTo(expectedInstanceCount)) + }, 300).Should(Succeed()) // check pooler pod is up and running assertPGBouncerPodsAreReady(namespace, poolerYamlFilePath, expectedInstanceCount) @@ -2059,41 +2064,31 @@ func assertPgBouncerPoolerDeploymentStrategy( expectedMaxSurge, expectedMaxUnavailable string, ) { By("verify pooler deployment has expected rolling update configuration", func() { - Eventually(func() bool { + Eventually(func(g Gomega) { poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Wait for the deployment to be ready deployment := &appsv1.Deployment{} err = env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: poolerName}, deployment) - if err != nil { - return false - } - if expectedMaxSurge == deployment.Spec.Strategy.RollingUpdate.MaxSurge.String() && - expectedMaxUnavailable == deployment.Spec.Strategy.RollingUpdate.MaxUnavailable.String() { - return true - } - return false - }, 300).Should(BeTrue()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(deployment.Spec.Strategy.RollingUpdate.MaxSurge.String()).To(BeEquivalentTo(expectedMaxSurge)) + g.Expect(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable.String()).To(BeEquivalentTo(expectedMaxUnavailable)) + }, 300).Should(Succeed()) }) } // assertPGBouncerPodsAreReady verifies if PGBouncer pooler pods are ready func assertPGBouncerPodsAreReady(namespace, poolerYamlFilePath string, expectedPodCount int) { - Eventually(func() (bool, error) { + Eventually(func(g Gomega) { poolerName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) podList := &corev1.PodList{} err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerName}) - if err != nil { - return false, err - } + g.Expect(err).ToNot(HaveOccurred()) podItemsCount := len(podList.Items) - if podItemsCount != expectedPodCount { - return false, fmt.Errorf("expected pgBouncer pods count match passed expected instance count. 
"+ - "Got: %v, Expected: %v", podItemsCount, expectedPodCount) - } + g.Expect(podItemsCount).To(BeEquivalentTo(expectedPodCount)) activeAndReadyPodCount := 0 for _, item := range podList.Items { @@ -2102,14 +2097,8 @@ func assertPGBouncerPodsAreReady(namespace, poolerYamlFilePath string, expectedP } continue } - - if activeAndReadyPodCount != expectedPodCount { - return false, fmt.Errorf("expected pgBouncer pods to be all active and ready. Got: %v, Expected: %v", - activeAndReadyPodCount, expectedPodCount) - } - - return true, nil - }, 90).Should(BeTrue()) + g.Expect(activeAndReadyPodCount).To(BeEquivalentTo(expectedPodCount)) + }, 90).Should(Succeed()) } func assertReadWriteConnectionUsingPgBouncerService( @@ -2331,12 +2320,12 @@ func OnlineResizePVC(namespace, clusterName string) { if walStorageEnabled { expectedCount = 6 } - Eventually(func() int { + Eventually(func(g Gomega) { // Variable counter to store the updated total of expanded PVCs. It should be equal to three updateCount := 0 // Gathering PVC list err := env.Client.List(env.Ctx, pvc, ctrlclient.InNamespace(namespace)) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) // Iterating through PVC list to compare with expanded size for _, pvClaim := range pvc.Items { // Size comparison @@ -2344,8 +2333,8 @@ func OnlineResizePVC(namespace, clusterName string) { updateCount++ } } - return updateCount - }, 300).Should(BeEquivalentTo(expectedCount)) + g.Expect(updateCount).To(BeEquivalentTo(expectedCount)) + }, 300).Should(Succeed()) }) } diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go index 6b335da4a3..d83e2c6a2e 100644 --- a/tests/e2e/backup_restore_azure_test.go +++ b/tests/e2e/backup_restore_azure_test.go @@ -479,11 +479,11 @@ func prepareClusterForPITROnAzureBlob( Eventually(func() (int, error) { return backups.CountFilesOnAzureBlobStorage(azureConfig, clusterName, "data.tar") }, 30).Should(BeEquivalentTo(expectedVal)) - Eventually(func() (string, error) { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) + }, 30).Should(Succeed()) }) // Write a table and insert 2 entries on the "app" database diff --git a/tests/e2e/backup_restore_azurite_test.go b/tests/e2e/backup_restore_azurite_test.go index 8b4e8f47ae..c0f8281ef7 100644 --- a/tests/e2e/backup_restore_azurite_test.go +++ b/tests/e2e/backup_restore_azurite_test.go @@ -292,11 +292,11 @@ func prepareClusterBackupOnAzurite( Eventually(func() (int, error) { return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") }, 30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) + }, 30).Should(Succeed()) }) backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) } @@ -318,11 +318,11 @@ func prepareClusterForPITROnAzurite( Eventually(func() (int, error) { return backups.CountFilesOnAzuriteBlobStorage(namespace, clusterName, "data.tar") }, 
30).Should(BeNumerically(">=", 1)) - Eventually(func() (string, error) { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) + }, 30).Should(Succeed()) }) // Write a table and insert 2 entries on the "app" database diff --git a/tests/e2e/backup_restore_minio_test.go b/tests/e2e/backup_restore_minio_test.go index 4f4a611d1d..9e065f3250 100644 --- a/tests/e2e/backup_restore_minio_test.go +++ b/tests/e2e/backup_restore_minio_test.go @@ -800,11 +800,11 @@ func prepareClusterForPITROnMinio( }, 60).Should(BeNumerically(">=", expectedVal), fmt.Sprintf("verify the number of backups %v is greater than or equal to %v", latestTar, expectedVal)) - Eventually(func() (string, error) { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return cluster.Status.FirstRecoverabilityPoint, err - }, 30).ShouldNot(BeEmpty()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) + }, 30).Should(Succeed()) }) // Write a table and insert 2 entries on the "app" database diff --git a/tests/e2e/certificates_test.go b/tests/e2e/certificates_test.go index a12c885059..26a82acaf4 100644 --- a/tests/e2e/certificates_test.go +++ b/tests/e2e/certificates_test.go @@ -110,7 +110,7 @@ var _ = Describe("Certificates", func() { Containers: []corev1.Container{ { Name: name, - Image: "ghcr.io/cloudnative-pg/webtest:1.6.0", + Image: "ghcr.io/cloudnative-pg/webtest:1.7.0", Ports: []corev1.ContainerPort{ { ContainerPort: 8080, diff --git a/tests/e2e/config_support_test.go b/tests/e2e/config_support_test.go index 8477e3a0c2..4f02ba0dca 100644 --- a/tests/e2e/config_support_test.go +++ b/tests/e2e/config_support_test.go @@ -21,6 +21,8 @@ import ( "github.com/onsi/ginkgo/v2/types" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/tests" @@ -45,16 +47,47 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, level = tests.Low ) var operatorNamespace, namespace string + var initialConfigMap *corev1.ConfigMap + var initialSecret *corev1.Secret BeforeEach(func() { if testLevelEnv.Depth < int(level) { Skip("Test depth is lower than the amount requested for this test") } + }) + BeforeAll(func() { operatorDeployment, err := operator.GetDeployment(env.Ctx, env.Client) Expect(err).ToNot(HaveOccurred()) - operatorNamespace = operatorDeployment.GetNamespace() + + // Save the initial configMap + initialConfigMap = &corev1.ConfigMap{} + err = env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: operatorNamespace, Name: configName}, + initialConfigMap) + if !apierrors.IsNotFound(err) { + Expect(err).ToNot(HaveOccurred()) + } + initialConfigMap.SetResourceVersion("") + initialConfigMap.SetUID("") + initialConfigMap.SetCreationTimestamp(metav1.Time{}) + initialConfigMap.SetSelfLink("") + initialConfigMap.SetGeneration(0) + initialConfigMap.SetManagedFields(nil) + + // Save the initial secret + initialSecret = &corev1.Secret{} + err = env.Client.Get(env.Ctx, ctrlclient.ObjectKey{Namespace: operatorNamespace, Name: configName}, + 
initialSecret) + if !apierrors.IsNotFound(err) { + Expect(err).ToNot(HaveOccurred()) + } + initialSecret.SetResourceVersion("") + initialSecret.SetUID("") + initialSecret.SetCreationTimestamp(metav1.Time{}) + initialSecret.SetSelfLink("") + initialSecret.SetGeneration(0) + initialSecret.SetManagedFields(nil) }) AfterAll(func() { @@ -76,6 +109,16 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, err = env.Client.Delete(env.Ctx, secret) Expect(err).NotTo(HaveOccurred()) + // Create preexisting ConfigMap and Secret + if initialConfigMap.Name != "" { + err = env.Client.Create(env.Ctx, initialConfigMap) + Expect(err).ToNot(HaveOccurred()) + } + if initialSecret.Name != "" { + err = env.Client.Create(env.Ctx, initialSecret) + Expect(err).ToNot(HaveOccurred()) + } + err = operator.ReloadDeployment(env.Ctx, env.Client, env.Interface, 120) Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/e2e/eviction_test.go b/tests/e2e/eviction_test.go index 121f4f14fd..54b9a6f42d 100644 --- a/tests/e2e/eviction_test.go +++ b/tests/e2e/eviction_test.go @@ -278,15 +278,17 @@ var _ = Describe("Pod eviction", Serial, Label(tests.LabelDisruptive), func() { }) By("checking switchover happens", func() { - Eventually(func() bool { + Eventually(func() (bool, error) { podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + if err != nil { + return false, err + } for _, p := range podList.Items { if specs.IsPodPrimary(p) && primaryPod.GetName() != p.GetName() { - return true + return true, nil } } - return false + return false, nil }, 60).Should(BeTrue()) }) diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go index 657321c73a..23f922fca4 100644 --- a/tests/e2e/failover_test.go +++ b/tests/e2e/failover_test.go @@ -120,13 +120,13 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { Expect(err).ToNot(HaveOccurred()) // Expect the primary to have lost connection with the stopped standby - Eventually(func() (int, error) { + Eventually(func(g Gomega) { primaryPod, err = podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) - Expect(err).ToNot(HaveOccurred()) - return postgres.CountReplicas( + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(postgres.CountReplicas( env.Ctx, env.Client, env.Interface, env.RestClientConfig, - primaryPod, RetryTimeout) - }, RetryTimeout).Should(BeEquivalentTo(1)) + primaryPod, RetryTimeout)).To(BeEquivalentTo(1)) + }, RetryTimeout).Should(Succeed()) }) // Perform a CHECKPOINT on the primary and wait for the working standby @@ -166,9 +166,9 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { targetPrimary, strings.Trim(initialLSN, "\n")) // The replay_lsn of the targetPrimary should be ahead // of the one before the checkpoint - Eventually(func() (string, error) { + Eventually(func(g Gomega) { primaryPod, err = podutils.Get(env.Ctx, env.Client, namespace, currentPrimary) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) out, _, err := exec.EventuallyExecQueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{ @@ -179,8 +179,9 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { RetryTimeout, PollingTime, ) - return strings.TrimSpace(out), err - }, RetryTimeout).Should(BeEquivalentTo("t")) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.TrimSpace(out)).To(BeEquivalentTo("t")) + }, RetryTimeout).Should(Succeed()) }) // Force-delete the primary. 
Eventually the cluster should elect a diff --git a/tests/e2e/fastfailover_test.go b/tests/e2e/fastfailover_test.go index f49921ceb2..7b356c74b2 100644 --- a/tests/e2e/fastfailover_test.go +++ b/tests/e2e/fastfailover_test.go @@ -45,14 +45,7 @@ var _ = Describe("Fast failover", Serial, Label(tests.LabelPerformance, tests.La Skip("Test depth is lower than the amount requested for this test") } - // The walreceiver of a standby that wasn't promoted may try to reconnect - // before the rw service endpoints are updated. In this case, the walreceiver - // can be stuck for waiting for the connection to be established for a time that - // depends on the tcp_syn_retries sysctl. Since by default - // net.ipv4.tcp_syn_retries=6, PostgreSQL can wait 2^7-1=127 seconds before - // restarting the walreceiver. if !IsLocal() { - maxReattachTime = 180 maxFailoverTime = 30 } }) diff --git a/tests/e2e/fastswitchover_test.go b/tests/e2e/fastswitchover_test.go index 99c8017bb7..9b96bfc6e3 100644 --- a/tests/e2e/fastswitchover_test.go +++ b/tests/e2e/fastswitchover_test.go @@ -20,7 +20,9 @@ import ( "fmt" "strings" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" @@ -28,6 +30,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" @@ -156,6 +159,10 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe _, _, err := run.Run("kubectl create -n " + namespace + " -f " + webTestFile) Expect(err).ToNot(HaveOccurred()) + + webtestDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "webtest", Namespace: namespace}} + Expect(deployments.WaitForReady(env.Ctx, env.Client, webtestDeploy, 60)).To(Succeed()) + _, _, err = run.Run("kubectl create -n " + namespace + " -f " + webTestJob) Expect(err).ToNot(HaveOccurred()) @@ -197,16 +204,6 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe var maxReattachTime int32 = 60 var maxSwitchoverTime int32 = 20 - // The walreceiver of a standby that wasn't promoted may try to reconnect - // before the rw service endpoints are updated. In this case, the walreceiver - // can be stuck for waiting for the connection to be established for a time that - // depends on the tcp_syn_retries sysctl. Since by default - // net.ipv4.tcp_syn_retries=6, PostgreSQL can wait 2^7-1=127 seconds before - // restarting the walreceiver. 
- if !IsLocal() { - maxReattachTime = 180 - } - AssertStandbysFollowPromotion(namespace, clusterName, maxReattachTime) AssertWritesResumedBeforeTimeout(namespace, clusterName, maxSwitchoverTime) diff --git a/tests/e2e/fixtures/fastfailover/apache-benchmark-webtest.yaml b/tests/e2e/fixtures/fastfailover/apache-benchmark-webtest.yaml index 8af0fe341e..347e76dc01 100644 --- a/tests/e2e/fixtures/fastfailover/apache-benchmark-webtest.yaml +++ b/tests/e2e/fixtures/fastfailover/apache-benchmark-webtest.yaml @@ -14,5 +14,9 @@ spec: - "120" - "-c" - "5" + - "-v" + - "2" + - "-s" + - "3" - "http://webtest:8080/tx" restartPolicy: Never diff --git a/tests/e2e/fixtures/fastfailover/webtest-syncreplicas.yaml b/tests/e2e/fixtures/fastfailover/webtest-syncreplicas.yaml index 8de28f9e4e..541f9bf81d 100644 --- a/tests/e2e/fixtures/fastfailover/webtest-syncreplicas.yaml +++ b/tests/e2e/fixtures/fastfailover/webtest-syncreplicas.yaml @@ -16,7 +16,7 @@ spec: app: webtest spec: containers: - - image: ghcr.io/cloudnative-pg/webtest:1.6.0 + - image: ghcr.io/cloudnative-pg/webtest:1.7.0 name: webtest env: - name: PASSWORD @@ -33,6 +33,12 @@ spec: value: "postgres://$(USER):$(PASSWORD)@cluster-syncreplicas-fast-failover-rw/app?sslmode=require&connect_timeout=2" - name: SQL_QUERY value: "insert into tps.tl(source) values ('hey');" + livenessProbe: + tcpSocket: + port: 8080 + readinessProbe: + tcpSocket: + port: 8080 ports: - containerPort: 8080 --- diff --git a/tests/e2e/fixtures/fastfailover/webtest.yaml b/tests/e2e/fixtures/fastfailover/webtest.yaml index 3865c4dffd..e3464cfa10 100644 --- a/tests/e2e/fixtures/fastfailover/webtest.yaml +++ b/tests/e2e/fixtures/fastfailover/webtest.yaml @@ -16,7 +16,7 @@ spec: app: webtest spec: containers: - - image: ghcr.io/cloudnative-pg/webtest:1.6.0 + - image: ghcr.io/cloudnative-pg/webtest:1.7.0 name: webtest env: - name: PASSWORD @@ -33,6 +33,12 @@ spec: value: "postgres://$(USER):$(PASSWORD)@cluster-fast-failover-rw/app?sslmode=require&connect_timeout=2" - name: SQL_QUERY value: "insert into tps.tl(source) values ('hey');" + livenessProbe: + tcpSocket: + port: 8080 + readinessProbe: + tcpSocket: + port: 8080 ports: - containerPort: 8080 --- diff --git a/tests/e2e/fixtures/fastswitchover/apache-benchmark-webtest.yaml b/tests/e2e/fixtures/fastswitchover/apache-benchmark-webtest.yaml index 8af0fe341e..82192f24d1 100644 --- a/tests/e2e/fixtures/fastswitchover/apache-benchmark-webtest.yaml +++ b/tests/e2e/fixtures/fastswitchover/apache-benchmark-webtest.yaml @@ -9,10 +9,14 @@ spec: - name: apache-benchmark image: httpd command: - - "/usr/local/apache2/bin/ab" - - "-t" - - "120" - - "-c" - - "5" - - "http://webtest:8080/tx" + - "/usr/local/apache2/bin/ab" + - "-t" + - "120" + - "-c" + - "5" + - "-v" + - "2" + - "-s" + - "3" + - "http://webtest:8080/tx" restartPolicy: Never diff --git a/tests/e2e/fixtures/fastswitchover/webtest.yaml b/tests/e2e/fixtures/fastswitchover/webtest.yaml index f94effbaa8..8ef5df717f 100644 --- a/tests/e2e/fixtures/fastswitchover/webtest.yaml +++ b/tests/e2e/fixtures/fastswitchover/webtest.yaml @@ -16,7 +16,7 @@ spec: app: webtest spec: containers: - - image: ghcr.io/cloudnative-pg/webtest:1.6.0 + - image: ghcr.io/cloudnative-pg/webtest:1.7.0 name: webtest env: - name: PASSWORD @@ -33,6 +33,12 @@ spec: value: "postgres://$(USER):$(PASSWORD)@cluster-fast-switchover-rw/app?sslmode=require&connect_timeout=2" - name: SQL_QUERY value: "insert into tps.tl(source) values ('hey');" + livenessProbe: + tcpSocket: + port: 8080 + readinessProbe: + tcpSocket: + port: 
8080 ports: - containerPort: 8080 --- diff --git a/tests/e2e/fixtures/sync_replicas/cluster-pgstatstatements.yaml.template b/tests/e2e/fixtures/sync_replicas/cluster-pgstatstatements.yaml.template index d773df1b11..20cbcd7bf3 100644 --- a/tests/e2e/fixtures/sync_replicas/cluster-pgstatstatements.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/cluster-pgstatstatements.yaml.template @@ -18,6 +18,10 @@ spec: log_autovacuum_min_duration: '1s' 'pg_stat_statements.max': '10000' log_replication_commands: 'on' + probes: + readiness: + failureThreshold: 10 + periodSeconds: 1 storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/sync_replicas/cluster-sync-replica-legacy.yaml.template b/tests/e2e/fixtures/sync_replicas/cluster-sync-replica-legacy.yaml.template index 5a9dc71f7c..973e8ef640 100644 --- a/tests/e2e/fixtures/sync_replicas/cluster-sync-replica-legacy.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/cluster-sync-replica-legacy.yaml.template @@ -17,6 +17,10 @@ spec: log_temp_files: '1024' log_autovacuum_min_duration: '1s' log_replication_commands: 'on' + probes: + readiness: + failureThreshold: 10 + periodSeconds: 1 storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/sync_replicas/cluster-sync-replica.yaml.template b/tests/e2e/fixtures/sync_replicas/cluster-sync-replica.yaml.template index 8bb4e43906..bbdccd10fe 100644 --- a/tests/e2e/fixtures/sync_replicas/cluster-sync-replica.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/cluster-sync-replica.yaml.template @@ -17,6 +17,10 @@ spec: log_temp_files: '1024' log_autovacuum_min_duration: '1s' log_replication_commands: 'on' + probes: + readiness: + failureThreshold: 10 + periodSeconds: 1 storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/sync_replicas/preferred.yaml.template b/tests/e2e/fixtures/sync_replicas/preferred.yaml.template index 86aee12481..737ae75d7b 100644 --- a/tests/e2e/fixtures/sync_replicas/preferred.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/preferred.yaml.template @@ -18,6 +18,10 @@ spec: log_temp_files: '1024' log_autovacuum_min_duration: '1s' log_replication_commands: 'on' + probes: + readiness: + failureThreshold: 10 + periodSeconds: 1 storage: storageClass: ${E2E_DEFAULT_STORAGE_CLASS} diff --git a/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template b/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template index af46bd5192..33075f2eac 100644 --- a/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/readiness-probe-lag-control.yaml.template @@ -23,7 +23,7 @@ spec: readiness: type: streaming maximumLag: 16Mi - failureThreshold: 30 + failureThreshold: 15 periodSeconds: 1 storage: diff --git a/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template b/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template index adbefa3874..5e6f5bfcb1 100644 --- a/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template +++ b/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template @@ -23,7 +23,7 @@ spec: startup: type: streaming maximumLag: 16Mi - failureThreshold: 30 + failureThreshold: 60 periodSeconds: 1 storage: diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index b1e671fd8f..748b7a0217 100644 --- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -116,7 +116,7 @@ var _ = Describe("Managed roles 
tests", Label(tests.LabelSmoke, tests.LabelBasic assertRoleStatus := func(namespace, clusterName, query, expectedResult string) { primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - Eventually(func() string { + Eventually(func(g Gomega) { stdout, _, err := exec.QueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, exec.PodLocator{ @@ -125,11 +125,9 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic }, postgres.PostgresDBName, query) - if err != nil { - return "" - } - return strings.TrimSpace(stdout) - }, 30).Should(Equal(expectedResult)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.TrimSpace(stdout)).To(Equal(expectedResult)) + }, 30).Should(Succeed()) } It("can create roles specified in the managed roles stanza", func() { @@ -405,11 +403,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic } err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) - Eventually(func() int { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return len(cluster.Status.ManagedRolesStatus.CannotReconcile) - }, 30).Should(Equal(0)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile).To(BeEmpty()) + }, 30).Should(Succeed()) assertInRoles(namespace, primaryPod.Name, newUserName, []string{"postgres", username}) }) @@ -427,11 +425,11 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic } err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster)) Expect(err).ToNot(HaveOccurred()) - Eventually(func() int { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return len(cluster.Status.ManagedRolesStatus.CannotReconcile) - }, 30).Should(Equal(0)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile).To(BeEmpty()) + }, 30).Should(Succeed()) assertInRoles(namespace, primaryPod.Name, newUserName, []string{username}) }) @@ -450,22 +448,23 @@ var _ = Describe("Managed roles tests", Label(tests.LabelSmoke, tests.LabelBasic // user not changed Eventually(QueryMatchExpectationPredicate(primaryPod, postgres.PostgresDBName, roleExistsQuery(unrealizableUser), "t"), 30).Should(Succeed()) - Eventually(func() int { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return len(cluster.Status.ManagedRolesStatus.CannotReconcile) - }, 30).Should(Equal(1)) - Eventually(func() int { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile).To(HaveLen(1)) + }, 30).Should(Succeed()) + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return len(cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser]) - }, 30).Should(Equal(1)) - Eventually(func() string { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser]).To(HaveLen(1)) + }, 30).Should(Succeed()) + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - return 
cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser][0] - }, 30).Should(ContainSubstring(fmt.Sprintf("role \"%s\" is a member of role \"%s\"", - unrealizableUser, unrealizableUser))) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.ManagedRolesStatus.CannotReconcile[unrealizableUser][0]).To( + ContainSubstring(fmt.Sprintf("role \"%s\" is a member of role \"%s\"", + unrealizableUser, unrealizableUser))) + }, 30).Should(Succeed()) }) }) diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go index 00675f9b8c..71890aa882 100644 --- a/tests/e2e/metrics_test.go +++ b/tests/e2e/metrics_test.go @@ -349,7 +349,7 @@ var _ = Describe("Metrics", Label(tests.LabelObservability), func() { g.Expect(strings.TrimSpace(out)).To(BeEquivalentTo("t")) }).WithTimeout( time.Duration(timeouts.DefaultTestTimeouts[timeouts.Short])*time.Second, - ).WithPolling(time.Second).Should(Succeed(), fmt.Sprintf("on pod %v", pod.Name)) + ).Should(Succeed(), fmt.Sprintf("on pod %v", pod.Name)) out, err := proxy.RetrieveMetricsFromInstance(env.Ctx, env.Interface, pod, replicaCluster.IsMetricsTLSEnabled()) diff --git a/tests/e2e/nodeselector_test.go b/tests/e2e/nodeselector_test.go index a93945838c..e7b3d1a045 100644 --- a/tests/e2e/nodeselector_test.go +++ b/tests/e2e/nodeselector_test.go @@ -76,10 +76,10 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { // We check the error to verify that's the case By("verifying that the pods can't be scheduled", func() { timeout := 120 - Eventually(func() bool { + Eventually(func(g Gomega) { isPending := false podList, err := pods.List(env.Ctx, env.Client, namespace) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) if len(podList.Items) > 0 { if len(podList.Items[0].Status.Conditions) > 0 { if podList.Items[0].Status.Phase == "Pending" && strings.Contains(podList.Items[0].Status.Conditions[0].Message, @@ -93,8 +93,8 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { } } } - return isPending - }, timeout).Should(BeEquivalentTo(true)) + g.Expect(isPending).To(BeTrue()) + }, timeout).Should(Succeed()) }) }) }) diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index 4a38e29a46..a573de9a67 100644 --- a/tests/e2e/operator_unavailable_test.go +++ b/tests/e2e/operator_unavailable_test.go @@ -84,16 +84,16 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te Expect(err).ToNot(HaveOccurred()) // Expect only 2 instances to be up and running - Eventually(func() int32 { + Eventually(func(g Gomega) { podList := &corev1.PodList{} err := env.Client.List( env.Ctx, podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.ClusterLabelName: clusterName}, ) - Expect(err).ToNot(HaveOccurred()) - return int32(len(podList.Items)) - }, 120).Should(BeEquivalentTo(2)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(podList.Items).To(HaveLen(2)) + }, 120).Should(Succeed()) // And to stay like that Consistently(func() int32 { @@ -183,15 +183,15 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te wg.Wait() // Expect only 2 instances to be up and running - Eventually(func() int32 { + Eventually(func(g Gomega) { podList := &corev1.PodList{} err := env.Client.List( env.Ctx, podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.ClusterLabelName: "operator-unavailable"}, ) - Expect(err).ToNot(HaveOccurred()) - return 
int32(len(utils.FilterActivePods(podList.Items))) - }, 120).Should(BeEquivalentTo(2)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(utils.FilterActivePods(podList.Items)).To(HaveLen(2)) + }, 120).Should(Succeed()) }) By("verifying a new operator pod is now back", func() { diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go index e2537d5690..197b4b28ec 100644 --- a/tests/e2e/publication_subscription_test.go +++ b/tests/e2e/publication_subscription_test.go @@ -154,7 +154,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub Eventually(func(g Gomega) { err := env.Client.Get(env.Ctx, databaseNamespacedName, databaseObject) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(databaseObject.Status.Applied).Should(HaveValue(BeTrue())) }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) @@ -187,7 +187,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub Eventually(func(g Gomega) { pub := &apiv1.Publication{} err := env.Client.Get(env.Ctx, pubNamespacedName, pub) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(pub.Status.Applied).Should(HaveValue(BeTrue())) }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) @@ -220,7 +220,7 @@ var _ = Describe("Publication and Subscription", Label(tests.LabelPublicationSub Eventually(func(g Gomega) { sub := &apiv1.Subscription{} err := env.Client.Get(env.Ctx, pubNamespacedName, sub) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) g.Expect(sub.Status.Applied).Should(HaveValue(BeTrue())) }, 300).WithPolling(10 * time.Second).Should(Succeed()) }) diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go index 80982e6d80..221d2c42e3 100644 --- a/tests/e2e/syncreplicas_test.go +++ b/tests/e2e/syncreplicas_test.go @@ -50,9 +50,9 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) getSyncReplicationCount := func(namespace, clusterName, syncState string, expectedCount int) { - Eventually(func() (int, error, error) { + Eventually(func(g Gomega) int { primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) out, stdErr, err := exec.QueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, @@ -62,18 +62,19 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }, "postgres", fmt.Sprintf("SELECT count(*) from pg_catalog.pg_stat_replication WHERE sync_state = '%s'", syncState)) - Expect(stdErr).To(BeEmpty()) - Expect(err).ShouldNot(HaveOccurred()) + g.Expect(stdErr).To(BeEmpty()) + g.Expect(err).ToNot(HaveOccurred()) value, atoiErr := strconv.Atoi(strings.Trim(out, "\n")) - return value, err, atoiErr + g.Expect(atoiErr).ToNot(HaveOccurred()) + return value }, RetryTimeout).Should(BeEquivalentTo(expectedCount)) } compareSynchronousStandbyNames := func(namespace, clusterName, element string) { - Eventually(func() string { + Eventually(func(g Gomega) { primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) out, stdErr, err := exec.QueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, @@ -83,11 +84,11 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }, "postgres", "select setting from 
pg_catalog.pg_settings where name = 'synchronous_standby_names'") - Expect(stdErr).To(BeEmpty()) - Expect(err).ShouldNot(HaveOccurred()) + g.Expect(stdErr).To(BeEmpty()) + g.Expect(err).ShouldNot(HaveOccurred()) - return strings.Trim(out, "\n") - }, 60).Should(ContainSubstring(element)) + g.Expect(strings.Trim(out, "\n")).To(ContainSubstring(element)) + }, 30).Should(Succeed()) } assertProbeRespectsReplicaLag := func(namespace, replicaName, probeType string) { @@ -177,12 +178,12 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) By("checking that synchronous_standby_names reflects cluster's changes", func() { // Set MaxSyncReplicas to 1 - Eventually(func(g Gomega) error { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.MaxSyncReplicas = 1 - return env.Client.Update(env.Ctx, cluster) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) }, RetryTimeout, 5).Should(Succeed()) // Scale the cluster down to 2 pods @@ -267,12 +268,12 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) By("setting MaxStandbyNamesFromCluster to 1 and decreasing to 1 the sync replicas required", func() { - Eventually(func(g Gomega) error { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = ptr.To(1) cluster.Spec.PostgresConfiguration.Synchronous.Number = 1 - return env.Client.Update(env.Ctx, cluster) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) }, RetryTimeout, 5).Should(Succeed()) getSyncReplicationCount(namespace, clusterName, "quorum", 1) @@ -280,11 +281,11 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) By("switching to MethodFirst (priority-based)", func() { - Eventually(func(g Gomega) error { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.Method = apiv1.SynchronousReplicaConfigurationMethodFirst - return env.Client.Update(env.Ctx, cluster) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) }, RetryTimeout, 5).Should(Succeed()) getSyncReplicationCount(namespace, clusterName, "sync", 1) @@ -292,13 +293,13 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { }) By("by properly setting standbyNamesPre and standbyNamesPost", func() { - Eventually(func(g Gomega) error { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) cluster.Spec.PostgresConfiguration.Synchronous.MaxStandbyNamesFromCluster = nil cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPre = []string{"preSyncReplica"} cluster.Spec.PostgresConfiguration.Synchronous.StandbyNamesPost = []string{"postSyncReplica"} - return env.Client.Update(env.Ctx, cluster) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) }, RetryTimeout, 5).Should(Succeed()) compareSynchronousStandbyNames(namespace, clusterName, "FIRST 1 (\"preSyncReplica\"") compareSynchronousStandbyNames(namespace, clusterName, "\"postSyncReplica\")") @@ -333,19 +334,19 @@ var _ = Describe("Synchronous Replicas", Label(tests.LabelReplication), func() { By("fencing the second replica and verifying we 
unset synchronous_standby_names", func() { Expect(fencing.On(env.Ctx, env.Client, fmt.Sprintf("%v-2", clusterName), namespace, clusterName, fencing.UsingAnnotation)).Should(Succeed()) - Eventually(func() string { + Eventually(func(g Gomega) { commandTimeout := time.Second * 10 primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) + g.Expect(err).ToNot(HaveOccurred()) stdout, _, err := exec.Command( env.Ctx, env.Interface, env.RestClientConfig, *primary, specs.PostgresContainerName, &commandTimeout, "psql", "-U", "postgres", "-tAc", "show synchronous_standby_names", ) - Expect(err).ToNot(HaveOccurred()) - return strings.Trim(stdout, "\n") - }, 160).Should(BeEmpty()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(strings.Trim(stdout, "\n")).To(BeEmpty()) + }, 160).Should(Succeed()) }) By("unfencing the replicas and verifying we have 2 quorum-based replicas", func() { Expect(fencing.Off(env.Ctx, env.Client, fmt.Sprintf("%v-3", clusterName), diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 650270be57..7a202db7e9 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -1029,7 +1029,7 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo ) targetContainer := getPostgresContainer(pod) - Expect(targetContainer).NotTo(BeNil()) + g.Expect(targetContainer).NotTo(BeNil()) dbUser := getDatabasUserUID(cluster, targetContainer) g.Expect(stdErr).To(BeEmpty()) diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go index 424e6adf66..aec3dc1b7c 100644 --- a/tests/e2e/update_user_test.go +++ b/tests/e2e/update_user_test.go @@ -97,12 +97,12 @@ var _ = Describe("Update user and superuser password", Label(tests.LabelServiceC By("update superuser password", func() { // Setting EnableSuperuserAccess to true - Eventually(func() error { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) cluster.Spec.EnableSuperuserAccess = ptr.To(true) - return env.Client.Update(env.Ctx, cluster) - }, 60, 5).Should(Not(HaveOccurred())) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, 60, 5).Should(Succeed()) // We should now have a secret var secret corev1.Secret @@ -184,18 +184,18 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi By("enable superuser access", func() { // Setting EnableSuperuserAccess to true - Eventually(func() error { + Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) cluster.Spec.EnableSuperuserAccess = ptr.To(true) - return env.Client.Update(env.Ctx, cluster) - }, 60, 5).Should(Not(HaveOccurred())) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + }, 60, 5).Should(Succeed()) // We should now have a secret Eventually(func(g Gomega) { err = env.Client.Get(env.Ctx, namespacedName, &secret) g.Expect(err).ToNot(HaveOccurred()) - }, 90).WithPolling(time.Second).Should(Succeed()) + }, 90).Should(Succeed()) superUser, superUserPass, err := secrets.GetCredentials( env.Ctx, env.Client, @@ -207,12 +207,12 @@ var _ = Describe("Enable superuser password", Label(tests.LabelServiceConnectivi By("disable superuser access", func() { // Setting EnableSuperuserAccess to false - Eventually(func() error { + Eventually(func(g Gomega) { 
 				cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName)
-				Expect(err).NotTo(HaveOccurred())
+				g.Expect(err).NotTo(HaveOccurred())
 				cluster.Spec.EnableSuperuserAccess = ptr.To(false)
-				return env.Client.Update(env.Ctx, cluster)
-			}, 60, 5).Should(Not(HaveOccurred()))
+				g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed())
+			}, 60, 5).Should(Succeed())

 			// We expect the secret to eventually be deleted
 			Eventually(func(g Gomega) {
diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go
index aceaaa27de..bebd63687a 100644
--- a/tests/e2e/upgrade_test.go
+++ b/tests/e2e/upgrade_test.go
@@ -233,20 +233,20 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
 					"Pod %v should have updated its config", pod.Name)
 			}
 			// Check that a switchover happened
-			Eventually(func() (bool, error) {
+			Eventually(func(g Gomega) bool {
 				c, err := clusterutils.Get(env.Ctx, env.Client, upgradeNamespace, clusterName)
-				Expect(err).ToNot(HaveOccurred())
+				g.Expect(err).ToNot(HaveOccurred())

 				GinkgoWriter.Printf("Current Primary: %s, Current Primary timestamp: %s\n",
 					c.Status.CurrentPrimary, c.Status.CurrentPrimaryTimestamp)

 				if c.Status.CurrentPrimary != oldPrimary {
-					return true, nil
+					return true
 				} else if c.Status.CurrentPrimaryTimestamp != oldPrimaryTimestamp {
-					return true, nil
+					return true
 				}

-				return false, nil
+				return false
 			}, timeout, "1s").Should(BeTrue())
 		})

From 6f1855dd44d5181fb486d0ceee8ece4f5036bd00 Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Wed, 19 Mar 2025 10:37:43 +0100
Subject: [PATCH 454/836] docs: update PostGIS instructions (#7146)

Leverage the new feature for declarative management of extensions in
the example to deploy PostGIS.

Closes #7144

Signed-off-by: Gabriele Bartolini
---
 docs/src/postgis.md                   | 86 ++++++++++++++-------------
 docs/src/samples/postgis-example.yaml | 30 ++++++----
 2 files changed, 65 insertions(+), 51 deletions(-)

diff --git a/docs/src/postgis.md b/docs/src/postgis.md
index 9df998c16e..916f8bda1a 100644
--- a/docs/src/postgis.md
+++ b/docs/src/postgis.md
@@ -46,7 +46,7 @@ do this in two ways:

 ## Create a new PostgreSQL cluster with PostGIS

-Let's suppose you want to create a new PostgreSQL 14 cluster with PostGIS 3.2.
+Let's suppose you want to create a new PostgreSQL 17 cluster with PostGIS 3.5.

 The first step is to ensure you use the right PostGIS container image for
 the operand, and properly set the `.spec.imageName` option in the `Cluster`
@@ -59,7 +59,7 @@ provides some guidance on how the creation of a PostGIS cluster can be done.
     Please consider that, although convention over configuration applies in
     CloudNativePG, you should spend time configuring and tuning your system for
     production. Also the `imageName` in the example below deliberately points
-    to the latest available image for PostgreSQL 14 - you should use a specific
+    to the latest available image for PostgreSQL 17 - you should use a specific
     image name or, preferably, the SHA256 digest for true immutability.
```yaml @@ -68,71 +68,75 @@ kind: Cluster metadata: name: postgis-example spec: - instances: 3 - imageName: ghcr.io/cloudnative-pg/postgis:14 - bootstrap: - initdb: - postInitTemplateSQL: - - CREATE EXTENSION postgis; - - CREATE EXTENSION postgis_topology; - - CREATE EXTENSION fuzzystrmatch; - - CREATE EXTENSION postgis_tiger_geocoder; - + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgis:17 storage: size: 1Gi + postgresql: + parameters: + log_statement: ddl +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: postgis-example-app +spec: + name: app + owner: app + cluster: + name: postgis-example + extensions: + - name: postgis + - name: postgis_topology + - name: fuzzystrmatch + - name: postgis_tiger_geocoder ``` -The example relies on the `postInitTemplateSQL` option which executes a list of -queries against the `template1` database, before the actual creation of the -application database (called `app`). This means that, once you have applied the -manifest and the cluster is up, you will have the above extensions installed in -both the template database and the application database, ready for use. +The example leverages the `Database` resource's declarative extension +management to add the specified extensions to the `app` database. !!! Info - Take some time and look at the available options in `.spec.bootstrap.initdb` - from the [API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BootstrapInitDB), such as - `postInitApplicationSQL`. + For more details, see the + ["Managing Extensions in a Database" section](declarative_database_management.md#managing-extensions-in-a-database). You can easily verify the available version of PostGIS that is in the container, by connecting to the `app` database (you might obtain different values from the ones in this document): ```console -$ kubectl exec -ti postgis-example-1 -- psql app -Defaulted container "postgres" out of: postgres, bootstrap-controller (init) -psql (17.4 (Debian 17.4-1.pgdg110+1)) +$ kubectl cnpg psql postgis-example -- app +psql (17.4 (Debian 17.4-1.pgdg110+2)) Type "help" for help. 
app=# SELECT * FROM pg_available_extensions WHERE name ~ '^postgis' ORDER BY 1; name | default_version | installed_version | comment --------------------------+-----------------+-------------------+------------------------------------------------------------ - postgis | 3.2.2 | 3.2.2 | PostGIS geometry and geography spatial types and functions - postgis-3 | 3.2.2 | | PostGIS geometry and geography spatial types and functions - postgis_raster | 3.2.2 | | PostGIS raster types and functions - postgis_raster-3 | 3.2.2 | | PostGIS raster types and functions - postgis_sfcgal | 3.2.2 | | PostGIS SFCGAL functions - postgis_sfcgal-3 | 3.2.2 | | PostGIS SFCGAL functions - postgis_tiger_geocoder | 3.2.2 | 3.2.2 | PostGIS tiger geocoder and reverse geocoder - postgis_tiger_geocoder-3 | 3.2.2 | | PostGIS tiger geocoder and reverse geocoder - postgis_topology | 3.2.2 | 3.2.2 | PostGIS topology spatial types and functions - postgis_topology-3 | 3.2.2 | | PostGIS topology spatial types and functions + postgis | 3.5.2 | 3.5.2 | PostGIS geometry and geography spatial types and functions + postgis-3 | 3.5.2 | | PostGIS geometry and geography spatial types and functions + postgis_raster | 3.5.2 | | PostGIS raster types and functions + postgis_raster-3 | 3.5.2 | | PostGIS raster types and functions + postgis_sfcgal | 3.5.2 | | PostGIS SFCGAL functions + postgis_sfcgal-3 | 3.5.2 | | PostGIS SFCGAL functions + postgis_tiger_geocoder | 3.5.2 | 3.5.2 | PostGIS tiger geocoder and reverse geocoder + postgis_tiger_geocoder-3 | 3.5.2 | | PostGIS tiger geocoder and reverse geocoder + postgis_topology | 3.5.2 | 3.5.2 | PostGIS topology spatial types and functions + postgis_topology-3 | 3.5.2 | | PostGIS topology spatial types and functions (10 rows) ``` -The next step is to verify that the extensions listed in the -`postInitTemplateSQL` section have been correctly installed in the `app` -database. +The next step is to verify that the extensions listed in the `Database` +resource have been correctly installed in the `app` database. 
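+
+Before connecting, you can optionally confirm that the `Database` resource
+was reconciled. A minimal sketch, assuming the resource name from the
+manifest above:
+
+```sh
+kubectl get database postgis-example-app
+```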
```console app=# \dx List of installed extensions Name | Version | Schema | Description ------------------------+---------+------------+------------------------------------------------------------ - fuzzystrmatch | 1.1 | public | determine similarities and distance between strings + fuzzystrmatch | 1.2 | public | determine similarities and distance between strings plpgsql | 1.0 | pg_catalog | PL/pgSQL procedural language - postgis | 3.2.2 | public | PostGIS geometry and geography spatial types and functions - postgis_tiger_geocoder | 3.2.2 | tiger | PostGIS tiger geocoder and reverse geocoder - postgis_topology | 3.2.2 | topology | PostGIS topology spatial types and functions + postgis | 3.5.2 | public | PostGIS geometry and geography spatial types and functions + postgis_tiger_geocoder | 3.5.2 | tiger | PostGIS tiger geocoder and reverse geocoder + postgis_topology | 3.5.2 | topology | PostGIS topology spatial types and functions (5 rows) ``` @@ -142,6 +146,6 @@ Finally: app=# SELECT postgis_full_version(); postgis_full_version ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - POSTGIS="3.2.2 628da50" [EXTENSION] PGSQL="140" GEOS="3.9.0-CAPI-1.16.2" PROJ="7.2.1" LIBXML="2.9.10" LIBJSON="0.15" LIBPROTOBUF="1.3.3" WAGYU="0.5.0 (Internal)" TOPOLOGY + POSTGIS="3.5.2 dea6d0a" [EXTENSION] PGSQL="170" GEOS="3.9.0-CAPI-1.16.2" PROJ="7.2.1 NETWORK_ENABLED=OFF URL_ENDPOINT=https://cdn.proj.org USER_WRITABLE_DIRECTORY=/tmp/proj DATABASE_PATH=/usr/share/proj/proj.db" (compiled against PROJ 7.2.1) LIBXML="2.9.10" LIBJSON="0.15" LIBPROTOBUF="1.3.3" WAGYU="0.5.0 (Internal)" TOPOLOGY (1 row) ``` diff --git a/docs/src/samples/postgis-example.yaml b/docs/src/samples/postgis-example.yaml index 6907c83e9c..34a3ff44b2 100644 --- a/docs/src/samples/postgis-example.yaml +++ b/docs/src/samples/postgis-example.yaml @@ -3,15 +3,25 @@ kind: Cluster metadata: name: postgis-example spec: - instances: 3 - imageName: ghcr.io/cloudnative-pg/postgis:14 - bootstrap: - initdb: - postInitTemplateSQL: - - CREATE EXTENSION postgis; - - CREATE EXTENSION postgis_topology; - - CREATE EXTENSION fuzzystrmatch; - - CREATE EXTENSION postgis_tiger_geocoder; - + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgis:17 storage: size: 1Gi + postgresql: + parameters: + log_statement: ddl +--- +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: postgis-example-app +spec: + name: app + owner: app + cluster: + name: postgis-example + extensions: + - name: postgis + - name: postgis_topology + - name: fuzzystrmatch + - name: postgis_tiger_geocoder From 40c44b00ae6d8b841764609e91c02101cb580965 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 20 Mar 2025 10:54:34 +0100 Subject: [PATCH 455/836] feat(cmd,plugin): make `hibernate` command declarative (#7155) ## Release Notes The `hibernate on/off` commands in the `cnpg` plugin for `kubectl` now serve as shortcuts for the declarative hibernation procedure. The previously available imperative implementation has been removed in favour of the declarative approach. Additionally, the `hibernate status` command has been deprecated in favour of the standard `status` command, which now provides a clearer interface for hibernated clusters. 
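
The subcommands now simply toggle the `cnpg.io/hibernation` annotation on the
`Cluster` resource. As a rough illustration (a sketch, using a placeholder
cluster name):

```sh
kubectl cnpg hibernate on cluster-example
# is roughly equivalent to:
kubectl annotate cluster cluster-example --overwrite cnpg.io/hibernation=on
```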
**Users should not upgrade to version 1.26 of both the plugin and the operator unless they are prepared to migrate to the declarative method.** Closes #6870 Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Signed-off-by: Leonardo Cecchi Co-authored-by: Gabriele Bartolini Co-authored-by: Leonardo Cecchi --- docs/src/declarative_hibernation.md | 8 - docs/src/kubectl-plugin.md | 60 ++- docs/src/release_notes/v1.26.md | 9 +- internal/cmd/plugin/hibernate/cmd.go | 95 +++-- internal/cmd/plugin/hibernate/cmd_test.go | 97 +++++ internal/cmd/plugin/hibernate/off.go | 175 --------- internal/cmd/plugin/hibernate/on.go | 375 ------------------ internal/cmd/plugin/hibernate/output.go | 210 ---------- internal/cmd/plugin/hibernate/resources.go | 63 --- internal/cmd/plugin/hibernate/status.go | 123 ------ internal/cmd/plugin/hibernate/suite_test.go | 13 + internal/cmd/plugin/status/status.go | 116 +++--- tests/e2e/hibernation_test.go | 404 -------------------- tests/e2e/tablespaces_test.go | 20 - 14 files changed, 255 insertions(+), 1513 deletions(-) create mode 100644 internal/cmd/plugin/hibernate/cmd_test.go delete mode 100644 internal/cmd/plugin/hibernate/off.go delete mode 100644 internal/cmd/plugin/hibernate/on.go delete mode 100644 internal/cmd/plugin/hibernate/output.go delete mode 100644 internal/cmd/plugin/hibernate/resources.go delete mode 100644 internal/cmd/plugin/hibernate/status.go create mode 100644 internal/cmd/plugin/hibernate/suite_test.go delete mode 100644 tests/e2e/hibernation_test.go diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md index 36eda0bf35..a5cbeae3ca 100644 --- a/docs/src/declarative_hibernation.md +++ b/docs/src/declarative_hibernation.md @@ -12,14 +12,6 @@ process is running. The declarative hibernation feature enables saving CPU power by removing the database Pods, while keeping the database PVCs. -!!! Note - Declarative hibernation is different from the existing implementation - of [imperative hibernation via the `cnpg` plugin](kubectl-plugin.md#cluster-hibernation). - Imperative hibernation shuts down all Postgres instances in the High - Availability cluster, and keeps a static copy of the PVCs of the primary that - contain `PGDATA` and WALs. The plugin enables to exit the hibernation phase, by - resuming the primary and then recreating all the replicas - if they exist. - ## Hibernation To hibernate a cluster, set the `cnpg.io/hibernation=on` annotation: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 4048b9b749..7663c1b358 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -895,62 +895,48 @@ PVCs: kubectl cnpg destroy cluster-example 2 ``` -### Cluster hibernation +### Cluster Hibernation -Sometimes you may want to suspend the execution of a CloudNativePG `Cluster` -while retaining its data, then resume its activity at a later time. We've -called this feature **cluster hibernation**. +There are times when you may need to temporarily suspend a CloudNativePG +`Cluster` while preserving its data, allowing you to resume operations later. +This feature is known as **cluster hibernation**. -Hibernation is only available via the `kubectl cnpg hibernate [on|off]` -commands. +Hibernation is managed declaratively using the `cnpg.io/hibernation` +annotation. -Hibernating a CloudNativePG cluster means destroying all the resources -generated by the cluster, except the PVCs that belong to the PostgreSQL primary -instance. +!!! 
Info + For more details, see the ["Declarative Hibernation"](declarative_hibernation.md) + documentation page. + +To simplify the process, the `cnpg` plugin for `kubectl` provides a `hibernate` +command, which acts as a convenient shortcut for applying the annotation. -You can hibernate a cluster with: +To hibernate a cluster, run: ```sh kubectl cnpg hibernate on CLUSTER ``` -This will: - -1. shutdown every PostgreSQL instance -2. detach the PVCs containing the data of the primary instance, and annotate - them with the latest database status and the latest cluster configuration -3. delete the `Cluster` resource, including every generated resource - except - the aforementioned PVCs - -When hibernated, a CloudNativePG cluster is represented by just a group of -PVCs, in which the one containing the `PGDATA` is annotated with the latest -available status, including content from `pg_controldata`. - -!!! Warning - A cluster having fenced instances cannot be hibernated, as fencing is - part of the hibernation procedure too. - -In case of error the operator will not be able to revert the procedure. You can -still force the operation with: - -```sh -kubectl cnpg hibernate on CLUSTER --force -``` +This command applies the `cnpg.io/hibernation=on` annotation to the cluster, +suspending its execution. -A hibernated cluster can be resumed with: +To resume a hibernated cluster, use: ```sh kubectl cnpg hibernate off CLUSTER ``` -Once the cluster has been hibernated, it's possible to show the last -configuration and the status that PostgreSQL had after it was shut down. -That can be done with: +This will remove the hibernation state by setting `cnpg.io/hibernation=off`. + +You can check the cluster’s status at any time with: ```sh -kubectl cnpg hibernate status CLUSTER +kubectl cnpg status CLUSTER ``` +This will display the current state of the cluster, including whether it is +hibernated. + ### Benchmarking the database with pgbench Pgbench can be run against an existing PostgreSQL cluster with following diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index ca52d5e202..17f9f6b6af 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -12,8 +12,13 @@ on the release branch in GitHub. ### Important changes: -- OPTIONAL -- OPTIONAL +- The `hibernate on/off` commands in the `cnpg` plugin for `kubectl` now serve + as shortcuts for the declarative hibernation procedure. The previously + available imperative implementation has been removed in favor of the + declarative approach. Additionally, the `hibernate status` command has been + removed in favor of the standard `status` command. + **Do not upgrade to version 1.26 of both the plugin and the operator unless + you are prepared to migrate to the declarative method.** ### Features: diff --git a/internal/cmd/plugin/hibernate/cmd.go b/internal/cmd/plugin/hibernate/cmd.go index 134c5412ff..5a2c0c9645 100644 --- a/internal/cmd/plugin/hibernate/cmd.go +++ b/internal/cmd/plugin/hibernate/cmd.go @@ -17,11 +17,15 @@ limitations under the License. 
package hibernate import ( + "context" "fmt" "github.com/spf13/cobra" + "sigs.k8s.io/controller-runtime/pkg/client" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) var ( @@ -34,17 +38,10 @@ var ( }, RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] - force, err := cmd.Flags().GetBool("force") - if err != nil { - return err - } - - hibernateOn, err := newOnCommand(cmd.Context(), clusterName, force) - if err != nil { - return err - } - - return hibernateOn.execute() + return annotateCluster(cmd.Context(), plugin.Client, client.ObjectKey{ + Name: clusterName, + Namespace: plugin.Namespace, + }, utils.HibernationAnnotationValueOn) }, } @@ -57,34 +54,10 @@ var ( }, RunE: func(cmd *cobra.Command, args []string) error { clusterName := args[0] - off := newOffCommand(cmd.Context(), clusterName) - return off.execute() - }, - } - - hibernateStatusCmd = &cobra.Command{ - Use: "status CLUSTER", - Short: "Prints the hibernation status for the CLUSTER", - Args: plugin.RequiresArguments(1), - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp - }, - RunE: func(cmd *cobra.Command, args []string) error { - clusterName := args[0] - rawOutput, err := cmd.Flags().GetString("output") - if err != nil { - return err - } - - outputFormat := plugin.OutputFormat(rawOutput) - switch outputFormat { - case plugin.OutputFormatJSON, plugin.OutputFormatYAML: - return newStatusCommandStructuredOutput(cmd.Context(), clusterName, outputFormat).execute() - case plugin.OutputFormatText: - return newStatusCommandTextOutput(cmd.Context(), clusterName).execute() - default: - return fmt.Errorf("output: %s is not supported by the hibernate CLI", rawOutput) - } + return annotateCluster(cmd.Context(), plugin.Client, client.ObjectKey{ + Name: clusterName, + Namespace: plugin.Namespace, + }, utils.HibernationAnnotationValueOff) }, } ) @@ -99,19 +72,37 @@ func NewCmd() *cobra.Command { cmd.AddCommand(hibernateOnCmd) cmd.AddCommand(hibernateOffCmd) - cmd.AddCommand(hibernateStatusCmd) - - hibernateOnCmd.Flags().Bool( - "force", - false, - "Force the hibernation procedure even if the preconditions are not met") - hibernateStatusCmd.Flags(). - StringP( - "output", - "o", - "text", - "Output format. 
One of text, json, or yaml", - ) return cmd } + +func annotateCluster( + ctx context.Context, + cli client.Client, + clusterKey client.ObjectKey, + value utils.HibernationAnnotationValue, +) error { + var cluster apiv1.Cluster + + if err := cli.Get(ctx, clusterKey, &cluster); err != nil { + return fmt.Errorf("failed to get cluster %s: %w", clusterKey.Name, err) + } + + if cluster.Annotations == nil { + cluster.SetAnnotations(make(map[string]string)) + } + + origCluster := cluster.DeepCopy() + + cluster.Annotations[utils.HibernationAnnotationName] = string(value) + + if cluster.Annotations[utils.HibernationAnnotationName] == origCluster.Annotations[utils.HibernationAnnotationName] { + return fmt.Errorf("cluster %s is already in the requested state", clusterKey.Name) + } + + if err := cli.Patch(ctx, &cluster, client.MergeFrom(origCluster)); err != nil { + return fmt.Errorf("failed to patch cluster %s: %w", clusterKey.Name, err) + } + + return nil +} diff --git a/internal/cmd/plugin/hibernate/cmd_test.go b/internal/cmd/plugin/hibernate/cmd_test.go new file mode 100644 index 0000000000..89f82bd8e8 --- /dev/null +++ b/internal/cmd/plugin/hibernate/cmd_test.go @@ -0,0 +1,97 @@ +package hibernate + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8client "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("annotateCluster", func() { + var ( + cluster *apiv1.Cluster + cli k8client.Client + clusterKey k8client.ObjectKey + ) + + BeforeEach(func() { + cluster = &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + } + cli = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).WithObjects(cluster).Build() + clusterKey = k8client.ObjectKeyFromObject(cluster) + }) + + It("annotates the cluster with hibernation on", func(ctx SpecContext) { + err := annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOn) + Expect(err).ToNot(HaveOccurred()) + + updatedCluster := &apiv1.Cluster{} + err = cli.Get(ctx, clusterKey, updatedCluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Annotations[utils.HibernationAnnotationName]). + To(Equal(string(utils.HibernationAnnotationValueOn))) + }) + + It("annotates the cluster with hibernation off", func(ctx SpecContext) { + err := annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOff) + Expect(err).ToNot(HaveOccurred()) + + updatedCluster := &apiv1.Cluster{} + err = cli.Get(ctx, clusterKey, updatedCluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Annotations[utils.HibernationAnnotationName]). 
+ To(Equal(string(utils.HibernationAnnotationValueOff))) + }) + + It("returns an error if the cluster is already in the requested state", func(ctx SpecContext) { + err := annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOn) + Expect(err).ToNot(HaveOccurred()) + + err = annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOn) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(fmt.Sprintf("cluster %s is already in the requested state", clusterKey.Name))) + }) + + It("returns an error if the cluster cannot be retrieved", func(ctx SpecContext) { + nonExistingClusterKey := k8client.ObjectKey{ + Name: "non-existing-cluster", + Namespace: "test-namespace", + } + + err := annotateCluster(ctx, cli, nonExistingClusterKey, utils.HibernationAnnotationValueOn) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("failed to get cluster %s", nonExistingClusterKey.Name))) + }) + + It("toggles hibernation from on to off", func(ctx SpecContext) { + err := annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOn) + Expect(err).ToNot(HaveOccurred()) + + updatedCluster := &apiv1.Cluster{} + err = cli.Get(ctx, clusterKey, updatedCluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Annotations[utils.HibernationAnnotationName]). + To(Equal(string(utils.HibernationAnnotationValueOn))) + + err = annotateCluster(ctx, cli, clusterKey, utils.HibernationAnnotationValueOff) + Expect(err).ToNot(HaveOccurred()) + + updatedCluster = &apiv1.Cluster{} + err = cli.Get(ctx, clusterKey, updatedCluster) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedCluster.Annotations[utils.HibernationAnnotationName]). + To(Equal(string(utils.HibernationAnnotationValueOff))) + }) +}) diff --git a/internal/cmd/plugin/hibernate/off.go b/internal/cmd/plugin/hibernate/off.go deleted file mode 100644 index 2a6e9a23c7..0000000000 --- a/internal/cmd/plugin/hibernate/off.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package hibernate - -import ( - "context" - "fmt" - - "github.com/cloudnative-pg/machinery/pkg/log" - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/strings/slices" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// offCommand represent the `hibernate off` command -type offCommand struct { - ctx context.Context - clusterName string -} - -// newOffCommand creates a new `hibernate off` command -func newOffCommand(ctx context.Context, clusterName string) *offCommand { - contextLogger := log.FromContext(ctx).WithValues( - "clusterName", clusterName) - - return &offCommand{ - ctx: log.IntoContext(ctx, contextLogger), - clusterName: clusterName, - } -} - -// execute executes the `hibernate off` command -func (off *offCommand) execute() error { - off.printAdvancement("cluster reactivation starting") - - // Ensuring the cluster doesn't exist - if err := off.ensureClusterDoesNotExistStep(); err != nil { - return err - } - - // Get the list of PVC from which we need to resume this cluster - pvcGroup, err := getHibernatedPVCGroup(off.ctx, off.clusterName) - if err != nil { - return err - } - - // Ensure the list of PVCs we have is correct - if err := off.ensurePVCsArePartOfAPVCGroupStep(pvcGroup); err != nil { - return err - } - - // We recreate the cluster resource from the first PVC of the group, - // and don't care of which PVC we select because we annotate - // each PVC of a group with the same data. - pvc := pvcGroup[0] - - // We get the original cluster resource from the annotation - clusterFromPVC, err := getClusterFromPVCAnnotation(pvc) - if err != nil { - return err - } - - // And recreate it into the Kubernetes cluster - if err := off.createClusterWithoutRuntimeDataStep(clusterFromPVC); err != nil { - return err - } - - off.printAdvancement("cluster reactivation completed") - - return nil -} - -// ensureClusterDoesNotExistStep checks if this cluster exist or not, ensuring -// that it is not present -func (off *offCommand) ensureClusterDoesNotExistStep() error { - var cluster apiv1.Cluster - err := plugin.Client.Get( - off.ctx, - types.NamespacedName{Name: off.clusterName, Namespace: plugin.Namespace}, - &cluster, - ) - if err == nil { - return fmt.Errorf("cluster already exist, cannot proceed with reactivation") - } - if !apierrs.IsNotFound(err) { - return err - } - return nil -} - -// ensurePVCsArePartOfAPVCGroupStep check if the passed PVCs are really part of the same group -func (off *offCommand) ensurePVCsArePartOfAPVCGroupStep(pvcs []corev1.PersistentVolumeClaim) error { - // ensure all the pvcs belong to the same node serial and are hibernated - var nodeSerial []string - for _, pvc := range pvcs { - // IMPORTANT: do not use utils.ClusterManifestAnnotationName, utils.PgControlDataAnnotationName here for backwards - // compatibility - if err := ensureAnnotationsExists( - pvc, - utils.HibernateClusterManifestAnnotationName, - utils.HibernatePgControlDataAnnotationName, - utils.ClusterSerialAnnotationName, - ); err != nil { - return err - } - - serial := pvc.Annotations[utils.ClusterSerialAnnotationName] - if !slices.Contains(nodeSerial, serial) { - nodeSerial = append(nodeSerial, serial) - } - } - if len(nodeSerial) != 1 { - return fmt.Errorf("hibernate pvcs belong to different instances of the cluster, cannot proceed") - 
} - - return nil -} - -// createClusterWithoutRuntimeDataStep recreate the original cluster back into Kubernetes -func (off *offCommand) createClusterWithoutRuntimeDataStep(clusterFromPVC apiv1.Cluster) error { - cluster := clusterFromPVC.DeepCopy() - // remove any runtime kubernetes metadata - cluster.ObjectMeta.ResourceVersion = "" - cluster.ObjectMeta.ManagedFields = nil - cluster.ObjectMeta.UID = "" - cluster.ObjectMeta.Generation = 0 - cluster.ObjectMeta.CreationTimestamp = metav1.Time{} - // remove cluster status - cluster.Status = apiv1.ClusterStatus{} - - // remove any runtime kubernetes annotations - delete(cluster.Annotations, corev1.LastAppliedConfigAnnotation) - - // remove the cluster fencing - delete(cluster.Annotations, utils.FencedInstanceAnnotation) - - // create cluster - return plugin.Client.Create(off.ctx, cluster) -} - -// ensureAnnotationsExists returns an error if the passed PVC is annotated with all the -// passed annotations names -func ensureAnnotationsExists(volume corev1.PersistentVolumeClaim, annotationNames ...string) error { - for _, annotationName := range annotationNames { - if _, ok := volume.Annotations[annotationName]; !ok { - return fmt.Errorf("missing %s annotation, from pvcs: %s", annotationName, volume.Name) - } - } - - return nil -} - -func (off *offCommand) printAdvancement(msg string) { - fmt.Println(msg) -} diff --git a/internal/cmd/plugin/hibernate/on.go b/internal/cmd/plugin/hibernate/on.go deleted file mode 100644 index f9b685b5c5..0000000000 --- a/internal/cmd/plugin/hibernate/on.go +++ /dev/null @@ -1,375 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package hibernate - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/cloudnative-pg/machinery/pkg/log" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/retry" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/destroy" - pluginresources "github.com/cloudnative-pg/cloudnative-pg/internal/plugin/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -var hibernationBackoff = wait.Backoff{ - Steps: 4, - Duration: 10 * time.Second, - Factor: 5.0, - Jitter: 0.1, -} - -// onCommand represent the `hibernate on` subcommand -type onCommand struct { - ctx context.Context - cluster *apiv1.Cluster - primaryInstanceSerial int - force bool - shouldRollback bool - - managedInstances []corev1.Pod - primaryInstance corev1.Pod - pvcs []corev1.PersistentVolumeClaim -} - -// newOnCommand creates a new `hibernate on` command -func newOnCommand(ctx context.Context, clusterName string, force bool) (*onCommand, error) { - var cluster apiv1.Cluster - - // Get the Cluster object - err := plugin.Client.Get( - ctx, - client.ObjectKey{Namespace: plugin.Namespace, Name: clusterName}, - &cluster) - if err != nil { - return nil, fmt.Errorf("could not get cluster: %v", err) - } - - // Get the instances to be hibernated - managedInstances, primaryInstance, err := pluginresources.GetInstancePods(ctx, clusterName) - if err != nil { - return nil, fmt.Errorf("could not get cluster pods: %w", err) - } - if primaryInstance.Name == "" { - return nil, fmt.Errorf("no primary instance found, cannot proceed with the hibernation") - } - - // Get the PVCs that will be hibernated - pvcs, err := persistentvolumeclaim.GetInstancePVCs(ctx, plugin.Client, primaryInstance.Name, plugin.Namespace) - if err != nil { - return nil, fmt.Errorf("cannot get PVCs: %w", err) - } - - // Get the serial ID of the primary instance - primaryInstanceSerial, err := specs.GetNodeSerial(primaryInstance.ObjectMeta) - if err != nil { - return nil, fmt.Errorf("could not get the primary node: %w", err) - } - - contextLogger := log.FromContext(ctx).WithValues( - "clusterName", clusterName, - "primaryInstance", primaryInstance.Name) - - return &onCommand{ - ctx: log.IntoContext(ctx, contextLogger), - cluster: &cluster, - primaryInstanceSerial: primaryInstanceSerial, - managedInstances: managedInstances, - primaryInstance: primaryInstance, - pvcs: pvcs, - force: force, - shouldRollback: false, - }, nil -} - -// execute executes the `hibernate on` command -func (on *onCommand) execute() error { - // Check the hibernation preconditions - if err := on.checkPreconditionsStep(); err != nil { - return err - } - - on.printAdvancement("hibernation process starting...") - - if err := on.fenceClusterStep(); err != nil { - on.shouldRollback = true - return err - } - defer on.rollbackFenceClusterIfNeeded() - - on.printAdvancement("waiting for the cluster to be fenced") - - if err := on.waitInstancesToBeFencedStep(); err != nil { - on.shouldRollback = true - return err - } - - on.printAdvancement("cluster is now fenced, 
storing primary pg_controldata output") - - if err := on.annotatePVCStep(); err != nil { - on.shouldRollback = true - return err - } - defer on.rollBackAnnotationsIfNeeded() - - on.printAdvancement("PVC annotation complete") - - if err := on.deleteResourcesStep(); err != nil { - on.shouldRollback = true - return err - } - - on.printAdvancement("Hibernation completed") - return nil -} - -// checkPreconditionsStep checks if the preconditions for the execution of this step are -// met or not. If they are not met, it will return an error -func (on *onCommand) checkPreconditionsStep() error { - contextLogger := log.FromContext(on.ctx) - - // We should refuse to hibernate a cluster that was fenced already - fencedInstances, err := utils.GetFencedInstances(on.cluster.Annotations) - if err != nil { - return fmt.Errorf("could not check if cluster is fenced: %v", err) - } - - if fencedInstances.Len() > 0 { - if on.force { - contextLogger.Warning("Continuing hibernation procedure even if there are fenced instances") - } else { - return fmt.Errorf("cannot hibernate a cluster that has fenced instances") - } - } - - return nil -} - -func (on *onCommand) fenceClusterStep() error { - contextLogger := log.FromContext(on.ctx) - - contextLogger.Debug("applying the fencing annotation to the cluster manifest") - if err := utils.NewFencingMetadataExecutor(plugin.Client). - AddFencing(). - ForAllInstances(). - Execute( - on.ctx, - types.NamespacedName{Name: on.cluster.Name, Namespace: plugin.Namespace}, - &apiv1.Cluster{}, - ); err != nil { - return err - } - contextLogger.Debug("fencing annotation set on the cluster manifest") - - return nil -} - -// rollbackFenceClusterIfNeeded removes the fencing status from the -// cluster -func (on *onCommand) rollbackFenceClusterIfNeeded() { - if !on.shouldRollback { - return - } - - contextLogger := log.FromContext(on.ctx) - - fmt.Println("rolling back hibernation: removing the fencing annotation") - if err := utils.NewFencingMetadataExecutor(plugin.Client). - RemoveFencing(). - ForAllInstances(). 
- Execute(on.ctx, - types.NamespacedName{Name: on.cluster.Name, Namespace: plugin.Namespace}, &apiv1.Cluster{}); err != nil { - contextLogger.Error(err, "Rolling back from hibernation failed") - } -} - -// waitInstancesToBeFenced waits for all instances to be shut down -func (on *onCommand) waitInstancesToBeFencedStep() error { - isRetryable := func(err error) bool { - return !apierrors.IsForbidden(err) && !apierrors.IsUnauthorized(err) - } - for _, instance := range on.managedInstances { - if err := retry.OnError(hibernationBackoff, isRetryable, func() error { - running, err := pluginresources.IsInstanceRunning(on.ctx, instance) - if err != nil { - return fmt.Errorf("error checking instance status (%v): %w", instance.Name, err) - } - if running { - return fmt.Errorf("instance still running (%v)", instance.Name) - } - return nil - }); err != nil { - return err - } - } - - return nil -} - -// annotatePVCStep stores the pg_controldata output -// into an annotation of the primary PVC -func (on *onCommand) annotatePVCStep() error { - controlData, err := plugin.GetPGControlData(on.ctx, on.primaryInstance) - if err != nil { - return fmt.Errorf("could not get primary control data: %w", err) - } - on.printAdvancement("primary pg_controldata output fetched") - - on.printAdvancement("annotating the PVC with the cluster manifest") - if err := annotatePVCs(on.ctx, on.pvcs, on.cluster, controlData); err != nil { - return fmt.Errorf("could not annotate PVCs: %w", err) - } - - return nil -} - -func (on *onCommand) rollBackAnnotationsIfNeeded() { - if !on.shouldRollback { - return - } - - fmt.Println("rolling back hibernation: removing pvc annotations") - err := removePVCannotations(on.ctx, on.pvcs) - if err != nil { - fmt.Printf("could not remove PVC annotations: %v", err) - } -} - -func (on *onCommand) deleteResourcesStep() error { - on.printAdvancement("destroying the primary instance while preserving the pvc") - - // from this point there is no going back - if err := destroy.Destroy( - on.ctx, - on.cluster.Name, - fmt.Sprintf("%s-%s", on.cluster.Name, strconv.Itoa(on.primaryInstanceSerial)), - true, - ); err != nil { - return fmt.Errorf("error destroying primary instance: %w", err) - } - on.printAdvancement("primary instance destroy completed") - - on.printAdvancement("deleting the cluster resource") - if err := plugin.Client.Delete(on.ctx, on.cluster); err != nil { - return fmt.Errorf("error while deleting cluster resource: %w", err) - } - on.printAdvancement("cluster resource deletion complete") - - return nil -} - -func (on *onCommand) printAdvancement(msg string) { - fmt.Println(msg) -} - -func annotatePVCs( - ctx context.Context, - pvcs []corev1.PersistentVolumeClaim, - cluster *apiv1.Cluster, - pgControlData string, -) error { - for _, pvc := range pvcs { - if err := retry.OnError(retry.DefaultBackoff, resources.RetryAlways, func() error { - var currentPVC corev1.PersistentVolumeClaim - if err := plugin.Client.Get( - ctx, - types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, - ¤tPVC, - ); err != nil { - return err - } - - if currentPVC.Annotations == nil { - currentPVC.Annotations = map[string]string{} - } - origPVC := currentPVC.DeepCopy() - - // IMPORTANT: do not use utils.ClusterManifestAnnotationName, utils.PgControlDataAnnotationName here for backwards - // compatibility - _, hasHibernateAnnotation := currentPVC.Annotations[utils.HibernateClusterManifestAnnotationName] - _, hasPgControlDataAnnotation := currentPVC.Annotations[utils.HibernatePgControlDataAnnotationName] - if 
hasHibernateAnnotation || hasPgControlDataAnnotation { - return fmt.Errorf("the PVC already contains Hibernation annotations. Erroring out") - } - - bytes, err := json.Marshal(&cluster) - if err != nil { - return err - } - - currentPVC.Annotations[utils.HibernateClusterManifestAnnotationName] = string(bytes) - currentPVC.Annotations[utils.HibernatePgControlDataAnnotationName] = pgControlData - currentPVC.Annotations[utils.ClusterManifestAnnotationName] = string(bytes) - currentPVC.Annotations[utils.PgControldataAnnotationName] = pgControlData - - return plugin.Client.Patch(ctx, ¤tPVC, client.MergeFrom(origPVC)) - }); err != nil { - return err - } - } - - return nil -} - -func removePVCannotations( - ctx context.Context, - pvcs []corev1.PersistentVolumeClaim, -) error { - for _, pvc := range pvcs { - if err := retry.OnError(retry.DefaultBackoff, resources.RetryAlways, func() error { - var currentPVC corev1.PersistentVolumeClaim - if err := plugin.Client.Get( - ctx, - types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, - ¤tPVC, - ); err != nil { - return err - } - - if currentPVC.Annotations == nil { - return nil - } - origPVC := currentPVC.DeepCopy() - - delete(currentPVC.Annotations, utils.HibernateClusterManifestAnnotationName) - delete(currentPVC.Annotations, utils.HibernatePgControlDataAnnotationName) - delete(currentPVC.Annotations, utils.ClusterManifestAnnotationName) - delete(currentPVC.Annotations, utils.PgControldataAnnotationName) - - return plugin.Client.Patch(ctx, ¤tPVC, client.MergeFrom(origPVC)) - }); err != nil { - return err - } - } - - return nil -} diff --git a/internal/cmd/plugin/hibernate/output.go b/internal/cmd/plugin/hibernate/output.go deleted file mode 100644 index d337f5e0ee..0000000000 --- a/internal/cmd/plugin/hibernate/output.go +++ /dev/null @@ -1,210 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package hibernate - -import ( - "bytes" - "context" - "fmt" - "os" - "strings" - "text/tabwriter" - - "github.com/cheynewallace/tabby" - "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/logrusorgru/aurora/v4" - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/yaml" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -const ( - // statusClusterManifestNotFound is an error message reported when no cluster manifest is not found - statusClusterManifestNotFound = "Cluster manifest not found" -) - -// statusOutputManager is a type capable of executing a status output request -type statusOutputManager interface { - addHibernationSummaryInformation(level statusLevel, statusMessage, clusterName string) - addClusterManifestInformation(cluster *apiv1.Cluster) - addPVCGroupInformation(pvcs []corev1.PersistentVolumeClaim) - // execute renders the output - execute() error -} - -type textStatusOutputManager struct { - textPrinter *tabby.Tabby - stdoutBuffer *bytes.Buffer -} - -func newTextStatusOutputManager() *textStatusOutputManager { - buffer := &bytes.Buffer{} - return &textStatusOutputManager{ - textPrinter: tabby.NewCustom(tabwriter.NewWriter(buffer, 0, 0, 4, ' ', 0)), - stdoutBuffer: buffer, - } -} - -func (t *textStatusOutputManager) getColor(level statusLevel) aurora.Color { - switch level { - case warningLevel: - return aurora.YellowFg - case errorLevel: - return aurora.RedFg - default: - return aurora.GreenFg - } -} - -func (t *textStatusOutputManager) addHibernationSummaryInformation( - level statusLevel, - statusMessage string, - clusterName string, -) { - t.textPrinter.AddHeader(aurora.Colorize("Hibernation Summary", t.getColor(level))) - t.textPrinter.AddLine("Hibernation status", statusMessage) - t.textPrinter.AddLine("Cluster Name", clusterName) - t.textPrinter.AddLine("Cluster Namespace", plugin.Namespace) - t.textPrinter.AddLine() -} - -func (t *textStatusOutputManager) addClusterManifestInformation( - cluster *apiv1.Cluster, -) { - if cluster == nil { - t.textPrinter.AddHeader(aurora.Red("Cluster Spec Information")) - t.textPrinter.AddLine(aurora.Red(statusClusterManifestNotFound)) - return - } - - t.textPrinter.AddHeader(aurora.Green("Cluster Spec Information")) - bytesArray, err := yaml.Marshal(cluster.Spec) - if err != nil { - const message = "Could not serialize the cluster manifest" - t.textPrinter.AddLine(aurora.Red(message)) - return - } - - t.textPrinter.AddLine(string(bytesArray)) - t.textPrinter.AddLine() -} - -func (t *textStatusOutputManager) addPVCGroupInformation( - pvcs []corev1.PersistentVolumeClaim, -) { - if len(pvcs) == 0 { - return - } - - // there is no need to iterate the pvc group, it is either all valid or none - value, ok := pvcs[0].Annotations[utils.HibernatePgControlDataAnnotationName] - if !ok { - return - } - - t.textPrinter.AddHeader(aurora.Green("PostgreSQL instance control information")) - t.textPrinter.AddLine(value) -} - -func (t *textStatusOutputManager) execute() error { - // do not remove this is to flush the writer cache into the buffer - t.textPrinter.Print() - fmt.Print(t.stdoutBuffer) - return nil -} - -type structuredStatusOutputManager struct { - mapToSerialize map[string]interface{} - format plugin.OutputFormat - ctx context.Context -} - -func newStructuredOutputManager(ctx context.Context, format plugin.OutputFormat) *structuredStatusOutputManager { - return &structuredStatusOutputManager{ - 
mapToSerialize: map[string]interface{}{}, - format: format, - ctx: ctx, - } -} - -func (t *structuredStatusOutputManager) addHibernationSummaryInformation( - level statusLevel, - statusMessage string, - clusterName string, -) { - t.mapToSerialize["summary"] = map[string]string{ - "status": statusMessage, - "clusterName": clusterName, - "namespace": plugin.Namespace, - "level": string(level), - } -} - -func (t *structuredStatusOutputManager) addClusterManifestInformation( - cluster *apiv1.Cluster, -) { - tmpMap := map[string]interface{}{} - defer func() { - t.mapToSerialize["cluster"] = tmpMap - }() - - if cluster == nil { - tmpMap["error"] = statusClusterManifestNotFound - return - } - - tmpMap["spec"] = cluster.Spec -} - -func (t *structuredStatusOutputManager) addPVCGroupInformation( - pvcs []corev1.PersistentVolumeClaim, -) { - contextLogger := log.FromContext(t.ctx) - - // there is no need to iterate the pvc group, it is either all valid or none - value, ok := pvcs[0].Annotations[utils.HibernatePgControlDataAnnotationName] - if !ok { - return - } - - tmp := map[string]string{} - rows := strings.Split(value, "\n") - for _, row := range rows { - // skip empty rows - if strings.Trim(row, " ") == "" { - continue - } - - res := strings.SplitN(row, ":", 2) - if len(res) != 2 { - // bad row parsing, we skip it - contextLogger.Warning("skipping row because it was malformed", "row", row) - tmp["error"] = "one or more rows could not be parsed" - continue - } - tmp[res[0]] = strings.Trim(res[1], " ") - } - - t.mapToSerialize["pgControlData"] = tmp -} - -func (t *structuredStatusOutputManager) execute() error { - return plugin.Print(t.mapToSerialize, t.format, os.Stdout) -} diff --git a/internal/cmd/plugin/hibernate/resources.go b/internal/cmd/plugin/hibernate/resources.go deleted file mode 100644 index f8deae24aa..0000000000 --- a/internal/cmd/plugin/hibernate/resources.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package hibernate - -import ( - "context" - "encoding/json" - "fmt" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" -) - -// errNoHibernatedPVCsFound indicates that no PVCs were found. 
-var errNoHibernatedPVCsFound = fmt.Errorf("no hibernated PVCs to reactivate found") - -// getHibernatedPVCGroupStep gets the PVC group resulting from the hibernation process -func getHibernatedPVCGroup(ctx context.Context, clusterName string) ([]corev1.PersistentVolumeClaim, error) { - // Get the list of PVCs belonging to this group - var pvcList corev1.PersistentVolumeClaimList - if err := plugin.Client.List( - ctx, - &pvcList, - client.MatchingLabels{utils.ClusterLabelName: clusterName}, - client.InNamespace(plugin.Namespace), - ); err != nil { - return nil, err - } - if len(pvcList.Items) == 0 { - return nil, errNoHibernatedPVCsFound - } - - return pvcList.Items, nil -} - -// getClusterFromPVCAnnotation reads the original cluster resource from the chosen PVC -func getClusterFromPVCAnnotation(pvc corev1.PersistentVolumeClaim) (apiv1.Cluster, error) { - var clusterFromPVC apiv1.Cluster - // get the cluster manifest - clusterJSON := pvc.Annotations[utils.HibernateClusterManifestAnnotationName] - if err := json.Unmarshal([]byte(clusterJSON), &clusterFromPVC); err != nil { - return apiv1.Cluster{}, err - } - return clusterFromPVC, nil -} diff --git a/internal/cmd/plugin/hibernate/status.go b/internal/cmd/plugin/hibernate/status.go deleted file mode 100644 index f425d85959..0000000000 --- a/internal/cmd/plugin/hibernate/status.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package hibernate - -import ( - "context" - "errors" - "fmt" - - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin" -) - -// statusLevel describes if the output should communicate an ok,warning or error status -type statusLevel string - -const ( - okLevel statusLevel = "ok" - warningLevel statusLevel = "warning" - errorLevel statusLevel = "error" -) - -type statusCommand struct { - outputManager statusOutputManager - ctx context.Context - clusterName string -} - -func newStatusCommandStructuredOutput( - ctx context.Context, - clusterName string, - format plugin.OutputFormat, -) *statusCommand { - return &statusCommand{ - outputManager: newStructuredOutputManager(ctx, format), - ctx: ctx, - clusterName: clusterName, - } -} - -func newStatusCommandTextOutput(ctx context.Context, clusterName string) *statusCommand { - return &statusCommand{ - outputManager: newTextStatusOutputManager(), - ctx: ctx, - clusterName: clusterName, - } -} - -func (cmd *statusCommand) execute() error { - isDeployed, err := cmd.isClusterDeployed() - if err != nil { - return err - } - if isDeployed { - return cmd.clusterIsAlreadyRunningOutput() - } - - pvcs, err := getHibernatedPVCGroup(cmd.ctx, cmd.clusterName) - if errors.Is(err, errNoHibernatedPVCsFound) { - return cmd.noHibernatedPVCsOutput() - } - if err != nil { - return err - } - - return cmd.clusterHibernatedOutput(pvcs) -} - -func (cmd *statusCommand) clusterHibernatedOutput(pvcs []corev1.PersistentVolumeClaim) error { - clusterFromPVC, err := getClusterFromPVCAnnotation(pvcs[0]) - if err != nil { - return err - } - - cmd.outputManager.addHibernationSummaryInformation(okLevel, "Cluster Hibernated", cmd.clusterName) - cmd.outputManager.addClusterManifestInformation(&clusterFromPVC) - cmd.outputManager.addPVCGroupInformation(pvcs) - - return cmd.outputManager.execute() -} - -func (cmd *statusCommand) clusterIsAlreadyRunningOutput() error { - cmd.outputManager.addHibernationSummaryInformation(warningLevel, "No Hibernation. Cluster Deployed.", cmd.clusterName) - return cmd.outputManager.execute() -} - -func (cmd *statusCommand) noHibernatedPVCsOutput() error { - cmd.outputManager.addHibernationSummaryInformation(errorLevel, "No hibernated PVCs found", cmd.clusterName) - return cmd.outputManager.execute() -} - -func (cmd *statusCommand) isClusterDeployed() (bool, error) { - var cluster apiv1.Cluster - - // Get the Cluster object - err := plugin.Client.Get(cmd.ctx, client.ObjectKey{Namespace: plugin.Namespace, Name: cmd.clusterName}, &cluster) - if apierrs.IsNotFound(err) { - return false, nil - } - if err != nil { - return false, fmt.Errorf("error while fetching the cluster resource: %w", err) - } - - return true, nil -} diff --git a/internal/cmd/plugin/hibernate/suite_test.go b/internal/cmd/plugin/hibernate/suite_test.go new file mode 100644 index 0000000000..b87a96a22c --- /dev/null +++ b/internal/cmd/plugin/hibernate/suite_test.go @@ -0,0 +1,13 @@ +package hibernate_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestHibernate(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Hibernate Suite") +} diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 4d07622063..890362c84a 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -120,6 +120,8 @@ func Status( } status := extractPostgresqlStatus(ctx, cluster) + hibernated, _ := isHibernated(status) + err = plugin.Print(status, format, os.Stdout) if err != nil || format != plugin.OutputFormatText { return err @@ -134,16 +136,18 @@ func Status( errs = append(errs, status.printPostgresConfiguration(ctx, clientInterface)...) status.printCertificatesStatus() } - status.printBackupStatus() - status.printBasebackupStatus(verbosity) - status.printReplicaStatus(verbosity) - if verbosity > 0 { - status.printUnmanagedReplicationSlotStatus() - status.printRoleManagerStatus() - status.printTablespacesStatus() - status.printPodDisruptionBudgetStatus() + if !hibernated { + status.printBackupStatus() + status.printBasebackupStatus(verbosity) + status.printReplicaStatus(verbosity) + if verbosity > 0 { + status.printUnmanagedReplicationSlotStatus() + status.printRoleManagerStatus() + status.printTablespacesStatus() + status.printPodDisruptionBudgetStatus() + } + status.printInstancesStatus() } - status.printInstancesStatus() status.printPluginStatus(verbosity) if len(errs) > 0 { @@ -234,17 +238,10 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClien cluster := fullStatus.Cluster - if cluster.IsReplica() { - fmt.Println(aurora.Yellow("Replica Cluster Summary")) - } else { - fmt.Println(aurora.Green("Cluster Summary")) - } - primaryInstance := cluster.Status.CurrentPrimary - if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { - primaryInstance = fmt.Sprintf("%v (switching to %v)", - cluster.Status.CurrentPrimary, cluster.Status.TargetPrimary) - } + + // Determine if the cluster is hibernated + hibernated, _ := isHibernated(fullStatus) fencedInstances, err := utils.GetFencedInstances(cluster.Annotations) if err != nil { @@ -253,6 +250,11 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClien isPrimaryFenced := cluster.IsInstanceFenced(cluster.Status.CurrentPrimary) primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + primaryInstance = fmt.Sprintf("%v (switching to %v)", + cluster.Status.CurrentPrimary, cluster.Status.TargetPrimary) + } + summary.AddLine("Name", client.ObjectKeyFromObject(cluster).String()) if primaryInstanceStatus != nil { @@ -266,12 +268,20 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClien summary.AddLine("Primary instance:", primaryInstance) } - primaryStartTime := getPrimaryStartTime(cluster) - if len(primaryStartTime) > 0 { - summary.AddLine("Primary start time:", primaryStartTime) + switch { + case hibernated: + summary.AddLine("Status:", aurora.Red("Hibernated")) + case isPrimaryFenced: + summary.AddLine("Status:", aurora.Red("Primary instance is fenced")) + default: + // Avoid printing the start time when hibernated or fenced + primaryStartTime := getPrimaryStartTime(cluster) + if len(primaryStartTime) > 0 { + summary.AddLine("Primary start time:", primaryStartTime) + } + summary.AddLine("Status:", fullStatus.getStatus(cluster)) } - summary.AddLine("Status:", fullStatus.getStatus(isPrimaryFenced, cluster)) if cluster.Spec.Instances == 
cluster.Status.Instances { summary.AddLine("Instances:", aurora.Green(cluster.Spec.Instances)) } else { @@ -291,16 +301,15 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClien } } - if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { - if cluster.Status.CurrentPrimary == "" { - fmt.Println(aurora.Red("Primary server is initializing")) - } else { - fmt.Println(aurora.Red("Switchover in progress")) - } - } - if clusterSizeErr != nil { - summary.AddLine("Size:", aurora.Red(clusterSizeErr.Error())) + switch { + case hibernated: + summary.AddLine("Size:", "- (hibernated)") + case isPrimaryFenced: + summary.AddLine("Size:", "- (fenced)") + default: + summary.AddLine("Size:", aurora.Red(clusterSizeErr.Error())) + } } else { summary.AddLine("Size:", clusterSize) } @@ -315,23 +324,32 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClien summary.AddLine("Current Write LSN:", lsnInfo) } + if cluster.IsReplica() { + fmt.Println(aurora.Yellow("Replica Cluster Summary")) + } else { + fmt.Println(aurora.Green("Cluster Summary")) + } + + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + if cluster.Status.CurrentPrimary == "" { + fmt.Println(aurora.Red("Primary server is initializing")) + } else { + fmt.Println(aurora.Red("Switchover in progress")) + } + } + summary.Print() fmt.Println() } func (fullStatus *PostgresqlStatus) printHibernationInfo() { - cluster := fullStatus.Cluster - - hibernationCondition := meta.FindStatusCondition( - cluster.Status.Conditions, - hibernation.HibernationConditionType, - ) + hibernated, hibernationCondition := isHibernated(fullStatus) if hibernationCondition == nil { return } hibernationStatus := tabby.New() - if hibernationCondition.Status == metav1.ConditionTrue { + if hibernated { hibernationStatus.AddLine("Status", "Hibernated") } else { hibernationStatus.AddLine("Status", "Active") @@ -345,6 +363,20 @@ func (fullStatus *PostgresqlStatus) printHibernationInfo() { fmt.Println() } +func isHibernated(fullStatus *PostgresqlStatus) (bool, *metav1.Condition) { + cluster := fullStatus.Cluster + hibernationCondition := meta.FindStatusCondition( + cluster.Status.Conditions, + hibernation.HibernationConditionType, + ) + + if hibernationCondition == nil || hibernationCondition.Status != metav1.ConditionTrue { + return false, hibernationCondition + } + + return true, hibernationCondition +} + func (fullStatus *PostgresqlStatus) printTokenStatus(token string) { primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() @@ -428,11 +460,7 @@ func (fullStatus *PostgresqlStatus) printPromotionTokenInfo() { fmt.Println() } -func (fullStatus *PostgresqlStatus) getStatus(isPrimaryFenced bool, cluster *apiv1.Cluster) string { - if isPrimaryFenced { - return fmt.Sprintf("%v", aurora.Red("Primary instance is fenced")) - } - +func (fullStatus *PostgresqlStatus) getStatus(cluster *apiv1.Cluster) string { switch cluster.Status.Phase { case apiv1.PhaseHealthy, apiv1.PhaseFirstPrimary, apiv1.PhaseCreatingReplica: return fmt.Sprintf("%v %v", aurora.Green(cluster.Status.Phase), cluster.Status.PhaseReason) diff --git a/tests/e2e/hibernation_test.go b/tests/e2e/hibernation_test.go deleted file mode 100644 index 0b4df3876a..0000000000 --- a/tests/e2e/hibernation_test.go +++ /dev/null @@ -1,404 +0,0 @@ -/* -Copyright The CloudNativePG Contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "encoding/json" - "fmt" - "strings" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/types" - ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - "github.com/cloudnative-pg/cloudnative-pg/tests" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" - "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var _ = Describe("Cluster Hibernation with plugin", Label(tests.LabelPlugin), func() { - type mode string - type hibernateSatusMessage string - type expectedKeysInStatus string - const ( - sampleFileClusterWithPGWalVolume = fixturesDir + "/base/cluster-storage-class.yaml.template" - sampleFileClusterWithOutPGWalVolume = fixturesDir + "/hibernate/" + - "cluster-storage-class-without-wal.yaml.template" - level = tests.Medium - HibernateOn mode = "on" - HibernateOff mode = "off" - HibernateStatus mode = "status" - clusterOffStatusMessage hibernateSatusMessage = "No Hibernation. Cluster Deployed." 
- clusterOnStatusMessage hibernateSatusMessage = "Cluster Hibernated" - summaryInStatus expectedKeysInStatus = "summary" - tableName = "test" - ) - BeforeEach(func() { - if testLevelEnv.Depth < int(level) { - Skip("Test depth is lower than the amount requested for this test") - } - }) - - Context("hibernate", func() { - var namespace string - var err error - getPrimaryAndClusterManifest := func(namespace, clusterName string) ([]byte, string) { - var beforeHibernationClusterInfo *apiv1.Cluster - var clusterManifest []byte - var beforeHibernationCurrentPrimary string - By("collecting current primary details", func() { - beforeHibernationClusterInfo, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ToNot(HaveOccurred()) - beforeHibernationCurrentPrimary = beforeHibernationClusterInfo.Status.CurrentPrimary - // collect expected cluster manifesto info - clusterManifest, err = json.Marshal(&beforeHibernationClusterInfo) - Expect(err).ToNot(HaveOccurred()) - }) - return clusterManifest, beforeHibernationCurrentPrimary - } - getPvc := func(role persistentvolumeclaim.Meta, instanceName string) corev1.PersistentVolumeClaim { - pvcName := role.GetName(instanceName) - pvcInfo := corev1.PersistentVolumeClaim{} - err = objects.Get(env.Ctx, env.Client, ctrlclient.ObjectKey{Namespace: namespace, Name: pvcName}, &pvcInfo) - Expect(err).ToNot(HaveOccurred()) - return pvcInfo - } - performHibernation := func(mode mode, namespace, clusterName string) { - By(fmt.Sprintf("performing hibernation %v", mode), func() { - _, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v", - mode, clusterName, namespace)) - Expect(err).ToNot(HaveOccurred()) - }) - By(fmt.Sprintf("verifying cluster %v pods are removed", clusterName), func() { - Eventually(func(g Gomega) { - podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) - g.Expect(len(podList.Items)).Should(BeEquivalentTo(0)) - }, 300).Should(Succeed()) - }) - } - - getHibernationStatusInJSON := func(namespace, clusterName string) map[string]interface{} { - var data map[string]interface{} - By("getting hibernation status", func() { - stdOut, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate %v %v -n %v -ojson", - HibernateStatus, clusterName, namespace)) - Expect(err).ToNot(HaveOccurred(), stdOut) - err = json.Unmarshal([]byte(stdOut), &data) - Expect(err).ToNot(HaveOccurred()) - }) - return data - } - - verifySummaryInHibernationStatus := func(clusterName string, message hibernateSatusMessage) { - statusOut := getHibernationStatusInJSON(namespace, clusterName) - actualStatus := statusOut[string(summaryInStatus)].(map[string]interface{})["status"].(string) - Expect(strings.Contains(string(message), actualStatus)).Should(BeEquivalentTo(true), - actualStatus+"\\not-contained-in\\"+string(message)) - } - verifyClusterResources := func( - namespace, clusterName string, objs []persistentvolumeclaim.ExpectedObjectCalculator, - ) { - By(fmt.Sprintf("verifying cluster resources are removed "+ - "post hibernation where roles %v", objs), func() { - timeout := 120 - - By(fmt.Sprintf("verifying cluster %v is removed", clusterName), func() { - Eventually(func() (bool, apiv1.Cluster) { - cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - if err != nil { - return true, apiv1.Cluster{} - } - return false, *cluster - }, timeout).Should(BeTrue()) - }) - - By(fmt.Sprintf("verifying cluster %v PVCs are removed", clusterName), func() { - Eventually(func() (int, error) { - pvcList, err := 
storage.GetPVCList(env.Ctx, env.Client, namespace) - if err != nil { - return -1, err - } - return len(pvcList.Items), nil - }, timeout).Should(BeEquivalentTo(len(objs))) - }) - - By(fmt.Sprintf("verifying cluster %v configMap is removed", clusterName), func() { - Eventually(func() (bool, corev1.ConfigMap) { - configMap := corev1.ConfigMap{} - err = env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: apiv1.DefaultMonitoringConfigMapName}, - &configMap) - if err != nil { - return true, corev1.ConfigMap{} - } - return false, configMap - }, timeout).Should(BeTrue()) - }) - - By(fmt.Sprintf("verifying cluster %v secrets are removed", clusterName), func() { - Eventually(func() (bool, corev1.SecretList, error) { - secretList := corev1.SecretList{} - err = env.Client.List(env.Ctx, &secretList, ctrlclient.InNamespace(namespace)) - if err != nil { - return false, corev1.SecretList{}, err - } - var getClusterSecrets []string - for _, secret := range secretList.Items { - if strings.HasPrefix(secret.GetName(), clusterName) { - getClusterSecrets = append(getClusterSecrets, secret.GetName()) - } - } - if len(getClusterSecrets) == 0 { - return true, corev1.SecretList{}, nil - } - return false, secretList, nil - }, timeout).Should(BeTrue()) - }) - - By(fmt.Sprintf("verifying cluster %v role is removed", clusterName), func() { - Eventually(func() (bool, v1.Role) { - role := v1.Role{} - err = env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: clusterName}, - &role) - if err != nil { - return true, v1.Role{} - } - return false, role - }, timeout).Should(BeTrue()) - }) - - By(fmt.Sprintf("verifying cluster %v rolebinding is removed", clusterName), func() { - Eventually(func() (bool, v1.RoleBinding) { - roleBinding := v1.RoleBinding{} - err = env.Client.Get(env.Ctx, - ctrlclient.ObjectKey{Namespace: namespace, Name: clusterName}, - &roleBinding) - if err != nil { - return true, v1.RoleBinding{} - } - return false, roleBinding - }, timeout).Should(BeTrue()) - }) - }) - } - verifyPvc := func( - expectedObject persistentvolumeclaim.ExpectedObjectCalculator, pvcUid types.UID, - clusterManifest []byte, instanceName string, - ) { - pvcInfo := getPvc(expectedObject, instanceName) - Expect(pvcUid).Should(BeEquivalentTo(pvcInfo.GetUID())) - // pvc should be attached annotation with pgControlData and Cluster manifesto - expectedAnnotationKeyPresent := []string{ - utils.HibernatePgControlDataAnnotationName, - utils.HibernateClusterManifestAnnotationName, - utils.PgControldataAnnotationName, - utils.ClusterManifestAnnotationName, - } - storage.ObjectHasAnnotations(&pvcInfo, expectedAnnotationKeyPresent) - expectedAnnotation := map[string]string{ - utils.HibernateClusterManifestAnnotationName: string(clusterManifest), - utils.ClusterManifestAnnotationName: string(clusterManifest), - } - storage.ObjectMatchesAnnotations(&pvcInfo, expectedAnnotation) - } - - assertHibernation := func(namespace, clusterName, tableName string) { - var beforeHibernationPgWalPvcUID types.UID - var beforeHibernationPgDataPvcUID types.UID - - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: postgres.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - clusterManifest, currentPrimary := getPrimaryAndClusterManifest(namespace, clusterName) - - By("collecting pgWal pvc details of current primary", func() { - pvcInfo := getPvc(persistentvolumeclaim.NewPgWalCalculator(), 
currentPrimary) - beforeHibernationPgWalPvcUID = pvcInfo.GetUID() - }) - - By("collecting pgData pvc details of current primary", func() { - pvcInfo := getPvc(persistentvolumeclaim.NewPgDataCalculator(), currentPrimary) - beforeHibernationPgDataPvcUID = pvcInfo.GetUID() - }) - - By(fmt.Sprintf("verifying hibernation status"+ - " before hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) - }) - - performHibernation(HibernateOn, namespace, clusterName) - - By(fmt.Sprintf("verifying hibernation status"+ - " after hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOnStatusMessage) - }) - - // After hibernation, it will destroy all the resources generated by the cluster, - // except the PVCs that belong to the PostgreSQL primary instance. - verifyClusterResources( - namespace, - clusterName, - []persistentvolumeclaim.ExpectedObjectCalculator{ - persistentvolumeclaim.NewPgWalCalculator(), - persistentvolumeclaim.NewPgDataCalculator(), - }, - ) - - By("verifying primary pgWal pvc info", func() { - verifyPvc( - persistentvolumeclaim.NewPgWalCalculator(), - beforeHibernationPgWalPvcUID, - clusterManifest, - currentPrimary, - ) - }) - - By("verifying primary pgData pvc info", func() { - verifyPvc( - persistentvolumeclaim.NewPgDataCalculator(), - beforeHibernationPgDataPvcUID, - clusterManifest, - currentPrimary, - ) - }) - - // verifying hibernation off - performHibernation(HibernateOff, namespace, clusterName) - - By(fmt.Sprintf("verifying hibernation status after "+ - "perform hibernation off on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) - }) - - AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) - // Test data should be present after hibernation off - AssertDataExpectedCount(env, tableLocator, 2) - } - - When("cluster setup with PG-WAL volume", func() { - It("hibernation process should work", func() { - const namespacePrefix = "hibernation-on-with-pg-wal" - clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithPGWalVolume) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - AssertCreateCluster(namespace, clusterName, sampleFileClusterWithPGWalVolume, env) - assertHibernation(namespace, clusterName, tableName) - }) - }) - When("cluster setup without PG-WAL volume", func() { - It("hibernation process should work", func() { - var beforeHibernationPgDataPvcUID types.UID - - const namespacePrefix = "hibernation-without-pg-wal" - clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithOutPGWalVolume) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - AssertCreateCluster(namespace, clusterName, sampleFileClusterWithOutPGWalVolume, env) - // Write a table and some data on the "app" database - tableLocator := TableLocator{ - Namespace: namespace, - ClusterName: clusterName, - DatabaseName: postgres.AppDBName, - TableName: tableName, - } - AssertCreateTestData(env, tableLocator) - clusterManifest, currentPrimary := getPrimaryAndClusterManifest(namespace, - clusterName) - - By("collecting 
pgData pvc details of current primary", func() { - pvcInfo := getPvc(persistentvolumeclaim.NewPgDataCalculator(), currentPrimary) - beforeHibernationPgDataPvcUID = pvcInfo.GetUID() - }) - - By(fmt.Sprintf("verifying hibernation status"+ - " before hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) - }) - - performHibernation(HibernateOn, namespace, clusterName) - - By(fmt.Sprintf("verifying hibernation status"+ - " after hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOnStatusMessage) - }) - - // After hibernation, it will destroy all the resources generated by the cluster, - // except the PVCs that belong to the PostgreSQL primary instance. - verifyClusterResources( - namespace, - clusterName, - []persistentvolumeclaim.ExpectedObjectCalculator{persistentvolumeclaim.NewPgDataCalculator()}, - ) - - By("verifying primary pgData pvc info", func() { - verifyPvc( - persistentvolumeclaim.NewPgDataCalculator(), - beforeHibernationPgDataPvcUID, - clusterManifest, - currentPrimary, - ) - }) - - // verifying hibernation off - performHibernation(HibernateOff, namespace, clusterName) - By(fmt.Sprintf("verifying hibernation status"+ - " before hibernate on cluster %v", clusterName), func() { - verifySummaryInHibernationStatus(clusterName, clusterOffStatusMessage) - }) - - AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReady], env) - // Test data should be present after hibernation off - AssertDataExpectedCount(env, tableLocator, 2) - }) - }) - When("cluster hibernation after switchover", func() { - It("hibernation process should work", func() { - const namespacePrefix = "hibernation-with-switchover" - clusterName, err := yaml.GetResourceNameFromYAML(env.Scheme, sampleFileClusterWithPGWalVolume) - Expect(err).ToNot(HaveOccurred()) - // Create a cluster in a namespace we'll delete after the test - namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) - Expect(err).ToNot(HaveOccurred()) - AssertCreateCluster(namespace, clusterName, sampleFileClusterWithPGWalVolume, env) - AssertSwitchover(namespace, clusterName, env) - assertHibernation(namespace, clusterName, tableName) - }) - }) - }) -}) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 7a202db7e9..f3b21772c2 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -697,10 +697,6 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, }) }) - It("can hibernate via plugin a cluster with tablespaces", func() { - assertCanHibernateClusterWithTablespaces(namespace, clusterName, hibernateImperatively, 2) - }) - It("can hibernate via annotation a cluster with tablespaces", func() { assertCanHibernateClusterWithTablespaces(namespace, clusterName, hibernateDeclaratively, 6) }) @@ -1279,8 +1275,6 @@ type hibernationMethod string const ( // hibernateDeclaratively it is a keyword to use while fencing on/off the instances using annotation method hibernateDeclaratively hibernationMethod = "annotation" - // hibernateImperatively it is a keyword to use while fencing on/off the instances using plugin method - hibernateImperatively hibernationMethod = "plugin" ) func hibernateOn( @@ -1291,13 +1285,6 @@ func hibernateOn( method hibernationMethod, ) error { switch method { - case hibernateImperatively: - _, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate on %v -n %v", - clusterName, namespace)) - if err != nil { 
- return err - } - return nil case hibernateDeclaratively: cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { @@ -1324,13 +1311,6 @@ func hibernateOff( method hibernationMethod, ) error { switch method { - case hibernateImperatively: - _, _, err := run.Run(fmt.Sprintf("kubectl cnpg hibernate off %v -n %v", - clusterName, namespace)) - if err != nil { - return err - } - return nil case hibernateDeclaratively: cluster, err := clusterutils.Get(ctx, crudClient, namespace, clusterName) if err != nil { From 817fd64d69788ae38f98658fdfe87d9ea95b11cc Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 20 Mar 2025 11:40:55 +0100 Subject: [PATCH 456/836] chore: update go version to 1.24 in devcontainer configuration (#7164) Signed-off-by: Marco Nenciarini --- .devcontainer/devcontainer.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index b90b872945..e66ed8dbf3 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,5 +1,5 @@ { - "image": "mcr.microsoft.com/devcontainers/go:1-bookworm", + "image": "mcr.microsoft.com/devcontainers/go:1.24-bookworm", "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {}, "ghcr.io/mpriscella/features/kind:1": {}, From b8f94121a9fd613b5dc6f6c5c4930890f1260130 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Thu, 20 Mar 2025 12:16:23 +0100 Subject: [PATCH 457/836] fix(docs): EOL dates (#7161) Fixed EOL dates on the supported releases page. Signed-off-by: Pierrick Chovelon Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- docs/src/supported_releases.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 446ed26f99..f3d032cc29 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -82,7 +82,7 @@ Git tags for versions are prefixed with `v`. | Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | |-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------| -| 1.25.x | Yes | Dec 23, 2024 | ~ May/Jun, 2025 | 1.29, 1.30, 1.31, 1.32 | 1.27, 1.28 | 13 - 17 | +| 1.25.x | Yes | Dec 23, 2024 | ~ Jun/July 2025 | 1.29, 1.30, 1.31, 1.32 | 1.27, 1.28 | 13 - 17 | | 1.24.x | Yes | Aug 22, 2024 | Mar 23, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 | | main | No, development only | | | | | 13 - 17 | @@ -119,11 +119,12 @@ version of PostgreSQL, we might not be able to help you. ## Upcoming releases -| Version | Release date | End of life | -|---------|--------------|---------------| -| 1.26.0 | Mar, 2025 | Aug/Sep, 2025 | -| 1.27.0 | Jun, 2025 | Dec, 2025 | -| 1.28.0 | Sep, 2025 | Mar/Apr, 2025 | +| Version | Release date | End of life | +|---------|--------------|-------------| +| 1.26.0 | ~ Mar, 2025 | ~ Dec, 2025 | +| 1.27.0 | ~ Jun, 2025 | ~ Mar, 2026 | +| 1.28.0 | ~ Sep, 2025 | ~ Jun, 2026 | +| 1.29.0 | ~ Dec, 2025 | ~ Sep, 2026 | !!! 
Note Feature freeze occurs 1-2 weeks before the release, at which point a From fd05adbb294a5149e6e91a9d05616a10e229cafe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 12:38:08 +0100 Subject: [PATCH 458/836] chore(deps): bump golang.org/x/net from 0.35.0 to 0.36.0 in the go_modules group across 1 directory (#7136) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 12edc1f47e..3089204aa1 100644 --- a/go.mod +++ b/go.mod @@ -99,7 +99,7 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect golang.org/x/crypto v0.35.0 // indirect - golang.org/x/net v0.35.0 // indirect + golang.org/x/net v0.36.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.31.0 // indirect diff --git a/go.sum b/go.sum index bb64e43baf..b502924267 100644 --- a/go.sum +++ b/go.sum @@ -226,8 +226,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= +golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 1fdf6d3fe7e23297d77f1e7ae5ad50b58759c0cc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 15:13:42 +0100 Subject: [PATCH 459/836] chore(deps): update kindest/node docker tag to v1.32.3 (main) (#7168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Update | Change | |---|---|---| | kindest/node | patch | `v1.32.2` -> `v1.32.3` | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). 
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index b33fb60550..28a2a96f94 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.32.2 +KIND_NODE_DEFAULT_VERSION=v1.32.3 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 709e79dda5..a2f28f7ac3 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.32.2 +KIND_NODE_DEFAULT_VERSION=v1.32.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 EXTERNAL_PROVISIONER_VERSION=v5.2.0 From 76459889f4aee499107921715ef8416f2cae3cb5 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 20 Mar 2025 15:33:02 +0100 Subject: [PATCH 460/836] feat(operator): introduce `KUBERNETES_CLUSTER_DOMAIN` configuration option (#6989) ## Release Notes Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator, allowing users to specify the domain suffix for fully qualified domain names (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to `cluster.local`. Closes #6979 Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Co-authored-by: Leonardo Cecchi Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 3 +++ api/v1/cluster_funcs.go | 2 +- api/v1/cluster_funcs_test.go | 18 +++++++++--------- docs/src/certificates.md | 5 +++++ docs/src/operator_conf.md | 1 + docs/src/release_notes/v1.26.md | 5 +++++ internal/configuration/configuration.go | 25 +++++++++++++++++-------- 7 files changed, 41 insertions(+), 18 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 35211d21eb..eca5fe59fa 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -161,6 +161,7 @@ ExtensionSpec ExtensionStatus ExternalCluster FQDN +FQDNs Fei Filesystem Fluentd @@ -213,6 +214,7 @@ KinD Krew KubeCon Kubegres +KubernetesClusterDomain Kumar LDAP LDAPBindAsAuth @@ -910,6 +912,7 @@ kubebuilder kubectl kubelet kubernetes +kubernetesClusterDomain labelColumnName labelColumnValue labelName diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 340ce9e740..9e493829bc 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -1042,7 +1042,7 @@ func (cluster *Cluster) GetClusterAltDNSNames() []string { serviceName, fmt.Sprintf("%v.%v", serviceName, cluster.Namespace), fmt.Sprintf("%v.%v.svc", serviceName, cluster.Namespace), - fmt.Sprintf("%v.%v.svc.cluster.local", serviceName, cluster.Namespace), + fmt.Sprintf("%v.%v.svc.%s", serviceName, cluster.Namespace, configuration.Current.KubernetesClusterDomain), } } altDNSNames := slices.Concat( diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index fd2d93e5c1..1c31cde69f 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -461,12 +461,12 @@ var _ = Describe("look up for secrets", Ordered, func() { } // assertServiceNamesPresent returns the first missing service name encountered - 
assertServiceNamesPresent := func(data *stringset.Data, serviceName string) string { + assertServiceNamesPresent := func(data *stringset.Data, serviceName string, clusterDomain string) string { assertions := []string{ serviceName, fmt.Sprintf("%v.%v", serviceName, cluster.Namespace), fmt.Sprintf("%v.%v.svc", serviceName, cluster.Namespace), - fmt.Sprintf("%v.%v.svc.cluster.local", serviceName, cluster.Namespace), + fmt.Sprintf("%v.%v.svc.%s", serviceName, cluster.Namespace, clusterDomain), } for _, assertion := range assertions { if !data.Has(assertion) { @@ -498,11 +498,11 @@ var _ = Describe("look up for secrets", Ordered, func() { Expect(names).To(HaveLen(12)) namesSet := stringset.From(names) Expect(namesSet.Len()).To(Equal(12)) - Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadWriteName())).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadWriteName(), "cluster.local")).To(BeEmpty(), "missing service name") - Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadName())).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadName(), "cluster.local")).To(BeEmpty(), "missing service name") - Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadOnlyName())).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, cluster.GetServiceReadOnlyName(), "cluster.local")).To(BeEmpty(), "missing service name") }) @@ -521,9 +521,9 @@ var _ = Describe("look up for secrets", Ordered, func() { It("should generate correctly the managed services names", func() { namesSet := stringset.From(cluster.GetClusterAltDNSNames()) Expect(namesSet.Len()).To(Equal(20)) - Expect(assertServiceNamesPresent(namesSet, "one")).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, "one", "cluster.local")).To(BeEmpty(), "missing service name") - Expect(assertServiceNamesPresent(namesSet, "two")).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, "two", "cluster.local")).To(BeEmpty(), "missing service name") }) @@ -536,9 +536,9 @@ var _ = Describe("look up for secrets", Ordered, func() { Expect(namesSet.Len()).To(Equal(12)) Expect(namesSet.Has(cluster.GetServiceReadName())).To(BeFalse()) Expect(namesSet.Has(cluster.GetServiceReadOnlyName())).To(BeFalse()) - Expect(assertServiceNamesPresent(namesSet, "one")).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, "one", "cluster.local")).To(BeEmpty(), "missing service name") - Expect(assertServiceNamesPresent(namesSet, "two")).To(BeEmpty(), + Expect(assertServiceNamesPresent(namesSet, "two", "cluster.local")).To(BeEmpty(), "missing service name") }) }) diff --git a/docs/src/certificates.md b/docs/src/certificates.md index c9cc2eb95c..70108140b8 100644 --- a/docs/src/certificates.md +++ b/docs/src/certificates.md @@ -50,6 +50,11 @@ expiration (within a 90-day validity period). certificates not controlled by CloudNativePG must be re-issued following the renewal process. +When generating certificates, the operator assumes that the Kubernetes +cluster's DNS zone is set to `cluster.local` by default. This behavior can be +customized by setting the `KUBERNETES_CLUSTER_DOMAIN` environment variable. A +convenient alternative is to use the [operator's configuration capability](operator_conf.md). 
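As a minimal sketch of what this option changes, assuming a hypothetical `serviceFQDN` helper: the operator composes service FQDNs the same way `GetClusterAltDNSNames` does in the hunk above, substituting the configured domain for the default `cluster.local`. Only the `fmt.Sprintf` pattern is taken from the patch; the helper name and fallback logic are illustrative.

```go
package main

import "fmt"

// serviceFQDN is a hypothetical helper mirroring how GetClusterAltDNSNames
// composes a service FQDN from the configured Kubernetes cluster domain.
func serviceFQDN(serviceName, namespace, clusterDomain string) string {
	// The operator falls back to "cluster.local" when
	// KUBERNETES_CLUSTER_DOMAIN is left unset.
	if clusterDomain == "" {
		clusterDomain = "cluster.local"
	}
	return fmt.Sprintf("%v.%v.svc.%s", serviceName, namespace, clusterDomain)
}

func main() {
	// Default behavior, equivalent to the previously hardcoded suffix:
	fmt.Println(serviceFQDN("cluster-example-rw", "default", ""))
	// -> cluster-example-rw.default.svc.cluster.local

	// With KUBERNETES_CLUSTER_DOMAIN=company.internal set on the operator:
	fmt.Println(serviceFQDN("cluster-example-rw", "default", "company.internal"))
	// -> cluster-example-rw.default.svc.company.internal
}
```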
+ ### Server certificates #### Server CA secret diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index 70c47c845f..c5895b2e25 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -45,6 +45,7 @@ Name | Description `INHERITED_ANNOTATIONS` | List of annotation names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods `INHERITED_LABELS` | List of label names that, when defined in a `Cluster` metadata, will be inherited by all the generated resources, including pods `INSTANCES_ROLLOUT_DELAY` | The duration (in seconds) to wait between roll-outs of individual PostgreSQL instances within the same cluster during an operator upgrade. The default value is `0`, meaning no delay between upgrades of instances in the same PostgreSQL cluster. +`KUBERNETES_CLUSTER_DOMAIN` | Defines the domain suffix for service FQDNs within the Kubernetes cluster. If left unset, it defaults to "cluster.local". `MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters `MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters `PULL_SECRET_NAME` | Name of an additional pull secret to be defined in the operator's namespace and to be used to download images diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 17f9f6b6af..37949438dc 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -39,6 +39,11 @@ on the release branch in GitHub. which, if specified, sets the `tcp_user_timeout` parameter on all standby instances managed by the operator. +- Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator, + allowing users to specify the domain suffix for fully qualified domain names + (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to + `cluster.local`. (#6989) + - feat: support customizable pod patches via annotations (#6323) - `cnpg` plugin updates: diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go index 6e2bc703dc..56f00fa2aa 100644 --- a/internal/configuration/configuration.go +++ b/internal/configuration/configuration.go @@ -40,6 +40,10 @@ const ( // ExpiringCheckThreshold is the default threshold to consider a certificate as expiring ExpiringCheckThreshold = 7 + + // DefaultKubernetesClusterDomain is the default value used as + // Kubernetes cluster domain. + DefaultKubernetesClusterDomain = "cluster.local" ) // DefaultPluginSocketDir is the default directory where the plugin sockets are located. @@ -133,6 +137,10 @@ type Data struct { // string, which is used by the standby server to connect to the // primary server in CloudNativePG. StandbyTCPUserTimeout int `json:"standbyTcpUserTimeout" env:"STANDBY_TCP_USER_TIMEOUT"` + + // KubernetesClusterDomain defines the domain suffix for service FQDNs + // within the Kubernetes cluster. If left unset, it defaults to `cluster.local`. 
+ KubernetesClusterDomain string `json:"kubernetesClusterDomain" env:"KUBERNETES_CLUSTER_DOMAIN"` } // Current is the configuration used by the operator @@ -141,14 +149,15 @@ var Current = NewConfiguration() // newDefaultConfig creates a configuration holding the defaults func newDefaultConfig() *Data { return &Data{ - OperatorPullSecretName: DefaultOperatorPullSecretName, - OperatorImageName: versions.DefaultOperatorImageName, - PostgresImageName: versions.DefaultImageName, - PluginSocketDir: DefaultPluginSocketDir, - CreateAnyService: false, - CertificateDuration: CertificateDuration, - ExpiringCheckThreshold: ExpiringCheckThreshold, - StandbyTCPUserTimeout: 0, + OperatorPullSecretName: DefaultOperatorPullSecretName, + OperatorImageName: versions.DefaultOperatorImageName, + PostgresImageName: versions.DefaultImageName, + PluginSocketDir: DefaultPluginSocketDir, + CreateAnyService: false, + CertificateDuration: CertificateDuration, + ExpiringCheckThreshold: ExpiringCheckThreshold, + StandbyTCPUserTimeout: 0, + KubernetesClusterDomain: DefaultKubernetesClusterDomain, } } From 1bc8265a4e0c11c0b7819d426f9df9ce2c9d25ae Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 20 Mar 2025 16:39:23 +0100 Subject: [PATCH 461/836] chore(webhook): warn when `shared_buffers` lacks a unit qualifier (#7160) Closes #6657 Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- internal/webhook/v1/cluster_webhook.go | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index c8640d760c..6b46f63d59 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -2324,7 +2324,24 @@ func (v *ClusterCustomValidator) validatePgFailoverSlots(r *apiv1.Cluster) field } func (v *ClusterCustomValidator) getAdmissionWarnings(r *apiv1.Cluster) admission.Warnings { - return getMaintenanceWindowsAdmissionWarnings(r) + list := getMaintenanceWindowsAdmissionWarnings(r) + return append(list, getSharedBuffersWarnings(r)...) +} + +func getSharedBuffersWarnings(r *apiv1.Cluster) admission.Warnings { + var result admission.Warnings + + if v := r.Spec.PostgresConfiguration.Parameters["shared_buffers"]; v != "" { + if _, err := strconv.Atoi(v); err == nil { + result = append( + result, + fmt.Sprintf("`shared_buffers` value '%s' is missing a unit (e.g., MB, GB). "+ + "While this is currently allowed, future releases will require an explicit unit. "+ + "Please update your configuration to specify a valid unit, such as '%sMB'.", v, v), + ) + } + } + return result } func getMaintenanceWindowsAdmissionWarnings(r *apiv1.Cluster) admission.Warnings { From 857109bf330362be7c7aa4ff95765668862cd69a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 17:32:15 +0100 Subject: [PATCH 462/836] chore(deps): update dependency golangci/golangci-lint to v1.64.8 (main) (#7172) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Update | Change | |---|---|---| | [golangci/golangci-lint](https://redirect.github.com/golangci/golangci-lint) | patch | `1.64.7` -> `1.64.8` | --- ### Release Notes
golangci/golangci-lint (golangci/golangci-lint) ### [`v1.64.8`](https://redirect.github.com/golangci/golangci-lint/blob/HEAD/CHANGELOG.md#v1648) [Compare Source](https://redirect.github.com/golangci/golangci-lint/compare/v1.64.7...v1.64.8) - Detects use of configuration files from golangci-lint v2
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 36cd7ebb67..9eef899069 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -17,7 +17,7 @@ on: # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.24.x" - GOLANGCI_LINT_VERSION: "v1.64.7" + GOLANGCI_LINT_VERSION: "v1.64.8" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.27.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" From efd28ad3a1c982ef758a3968d4f265f84cde3e73 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 21 Mar 2025 00:09:08 +0100 Subject: [PATCH 463/836] fix(log): use the correct variable when logging mostAdvancedInstanceName (#7177) Signed-off-by: Marco Nenciarini --- internal/controller/cluster_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 8149bd1d24..036d4e4bd2 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -459,7 +459,7 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste // we need to wait for it to be refreshed contextLogger.Info( "Waiting for the Kubelet to refresh the readiness probe", - "mostAdvancedInstanceName", mostAdvancedInstance.Node, + "mostAdvancedInstanceName", mostAdvancedInstance.Pod.Name, "hasHTTPStatus", hasHTTPStatus, "isPodReady", isPodReady) return ctrl.Result{RequeueAfter: 1 * time.Second}, nil From 135a6f9ed90603485d80013fdb8c96a9de3756e8 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 21 Mar 2025 14:36:15 +0100 Subject: [PATCH 464/836] chore(Makefile): remove targets for `kind` clusters (#7186) Now everything can be done directly through the `hack/setup.sh` script. Closes #7185 Signed-off-by: Gabriele Bartolini --- Makefile | 12 ------------ contribute/development_environment/README.md | 6 ++++-- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 8410a26ccf..edd48bf9bd 100644 --- a/Makefile +++ b/Makefile @@ -228,10 +228,6 @@ manifests: controller-gen ## Generate manifests e.g. CRD, RBAC etc. generate: controller-gen ## Generate code. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 
-deploy-locally: kind-cluster ## Build and deploy operator in local cluster - set -e ;\ - hack/setup-cluster.sh -n1 load deploy - olm-scorecard: operator-sdk ## Run the Scorecard test from operator-sdk $(OPERATOR_SDK) scorecard ${BUNDLE_IMG} --wait-time 60s --verbose @@ -350,14 +346,6 @@ GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\ } endef -kind-cluster: ## Create KinD cluster to run operator locally - set -e ;\ - hack/setup-cluster.sh -n1 create - -kind-cluster-destroy: ## Destroy KinD cluster created using kind-cluster command - set -e ;\ - hack/setup-cluster.sh -n1 destroy - .PHONY: operator-sdk OPERATOR_SDK = $(LOCALBIN)/operator-sdk operator-sdk: ## Install the operator-sdk app diff --git a/contribute/development_environment/README.md b/contribute/development_environment/README.md index 309081835a..988ac98290 100644 --- a/contribute/development_environment/README.md +++ b/contribute/development_environment/README.md @@ -180,13 +180,15 @@ build and deploy: ```shell cd cloudnative-pg git checkout main -make deploy-locally +./hack/setup.sh create load deploy ``` This will build the operator based on the `main` branch content, create a `kind` cluster in your workstation with a container registry that provides the operator image that you just built. +*Note:* For a list of options, run `./hack/setup-cluster.sh`. + > **NOTE:** In case of errors, make sure that you have the latest versions of the Go > binaries in your system. For this reason, from time to time, we recommend > you running: `make distclean`. @@ -202,7 +204,7 @@ kubectl get deploy -n cnpg-system cnpg-controller-manager Now that your system has been validated, you can tear down the local cluster with: ```shell -make kind-cluster-destroy +./hack/setup.sh destroy ``` Congratulations, you have a suitable development environment. You are now able From ae3735aade3ccbffc57a03aa8a195e5b68698d23 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 21 Mar 2025 14:48:01 +0100 Subject: [PATCH 465/836] fix: consider plugin changes while evaluating instance rollout (#7126) Introduce the `EVALUATE` verb to enable a plugin to trigger a rollout if the desired Pod specification differs from the current one. 
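As a rough, self-contained sketch of the comparison this enables (a simplified stand-in for the `checkPodSpecIsOutdated` changes further down; the annotation key and the use of `reflect.DeepEqual` in place of `specs.ComparePodSpecs` are assumptions made for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"

	corev1 "k8s.io/api/core/v1"
)

// needsRollout is a simplified stand-in for checkPodSpecIsOutdated: it
// decodes the PodSpec stored in an annotation on the running pod and
// reports whether it differs from the desired (plugin-aware) target spec.
func needsRollout(current *corev1.Pod, target corev1.PodSpec, annotationKey string) (bool, error) {
	stored, ok := current.Annotations[annotationKey]
	if !ok {
		// No stored spec to compare against: no rollout decision here.
		return false, nil
	}

	var storedSpec corev1.PodSpec
	if err := json.Unmarshal([]byte(stored), &storedSpec); err != nil {
		return false, fmt.Errorf("while unmarshaling the pod spec annotation: %w", err)
	}

	// The real code uses specs.ComparePodSpecs, which also reports which
	// field drifted; reflect.DeepEqual is a crude substitute here.
	return !reflect.DeepEqual(storedSpec, target), nil
}

func main() {
	pod := &corev1.Pod{}
	pod.Annotations = map[string]string{"example.io/podSpec": `{}`}

	rollout, err := needsRollout(pod, corev1.PodSpec{}, "example.io/podSpec")
	if err != nil {
		panic(err)
	}
	// An empty stored spec matches an empty target spec: no rollout.
	fmt.Println("rollout required:", rollout)
}
```

Storing the rendered spec in an annotation lets the operator detect drift, including plugin-injected changes, without having to recompute what older operator versions would have generated.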
Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- go.mod | 2 +- go.sum | 4 +- internal/cnpi/plugin/mapping.go | 15 +- internal/controller/cluster_create.go | 5 +- internal/controller/cluster_upgrade.go | 65 +++++--- internal/controller/cluster_upgrade_test.go | 167 ++++++++++++++++---- internal/controller/suite_test.go | 2 +- internal/webhook/v1/cluster_webhook.go | 7 +- pkg/specs/pg_pods_test.go | 4 +- pkg/specs/pods.go | 67 ++++++-- pkg/specs/pods_test.go | 10 +- pkg/specs/podspec_diff.go | 18 ++- pkg/utils/labels_annotations.go | 17 ++ 13 files changed, 299 insertions(+), 84 deletions(-) diff --git a/go.mod b/go.mod index 3089204aa1..5b7c8218b0 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.1.0 - github.com/cloudnative-pg/cnpg-i v0.1.0 + github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb github.com/cloudnative-pg/machinery v0.1.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 diff --git a/go.sum b/go.sum index b502924267..c332c3c35a 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.1.0 h1:e/z52CehMBIh1LjZqNBJnncWJbS+1JYvRMBR8Js6Uiw= github.com/cloudnative-pg/barman-cloud v0.1.0/go.mod h1:rJUJO/f1yNckLZiVxHAyRmKY+4EPJkYRJsGbTZRJQSY= -github.com/cloudnative-pg/cnpg-i v0.1.0 h1:QH2xTsrODMhEEc6B25GbOYe7ZIttDmSkYvXotfU5dfs= -github.com/cloudnative-pg/cnpg-i v0.1.0/go.mod h1:G28BhgUEHqrxEyyQeHz8BbpMVAsGuLhJm/tHUbDi8Sw= +github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb h1:FPORwCxjZwlnKnF7dOkuOAz0GBSQ3Hrn+8lm4uMiWeM= +github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb/go.mod h1:n+kbHm3rzRCY5IJKuE1tGMbG6JaeYz8yycYoLt7BeKo= github.com/cloudnative-pg/machinery v0.1.0 h1:tjRmsqQmsO/OlaT0uFmkEtVqgr+SGPM88cKZOHYKLBo= github.com/cloudnative-pg/machinery v0.1.0/go.mod h1:0V3vm44FaIsY+x4pm8ORry7xCC3AJiO+ebfPNxeP5Ck= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= diff --git a/internal/cnpi/plugin/mapping.go b/internal/cnpi/plugin/mapping.go index 7fc5b67613..7b9373969b 100644 --- a/internal/cnpi/plugin/mapping.go +++ b/internal/cnpi/plugin/mapping.go @@ -22,15 +22,16 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" ) -// The OperationVerb corresponds to the Kubernetes API method +// The OperationVerb corresponds to the CNPG-I lifecycle operation verb type OperationVerb string -// A Kubernetes operation verb +// A lifecycle operation verb const ( - OperationVerbPatch OperationVerb = "PATCH" - OperationVerbUpdate OperationVerb = "UPDATE" - OperationVerbCreate OperationVerb = "CREATE" - OperationVerbDelete OperationVerb = "DELETE" + OperationVerbPatch OperationVerb = "PATCH" + OperationVerbUpdate OperationVerb = "UPDATE" + OperationVerbCreate OperationVerb = "CREATE" + OperationVerbDelete OperationVerb = "DELETE" + OperationVerbEvaluate OperationVerb = "EVALUATE" ) // ToOperationType_Type converts an OperationVerb into a lifecycle.OperationType_Type @@ -45,6 +46,8 @@ func (o OperationVerb) ToOperationType_Type() (lifecycle.OperatorOperationType_T return lifecycle.OperatorOperationType_TYPE_CREATE, nil case OperationVerbUpdate: return 
lifecycle.OperatorOperationType_TYPE_UPDATE, nil + case OperationVerbEvaluate: + return lifecycle.OperatorOperationType_TYPE_EVALUATE, nil } return lifecycle.OperatorOperationType_Type(0), fmt.Errorf("unknown operation type: '%s'", o) diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 0eda7e8002..4feef214ce 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1307,7 +1307,7 @@ func (r *ClusterReconciler) ensureInstancesAreCreated( ) (ctrl.Result, error) { contextLogger := log.FromContext(ctx) - instanceToCreate, err := findInstancePodToCreate(cluster, instancesStatus, resources.pvcs.Items) + instanceToCreate, err := findInstancePodToCreate(ctx, cluster, instancesStatus, resources.pvcs.Items) if err != nil { return ctrl.Result{}, err } @@ -1394,6 +1394,7 @@ func (r *ClusterReconciler) ensureInstancesAreCreated( // we elect a current instance that doesn't exist for creation func findInstancePodToCreate( + ctx context.Context, cluster *apiv1.Cluster, instancesStatus postgres.PostgresqlStatusList, pvcs []corev1.PersistentVolumeClaim, @@ -1440,7 +1441,7 @@ func findInstancePodToCreate( if err != nil { return nil, err } - return specs.PodWithExistingStorage(*cluster, serial) + return specs.NewInstance(ctx, *cluster, serial, true) } return nil, nil diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go index b34ad064ed..3fc56d42b1 100644 --- a/internal/controller/cluster_upgrade.go +++ b/internal/controller/cluster_upgrade.go @@ -276,6 +276,7 @@ type rollout struct { } type rolloutChecker func( + ctx context.Context, pod *corev1.Pod, cluster *apiv1.Cluster, ) (rollout, error) @@ -335,7 +336,7 @@ func isPodNeedingRollout( contextLogger := log.FromContext(ctx) applyCheckers := func(checkers map[string]rolloutChecker) rollout { for message, check := range checkers { - podRollout, err := check(pod, cluster) + podRollout, err := check(ctx, pod, cluster) if err != nil { contextLogger.Error(err, "while checking if pod needs rollout") continue @@ -380,10 +381,10 @@ func isPodNeedingRollout( // These checks are subsumed by the PodSpec checker checkers = map[string]rolloutChecker{ - "pod environment is outdated": checkPodEnvironmentIsOutdated, - "pod scheduler is outdated": checkSchedulerIsOutdated, - "pod needs updated topology": checkPodNeedsUpdatedTopology, - "pod init container is outdated": checkPodInitContainerIsOutdated, + "pod environment is outdated": checkPodEnvironmentIsOutdated, + "pod scheduler is outdated": checkSchedulerIsOutdated, + "pod needs updated topology": checkPodNeedsUpdatedTopology, + "pod bootstrap container is outdated": checkPodBootstrapImage, } podRollout = applyCheckers(checkers) if podRollout.required { @@ -403,7 +404,7 @@ func hasValidPodSpec(pod *corev1.Pod) bool { return err == nil } -func checkHasResizingPVC(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkHasResizingPVC(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { if configuration.Current.EnableAzurePVCUpdates { for _, pvcName := range cluster.Status.ResizingPVC { // This code works on the assumption that the PVC begins with the name of the pod using it. 
@@ -418,7 +419,7 @@ func checkHasResizingPVC(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, erro return rollout{}, nil } -func checkPodNeedsUpdatedTopology(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkPodNeedsUpdatedTopology(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { if reflect.DeepEqual(cluster.Spec.TopologySpreadConstraints, pod.Spec.TopologySpreadConstraints) { return rollout{}, nil } @@ -432,7 +433,7 @@ func checkPodNeedsUpdatedTopology(pod *corev1.Pod, cluster *apiv1.Cluster) (roll }, nil } -func checkSchedulerIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkSchedulerIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { if cluster.Spec.SchedulerName == "" || cluster.Spec.SchedulerName == pod.Spec.SchedulerName { return rollout{}, nil } @@ -447,7 +448,7 @@ func checkSchedulerIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, }, nil } -func checkProjectedVolumeIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkProjectedVolumeIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { isNilOrZero := func(vs *corev1.ProjectedVolumeSource) bool { return vs == nil || len(vs.Sources) == 0 } @@ -490,7 +491,7 @@ func getProjectedVolumeConfigurationFromPod(pod corev1.Pod) *corev1.ProjectedVol return nil } -func checkPodImageIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkPodImageIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { targetImageName := cluster.GetImageName() pgCurrentImageName, err := specs.GetPostgresImageName(*pod) @@ -510,7 +511,7 @@ func checkPodImageIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, }, nil } -func checkPodInitContainerIsOutdated(pod *corev1.Pod, _ *apiv1.Cluster) (rollout, error) { +func checkPodBootstrapImage(_ context.Context, pod *corev1.Pod, _ *apiv1.Cluster) (rollout, error) { if configuration.Current.EnableInstanceManagerInplaceUpdates { return rollout{}, nil } @@ -527,13 +528,13 @@ func checkPodInitContainerIsOutdated(pod *corev1.Pod, _ *apiv1.Cluster) (rollout // We need to apply a different version of the instance manager return rollout{ required: true, - reason: fmt.Sprintf("the instance is using an old init container image: %s -> %s", + reason: fmt.Sprintf("the instance is using an old bootstrap container image: %s -> %s", opCurrentImageName, configuration.Current.OperatorImageName), needsChangeOperatorImage: true, }, nil } -func checkHasMissingPVCs(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkHasMissingPVCs(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { if persistentvolumeclaim.InstanceHasMissingMounts(cluster, pod) { return rollout{ required: true, @@ -544,7 +545,11 @@ func checkHasMissingPVCs(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, erro return rollout{}, nil } -func checkClusterHasDifferentRestartAnnotation(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkClusterHasDifferentRestartAnnotation( + _ context.Context, + pod *corev1.Pod, + cluster *apiv1.Cluster, +) (rollout, error) { // If the pod restart value doesn't match with the one contained in the cluster, restart the pod. 
if clusterRestart, ok := cluster.Annotations[utils.ClusterRestartAnnotationName]; ok { podRestart := pod.Annotations[utils.ClusterRestartAnnotationName] @@ -560,7 +565,9 @@ func checkClusterHasDifferentRestartAnnotation(pod *corev1.Pod, cluster *apiv1.C return rollout{}, nil } -func checkPodEnvironmentIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +// checkPodEnvironmentIsOutdated checks if the environment variables in the pod have changed. +// Deprecated: this function doesn't take into account plugin changes, use PodSpec annotation. +func checkPodEnvironmentIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { // Check if there is a change in the environment section envConfig := specs.CreatePodEnvConfig(*cluster, pod.Name) @@ -605,7 +612,11 @@ func checkPodEnvironmentIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rol return rollout{}, nil } -func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { +func checkPodSpecIsOutdated( + ctx context.Context, + pod *corev1.Pod, + cluster *apiv1.Cluster, +) (rollout, error) { podSpecAnnotation, ok := pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName] if !ok { return rollout{}, nil @@ -616,10 +627,18 @@ func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, e if err != nil { return rollout{}, fmt.Errorf("while unmarshaling the pod resources annotation: %w", err) } - envConfig := specs.CreatePodEnvConfig(*cluster, pod.Name) - gracePeriod := int64(cluster.GetMaxStopDelay()) + tlsEnabled := remote.GetStatusSchemeFromPod(pod).IsHTTPS() - targetPodSpec := specs.CreateClusterPodSpec(pod.Name, *cluster, envConfig, gracePeriod, tlsEnabled) + + serial, err := utils.GetClusterSerialValue(pod.Annotations) + if err != nil { + return rollout{}, fmt.Errorf("while getting the pod serial value: %w", err) + } + + targetPod, err := specs.NewInstance(ctx, *cluster, serial, tlsEnabled) + if err != nil { + return rollout{}, fmt.Errorf("while creating a new pod to check podSpec: %w", err) + } // the bootstrap init-container could change image after an operator upgrade. // If in-place upgrades of the instance manager are enabled, we don't need rollout. @@ -631,17 +650,13 @@ func checkPodSpecIsOutdated(pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, e !configuration.Current.EnableInstanceManagerInplaceUpdates { return rollout{ required: true, - reason: fmt.Sprintf("the instance is using an old init container image: %s -> %s", + reason: fmt.Sprintf("the instance is using an old bootstrap container image: %s -> %s", opCurrentImageName, configuration.Current.OperatorImageName), needsChangeOperatorImage: true, }, nil } - // from here we don't care about drift in the init containers: avoid checking them - storedPodSpec.InitContainers = nil - targetPodSpec.InitContainers = nil - - match, diff := specs.ComparePodSpecs(storedPodSpec, targetPodSpec) + match, diff := specs.ComparePodSpecs(storedPodSpec, targetPod.Spec) if !match { return rollout{ required: true, diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index 106cadca2e..9c0fea0e27 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -17,13 +17,17 @@ limitations under the License. 
package controller import ( + "context" "encoding/json" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8client "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" @@ -57,7 +61,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("will not require a restart for just created Pods", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ @@ -72,7 +76,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires rollout when running a different image name", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) pod.Spec.Containers[0].Image = "postgres:13.10" @@ -89,7 +93,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires rollout when a restart annotation has been added to the cluster", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) clusterRestart := cluster clusterRestart.Annotations = make(map[string]string) @@ -114,7 +118,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("should prioritize full rollout over inplace restarts", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ @@ -145,7 +149,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires rollout when PostgreSQL needs to be restarted", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ @@ -172,7 +176,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("requires pod rollout if executable does not have a hash", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -188,9 +192,9 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("checkPodSpecIsOutdated should not return any error", func() { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(context.TODO(), cluster, 1, true) Expect(err).ToNot(HaveOccurred()) - rollout, err := checkPodSpecIsOutdated(pod, &cluster) + rollout, err := checkPodSpecIsOutdated(context.TODO(), pod, &cluster) Expect(rollout.required).To(BeFalse()) Expect(rollout.canBeInPlace).To(BeFalse()) Expect(rollout.reason).To(BeEmpty()) @@ -198,7 +202,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { }) It("checks when a rollout is needed for any reason", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := 
postgres.PostgresqlStatus{ Pod: pod, @@ -225,7 +229,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { When("the PodSpec annotation is not available", func() { It("should trigger a rollout when the scheduler changes", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" delete(pod.Annotations, utils.PodSpecAnnotationName) @@ -251,7 +255,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" @@ -283,7 +287,12 @@ var _ = Describe("Pod upgrade", Ordered, func() { }, } It("should trigger a rollout when the cluster has a Resource changed", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(clusterWithResources, 1) + pod, err := specs.NewInstance( + context.TODO(), + clusterWithResources, + 1, + true, + ) Expect(err).ToNot(HaveOccurred()) clusterWithResources.Spec.Resources.Limits["cpu"] = resource.MustParse("3") // was "2" @@ -302,7 +311,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { Expect(rollout.needsChangeOperatorImage).To(BeFalse()) }) It("should trigger a rollout when the cluster has Resources deleted from spec", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(clusterWithResources, 1) + pod, err := specs.NewInstance(context.TODO(), clusterWithResources, 1, true) Expect(err).ToNot(HaveOccurred()) clusterWithResources.Spec.Resources = corev1.ResourceRequirements{} @@ -324,7 +333,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { When("the PodSpec annotation is not available", func() { It("detects when a new custom environment variable is set", func(ctx SpecContext) { - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) @@ -355,7 +364,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) @@ -380,7 +389,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) delete(pod.Annotations, utils.PodSpecAnnotationName) @@ -395,7 +404,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { configuration.Current.OperatorImageName = newOperatorImage configuration.Current.EnableInstanceManagerInplaceUpdates = false rollout := isInstanceNeedingRollout(ctx, status, &cluster) - Expect(rollout.reason).To(ContainSubstring("the instance is using an old init container image")) + Expect(rollout.reason).To(ContainSubstring("the instance is using an old bootstrap container image")) Expect(rollout.required).To(BeTrue()) Expect(rollout.needsChangeOperandImage).To(BeFalse()) Expect(rollout.needsChangeOperatorImage).To(BeTrue()) @@ -404,7 +413,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { When("the podSpec annotation is available", func() { It("detects when a new custom environment variable is set", func(ctx SpecContext) { - pod, err := 
specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) cluster := cluster.DeepCopy() @@ -435,7 +444,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ @@ -459,7 +468,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { ImageName: "postgres:13.11", }, } - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ @@ -473,7 +482,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { configuration.Current.OperatorImageName = newOperatorImage configuration.Current.EnableInstanceManagerInplaceUpdates = false rollout := isInstanceNeedingRollout(ctx, status, &cluster) - Expect(rollout.reason).To(ContainSubstring("the instance is using an old init container image")) + Expect(rollout.reason).To(ContainSubstring("the instance is using an old bootstrap container image")) Expect(rollout.required).To(BeTrue()) Expect(rollout.needsChangeOperandImage).To(BeFalse()) Expect(rollout.needsChangeOperatorImage).To(BeTrue()) @@ -486,7 +495,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { cluster.Spec.ProjectedVolumeTemplate = &corev1.ProjectedVolumeSource{ Sources: []corev1.VolumeProjection{}, } - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -504,7 +513,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { cluster.Spec.ProjectedVolumeTemplate = &corev1.ProjectedVolumeSource{ Sources: nil, } - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -520,7 +529,7 @@ var _ = Describe("Pod upgrade", Ordered, func() { It("should not require rollout if projected volume is nil", func(ctx SpecContext) { cluster.Spec.ProjectedVolumeTemplate = nil - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) status := postgres.PostgresqlStatus{ Pod: pod, @@ -554,7 +563,7 @@ var _ = Describe("Test pod rollout due to topology", func() { }, } var err error - pod, err = specs.PodWithExistingStorage(*cluster, 1) + pod, err = specs.NewInstance(context.TODO(), *cluster, 1, true) Expect(err).ToNot(HaveOccurred()) }) @@ -621,7 +630,7 @@ var _ = Describe("Test pod rollout due to topology", func() { It("should not require rollout if pod and spec both lack TopologySpreadConstraints", func(ctx SpecContext) { cluster.Spec.TopologySpreadConstraints = nil var err error - pod, err = specs.PodWithExistingStorage(*cluster, 1) + pod, err = specs.NewInstance(context.TODO(), *cluster, 1, true) Expect(err).ToNot(HaveOccurred()) Expect(pod.Spec.TopologySpreadConstraints).To(BeNil()) @@ -781,7 +790,7 @@ var _ = Describe("Cluster upgrade with podSpec reconciliation disabled", func() It("skips the rollout if the annotation that disables PodSpec reconciliation is set", func(ctx SpecContext) { cluster.ObjectMeta.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" - pod, err := specs.PodWithExistingStorage(cluster, 1) + pod, err := specs.NewInstance(ctx, cluster, 1, 
true) Expect(err).ToNot(HaveOccurred()) cluster.Spec.SchedulerName = "newScheduler" delete(pod.Annotations, utils.PodSpecAnnotationName) @@ -799,3 +808,107 @@ var _ = Describe("Cluster upgrade with podSpec reconciliation disabled", func() Expect(rollout.reason).To(BeEmpty()) }) }) + +type fakePluginClientRollout struct { + pluginClient.Client + returnedPod *corev1.Pod + returnedError error +} + +func (f fakePluginClientRollout) LifecycleHook( + _ context.Context, + _ plugin.OperationVerb, + _ k8client.Object, + _ k8client.Object, +) (k8client.Object, error) { + return f.returnedPod, f.returnedError +} + +var _ = Describe("checkPodSpec with plugins", Ordered, func() { + var cluster apiv1.Cluster + + BeforeEach(func() { + cluster = apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:13.11", + }, + } + configuration.Current = configuration.NewConfiguration() + }) + + AfterAll(func() { + configuration.Current = configuration.NewConfiguration() + }) + + It("image change", func() { + pod, err := specs.NewInstance(context.TODO(), cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) + + podModifiedByPlugins := pod.DeepCopy() + + podModifiedByPlugins.Spec.Containers[0].Image = "postgres:19.0" + + pluginClient := fakePluginClientRollout{ + returnedPod: podModifiedByPlugins, + } + ctx := context.WithValue(context.TODO(), utils.PluginClientKey, pluginClient) + + rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(rollout.required).To(BeTrue()) + Expect(rollout.reason).To(Equal( + "original and target PodSpec differ in containers: container postgres differs in image")) + }) + + It("init-container change", func() { + pod, err := specs.NewInstance(context.TODO(), cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) + + podModifiedByPlugins := pod.DeepCopy() + + podModifiedByPlugins.Spec.InitContainers = []corev1.Container{ + { + Name: "new-init-container", + Image: "postgres:19.0", + }, + } + + pluginClient := fakePluginClientRollout{ + returnedPod: podModifiedByPlugins, + } + ctx := context.WithValue(context.TODO(), utils.PluginClientKey, pluginClient) + + rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(rollout.required).To(BeTrue()) + Expect(rollout.reason).To(Equal( + "original and target PodSpec differ in init-containers: container new-init-container has been added")) + }) + + It("environment variable change", func() { + pod, err := specs.NewInstance(context.TODO(), cluster, 1, true) + Expect(err).ToNot(HaveOccurred()) + + podModifiedByPlugins := pod.DeepCopy() + + podModifiedByPlugins.Spec.Containers[0].Env = append(podModifiedByPlugins.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: "NEW_ENV", + Value: "new_value", + }) + + pluginClient := fakePluginClientRollout{ + returnedPod: podModifiedByPlugins, + } + ctx := context.WithValue(context.TODO(), utils.PluginClientKey, pluginClient) + + rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(rollout.required).To(BeTrue()) + Expect(rollout.reason).To(Equal( + "original and target PodSpec differ in containers: container postgres differs in environment")) + }) +}) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 2a753bfb52..8da21a71f0 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -313,7 +313,7 @@ func generateFakeClusterPods( var pods []corev1.Pod 
for idx < cluster.Spec.Instances { idx++ - pod, _ := specs.PodWithExistingStorage(*cluster, idx) + pod, _ := specs.NewInstance(context.TODO(), *cluster, idx, true) cluster.SetInheritedDataAndOwnership(&pod.ObjectMeta) err := c.Create(context.Background(), pod) diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 6b46f63d59..23dc3a6faf 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -2392,7 +2392,12 @@ func (v *ClusterCustomValidator) validatePodPatchAnnotation(r *apiv1.Cluster) fi } } - if _, err := specs.PodWithExistingStorage(*r, 1); err != nil { + if _, err := specs.NewInstance( + context.Background(), + *r, + 1, + true, + ); err != nil { return field.ErrorList{ field.Invalid( field.NewPath("metadata", "annotations", utils.PodPatchAnnotationName), diff --git a/pkg/specs/pg_pods_test.go b/pkg/specs/pg_pods_test.go index 4fc732690c..4cf2022de3 100644 --- a/pkg/specs/pg_pods_test.go +++ b/pkg/specs/pg_pods_test.go @@ -17,6 +17,8 @@ limitations under the License. package specs import ( + "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -33,7 +35,7 @@ var _ = Describe("Extract the used image name", func() { Namespace: "default", }, } - pod, err := PodWithExistingStorage(cluster, 1) + pod, err := NewInstance(context.TODO(), cluster, 1, true) Expect(err).ToNot(HaveOccurred()) It("extract the default image name", func() { diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index ada3575d56..e9c308cb11 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -19,6 +19,7 @@ limitations under the License. package specs import ( + "context" "encoding/json" "fmt" "math" @@ -27,12 +28,15 @@ import ( "slices" "strconv" + "github.com/cloudnative-pg/machinery/pkg/log" jsonpatch "github.com/evanphx/json-patch/v5" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" + cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" @@ -169,8 +173,8 @@ func CreatePodEnvConfig(cluster apiv1.Cluster, podName string) EnvConfig { return config } -// CreateClusterPodSpec computes the PodSpec corresponding to a cluster -func CreateClusterPodSpec( +// createClusterPodSpec computes the PodSpec corresponding to a cluster +func createClusterPodSpec( podName string, cluster apiv1.Cluster, envConfig EnvConfig, @@ -443,15 +447,62 @@ func CreatePodSecurityContext(seccompProfile *corev1.SeccompProfile, user, group } } -// PodWithExistingStorage create a new instance with an existing storage -func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) (*corev1.Pod, error) { +// NewInstance creates a new instance Pod with the plugin patches applied +func NewInstance( + ctx context.Context, + cluster apiv1.Cluster, + nodeSerial int, + // tlsEnabled TODO: remove when we drop the support for the instances created without TLS + tlsEnabled bool, +) (*corev1.Pod, error) { + contextLogger := log.FromContext(ctx).WithName("new_instance") + + pod, err := buildInstance(cluster, nodeSerial, tlsEnabled) + if err != nil { + return nil, err + } + + defer func() { + if pod == nil { + return + 
} + if podSpecMarshaled, marshalErr := json.Marshal(pod.Spec); marshalErr == nil { + pod.Annotations[utils.PodSpecAnnotationName] = string(podSpecMarshaled) + } + }() + + pluginClient, ok := ctx.Value(utils.PluginClientKey).(cnpgiClient.Client) + if !ok || pluginClient == nil { + contextLogger.Trace("skipping NewInstance, cannot find the plugin client inside the context") + return pod, nil + } + + contextLogger.Trace("correctly loaded the plugin client for instance evaluation") + + podClientObject, err := pluginClient.LifecycleHook(ctx, plugin.OperationVerbEvaluate, &cluster, pod) + if err != nil { + return nil, fmt.Errorf("while invoking the lifecycle instance evaluation hook: %w", err) + } + + pod, ok = podClientObject.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("while casting the clientObject to the pod type") + } + + return pod, nil +} + +func buildInstance( + cluster apiv1.Cluster, + nodeSerial int, + tlsEnabled bool, +) (*corev1.Pod, error) { podName := GetInstanceName(cluster.Name, nodeSerial) gracePeriod := int64(cluster.GetMaxStopDelay()) envConfig := CreatePodEnvConfig(cluster, podName) - tlsEnabled := true - podSpec := CreateClusterPodSpec(podName, cluster, envConfig, gracePeriod, tlsEnabled) + podSpec := createClusterPodSpec(podName, cluster, envConfig, gracePeriod, tlsEnabled) pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -470,10 +521,6 @@ func PodWithExistingStorage(cluster apiv1.Cluster, nodeSerial int) (*corev1.Pod, Spec: podSpec, } - if podSpecMarshaled, err := json.Marshal(podSpec); err == nil { - pod.Annotations[utils.PodSpecAnnotationName] = string(podSpecMarshaled) - } - if cluster.Spec.PriorityClassName != "" { pod.Spec.PriorityClassName = cluster.Spec.PriorityClassName } diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go index de99a9a026..fd2e7e1bbd 100644 --- a/pkg/specs/pods_test.go +++ b/pkg/specs/pods_test.go @@ -928,8 +928,8 @@ var _ = Describe("Compute startup probe failure threshold", func() { }) }) -var _ = Describe("PodWithExistingStorage", func() { - It("applies JSON patch from annotation", func() { +var _ = Describe("NewInstance", func() { + It("applies JSON patch from annotation", func(ctx SpecContext) { cluster := v1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", @@ -940,13 +940,13 @@ var _ = Describe("PodWithExistingStorage", func() { }, } - pod, err := PodWithExistingStorage(cluster, 1) + pod, err := NewInstance(ctx, cluster, 1, true) Expect(err).NotTo(HaveOccurred()) Expect(pod).NotTo(BeNil()) Expect(pod.Spec.Containers[0].Image).To(Equal("new-image:latest")) }) - It("returns error if JSON patch is invalid", func() { + It("returns error if JSON patch is invalid", func(ctx SpecContext) { cluster := v1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", @@ -957,7 +957,7 @@ var _ = Describe("PodWithExistingStorage", func() { }, } - _, err := PodWithExistingStorage(cluster, 1) + _, err := NewInstance(ctx, cluster, 1, true) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("while decoding JSON patch from annotation")) }) diff --git a/pkg/specs/podspec_diff.go b/pkg/specs/podspec_diff.go index ecd02fbfe0..15ab552799 100644 --- a/pkg/specs/podspec_diff.go +++ b/pkg/specs/podspec_diff.go @@ -26,8 +26,6 @@ import ( // ComparePodSpecs compares two pod specs, returns true iff they are equivalent, and // if they are not, points out the first discrepancy. 
-// This function matches CreateClusterPodSpec, specifically it looks in more detail -// and ignores reordering of volume mounts and containers func ComparePodSpecs( currentPodSpec, targetPodSpec corev1.PodSpec, ) (bool, string) { @@ -39,7 +37,21 @@ func ComparePodSpecs( return compareContainers(currentPodSpec.Containers, targetPodSpec.Containers) }, "init-containers": func() (bool, string) { - return compareContainers(currentPodSpec.InitContainers, targetPodSpec.InitContainers) + extractContainersForComparison := func(passedContainers []corev1.Container) []corev1.Container { + var containers []corev1.Container + for _, container := range passedContainers { + if container.Name == BootstrapControllerContainerName { + // ignore the bootstrap controller init container. We handle it inside checkPodSpecIsOutdated. + continue + } + containers = append(containers, container) + } + return containers + } + return compareContainers( + extractContainersForComparison(currentPodSpec.InitContainers), + extractContainersForComparison(targetPodSpec.InitContainers), + ) }, } diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index 028ab14c3d..e8970cd561 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -17,7 +17,9 @@ limitations under the License. package utils import ( + "fmt" "reflect" + "strconv" "strings" corev1 "k8s.io/api/core/v1" @@ -500,3 +502,18 @@ func MergeObjectsMetadata(receiver client.Object, giver client.Object) { receiver.SetLabels(mergeMap(receiver.GetLabels(), giver.GetLabels())) receiver.SetAnnotations(mergeMap(receiver.GetAnnotations(), giver.GetAnnotations())) } + +// GetClusterSerialValue returns the `nodeSerial` value from the given annotation map or returns an error +func GetClusterSerialValue(annotations map[string]string) (int, error) { + rawSerial, ok := annotations[ClusterSerialAnnotationName] + if !ok { + return 0, fmt.Errorf("no serial annotation found") + } + + serial, err := strconv.Atoi(rawSerial) + if err != nil { + return 0, fmt.Errorf("invalid serial annotation found: %w", err) + } + + return serial, nil +} From 8e1861e4a60e567ae570c5c85296e15d228ccc55 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Fri, 21 Mar 2025 15:09:53 +0100 Subject: [PATCH 466/836] test: wait for tablespace owner change (#7142) Fix a failure in the e2e tests where the check for the tablespace being reconciled was performed on a cluster that had not yet updated its status.
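In short, the one-shot owner assertion is replaced with a poll, so the test tolerates a cluster status that lags behind the owner change. A minimal sketch of the pattern, using the helper introduced in the diff below:

    // Retry until the cluster status catches up with the owner change,
    // instead of asserting once against a possibly stale status.
    Eventually(func() (bool, error) {
        return hasTablespaceAndOwner(cluster, "anothertablespace", "alpha")
    }).WithTimeout(30 * time.Second).Should(BeTrue())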
Closes #7125 Signed-off-by: Francesco Canovai --- tests/e2e/tablespaces_test.go | 92 +++++++++++++++-------------------- 1 file changed, 40 insertions(+), 52 deletions(-) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index f3b21772c2..5a7929221b 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -89,6 +89,34 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, Expect(err).ToNot(HaveOccurred()) } + // Verify that the tablespace exists with the expected owner on the primary pod of a cluster + hasTablespaceAndOwner := func(cluster *apiv1.Cluster, tablespace, owner string) (bool, error) { + namespace := cluster.ObjectMeta.Namespace + clusterName := cluster.ObjectMeta.Name + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return false, err + } + result, stdErr, err := exec.QueryInInstancePod( + env.Ctx, env.Client, env.Interface, env.RestClientConfig, + exec.PodLocator{ + Namespace: namespace, + PodName: primaryPod.Name, + }, postgres.AppDBName, + fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_tablespace WHERE spcname = '%s' "+ + "AND pg_catalog.pg_get_userbyid(spcowner) = '%s'", + tablespace, + owner), + ) + if err != nil { + return false, err + } + if stdErr != "" { + return false, fmt.Errorf("error while checking tablespaces: %s", stdErr) + } + return result == "1\n", nil + } + Context("on a new cluster with tablespaces", Ordered, func() { var namespace, backupName string var err error @@ -131,19 +159,20 @@ AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.Short]) AssertRoleReconciled(namespace, clusterName, "dante", testTimeouts[timeouts.Short]) AssertRoleReconciled(namespace, clusterName, "alpha", testTimeouts[timeouts.Short]) - AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") - AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "dante") + Expect(hasTablespaceAndOwner(cluster, "atablespace", "app")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "anothertablespace", "dante")).To(BeTrue()) }) - It("can update the cluster by change the owner of tablesapce", func() { + It("can update the cluster by changing the owner of a tablespace", func() { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) updateTablespaceOwner(cluster, "anothertablespace", "alpha") cluster, err = clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) - AssertTablespaceReconciled(namespace, clusterName, "anothertablespace", testTimeouts[timeouts.Short]) - AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") + Eventually(func() (bool, error) { + return hasTablespaceAndOwner(cluster, "anothertablespace", "alpha") + }).WithTimeout(30 * time.Second).Should(BeTrue()) }) It("can update the cluster to set a tablespace as temporary", func() { @@ -228,9 +257,9 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, AssertClusterHasMountPointsAndVolumesForTablespaces(cluster, 3, testTimeouts[timeouts.PodRollout]) AssertClusterHasPvcsAndDataDirsForTablespaces(cluster, testTimeouts[timeouts.PodRollout]) AssertDatabaseContainsTablespaces(cluster, testTimeouts[timeouts.PodRollout]) - AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") - AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") - AssertTablespaceAndOwnerExist(cluster, "thirdtablespace", "dante") +
Expect(hasTablespaceAndOwner(cluster, "atablespace", "app")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "anothertablespace", "alpha")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "thirdtablespace", "dante")).To(BeTrue()) }) By("waiting for the cluster to be ready", func() { @@ -328,9 +357,9 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, testTimeouts[timeouts.Short]) AssertClusterHasPvcsAndDataDirsForTablespaces(restoredCluster, testTimeouts[timeouts.Short]) AssertDatabaseContainsTablespaces(restoredCluster, testTimeouts[timeouts.Short]) - AssertTablespaceAndOwnerExist(cluster, "atablespace", "app") - AssertTablespaceAndOwnerExist(cluster, "anothertablespace", "alpha") - AssertTablespaceAndOwnerExist(cluster, "thirdtablespace", "dante") + Expect(hasTablespaceAndOwner(cluster, "atablespace", "app")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "anothertablespace", "alpha")).To(BeTrue()) + Expect(hasTablespaceAndOwner(cluster, "thirdtablespace", "dante")).To(BeTrue()) }) }) }) @@ -859,25 +888,6 @@ func updateTablespaceOwner(cluster *apiv1.Cluster, tablespaceName, newOwner stri Expect(err).ToNot(HaveOccurred()) } -func AssertTablespaceReconciled( - namespace, clusterName, - tablespaceName string, - timeout int, -) { - By(fmt.Sprintf("checking if tablespace %v is in reconciled status", tablespaceName), func() { - Eventually(func(g Gomega) bool { - cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - g.Expect(err).ToNot(HaveOccurred()) - for _, state := range cluster.Status.TablespacesStatus { - if state.State == apiv1.TablespaceStatusReconciled && state.Name == tablespaceName { - return true - } - } - return false - }, timeout).Should(BeTrue()) - }) -} - func AssertRoleReconciled( namespace, clusterName, roleName string, @@ -1121,28 +1131,6 @@ func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespace }) } -func AssertTablespaceAndOwnerExist(cluster *apiv1.Cluster, tablespace, owner string) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name - primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) - Expect(err).ShouldNot(HaveOccurred()) - result, stdErr, err := exec.QueryInInstancePod( - env.Ctx, env.Client, env.Interface, env.RestClientConfig, - exec.PodLocator{ - Namespace: namespace, - PodName: primaryPod.Name, - }, postgres.AppDBName, - fmt.Sprintf("SELECT 1 FROM pg_catalog.pg_tablespace WHERE spcname = '%s' "+ - "AND pg_catalog.pg_get_userbyid(spcowner) = '%s'", - tablespace, - owner), - ) - Expect(stdErr).To(BeEmpty()) - Expect(err).ShouldNot(HaveOccurred()) - Expect(result).To(Equal("1\n")) - GinkgoWriter.Printf("Found Tablespaces %s with owner %s", tablespace, owner) -} - func assertCanHibernateClusterWithTablespaces( namespace string, clusterName string, From 2dc763a17401067f00a0453d408352528a6aaac6 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Fri, 21 Mar 2025 15:17:36 +0100 Subject: [PATCH 467/836] test(e2e): wait for webhook to work after disruption (#7184) Disruption tests could end with the webhook not yet able to reply. Wait until we know it can validate requests. 
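The wait is implemented by probing the validating webhook itself; a minimal sketch of the pattern used in the diff below (Ginkgo/Gomega):

    // Submit an intentionally invalid Cluster and wait until the webhook
    // rejects it, which proves the endpoint is reachable again.
    Eventually(func(g Gomega) {
        err := env.Client.Create(env.Ctx, invalidCluster)
        g.Expect(errors.IsInvalid(err)).To(BeTrue())
    }).WithTimeout(10 * time.Second).Should(Succeed())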
Closes #7182 Signed-off-by: Francesco Canovai --- tests/e2e/operator_unavailable_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index a573de9a67..96125d7b23 100644 --- a/tests/e2e/operator_unavailable_test.go +++ b/tests/e2e/operator_unavailable_test.go @@ -18,11 +18,15 @@ package e2e import ( "sync" + "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" @@ -224,6 +228,20 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te }, timeout).Should(BeTrue()) }) AssertDataExpectedCount(env, tableLocator, 2) + + // There is a chance that the webhook is not able to reach the new operator pod yet. + // This could make the following tests fail, so we need to wait for the webhook to be working again. + By("verifying the webhook is working again", func() { + invalidCluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "invalid"}, + Spec: apiv1.ClusterSpec{Instances: 1}, + } + Eventually(func(g Gomega) { + err := env.Client.Create(env.Ctx, invalidCluster) + g.Expect(errors.IsInvalid(err)).To(BeTrue()) + g.Expect(err).To(MatchError(ContainSubstring("spec.storage.size"))) + }).WithTimeout(10 * time.Second).Should(Succeed()) + }) }) }) }) From 3b3917e20242753970b7e65b2f206bc93fd99fcd Mon Sep 17 00:00:00 2001 From: Jeff Mealo Date: Mon, 24 Mar 2025 06:42:54 -0400 Subject: [PATCH 468/836] fix(backup,VolumeSnapshots): treat timeout errors as retryable (#7010) When creating volume snapshots, the CSI controller will automatically retry snapshot creation for certain errors (including timeouts). However, CNPG was marking backups as `failed` immediately when encountering timeouts.
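The retry window is configurable through an annotation, documented in the diff below; for example, on a Backup resource (value in minutes):

    apiVersion: postgresql.cnpg.io/v1
    kind: Backup
    metadata:
      name: my-backup
      annotations:
        backup.cnpg.io/volumeSnapshotDeadline: "15"
    spec:
      method: volumeSnapshot
      # other backup configuration...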
Fixes #7000 Signed-off-by: Jeff Mealo Signed-off-by: Jeffrey Mealo Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Leonardo Cecchi Co-authored-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini --- api/v1/backup_funcs.go | 19 +++ api/v1/scheduledbackup_funcs.go | 9 ++ api/v1/scheduledbackup_funcs_test.go | 17 ++- docs/src/backup_volumesnapshot.md | 90 +++++++++++- docs/src/labels_annotations.md | 5 + internal/controller/backup_controller.go | 9 -- internal/webhook/v1/backup_webhook.go | 12 ++ internal/webhook/v1/backup_webhook_test.go | 37 +++++ .../backup/volumesnapshot/errors.go | 37 ++++- .../backup/volumesnapshot/errors_test.go | 64 ++++++++- .../backup/volumesnapshot/reconciler.go | 134 ++++++++++++++--- .../backup/volumesnapshot/reconciler_test.go | 135 ++++++++++++++++++ .../backup/volumesnapshot/resources.go | 11 +- pkg/utils/labels_annotations.go | 4 + 14 files changed, 541 insertions(+), 42 deletions(-) diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go index e7ec0411ba..e6264fd385 100644 --- a/api/v1/backup_funcs.go +++ b/api/v1/backup_funcs.go @@ -19,7 +19,9 @@ package v1 import ( "context" "sort" + "strconv" "strings" + "time" volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" @@ -95,6 +97,23 @@ func (backupStatus *BackupStatus) GetOnline() bool { return *backupStatus.Online } +// GetVolumeSnapshotDeadline returns the deadline for retrying recoverable volume snapshot errors; the annotation value is expressed in minutes. +func (backup *Backup) GetVolumeSnapshotDeadline() time.Duration { + const defaultValue = 10 + + value := backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName] + if value == "" { + return defaultValue * time.Minute + } + + minutes, err := strconv.Atoi(value) + if err != nil { + return defaultValue * time.Minute + } + + return time.Duration(minutes) * time.Minute +} + // IsCompletedVolumeSnapshot checks if a backup is completed using the volume snapshot method. // It returns true if the backup's method is BackupMethodVolumeSnapshot and its status phase is BackupPhaseCompleted. // Otherwise, it returns false. diff --git a/api/v1/scheduledbackup_funcs.go b/api/v1/scheduledbackup_funcs.go index 6acf6c88c9..770683cec8 100644 --- a/api/v1/scheduledbackup_funcs.go +++ b/api/v1/scheduledbackup_funcs.go @@ -78,5 +78,14 @@ func (scheduledBackup *ScheduledBackup) CreateBackup(name string) *Backup { }, } utils.InheritAnnotations(&backup.ObjectMeta, scheduledBackup.Annotations, nil, configuration.Current) + + if backup.Annotations == nil { + backup.Annotations = make(map[string]string) + } + + if v := scheduledBackup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]; v != "" { + backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName] = v + } + return &backup } diff --git a/api/v1/scheduledbackup_funcs_test.go b/api/v1/scheduledbackup_funcs_test.go index 150192d8d4..ca957482dc 100644 --- a/api/v1/scheduledbackup_funcs_test.go +++ b/api/v1/scheduledbackup_funcs_test.go @@ -17,7 +17,10 @@ limitations under the License. package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -28,7 +31,11 @@ var _ = Describe("Scheduled backup", func() { backupName := "test" BeforeEach(func() { - scheduledBackup = &ScheduledBackup{} + scheduledBackup = &ScheduledBackup{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: make(map[string]string), + }, + } }) It("properly creates a backup with no annotations", func() { @@ -38,6 +45,14 @@ var _ = Describe("Scheduled backup", func() { Expect(backup.Annotations).To(BeEmpty()) }) + It("should always inherit volumeSnapshotDeadline while creating a backup", func() { + scheduledBackup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName] = "20" + backup := scheduledBackup.CreateBackup("test") + Expect(backup).ToNot(BeNil()) + Expect(backup.ObjectMeta.Name).To(BeEquivalentTo(backupName)) + Expect(backup.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]).To(BeEquivalentTo("20")) + }) + It("properly creates a backup with annotations", func() { annotations := make(map[string]string, 1) annotations["test"] = "annotations" diff --git a/docs/src/backup_volumesnapshot.md b/docs/src/backup_volumesnapshot.md index a927b01fa8..964081b96c 100644 --- a/docs/src/backup_volumesnapshot.md +++ b/docs/src/backup_volumesnapshot.md @@ -242,7 +242,95 @@ referenced in the `.spec.backup.volumeSnapshot.className` option. Please refer to the [Kubernetes documentation on Volume Snapshot Classes](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/) for details on this standard behavior. -## Example +## Backup Volume Snapshot Deadlines + +CloudNativePG supports backups using the volume snapshot method. In some +environments, volume snapshots may encounter temporary issues that can be +retried. + +The `backup.cnpg.io/volumeSnapshotDeadline` annotation defines how long +CloudNativePG should continue retrying recoverable errors before marking the +backup as failed. + +You can add the `backup.cnpg.io/volumeSnapshotDeadline` annotation to both +`Backup` and `ScheduledBackup` resources. For `ScheduledBackup` resources, this +annotation is automatically inherited by any `Backup` resources created from +the schedule. + +If not specified, the default retry deadline is **10 minutes**. + +### Error Handling + +When a retryable error occurs during a volume snapshot operation: + +1. CloudNativePG records the time of the first error. +2. The system retries the operation every **10 seconds**. +3. If the error persists beyond the specified deadline (or the default 10 + minutes), the backup is marked as **failed**. + +### Retryable Errors + +CloudNativePG treats the following types of errors as retryable: + +- **Server timeout errors** (HTTP 408, 429, 500, 502, 503, 504) +- **Conflicts** (optimistic locking errors) +- **Internal errors** +- **Context deadline exceeded errors** +- **Timeout errors from the CSI snapshot controller** + +### Examples + +You can add the annotation to a `ScheduledBackup` resource as follows: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: daily-backup-schedule + annotations: + backup.cnpg.io/volumeSnapshotDeadline: "20" +spec: + schedule: "0 0 * * *" + backupOwnerReference: self + method: volumeSnapshot + # other configuration... +``` + +When you define a `ScheduledBackup` with the annotation, any `Backup` resources +created from this schedule automatically inherit the specified timeout value. + +In the following example, all backups created from the schedule will have a +30-minute timeout for retrying recoverable snapshot errors. 
+ +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: weekly-backup + annotations: + backup.cnpg.io/volumeSnapshotDeadline: "30" +spec: + schedule: "0 0 * * 0" # Weekly backup on Sunday + method: volumeSnapshot + cluster: + name: my-postgresql-cluster +``` + +Alternatively, you can add the annotation directly to a `Backup` Resource: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Backup +metadata: + name: my-backup + annotations: + backup.cnpg.io/volumeSnapshotDeadline: "15" +spec: + method: volumeSnapshot + # other backup configuration... +``` + +## Example of Volume Snapshot Backup The following example shows how to configure volume snapshot base backups on an EKS cluster on AWS using the `ebs-sc` storage class and the `csi-aws-vsc` diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 677ee5636e..b4d3a0a6bf 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -227,6 +227,11 @@ CloudNativePG manages the following predefined annotations: `cnpg.io/snapshotEndTime` : The time a snapshot was marked as ready to use. +`cnpg.io/volumeSnapshotDeadline` +: Applied to `Backup` and `ScheduledBackup` resources, allows you to control + how long the operator should retry recoverable errors before considering the + volume snapshot backup failed. In minutes, defaulting to 10. + `kubectl.kubernetes.io/restartedAt` : When available, the time of last requested restart of a Postgres cluster. diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 5d588b62af..a9582701f0 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -456,10 +456,6 @@ func (r *BackupReconciler) reconcileSnapshotBackup( Build() res, err := reconciler.Reconcile(ctx, cluster, backup, targetPod, pvcs) - if isErrorRetryable(err) { - contextLogger.Error(err, "detected retryable error while executing snapshot backup, retrying...") - return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil - } if err != nil { // Volume Snapshot errors are not retryable, we need to set this backup as failed // and un-fence the Pod @@ -569,11 +565,6 @@ func updateClusterWithSnapshotsBackupTimes( return nil } -// isErrorRetryable detects is an error is retryable or not -func isErrorRetryable(err error) bool { - return apierrs.IsServerTimeout(err) || apierrs.IsConflict(err) || apierrs.IsInternalError(err) -} - // getBackupTargetPod returns the pod that should run the backup according to the current // cluster's target policy func (r *BackupReconciler) getBackupTargetPod(ctx context.Context, diff --git a/internal/webhook/v1/backup_webhook.go b/internal/webhook/v1/backup_webhook.go index 87ed87d1f6..681e40dc22 100644 --- a/internal/webhook/v1/backup_webhook.go +++ b/internal/webhook/v1/backup_webhook.go @@ -19,6 +19,7 @@ package v1 import ( "context" "fmt" + "strconv" "github.com/cloudnative-pg/machinery/pkg/log" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -168,5 +169,16 @@ func (v *BackupCustomValidator) validate(r *apiv1.Backup) field.ErrorList { )) } + if value := r.Annotations[utils.BackupVolumeSnapshotDeadlineAnnotationName]; value != "" { + _, err := strconv.Atoi(value) + if err != nil { + result = append(result, field.Invalid( + field.NewPath("metadata", "annotations", utils.BackupVolumeSnapshotDeadlineAnnotationName), + value, + "must be an integer", + )) + } + } + return result } diff --git a/internal/webhook/v1/backup_webhook_test.go 
b/internal/webhook/v1/backup_webhook_test.go index 2ac2fbf883..60c73f9eec 100644 --- a/internal/webhook/v1/backup_webhook_test.go +++ b/internal/webhook/v1/backup_webhook_test.go @@ -17,6 +17,7 @@ limitations under the License. package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -78,4 +79,40 @@ var _ = Describe("Backup webhook validate", func() { Expect(result).To(HaveLen(1)) Expect(result[0].Field).To(Equal("spec.onlineConfiguration")) }) + + It("returns error if BackupVolumeSnapshotDeadlineAnnotationName is not an integer", func() { + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.BackupVolumeSnapshotDeadlineAnnotationName: "not-an-integer", + }, + }, + } + result := v.validate(backup) + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("metadata.annotations." + utils.BackupVolumeSnapshotDeadlineAnnotationName)) + Expect(result[0].Error()).To(ContainSubstring("must be an integer")) + }) + + It("does not return error if BackupVolumeSnapshotDeadlineAnnotationName is an integer", func() { + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.BackupVolumeSnapshotDeadlineAnnotationName: "123", + }, + }, + } + result := v.validate(backup) + Expect(result).To(BeEmpty()) + }) + + It("does not return error if BackupVolumeSnapshotDeadlineAnnotationName is not set", func() { + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + } + result := v.validate(backup) + Expect(result).To(BeEmpty()) + }) }) diff --git a/pkg/reconciler/backup/volumesnapshot/errors.go b/pkg/reconciler/backup/volumesnapshot/errors.go index e5d8b1159d..e5c9d4c4e9 100644 --- a/pkg/reconciler/backup/volumesnapshot/errors.go +++ b/pkg/reconciler/backup/volumesnapshot/errors.go @@ -17,9 +17,13 @@ limitations under the License. package volumesnapshot import ( + "context" + "errors" "regexp" "strconv" "strings" + + apierrs "k8s.io/apimachinery/pkg/api/errors" ) var ( @@ -27,15 +31,32 @@ var ( httpStatusCodeRegex = regexp.MustCompile(`HTTPStatusCode:\s(\d{3})`) ) -// isRetriableErrorMessage detects if a certain error message belongs -// to a retriable error or not. This is obviously an heuristic but -// unfortunately we don't have that information exposed in the -// Kubernetes VolumeSnapshot API and the CSI driver haven't that too. -func isRetriableErrorMessage(msg string) bool { +// isNetworkErrorRetryable detects whether an error is retryable or not. +// +// Important: this function is intended for detecting errors that +// occur during communication between the operator and the Kubernetes +// API server, as well as between the operator and the instance +// manager. +// It is not designed to check errors raised by the CSI driver and +// exposed by the CSI snapshotter sidecar. 
+func isCSIErrorMessageRetriable(msg string) bool { isRetryableFuncs := []func(string) bool{ isExplicitlyRetriableError, isRetryableHTTPError, isConflictError, + isContextDeadlineExceededError, } for _, isRetryableFunc := range isRetryableFuncs { @@ -47,6 +68,12 @@ func isRetriableErrorMessage(msg string) bool { return false } +// isContextDeadlineExceededError detects context deadline exceeded errors +// These are timeouts that may be retried by the Kubernetes CSI controller +func isContextDeadlineExceededError(msg string) bool { + return strings.Contains(msg, "deadline exceeded") || strings.Contains(msg, "timed out") +} + // isConflictError detects optimistic locking errors func isConflictError(msg string) bool { // Obviously this is a heuristic, but unfortunately we don't have diff --git a/pkg/reconciler/backup/volumesnapshot/errors_test.go b/pkg/reconciler/backup/volumesnapshot/errors_test.go index 1652fd062f..81536f9371 100644 --- a/pkg/reconciler/backup/volumesnapshot/errors_test.go +++ b/pkg/reconciler/backup/volumesnapshot/errors_test.go @@ -17,6 +17,13 @@ limitations under the License. package volumesnapshot import ( + "context" + "errors" + "fmt" + + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -25,7 +32,7 @@ var _ = Describe("Retriable error messages", func() { DescribeTable( "Retriable error messages", func(msg string, isRetriable bool) { - Expect(isRetriableErrorMessage(msg)).To(Equal(isRetriable)) + Expect(isCSIErrorMessageRetriable(msg)).To(Equal(isRetriable)) }, Entry("conflict", "Hey, the object has been modified!", true), Entry("non-retriable error", "VolumeSnapshotClass not found", false), @@ -33,5 +40,60 @@ var _ = Describe("Retriable error messages", func() { Entry("explicitly non-retriable error", "Retriable: false because my pod is working", false), Entry("error code 502 - retriable", "RetryAfter: 0s, HTTPStatusCode: 502, RawError: Internal Server Error", true), Entry("error code 404 - non retriable", "RetryAfter: 0s, HTTPStatusCode: 404, RawError: Not found", false), + Entry("context deadline exceeded - retriable", "context deadline exceeded waiting for snapshot creation", true), + Entry("deadline exceeded - retriable", "deadline exceeded during Azure snapshot creation", true), + Entry("timed out - retriable", "operation timed out for csi-disk-handler", true), ) + + Describe("isContextDeadlineExceededError", func() { + It("detects 'context deadline exceeded' error messages", func() { + Expect(isContextDeadlineExceededError("context deadline exceeded")).To(BeTrue()) + }) + + It("detects 'deadline exceeded' error messages", func() { + Expect(isContextDeadlineExceededError("deadline exceeded")).To(BeTrue()) + }) + + It("detects 'timed out' error messages", func() { + Expect(isContextDeadlineExceededError("operation timed out")).To(BeTrue()) + }) + + It("rejects non-timeout error messages", func() { + Expect(isContextDeadlineExceededError("not found")).To(BeFalse()) + Expect(isContextDeadlineExceededError("permission denied")).To(BeFalse()) + Expect(isContextDeadlineExceededError("invalid input")).To(BeFalse()) + }) + }) +}) + +var _ = Describe("isNetworkErrorRetryable", func() { + It("recognizes server timeout errors", func() { + err := apierrs.NewServerTimeout(schema.GroupResource{}, "test", 1) + Expect(isNetworkErrorRetryable(err)).To(BeTrue()) + }) + + It("recognizes conflict errors", func() { + err := apierrs.NewConflict(schema.GroupResource{}, "test", nil) + 
Expect(isNetworkErrorRetryable(err)).To(BeTrue()) + }) + + It("recognizes internal errors", func() { + err := apierrs.NewInternalError(fmt.Errorf("test error")) + Expect(isNetworkErrorRetryable(err)).To(BeTrue()) + }) + + It("recognizes context deadline exceeded errors", func() { + err := context.DeadlineExceeded + Expect(isNetworkErrorRetryable(err)).To(BeTrue()) + }) + + It("does not retry on not found errors", func() { + err := apierrs.NewNotFound(schema.GroupResource{}, "test") + Expect(isNetworkErrorRetryable(err)).To(BeFalse()) + }) + + It("does not retry on random errors", func() { + err := errors.New("random error") + Expect(isNetworkErrorRetryable(err)).To(BeFalse()) + }) }) diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler.go b/pkg/reconciler/backup/volumesnapshot/reconciler.go index 730fa8f241..2ff568edff 100644 --- a/pkg/reconciler/backup/volumesnapshot/reconciler.go +++ b/pkg/reconciler/backup/volumesnapshot/reconciler.go @@ -155,6 +155,24 @@ func (se *Reconciler) Reconcile( backup *apiv1.Backup, targetPod *corev1.Pod, pvcs []corev1.PersistentVolumeClaim, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx).WithName("volumesnapshot_reconciler") + + res, err := se.internalReconcile(ctx, cluster, backup, targetPod, pvcs) + if isNetworkErrorRetryable(err) { + contextLogger.Error(err, "detected retryable error while executing snapshot backup, retrying...") + return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + + return res, err +} + +func (se *Reconciler) internalReconcile( + ctx context.Context, + cluster *apiv1.Cluster, + backup *apiv1.Backup, + targetPod *corev1.Pod, + pvcs []corev1.PersistentVolumeClaim, ) (*ctrl.Result, error) { if cluster.Spec.Backup == nil || cluster.Spec.Backup.VolumeSnapshot == nil { return nil, fmt.Errorf("cannot execute a VolumeSnapshot on a cluster without configuration") @@ -190,7 +208,7 @@ func (se *Reconciler) Reconcile( } // Step 3: wait for snapshots to be provisioned - if res, err := se.waitSnapshotToBeProvisionedStep(ctx, volumeSnapshots); res != nil || err != nil { + if res, err := se.waitSnapshotToBeProvisionedStep(ctx, backup, volumeSnapshots); res != nil || err != nil { return res, err } @@ -207,7 +225,7 @@ func (se *Reconciler) Reconcile( } // Step 5: wait for snapshots to be ready to use - if res, err := se.waitSnapshotToBeReadyStep(ctx, volumeSnapshots); res != nil || err != nil { + if res, err := se.waitSnapshotToBeReadyStep(ctx, backup, volumeSnapshots); res != nil || err != nil { return res, err } @@ -385,10 +403,11 @@ func (se *Reconciler) createSnapshotPVCGroupStep( // waitSnapshotToBeProvisionedStep waits for every PVC snapshot to be claimed func (se *Reconciler) waitSnapshotToBeProvisionedStep( ctx context.Context, + backup *apiv1.Backup, snapshots []storagesnapshotv1.VolumeSnapshot, ) (*ctrl.Result, error) { for i := range snapshots { - if res, err := se.waitSnapshotToBeProvisionedAndAnnotate(ctx, &snapshots[i]); res != nil || err != nil { + if res, err := se.waitSnapshotToBeProvisionedAndAnnotate(ctx, backup, &snapshots[i]); res != nil || err != nil { return res, err } } @@ -399,10 +418,11 @@ func (se *Reconciler) waitSnapshotToBeProvisionedStep( // waitSnapshotToBeReadyStep waits for every PVC snapshot to be ready to use func (se *Reconciler) waitSnapshotToBeReadyStep( ctx context.Context, + backup *apiv1.Backup, snapshots []storagesnapshotv1.VolumeSnapshot, ) (*ctrl.Result, error) { for i := range snapshots { - if res, err := se.waitSnapshotToBeReady(ctx, &snapshots[i]); res != nil || err != 
nil { + if res, err := se.waitSnapshotToBeReady(ctx, backup, &snapshots[i]); res != nil || err != nil { return res, err } } @@ -501,20 +521,14 @@ func transferLabelsToAnnotations(labels map[string]string, annotations map[strin // SnapshotStartTimeAnnotationName and SnapshotEndTimeAnnotationName. func (se *Reconciler) waitSnapshotToBeProvisionedAndAnnotate( ctx context.Context, + backup *apiv1.Backup, snapshot *storagesnapshotv1.VolumeSnapshot, ) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx) info := parseVolumeSnapshotInfo(snapshot) if info.error != nil { - if info.error.isRetryable() { - contextLogger.Error(info.error, - "Retryable snapshot provisioning error, trying again", - "volumeSnapshotName", snapshot.Name) - return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } - - return nil, info.error + return se.handleSnapshotErrors(ctx, backup, info.error) } if !info.provisioned { contextLogger.Info( @@ -544,20 +558,14 @@ func (se *Reconciler) waitSnapshotToBeProvisionedAndAnnotate( // SnapshotStartTimeAnnotationName and SnapshotEndTimeAnnotationName. func (se *Reconciler) waitSnapshotToBeReady( ctx context.Context, + backup *apiv1.Backup, snapshot *storagesnapshotv1.VolumeSnapshot, ) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx) info := parseVolumeSnapshotInfo(snapshot) if info.error != nil { - if info.error.isRetryable() { - contextLogger.Error(info.error, - "Retryable snapshot provisioning error, trying again", - "volumeSnapshotName", snapshot.Name) - return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } - - return nil, info.error + return se.handleSnapshotErrors(ctx, backup, info.error) } if !info.ready { contextLogger.Info( @@ -570,3 +578,89 @@ func (se *Reconciler) waitSnapshotToBeReady( return nil, nil } + +func (se *Reconciler) handleSnapshotErrors( + ctx context.Context, + backup *apiv1.Backup, + snapshotErr *volumeSnapshotError, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx). 
+		WithName("handle_snapshot_errors")
+
+	if !snapshotErr.isRetryable() {
+		return nil, snapshotErr
+	}
+
+	if err := addDeadlineStatus(ctx, se.cli, backup); err != nil {
+		return nil, fmt.Errorf("while adding deadline status: %w", err)
+	}
+
+	exceeded, err := isDeadlineExceeded(backup)
+	if err != nil {
+		return nil, fmt.Errorf("while checking if deadline was exceeded: %w", err)
+	}
+	if exceeded {
+		return nil, fmt.Errorf("deadline exceeded for error %w", snapshotErr)
+	}
+
+	contextLogger.Error(snapshotErr,
+		"Retryable snapshot provisioning error, trying again",
+	)
+	return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil
+}
+
+func isDeadlineExceeded(backup *apiv1.Backup) (bool, error) {
+	if backup.Status.PluginMetadata[pluginName] == "" {
+		return false, fmt.Errorf("no plugin metadata found in backup status")
+	}
+
+	data, err := unmarshalMetadata(backup.Status.PluginMetadata[pluginName])
+	if err != nil {
+		return false, fmt.Errorf("while unmarshalling plugin metadata: %w", err)
+	}
+
+	// if the deadline has passed since firstFailureTime, we need to consider the deadline exceeded
+	deadline := int64(backup.GetVolumeSnapshotDeadline().Seconds())
+	return time.Now().Unix()-data.VolumeSnapshotFirstDetectedFailure > deadline, nil
+}
+
+type metadata struct {
+	// VolumeSnapshotFirstDetectedFailure is the UNIX timestamp when the first volume snapshot failure was detected
+	VolumeSnapshotFirstDetectedFailure int64 `json:"volumeSnapshotFirstFailure,omitempty"`
+}
+
+func unmarshalMetadata(rawData string) (*metadata, error) {
+	var data metadata
+	if err := json.Unmarshal([]byte(rawData), &data); err != nil {
+		return nil, fmt.Errorf("while unmarshalling metadata: %w", err)
+	}
+
+	if data.VolumeSnapshotFirstDetectedFailure == 0 {
+		return nil, fmt.Errorf("no volumeSnapshotFirstFailure found in plugin metadata: %s", pluginName)
+	}
+
+	return &data, nil
+}
+
+func addDeadlineStatus(ctx context.Context, cli client.Client, backup *apiv1.Backup) error {
+	if value, ok := backup.Status.PluginMetadata[pluginName]; ok {
+		if _, err := unmarshalMetadata(value); err == nil {
+			return nil
+		}
+	}
+
+	data := &metadata{VolumeSnapshotFirstDetectedFailure: time.Now().Unix()}
+	rawData, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+
+	if backup.Status.PluginMetadata == nil {
+		backup.Status.PluginMetadata = map[string]string{}
+	}
+
+	origBackup := backup.DeepCopy()
+	backup.Status.PluginMetadata[pluginName] = string(rawData)
+
+	return cli.Status().Patch(ctx, backup, client.MergeFrom(origBackup))
+}
diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go
index 8ce7adc393..e3950ea970 100644
--- a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go
+++ b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go
@@ -18,6 +18,7 @@ package volumesnapshot

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"time"

@@ -451,3 +452,137 @@ var _ = Describe("annotateSnapshotsWithBackupData", func() {
 	}
 	})
 })
+
+var _ = Describe("addDeadlineStatus", func() {
+	var (
+		ctx    context.Context
+		backup *apiv1.Backup
+		cli    k8client.Client
+	)
+
+	BeforeEach(func() {
+		ctx = context.TODO()
+		backup = &apiv1.Backup{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "test-namespace",
+				Name:      "test-backup",
+			},
+			Status: apiv1.BackupStatus{
+				PluginMetadata: make(map[string]string),
+			},
+		}
+		cli = fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()).
+			WithObjects(backup).
+			WithStatusSubresource(&apiv1.Backup{}).
+ Build() + }) + + It("should add deadline status if not present", func() { + err := addDeadlineStatus(ctx, cli, backup) + Expect(err).ToNot(HaveOccurred()) + + var updatedBackup apiv1.Backup + err = cli.Get(ctx, types.NamespacedName{Name: backup.Name, Namespace: backup.Namespace}, &updatedBackup) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedBackup.Status.PluginMetadata).To(HaveKey(pluginName)) + Expect(updatedBackup.Status.PluginMetadata[pluginName]).ToNot(BeEmpty()) + Expect(updatedBackup.Status.PluginMetadata[pluginName]).To(MatchRegexp(`{"volumeSnapshotFirstFailure":\d+}`)) + }) + + It("should not modify deadline status if already present", func() { + backup.Status.PluginMetadata[pluginName] = `{"volumeSnapshotFirstFailure": 1234567890}` + err := cli.Status().Update(ctx, backup) + Expect(err).ToNot(HaveOccurred()) + + err = addDeadlineStatus(ctx, cli, backup) + Expect(err).ToNot(HaveOccurred()) + + var updatedBackup apiv1.Backup + err = cli.Get(ctx, types.NamespacedName{Name: backup.Name, Namespace: backup.Namespace}, &updatedBackup) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedBackup.Status.PluginMetadata[pluginName]).To(Equal(`{"volumeSnapshotFirstFailure": 1234567890}`)) + }) +}) + +var _ = Describe("isDeadlineExceeded", func() { + var backup *apiv1.Backup + + BeforeEach(func() { + backup = &apiv1.Backup{ + Status: apiv1.BackupStatus{ + PluginMetadata: make(map[string]string), + }, + } + }) + + It("should return an error if plugin metadata is empty", func() { + _, err := isDeadlineExceeded(backup) + Expect(err).To(HaveOccurred()) + }) + + It("should return error if unmarshalling fails", func() { + backup.Status.PluginMetadata[pluginName] = "invalid-json" + exceeded, err := isDeadlineExceeded(backup) + Expect(err).To(HaveOccurred()) + Expect(exceeded).To(BeFalse()) + }) + + It("should return error if no volumeSnapshotFirstFailure found in plugin metadata", func() { + backup.Status.PluginMetadata[pluginName] = `{}` + exceeded, err := isDeadlineExceeded(backup) + Expect(err).To(HaveOccurred()) + Expect(exceeded).To(BeFalse()) + }) + + It("should return false if deadline has not exceeded", func() { + data := metadata{VolumeSnapshotFirstDetectedFailure: time.Now().Unix()} + rawData, _ := json.Marshal(data) + backup.Status.PluginMetadata[pluginName] = string(rawData) + backup.Annotations = map[string]string{utils.BackupVolumeSnapshotDeadlineAnnotationName: "10"} + + exceeded, err := isDeadlineExceeded(backup) + Expect(err).ToNot(HaveOccurred()) + Expect(exceeded).To(BeFalse()) + }) + + It("should return true if deadline has exceeded", func() { + data := metadata{VolumeSnapshotFirstDetectedFailure: time.Now().Add(-20 * time.Minute).Unix()} + rawData, _ := json.Marshal(data) + backup.Status.PluginMetadata[pluginName] = string(rawData) + backup.Annotations = map[string]string{utils.BackupVolumeSnapshotDeadlineAnnotationName: "10"} + + exceeded, err := isDeadlineExceeded(backup) + Expect(err).ToNot(HaveOccurred()) + Expect(exceeded).To(BeTrue()) + }) +}) + +var _ = Describe("unmarshalMetadata", func() { + It("should unmarshal valid metadata correctly", func() { + rawData := `{"volumeSnapshotFirstFailure": 1234567890}` + data, err := unmarshalMetadata(rawData) + Expect(err).ToNot(HaveOccurred()) + Expect(data.VolumeSnapshotFirstDetectedFailure).To(Equal(int64(1234567890))) + }) + + It("should return an error if rawData is invalid JSON", func() { + rawData := `invalid-json` + data, err := unmarshalMetadata(rawData) + Expect(err).To(HaveOccurred()) + Expect(data).To(BeNil()) + }) + 
+ It("should return an error if volumeSnapshotFirstFailure is missing", func() { + rawData := `{}` + data, err := unmarshalMetadata(rawData) + Expect(err).To(HaveOccurred()) + Expect(data).To(BeNil()) + }) + + It("should return an error if volumeSnapshotFirstFailure is zero", func() { + rawData := `{"volumeSnapshotFirstFailure": 0}` + data, err := unmarshalMetadata(rawData) + Expect(err).To(HaveOccurred()) + Expect(data).To(BeNil()) + }) +}) diff --git a/pkg/reconciler/backup/volumesnapshot/resources.go b/pkg/reconciler/backup/volumesnapshot/resources.go index eaee26cf81..71794502f9 100644 --- a/pkg/reconciler/backup/volumesnapshot/resources.go +++ b/pkg/reconciler/backup/volumesnapshot/resources.go @@ -26,6 +26,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) +const pluginName = "cnpg_volumesnapshot" + // volumeSnapshotInfo host information about a volume snapshot type volumeSnapshotInfo struct { // error contains the raised error when the volume snapshot terminated @@ -68,16 +70,15 @@ func (err volumeSnapshotError) Error() string { // IsRetryable returns true if the external snapshotter controller // will retry taking the snapshot func (err volumeSnapshotError) isRetryable() bool { - // TODO: instead of blindingly retry on matching errors, we - // should enhance our CRD with a configurable deadline. After - // the deadline have been met on err.InternalError.CreatedAt - // the backup can be marked as failed + // The Kubernetes CSI driver/controller will automatically retry snapshot creation + // for certain errors, including timeouts. We use pattern matching to identify + // these retryable errors and handle them appropriately. if err.InternalError.Message == nil { return false } - return isRetriableErrorMessage(*err.InternalError.Message) + return isCSIErrorMessageRetriable(*err.InternalError.Message) } // slice represents a slice of []storagesnapshotv1.VolumeSnapshot diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index e8970cd561..32d634962a 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -209,6 +209,10 @@ const ( // BackupTablespaceMapFileAnnotationName is the name of the annotation where the `tablespace_map` file is kept BackupTablespaceMapFileAnnotationName = MetadataNamespace + "/backupTablespaceMapFile" + // BackupVolumeSnapshotDeadlineAnnotationName is the annotation for the snapshot backup failure deadline in minutes. 
+ // It is only applied to snapshot retryable errors + BackupVolumeSnapshotDeadlineAnnotationName = MetadataNamespace + "/volumeSnapshotDeadline" + // SnapshotStartTimeAnnotationName is the name of the annotation where a snapshot's start time is kept SnapshotStartTimeAnnotationName = MetadataNamespace + "/snapshotStartTime" From 2945f8114263452f405fafb5d2ceea544c98301c Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 24 Mar 2025 11:46:30 +0100 Subject: [PATCH 469/836] feat(barman-cloud): support lz4, xz, and zstd compression (#7151) Closes #7147 Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- .wordlist-en-custom.txt | 3 ++ .../bases/postgresql.cnpg.io_clusters.yaml | 30 ++++++++--- docs/src/backup_barmanobjectstore.md | 3 ++ go.mod | 2 +- go.sum | 4 +- pkg/management/postgres/backup_test.go | 52 +++++++++---------- 6 files changed, 57 insertions(+), 37 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index eca5fe59fa..eb470824da 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -958,6 +958,7 @@ logLevel lookups lsn lt +lz macOS malcolm mallocs @@ -1417,5 +1418,7 @@ www xact xlog xml +xz yaml yml +zstd diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 7fcdabd306..4e27dd2999 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -1152,11 +1152,14 @@ spec: description: |- Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2` or `snappy`. + compression, default), `gzip`, `bzip2`, `lz4`, `snappy`, `xz`, and `zstd`. enum: - - gzip - bzip2 + - gzip + - lz4 - snappy + - xz + - zstd type: string encryption: description: |- @@ -1343,11 +1346,15 @@ spec: compression: description: |- Compress a WAL file before sending it to the object store. Available - options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. enum: - - gzip - bzip2 + - gzip + - lz4 - snappy + - xz + - zstd type: string encryption: description: |- @@ -2654,11 +2661,14 @@ spec: description: |- Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2` or `snappy`. + compression, default), `gzip`, `bzip2`, `lz4`, `snappy`, `xz`, and `zstd`. enum: - - gzip - bzip2 + - gzip + - lz4 - snappy + - xz + - zstd type: string encryption: description: |- @@ -2845,11 +2855,15 @@ spec: compression: description: |- Compress a WAL file before sending it to the object store. Available - options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`. + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. 
enum: - - gzip - bzip2 + - gzip + - lz4 - snappy + - xz + - zstd type: string encryption: description: |- diff --git a/docs/src/backup_barmanobjectstore.md b/docs/src/backup_barmanobjectstore.md index 5859966af3..ba2ed40750 100644 --- a/docs/src/backup_barmanobjectstore.md +++ b/docs/src/backup_barmanobjectstore.md @@ -93,7 +93,10 @@ algorithms via `barman-cloud-backup` (for backups) and * bzip2 * gzip +* lz4 * snappy +* xz +* zstd The compression settings for backups and WALs are independent. See the [DataBackupConfiguration](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#DataBackupConfiguration) and diff --git a/go.mod b/go.mod index 5b7c8218b0..df8e4f363d 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/avast/retry-go/v4 v4.6.1 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.1.0 + github.com/cloudnative-pg/barman-cloud v0.3.0 github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb github.com/cloudnative-pg/machinery v0.1.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc diff --git a/go.sum b/go.sum index c332c3c35a..d841fae2ac 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.1.0 h1:e/z52CehMBIh1LjZqNBJnncWJbS+1JYvRMBR8Js6Uiw= -github.com/cloudnative-pg/barman-cloud v0.1.0/go.mod h1:rJUJO/f1yNckLZiVxHAyRmKY+4EPJkYRJsGbTZRJQSY= +github.com/cloudnative-pg/barman-cloud v0.3.0 h1:tCtIF7nsHDH5X7nAXXd7VqNKKNGHrycXAyyKKKpdGS4= +github.com/cloudnative-pg/barman-cloud v0.3.0/go.mod h1:8m6W117343zT28ctcskUYEu/dy+MX3hUUW4DynH8MLI= github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb h1:FPORwCxjZwlnKnF7dOkuOAz0GBSQ3Hrn+8lm4uMiWeM= github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb/go.mod h1:n+kbHm3rzRCY5IJKuE1tGMbG6JaeYz8yycYoLt7BeKo= github.com/cloudnative-pg/machinery v0.1.0 h1:tjRmsqQmsO/OlaT0uFmkEtVqgr+SGPM88cKZOHYKLBo= diff --git a/pkg/management/postgres/backup_test.go b/pkg/management/postgres/backup_test.go index 8ff4796c72..bfd037e4a1 100644 --- a/pkg/management/postgres/backup_test.go +++ b/pkg/management/postgres/backup_test.go @@ -150,39 +150,39 @@ var _ = Describe("testing backup command", func() { var _ = Describe("generate backup options", func() { const namespace = "test" - capabilities := barmanCapabilities.Capabilities{ - Version: nil, - HasAzure: true, - HasS3: true, - HasGoogle: true, - HasRetentionPolicy: true, - HasTags: true, - HasCheckWalArchive: true, - HasSnappy: true, - HasErrorCodesForWALRestore: true, - HasAzureManagedIdentity: true, - } - cluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: namespace}, - Spec: apiv1.ClusterSpec{ - Backup: &apiv1.BackupConfiguration{ - BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{ - Data: &apiv1.DataBackupConfiguration{ - Compression: "gzip", - Encryption: "aes256", - ImmediateCheckpoint: true, - Jobs: ptr.To(int32(2)), + + var ( + capabilities *barmanCapabilities.Capabilities + cluster *apiv1.Cluster + ) + + BeforeEach(func() { + var err error + capabilities, err = barmanCapabilities.CurrentCapabilities() + 
Expect(err).ShouldNot(HaveOccurred())
+
+		cluster = &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: namespace},
+			Spec: apiv1.ClusterSpec{
+				Backup: &apiv1.BackupConfiguration{
+					BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{
+						Data: &apiv1.DataBackupConfiguration{
+							Compression:         "gzip",
+							Encryption:          "aes256",
+							ImmediateCheckpoint: true,
+							Jobs:                ptr.To(int32(2)),
+						},
+					},
+				},
+			},
+		}
+	})

 	It("should generate correct options", func() {
 		extraOptions := []string{"--min-chunk-size=5MB", "--read-timeout=60", "-vv"}
 		cluster.Spec.Backup.BarmanObjectStore.Data.AdditionalCommandArgs = extraOptions
-		cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, &capabilities)
+		cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, capabilities)
 		options, err := cmd.GetDataConfiguration([]string{})
 		Expect(err).ToNot(HaveOccurred())
@@ -203,7 +203,7 @@ var _ = Describe("generate backup options", func() {
 			"--encryption=aes256",
 		}
 		cluster.Spec.Backup.BarmanObjectStore.Data.AdditionalCommandArgs = extraOptions
-		cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, &capabilities)
+		cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, capabilities)
 		options, err := cmd.GetDataConfiguration([]string{})
 		Expect(err).ToNot(HaveOccurred())

From e698b97f8d9464a110905cc70781872a346cd7f4 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Mon, 24 Mar 2025 18:22:38 +0100
Subject: [PATCH 470/836] ci(openshift): retry oc secret link calls (#7191)

Signed-off-by: Marco Nenciarini
---
 hack/e2e/run-e2e-ocp.sh | 36 +++++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

diff --git a/hack/e2e/run-e2e-ocp.sh b/hack/e2e/run-e2e-ocp.sh
index bddb940447..130e4c80b0 100755
--- a/hack/e2e/run-e2e-ocp.sh
+++ b/hack/e2e/run-e2e-ocp.sh
@@ -36,6 +36,40 @@ function wait_for() {
   [[ $ITER -lt $5 ]]
 }

+# Retry a command up to a specific number of times until it exits successfully,
+# with exponential backoff.
+#
+# $ retry 5 echo Hello
+# Hello
+#
+# $ retry 5 false
+# Retry 1/5 exited 1, retrying in 1 seconds...
+# Retry 2/5 exited 1, retrying in 2 seconds...
+# Retry 3/5 exited 1, retrying in 4 seconds...
+# Retry 4/5 exited 1, retrying in 8 seconds...
+# Retry 5/5 exited 1, no more retries left.
+#
+# Inspired by https://gist.github.com/sj26/88e1c6584397bb7c13bd11108a579746
+function retry {
+  local retries=$1
+  shift
+
+  local count=0
+  until "$@"; do
+    local exit=$?
+    local wait=$((2 ** count))
+    count=$((count + 1))
+    if [ $count -lt "$retries" ]; then
+      echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." >&2
+      sleep $wait
+    else
+      echo "Retry $count/$retries exited $exit, no more retries left." >&2
+      return $exit
+    fi
+  done
+  return 0
+}
+
 ROOT_DIR=$(realpath "$(dirname "$0")/../../")
 # we need to export ENVs defined in the workflow and used in run-e2e.sh script
 export POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")}
@@ -71,7 +105,7 @@ EOF
 # requires a secret. When the sa is available, define the secret.
 wait_for sa openshift-operators cnpg-manager 10 60
 oc create secret docker-registry -n openshift-operators --docker-server="${REGISTRY}" --docker-username="${REGISTRY_USER}" --docker-password="${REGISTRY_PASSWORD}" cnpg-pull-secret || true
-oc secrets link -n openshift-operators cnpg-manager cnpg-pull-secret --for=pull
+retry 5 oc secrets link -n openshift-operators cnpg-manager cnpg-pull-secret --for=pull

 # We wait 30 seconds for the operator deployment to be created
 echo "Waiting 30s for the operator deployment to be ready"

From 8adf3f5f16430dfce16da866864b1d639c7548a3 Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Mon, 24 Mar 2025 18:55:39 +0100
Subject: [PATCH 471/836] test(e2e): fix lag control probe threshold (#7199)

The test timeout and the total readiness time coincide, which means
the test can pass or fail by fractions of a second.

Define a quicker readiness probe so the instances become ready before
the test times out.

Closes #7198

Signed-off-by: Francesco Canovai
---
 .../sync_replicas/startup-probe-lag-control.yaml.template | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template b/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template
index 5e6f5bfcb1..d25f256732 100644
--- a/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template
+++ b/tests/e2e/fixtures/sync_replicas/startup-probe-lag-control.yaml.template
@@ -25,6 +25,9 @@ spec:
       maximumLag: 16Mi
       failureThreshold: 60
       periodSeconds: 1
+    readiness:
+      failureThreshold: 10
+      periodSeconds: 1
   storage:
     storageClass: ${E2E_DEFAULT_STORAGE_CLASS}

From 3abc879680f10d93809d8f96584244ac8f45b4c7 Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Tue, 25 Mar 2025 10:24:41 +0100
Subject: [PATCH 472/836] feat(metrics): check if extensions have updates (#7195)

Updating the image can change the set of available extensions, but it
doesn't change the versions installed in the database. We want to
expose information about available updates to the users.
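
For reference, once loaded, this query should make the metrics exporter
publish one gauge sample per installed extension, along these lines
(the `cnpg_` prefix follows the exporter's usual convention for
user-defined metrics; the label values below are purely illustrative):

    cnpg_pg_extensions_update_available{datname="app",extname="pgaudit",default_version="17.0",installed_version="16.0"} 1

A value of 1 means the installed version differs from the default
version shipped with the image, i.e. an update is available; 0 means
the extension is up to date.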
Closes #7194 Signed-off-by: Francesco Canovai --- config/manager/default-monitoring.yaml | 32 ++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/config/manager/default-monitoring.yaml b/config/manager/default-monitoring.yaml index 0078087ee2..ab4878bdb6 100644 --- a/config/manager/default-monitoring.yaml +++ b/config/manager/default-monitoring.yaml @@ -454,3 +454,35 @@ data: - setting: usage: "GAUGE" description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' From d7f2b5e4c36784b311198e0bc27926d9e67d5f8b Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 25 Mar 2025 11:52:46 +0100 Subject: [PATCH 473/836] docs: Creative Commons Attribution 4.0 International License (#7206) Relates to #7202 Signed-off-by: Gabriele Bartolini --- docs/LICENSE | 396 ++++++++++++++++++ docs/README.md | 7 + docs/markdown/pkg.tpl | 1 + docs/src/appendixes/object_stores.md | 1 + docs/src/applications.md | 1 + docs/src/architecture.md | 1 + docs/src/backup.md | 1 + docs/src/backup_barmanobjectstore.md | 1 + docs/src/backup_recovery.md | 1 + docs/src/backup_volumesnapshot.md | 1 + docs/src/before_you_start.md | 1 + docs/src/benchmarking.md | 1 + docs/src/bootstrap.md | 1 + docs/src/certificates.md | 1 + docs/src/cloudnative-pg.v1.md | 2 + docs/src/cluster_conf.md | 1 + docs/src/connection_pooling.md | 1 + docs/src/container_images.md | 1 + docs/src/controller.md | 1 + docs/src/database_import.md | 1 + docs/src/declarative_database_management.md | 1 + docs/src/declarative_hibernation.md | 1 + docs/src/declarative_role_management.md | 1 + docs/src/e2e.md | 1 + docs/src/failover.md | 1 + docs/src/failure_modes.md | 1 + docs/src/faq.md | 1 + docs/src/fencing.md | 1 + docs/src/image_catalog.md | 1 + docs/src/index.md | 4 + docs/src/installation_upgrade.md | 1 + docs/src/instance_manager.md | 1 + docs/src/kubectl-plugin.md | 1 + docs/src/kubernetes_upgrade.md | 1 + docs/src/labels_annotations.md | 1 + docs/src/logging.md | 1 + docs/src/logical_replication.md | 1 + docs/src/monitoring.md | 1 + docs/src/networking.md | 1 + docs/src/operator_capability_levels.md | 1 + docs/src/operator_conf.md | 1 + docs/src/postgis.md | 1 + docs/src/postgresql_conf.md | 1 + docs/src/preview_version.md | 1 + docs/src/quickstart.md | 1 + docs/src/recovery.md | 1 + docs/src/release_notes.md | 1 + .../edb-cloud-native-postgresql.md | 1 + docs/src/release_notes/v1.24.md | 1 + docs/src/release_notes/v1.25.md | 1 + docs/src/release_notes/v1.26.md | 1 + docs/src/replica_cluster.md | 1 + docs/src/replication.md | 1 + docs/src/resource_management.md | 1 + docs/src/rolling_update.md | 1 + docs/src/samples.md | 1 + docs/src/scheduling.md | 1 + docs/src/security.md | 1 + docs/src/service_management.md | 1 + docs/src/ssl_connections.md | 1 + docs/src/storage.md | 1 + docs/src/supported_releases.md | 1 + docs/src/tablespaces.md | 1 + docs/src/troubleshooting.md | 1 + 
docs/src/use_cases.md | 1 + docs/src/wal_archiving.md | 1 + 66 files changed, 471 insertions(+) create mode 100644 docs/LICENSE diff --git a/docs/LICENSE b/docs/LICENSE new file mode 100644 index 0000000000..da6ab6cc8f --- /dev/null +++ b/docs/LICENSE @@ -0,0 +1,396 @@ +Attribution 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). 
To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. 
Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. 
identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. 
WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. 
Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. + diff --git a/docs/README.md b/docs/README.md index 692d45a2f3..cf74a9cd25 100644 --- a/docs/README.md +++ b/docs/README.md @@ -71,3 +71,10 @@ consider if they should be included in the curated [list of examples](src/sample And please help keeping the samples in the curated list, as well as any samples named `cluster-example-*` in runnable condition. These can be a big help for beginners. + +## License + +The CloudNativePG documentation and all the work under the `docs` folder is +licensed under a Creative Commons Attribution 4.0 International License. + + diff --git a/docs/markdown/pkg.tpl b/docs/markdown/pkg.tpl index 345d032ff9..542b5bd178 100644 --- a/docs/markdown/pkg.tpl +++ b/docs/markdown/pkg.tpl @@ -1,4 +1,5 @@ {{ define "packages" -}} + # API Reference diff --git a/docs/src/appendixes/object_stores.md b/docs/src/appendixes/object_stores.md index 2fc32452ca..59d175244b 100644 --- a/docs/src/appendixes/object_stores.md +++ b/docs/src/appendixes/object_stores.md @@ -1,3 +1,4 @@ + # Appendix A - Common object stores for backups You can store the [backup](../backup.md) files in any service that is supported diff --git a/docs/src/applications.md b/docs/src/applications.md index fea987587b..bc3db66533 100644 --- a/docs/src/applications.md +++ b/docs/src/applications.md @@ -1,3 +1,4 @@ + # Connecting from an application Applications are supposed to work with the services created by CloudNativePG diff --git a/docs/src/architecture.md b/docs/src/architecture.md index 509461e26d..577397a4fa 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -1,3 +1,4 @@ + # Architecture !!! Hint diff --git a/docs/src/backup.md b/docs/src/backup.md index cffda0e7fa..eddeb28b39 100644 --- a/docs/src/backup.md +++ b/docs/src/backup.md @@ -1,3 +1,4 @@ + # Backup PostgreSQL natively provides first class backup and recovery capabilities based diff --git a/docs/src/backup_barmanobjectstore.md b/docs/src/backup_barmanobjectstore.md index ba2ed40750..fb1f1f29e3 100644 --- a/docs/src/backup_barmanobjectstore.md +++ b/docs/src/backup_barmanobjectstore.md @@ -1,3 +1,4 @@ + # Backup on object stores CloudNativePG natively supports **online/hot backup** of PostgreSQL diff --git a/docs/src/backup_recovery.md b/docs/src/backup_recovery.md index 942ae45f11..ac1e0915ac 100644 --- a/docs/src/backup_recovery.md +++ b/docs/src/backup_recovery.md @@ -1,3 +1,4 @@ + # Backup and Recovery [Backup](backup.md) and [recovery](recovery.md) are in two separate sections. diff --git a/docs/src/backup_volumesnapshot.md b/docs/src/backup_volumesnapshot.md index 964081b96c..aedd03654b 100644 --- a/docs/src/backup_volumesnapshot.md +++ b/docs/src/backup_volumesnapshot.md @@ -1,3 +1,4 @@ + # Backup on volume snapshots !!! 
Warning diff --git a/docs/src/before_you_start.md b/docs/src/before_you_start.md index 7ebc61d732..36d2f9a7a3 100644 --- a/docs/src/before_you_start.md +++ b/docs/src/before_you_start.md @@ -1,3 +1,4 @@ + # Before You Start Before we get started, it is essential to go over some terminology that is diff --git a/docs/src/benchmarking.md b/docs/src/benchmarking.md index 2526049477..129c75cd19 100644 --- a/docs/src/benchmarking.md +++ b/docs/src/benchmarking.md @@ -1,3 +1,4 @@ + # Benchmarking The CNPG kubectl plugin provides an easy way for benchmarking a PostgreSQL deployment in Kubernetes using CloudNativePG. diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 8315dafa23..860a471bda 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -1,3 +1,4 @@ + # Bootstrap This section describes the options available to create a new diff --git a/docs/src/certificates.md b/docs/src/certificates.md index 70108140b8..7ec3f45784 100644 --- a/docs/src/certificates.md +++ b/docs/src/certificates.md @@ -1,3 +1,4 @@ + # Certificates CloudNativePG was designed to natively support TLS certificates. diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 02dd9a27c1..416b7c7101 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -1,3 +1,5 @@ + + # API Reference

Package v1 contains API Schema definitions for the postgresql v1 API group

diff --git a/docs/src/cluster_conf.md b/docs/src/cluster_conf.md index 3634b11e59..9e34c4822e 100644 --- a/docs/src/cluster_conf.md +++ b/docs/src/cluster_conf.md @@ -1,3 +1,4 @@ + # Instance pod configuration ## Projected volumes diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md index 4b66f0564d..db4863dade 100644 --- a/docs/src/connection_pooling.md +++ b/docs/src/connection_pooling.md @@ -1,3 +1,4 @@ + # Connection pooling CloudNativePG provides native support for connection pooling with diff --git a/docs/src/container_images.md b/docs/src/container_images.md index b51344553b..79e6b0ba76 100644 --- a/docs/src/container_images.md +++ b/docs/src/container_images.md @@ -1,3 +1,4 @@ + # Container Image Requirements The CloudNativePG operator for Kubernetes is designed to diff --git a/docs/src/controller.md b/docs/src/controller.md index cb000833cf..00f49595a0 100644 --- a/docs/src/controller.md +++ b/docs/src/controller.md @@ -1,3 +1,4 @@ + # Custom Pod Controller Kubernetes uses the diff --git a/docs/src/database_import.md b/docs/src/database_import.md index 2fc3b4500e..f6507f1cd1 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -1,3 +1,4 @@ + # Importing Postgres databases This section describes how to import one or more existing PostgreSQL diff --git a/docs/src/declarative_database_management.md b/docs/src/declarative_database_management.md index 98f5a6e4b5..b4f83e582c 100644 --- a/docs/src/declarative_database_management.md +++ b/docs/src/declarative_database_management.md @@ -1,3 +1,4 @@ + # PostgreSQL Database Management CloudNativePG simplifies PostgreSQL database provisioning by automatically diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md index a5cbeae3ca..c9116bca5b 100644 --- a/docs/src/declarative_hibernation.md +++ b/docs/src/declarative_hibernation.md @@ -1,3 +1,4 @@ + # Declarative hibernation CloudNativePG is designed to keep PostgreSQL clusters up, running and available diff --git a/docs/src/declarative_role_management.md b/docs/src/declarative_role_management.md index 2c0c109cbc..652be3ff5e 100644 --- a/docs/src/declarative_role_management.md +++ b/docs/src/declarative_role_management.md @@ -1,3 +1,4 @@ + # PostgreSQL Role Management From its inception, CloudNativePG has managed the creation of specific roles diff --git a/docs/src/e2e.md b/docs/src/e2e.md index de06101da5..2122227ede 100644 --- a/docs/src/e2e.md +++ b/docs/src/e2e.md @@ -1,3 +1,4 @@ + # End-to-End Tests CloudNativePG is automatically tested after each diff --git a/docs/src/failover.md b/docs/src/failover.md index 89ecac7d3f..7a922890cf 100644 --- a/docs/src/failover.md +++ b/docs/src/failover.md @@ -1,3 +1,4 @@ + # Automated failover In the case of unexpected errors on the primary for longer than the diff --git a/docs/src/failure_modes.md b/docs/src/failure_modes.md index 38eb79a5c9..5b887be256 100644 --- a/docs/src/failure_modes.md +++ b/docs/src/failure_modes.md @@ -1,3 +1,4 @@ + # Failure Modes !!! 
Note diff --git a/docs/src/faq.md b/docs/src/faq.md index f8f92e2253..dd6d38c96a 100644 --- a/docs/src/faq.md +++ b/docs/src/faq.md @@ -1,3 +1,4 @@ + # Frequently Asked Questions (FAQ) ## Running PostgreSQL in Kubernetes diff --git a/docs/src/fencing.md b/docs/src/fencing.md index 70617cee7d..7195453cf7 100644 --- a/docs/src/fencing.md +++ b/docs/src/fencing.md @@ -1,3 +1,4 @@ + # Fencing Fencing in CloudNativePG is the ultimate process of protecting the diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index 1f42752e80..0753a46676 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -1,3 +1,4 @@ + # Image Catalog `ImageCatalog` and `ClusterImageCatalog` are essential resources that empower diff --git a/docs/src/index.md b/docs/src/index.md index acf3bc01fc..33df61a23e 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,3 +1,4 @@ + # CloudNativePG **CloudNativePG** is an open-source @@ -145,3 +146,6 @@ please consult the ["Before you start" section](before_you_start.md). *[Postgres, PostgreSQL and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/) are trademarks or registered trademarks of the PostgreSQL Community Association of Canada, and used with their permission.* + +The CloudNativePG documentation is licensed under a Creative Commons +Attribution 4.0 International License. diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index daf435e288..3525ca0560 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -1,3 +1,4 @@ + # Installation and upgrades ## Installation on Kubernetes diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index 69032b9923..b16d399389 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -1,3 +1,4 @@ + # Postgres instance manager CloudNativePG does not rely on an external tool for failover management. diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 7663c1b358..3aab8c4ee3 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -1,3 +1,4 @@ + # Kubectl Plugin CloudNativePG provides a plugin for `kubectl` to manage a cluster in Kubernetes. 
diff --git a/docs/src/kubernetes_upgrade.md b/docs/src/kubernetes_upgrade.md index e14d5b271a..8ad3771f40 100644 --- a/docs/src/kubernetes_upgrade.md +++ b/docs/src/kubernetes_upgrade.md @@ -1,3 +1,4 @@ + # Kubernetes Upgrade and Maintenance Maintaining an up-to-date Kubernetes cluster is crucial for ensuring optimal diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index b4d3a0a6bf..69c48d6c2b 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -1,3 +1,4 @@ + # Labels and annotations Resources in Kubernetes are organized in a flat structure, with no hierarchical diff --git a/docs/src/logging.md b/docs/src/logging.md index 69433688b4..4535200897 100644 --- a/docs/src/logging.md +++ b/docs/src/logging.md @@ -1,3 +1,4 @@ + # Logging CloudNativePG outputs logs in JSON format directly to standard output, including diff --git a/docs/src/logical_replication.md b/docs/src/logical_replication.md index 345dfe0cae..bb89f04270 100644 --- a/docs/src/logical_replication.md +++ b/docs/src/logical_replication.md @@ -1,3 +1,4 @@ + # Logical Replication PostgreSQL extends its replication capabilities beyond physical replication, diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md index 06efd3a610..eeb40511d6 100644 --- a/docs/src/monitoring.md +++ b/docs/src/monitoring.md @@ -1,3 +1,4 @@ + # Monitoring !!! Important diff --git a/docs/src/networking.md b/docs/src/networking.md index d64624d3c3..d46af9f267 100644 --- a/docs/src/networking.md +++ b/docs/src/networking.md @@ -1,3 +1,4 @@ + # Networking CloudNativePG assumes the underlying Kubernetes cluster has the required diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index 76a1249a80..3d1ca43f43 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -1,3 +1,4 @@ + # Operator capability levels These capabilities were implemented by CloudNativePG, diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index c5895b2e25..7da0b498cc 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -1,3 +1,4 @@ + # Operator configuration The operator for CloudNativePG is installed from a standard diff --git a/docs/src/postgis.md b/docs/src/postgis.md index 916f8bda1a..bcab823f6f 100644 --- a/docs/src/postgis.md +++ b/docs/src/postgis.md @@ -1,3 +1,4 @@ + # PostGIS [PostGIS](https://postgis.net/) is a very popular open source extension diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index a62dfd13a7..b4bd3eb388 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -1,3 +1,4 @@ + # PostgreSQL Configuration Users that are familiar with PostgreSQL are aware of the existence of the diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index d2e2702a9f..758427fe46 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -1,3 +1,4 @@ + # Preview Versions CloudNativePG candidate releases are pre-release versions made available for diff --git a/docs/src/quickstart.md b/docs/src/quickstart.md index a0b7b55e6c..d91fef3255 100644 --- a/docs/src/quickstart.md +++ b/docs/src/quickstart.md @@ -1,3 +1,4 @@ + # Quickstart This section guides you through testing a PostgreSQL cluster on your local machine by diff --git a/docs/src/recovery.md b/docs/src/recovery.md index e53db596ba..06715663e7 100644 --- a/docs/src/recovery.md +++ b/docs/src/recovery.md @@ -1,3 +1,4 @@ + # Recovery In PostgreSQL terminology, recovery is the 
process of starting a PostgreSQL diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md index 3bb0794089..0df52fe60b 100644 --- a/docs/src/release_notes.md +++ b/docs/src/release_notes.md @@ -1,3 +1,4 @@ + # Release notes History of user-visible changes for CloudNativePG, classified for each minor release. diff --git a/docs/src/release_notes/edb-cloud-native-postgresql.md b/docs/src/release_notes/edb-cloud-native-postgresql.md index 06f5ef6d7e..e68113133c 100644 --- a/docs/src/release_notes/edb-cloud-native-postgresql.md +++ b/docs/src/release_notes/edb-cloud-native-postgresql.md @@ -1,3 +1,4 @@ + # Release notes for 1.14.0 and earlier The first public release of CloudNativePG is version 1.15.0. Before that, diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md index b652171b17..315bfcc28d 100644 --- a/docs/src/release_notes/v1.24.md +++ b/docs/src/release_notes/v1.24.md @@ -1,3 +1,4 @@ + # Release notes for CloudNativePG 1.24 History of user-visible changes in the 1.24 minor release of CloudNativePG. diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md index d8201f899b..c53c343f08 100644 --- a/docs/src/release_notes/v1.25.md +++ b/docs/src/release_notes/v1.25.md @@ -1,3 +1,4 @@ + # Release notes for CloudNativePG 1.25 History of user-visible changes in the 1.25 minor release of CloudNativePG. diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 37949438dc..92409602a1 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -1,3 +1,4 @@ + # Release notes for CloudNativePG 1.26 History of user-visible changes in the 1.26 minor release of CloudNativePG. diff --git a/docs/src/replica_cluster.md b/docs/src/replica_cluster.md index 630fe60bd0..ba2b66dffc 100644 --- a/docs/src/replica_cluster.md +++ b/docs/src/replica_cluster.md @@ -1,3 +1,4 @@ + # Replica clusters A replica cluster is a CloudNativePG `Cluster` resource designed to diff --git a/docs/src/replication.md b/docs/src/replication.md index 3091333692..fc47f2784e 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -1,3 +1,4 @@ + # Replication Physical replication is one of the strengths of PostgreSQL and one of the diff --git a/docs/src/resource_management.md b/docs/src/resource_management.md index 483c2b9a3b..36e39b33b1 100644 --- a/docs/src/resource_management.md +++ b/docs/src/resource_management.md @@ -1,3 +1,4 @@ + # Resource management In a typical Kubernetes cluster, pods run with unlimited resources. 
By default, diff --git a/docs/src/rolling_update.md b/docs/src/rolling_update.md index 023cc78c11..e0c9c74764 100644 --- a/docs/src/rolling_update.md +++ b/docs/src/rolling_update.md @@ -1,3 +1,4 @@ + # Rolling Updates The operator allows changing the PostgreSQL version used in a cluster while diff --git a/docs/src/samples.md b/docs/src/samples.md index 823b4a0f6f..f4d5945cdc 100644 --- a/docs/src/samples.md +++ b/docs/src/samples.md @@ -1,3 +1,4 @@ + # Examples The examples show configuration files for setting up diff --git a/docs/src/scheduling.md b/docs/src/scheduling.md index 79ea6fcddd..225d3e0ff7 100644 --- a/docs/src/scheduling.md +++ b/docs/src/scheduling.md @@ -1,3 +1,4 @@ + # Scheduling Scheduling, in Kubernetes, is the process responsible for placing a new pod on diff --git a/docs/src/security.md b/docs/src/security.md index 47df5292f6..266324d9ad 100644 --- a/docs/src/security.md +++ b/docs/src/security.md @@ -1,3 +1,4 @@ + # Security This section contains information about security for CloudNativePG, diff --git a/docs/src/service_management.md b/docs/src/service_management.md index b02274e231..92c378ebc5 100644 --- a/docs/src/service_management.md +++ b/docs/src/service_management.md @@ -1,3 +1,4 @@ + # Service Management A PostgreSQL cluster should only be accessed via standard Kubernetes network diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index c7324443ea..b8e54a3150 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -1,3 +1,4 @@ + # Client TLS/SSL connections !!! Seealso "Certificates" diff --git a/docs/src/storage.md b/docs/src/storage.md index 5c56e6459c..ad99a2ca21 100644 --- a/docs/src/storage.md +++ b/docs/src/storage.md @@ -1,3 +1,4 @@ + # Storage Storage is the most critical component in a database workload. diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index f3d032cc29..53e2d09813 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -1,3 +1,4 @@ + # Supported releases diff --git a/docs/src/tablespaces.md b/docs/src/tablespaces.md index 0fcf775d4e..c7f47dc43d 100644 --- a/docs/src/tablespaces.md +++ b/docs/src/tablespaces.md @@ -1,3 +1,4 @@ + # Tablespaces A tablespace is a robust and widely embraced feature in database diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index 67cfe25ce8..f5e37bec96 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -1,3 +1,4 @@ + # Troubleshooting In this page, you can find some basic information on how to troubleshoot diff --git a/docs/src/use_cases.md b/docs/src/use_cases.md index 24b92584e7..d09f6383c6 100644 --- a/docs/src/use_cases.md +++ b/docs/src/use_cases.md @@ -1,3 +1,4 @@ + # Use cases CloudNativePG has been designed to work with applications diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md index 5216f96a53..3522d73060 100644 --- a/docs/src/wal_archiving.md +++ b/docs/src/wal_archiving.md @@ -1,3 +1,4 @@ + # WAL archiving WAL archiving is the process that feeds a [WAL archive](backup.md#wal-archive) From 869cb402927b4621b5aed855b58557dfa0bd2a29 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Tue, 25 Mar 2025 12:07:18 +0100 Subject: [PATCH 474/836] docs : Fix script's name in development_environment README (#7207) `./hack/setup.sh` is still used in some places. 
This PR changes `./hack/setup.sh` to `./hack/setup-cluster.sh` Signed-off-by: Pierrick Chovelon --- contribute/development_environment/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contribute/development_environment/README.md b/contribute/development_environment/README.md index 988ac98290..a5d613cf03 100644 --- a/contribute/development_environment/README.md +++ b/contribute/development_environment/README.md @@ -180,7 +180,7 @@ build and deploy: ```shell cd cloudnative-pg git checkout main -./hack/setup.sh create load deploy +./hack/setup-cluster.sh create load deploy ``` This will build the operator based on the `main` branch content, create a @@ -204,7 +204,7 @@ kubectl get deploy -n cnpg-system cnpg-controller-manager Now that your system has been validated, you can tear down the local cluster with: ```shell -./hack/setup.sh destroy +./hack/setup-cluster.sh destroy ``` Congratulations, you have a suitable development environment. You are now able From 337a264a0763dffe9477a474061286295448908d Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 25 Mar 2025 13:07:41 +0100 Subject: [PATCH 475/836] chore: refresh licenses directory (#7205) Signed-off-by: Jonathan Gonzalez V. --- .../blang/semver/v4}/LICENSE | 5 +- .../cloudnative-pg/barman-cloud/pkg/LICENSE | 201 ++++++++++++++++++ .../cloudnative-pg/machinery/pkg/LICENSE | 201 ++++++++++++++++++ .../github.com/fatih/color/LICENSE.md | 20 ++ .../github.com/fxamacker/cbor/v2/LICENSE | 21 ++ .../github.com/golang/groupcache/lru/LICENSE | 191 ----------------- .../golang/protobuf/{ => proto}/LICENSE | 0 .../github.com/imdario/mergo/LICENSE | 28 --- .../github.com/mattn/go-colorable/LICENSE | 21 ++ .../github.com/mattn/go-isatty/LICENSE | 9 + .../pkg/apis/monitoring/LICENSE | 3 +- .../stern/stern/stern}/LICENSE | 0 .../github.com/x448/float16/LICENSE | 22 ++ licenses/go-licenses/go.starlark.net/LICENSE | 29 --- licenses/go-licenses/golang.org/x/exp/LICENSE | 27 --- .../go-licenses/golang.org/x/oauth2/LICENSE | 4 +- .../golang.org/x/time/rate/LICENSE | 4 +- .../evanphx/json-patch.v4}/LICENSE | 0 .../forked/golang/{net => }/LICENSE | 0 .../forked/github.com/go-yaml/yaml/NOTICE | 13 -- .../go-yaml/yaml => yaml/goyaml.v3}/LICENSE | 0 .../yaml/goyaml.v3}/NOTICE | 0 22 files changed, 503 insertions(+), 296 deletions(-) rename licenses/go-licenses/{sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util => github.com/blang/semver/v4}/LICENSE (92%) create mode 100644 licenses/go-licenses/github.com/cloudnative-pg/barman-cloud/pkg/LICENSE create mode 100644 licenses/go-licenses/github.com/cloudnative-pg/machinery/pkg/LICENSE create mode 100644 licenses/go-licenses/github.com/fatih/color/LICENSE.md create mode 100644 licenses/go-licenses/github.com/fxamacker/cbor/v2/LICENSE delete mode 100644 licenses/go-licenses/github.com/golang/groupcache/lru/LICENSE rename licenses/go-licenses/github.com/golang/protobuf/{ => proto}/LICENSE (100%) delete mode 100644 licenses/go-licenses/github.com/imdario/mergo/LICENSE create mode 100644 licenses/go-licenses/github.com/mattn/go-colorable/LICENSE create mode 100644 licenses/go-licenses/github.com/mattn/go-isatty/LICENSE rename licenses/go-licenses/{gopkg.in/yaml.v2 => github.com/stern/stern/stern}/LICENSE (100%) create mode 100644 licenses/go-licenses/github.com/x448/float16/LICENSE delete mode 100644 licenses/go-licenses/go.starlark.net/LICENSE delete mode 100644 licenses/go-licenses/golang.org/x/exp/LICENSE rename 
licenses/go-licenses/{github.com/evanphx/json-patch => gopkg.in/evanphx/json-patch.v4}/LICENSE (100%) rename licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/{net => }/LICENSE (100%) delete mode 100644 licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE rename licenses/go-licenses/sigs.k8s.io/{kustomize/kyaml/internal/forked/github.com/go-yaml/yaml => yaml/goyaml.v3}/LICENSE (100%) rename licenses/go-licenses/{gopkg.in/yaml.v2 => sigs.k8s.io/yaml/goyaml.v3}/NOTICE (100%) diff --git a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE b/licenses/go-licenses/github.com/blang/semver/v4/LICENSE similarity index 92% rename from licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE rename to licenses/go-licenses/github.com/blang/semver/v4/LICENSE index 31f292dce5..5ba5c86fcb 100644 --- a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE +++ b/licenses/go-licenses/github.com/blang/semver/v4/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +The MIT License -Copyright (c) 2018 QRI, Inc. +Copyright (c) 2014 Benedikt Lang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/licenses/go-licenses/github.com/cloudnative-pg/barman-cloud/pkg/LICENSE b/licenses/go-licenses/github.com/cloudnative-pg/barman-cloud/pkg/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/licenses/go-licenses/github.com/cloudnative-pg/barman-cloud/pkg/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/licenses/go-licenses/github.com/cloudnative-pg/machinery/pkg/LICENSE b/licenses/go-licenses/github.com/cloudnative-pg/machinery/pkg/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/licenses/go-licenses/github.com/cloudnative-pg/machinery/pkg/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/licenses/go-licenses/github.com/fatih/color/LICENSE.md b/licenses/go-licenses/github.com/fatih/color/LICENSE.md new file mode 100644 index 0000000000..25fdaf639d --- /dev/null +++ b/licenses/go-licenses/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/go-licenses/github.com/fxamacker/cbor/v2/LICENSE b/licenses/go-licenses/github.com/fxamacker/cbor/v2/LICENSE new file mode 100644 index 0000000000..eaa8504921 --- /dev/null +++ b/licenses/go-licenses/github.com/fxamacker/cbor/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/licenses/go-licenses/github.com/golang/groupcache/lru/LICENSE b/licenses/go-licenses/github.com/golang/groupcache/lru/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/licenses/go-licenses/github.com/golang/groupcache/lru/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. 
- -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/licenses/go-licenses/github.com/golang/protobuf/LICENSE b/licenses/go-licenses/github.com/golang/protobuf/proto/LICENSE similarity index 100% rename from licenses/go-licenses/github.com/golang/protobuf/LICENSE rename to licenses/go-licenses/github.com/golang/protobuf/proto/LICENSE diff --git a/licenses/go-licenses/github.com/imdario/mergo/LICENSE b/licenses/go-licenses/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298d..0000000000 --- a/licenses/go-licenses/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/go-licenses/github.com/mattn/go-colorable/LICENSE b/licenses/go-licenses/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 0000000000..91b5cef30e --- /dev/null +++ b/licenses/go-licenses/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/licenses/go-licenses/github.com/mattn/go-isatty/LICENSE b/licenses/go-licenses/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 0000000000..65dc692b6b --- /dev/null +++ b/licenses/go-licenses/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/go-licenses/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE b/licenses/go-licenses/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE index e06d208186..74e6ec6963 100644 --- a/licenses/go-licenses/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE +++ b/licenses/go-licenses/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/LICENSE @@ -176,7 +176,7 @@ Apache License END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. - + To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include @@ -199,4 +199,3 @@ Apache License WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/licenses/go-licenses/gopkg.in/yaml.v2/LICENSE b/licenses/go-licenses/github.com/stern/stern/stern/LICENSE similarity index 100% rename from licenses/go-licenses/gopkg.in/yaml.v2/LICENSE rename to licenses/go-licenses/github.com/stern/stern/stern/LICENSE diff --git a/licenses/go-licenses/github.com/x448/float16/LICENSE b/licenses/go-licenses/github.com/x448/float16/LICENSE new file mode 100644 index 0000000000..bf6e357854 --- /dev/null +++ b/licenses/go-licenses/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/licenses/go-licenses/go.starlark.net/LICENSE b/licenses/go-licenses/go.starlark.net/LICENSE deleted file mode 100644 index a6609a1437..0000000000 --- a/licenses/go-licenses/go.starlark.net/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright (c) 2017 The Bazel Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/go-licenses/golang.org/x/exp/LICENSE b/licenses/go-licenses/golang.org/x/exp/LICENSE deleted file mode 100644 index 2a7cf70da6..0000000000 --- a/licenses/go-licenses/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/go-licenses/golang.org/x/oauth2/LICENSE b/licenses/go-licenses/golang.org/x/oauth2/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/licenses/go-licenses/golang.org/x/oauth2/LICENSE +++ b/licenses/go-licenses/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/licenses/go-licenses/golang.org/x/time/rate/LICENSE b/licenses/go-licenses/golang.org/x/time/rate/LICENSE index 6a66aea5ea..2a7cf70da6 100644 --- a/licenses/go-licenses/golang.org/x/time/rate/LICENSE +++ b/licenses/go-licenses/golang.org/x/time/rate/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
diff --git a/licenses/go-licenses/github.com/evanphx/json-patch/LICENSE b/licenses/go-licenses/gopkg.in/evanphx/json-patch.v4/LICENSE similarity index 100% rename from licenses/go-licenses/github.com/evanphx/json-patch/LICENSE rename to licenses/go-licenses/gopkg.in/evanphx/json-patch.v4/LICENSE diff --git a/licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/net/LICENSE b/licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/LICENSE similarity index 100% rename from licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/net/LICENSE rename to licenses/go-licenses/k8s.io/utils/internal/third_party/forked/golang/LICENSE diff --git a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE b/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE deleted file mode 100644 index 866d74a7ad..0000000000 --- a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE b/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v3/LICENSE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE rename to licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v3/LICENSE diff --git a/licenses/go-licenses/gopkg.in/yaml.v2/NOTICE b/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v3/NOTICE similarity index 100% rename from licenses/go-licenses/gopkg.in/yaml.v2/NOTICE rename to licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v3/NOTICE From a15ebb45e2b3ef66c924d03a51b020282c0e4145 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 14:25:55 +0100 Subject: [PATCH 476/836] chore(deps): update operator framework to v1.39.2 (main) (#7210) --- Makefile | 2 +- config/olm-scorecard/patches/basic.config.yaml | 2 +- config/olm-scorecard/patches/olm.config.yaml | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index edd48bf9bd..020fd7d953 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca GORELEASER_VERSION ?= v2.7.0 SPELLCHECK_VERSION ?= 0.47.0 WOKE_VERSION ?= 0.19.0 -OPERATOR_SDK_VERSION ?= v1.39.1 +OPERATOR_SDK_VERSION ?= v1.39.2 OPM_VERSION ?= v1.51.0 PREFLIGHT_VERSION ?= 1.12.1 OPENSHIFT_VERSIONS ?= v4.12-v4.18 diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml index b9ec7c6c82..cea781d2ee 100644 --- a/config/olm-scorecard/patches/basic.config.yaml +++ b/config/olm-scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.39.1 + image: 
quay.io/operator-framework/scorecard-test:v1.39.2 labels: suite: basic test: basic-check-spec-test diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml index 25d83f98f2..fdde08eb9a 100644 --- a/config/olm-scorecard/patches/olm.config.yaml +++ b/config/olm-scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.39.1 + image: quay.io/operator-framework/scorecard-test:v1.39.2 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.39.1 + image: quay.io/operator-framework/scorecard-test:v1.39.2 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.39.1 + image: quay.io/operator-framework/scorecard-test:v1.39.2 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.39.1 + image: quay.io/operator-framework/scorecard-test:v1.39.2 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.39.1 + image: quay.io/operator-framework/scorecard-test:v1.39.2 labels: suite: olm test: olm-status-descriptors-test From 6ce47f1dda1ff6cbc60eb5074d178f5098a92fd7 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 25 Mar 2025 16:41:30 +0100 Subject: [PATCH 477/836] chore(instance,reconcile): avoid logging the cluster while in debug mode (#7200) Signed-off-by: Armando Ruocco --- internal/management/controller/instance_controller.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 31d29b6e5c..a3b36a9ebb 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -88,7 +88,12 @@ func (r *InstanceReconciler) Reconcile( _ reconcile.Request, ) (reconcile.Result, error) { // set up a convenient contextLog object so we don't have to type request over and over again - contextLogger := log.FromContext(ctx) + contextLogger := log.FromContext(ctx). + WithValues( + "instance", r.instance.GetPodName(), + "cluster", r.instance.GetClusterName(), + "namespace", r.instance.GetNamespaceName(), + ) // if the context has already been cancelled, // trying to reconcile would just lead to misleading errors being reported @@ -111,7 +116,7 @@ func (r *InstanceReconciler) Reconcile( } // Print the Cluster - contextLogger.Debug("Reconciling Cluster", "cluster", cluster) + contextLogger.Debug("Reconciling Cluster") // Reconcile PostgreSQL instance parameters r.reconcileInstance(cluster)
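The patch above follows a general contextual-logging pattern: attach the identifying key/value pairs to the logger once, then emit terse messages, instead of serializing a large object (here, the whole Cluster) on every debug line. What follows is a minimal, self-contained sketch of that pattern; the stdr sink and every literal value are illustrative assumptions, not the operator's actual code.

package main

import (
	stdlog "log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// Base logger; the operator derives its own from the reconcile context.
	logger := stdr.New(stdlog.New(os.Stdout, "", stdlog.LstdFlags))

	// Enrich the logger once with stable identifiers...
	contextLogger := logger.WithValues(
		"instance", "cluster-example-1", // hypothetical values
		"cluster", "cluster-example",
		"namespace", "default",
	)

	// ...so every later record carries them, and a debug-level line
	// no longer needs to dump the whole object to stay traceable.
	contextLogger.Info("Reconciling Cluster")
}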
From 89b7d2f61209092ea3758770b9d8c603af3d899d Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 25 Mar 2025 23:06:33 +0100 Subject: [PATCH 478/836] doc(operator_conf.md): add missing ConfigMap fields (#7215) Signed-off-by: Armando Ruocco --- docs/src/operator_conf.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index 7da0b498cc..7686a87a8a 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -49,6 +49,8 @@ Name | Description `KUBERNETES_CLUSTER_DOMAIN` | Defines the domain suffix for service FQDNs within the Kubernetes cluster. If left unset, it defaults to "cluster.local". `MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters `MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters +`OPERATOR_IMAGE_NAME` | The name of the operator image used to bootstrap Pods. Defaults to the image specified during installation. +`POSTGRES_IMAGE_NAME` | The name of the PostgreSQL image used by default for new clusters. Defaults to the version specified in the operator. `PULL_SECRET_NAME` | Name of an additional pull secret to be defined in the operator's namespace and to be used to download images `STANDBY_TCP_USER_TIMEOUT` | Defines the [`TCP_USER_TIMEOUT` socket option](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-TCP-USER-TIMEOUT) for replication connections from standby instances to the primary. Default is 0 (system's default).
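The two rows added above extend the operator's configuration reference. As a minimal sketch of how such fields are set, assuming the ConfigMap name and namespace used in the project documentation (cnpg-controller-manager-config in cnpg-system) and purely illustrative image tags:

apiVersion: v1
kind: ConfigMap
metadata:
  name: cnpg-controller-manager-config
  namespace: cnpg-system
data:
  OPERATOR_IMAGE_NAME: ghcr.io/cloudnative-pg/cloudnative-pg:1.25.1  # illustrative tag
  POSTGRES_IMAGE_NAME: ghcr.io/cloudnative-pg/postgresql:17.4       # illustrative tag

After editing this ConfigMap, the operator deployment needs to be restarted for the new defaults to be picked up.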
From 5c51b9aa3942db2cae133884dcd17b56de56f65d Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Wed, 26 Mar 2025 07:36:12 +0100 Subject: [PATCH 479/836] feat(ip): assign copyright to the Linux Foundation (#7203) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adopt the new attribution information for contributions to CloudNativePG: Copyright © contributors to CloudNativePG, established as CloudNativePG a Series of LF Projects, LLC. Adopt the SPDX format for Apache License 2.0 Closes #7202 Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- .github/e2e-matrix-generator.py | 5 ++- .github/generate-test-artifacts.py | 5 ++- .github/postgres-versions-update.py | 5 ++- .github/report-failed-test.sh | 5 ++- Makefile | 5 ++- api/v1/backup_funcs.go | 5 ++- api/v1/backup_funcs_test.go | 5 ++- api/v1/backup_types.go | 5 ++- api/v1/base_funcs.go | 5 ++- api/v1/base_funcs_test.go | 5 ++- api/v1/base_types.go | 5 ++- api/v1/cluster_conditions.go | 5 ++- api/v1/cluster_defaults.go | 5 ++- api/v1/cluster_defaults_test.go | 5 ++- api/v1/cluster_funcs.go | 5 ++- api/v1/cluster_funcs_test.go | 5 ++- api/v1/cluster_types.go | 5 ++- api/v1/clusterimagecatalog_funcs.go | 5 ++- api/v1/clusterimagecatalog_types.go | 5 ++- api/v1/common_types.go | 5 ++- api/v1/database_funcs.go | 5 ++- api/v1/database_types.go | 5 ++- api/v1/doc.go | 5 ++- api/v1/generic_funcs.go | 5 ++- api/v1/genericimagecatalog_iface.go | 19 +++++++++++ api/v1/groupversion_info.go | 5 ++- api/v1/imagecatalog_funcs.go | 5 ++- api/v1/imagecatalog_funcs_test.go | 5 ++- api/v1/imagecatalog_types.go | 5 ++- api/v1/pooler_funcs.go | 5 ++- api/v1/pooler_funcs_test.go | 5 ++- api/v1/pooler_types.go | 5 ++- api/v1/publication_funcs.go | 5 ++- api/v1/publication_types.go | 5 ++- api/v1/scheduledbackup_funcs.go | 5 ++- api/v1/scheduledbackup_funcs_test.go | 5 ++- api/v1/scheduledbackup_types.go | 5 ++- api/v1/subscription_funcs.go | 5 ++- api/v1/subscription_types.go | 5 ++- api/v1/suite_test.go | 5 ++- api/v1/zz_api_repo_funcs_to_copy.go | 5 ++- api/v1/zz_generated.deepcopy.go | 5 ++- cmd/kubectl-cnpg/main.go | 5 ++- cmd/manager/main.go | 5 ++- docker-bake.hcl | 5 ++- docs/mkdocs.yml | 1 + .../samples/monitoring/kube-stack-config.yaml | 9 +++-- hack/boilerplate.go.txt | 5 ++- hack/e2e/run-e2e-kind.sh | 6 +++- hack/e2e/run-e2e-local.sh | 5 ++- hack/e2e/run-e2e-ocp.sh | 5 ++- hack/e2e/run-e2e.sh | 5 ++- hack/install-cnpg-plugin.sh | 6 +++- hack/release.sh | 22 ++++++++----- hack/setup-cluster.sh | 5 ++- hack/show-release-diffs.sh | 33 ++++++++++--------- internal/cmd/manager/backup/cmd.go | 5 ++- internal/cmd/manager/bootstrap/cmd.go | 5 ++- internal/cmd/manager/controller/cmd.go | 5 ++- internal/cmd/manager/controller/controller.go | 5 ++- .../cmd/manager/debug/architectures/cmd.go | 5 ++- internal/cmd/manager/debug/cmd.go | 5 ++- internal/cmd/manager/instance/cmd.go | 5 ++- internal/cmd/manager/instance/initdb/cmd.go | 5 ++- internal/cmd/manager/instance/join/cmd.go | 5 ++- .../cmd/manager/instance/pgbasebackup/cmd.go | 5 ++- internal/cmd/manager/instance/restore/cmd.go | 5 ++- internal/cmd/manager/instance/restore/doc.go | 5 ++- .../cmd/manager/instance/restore/restore.go | 5 ++- .../manager/instance/restoresnapshot/cmd.go | 5 ++- .../manager/instance/restoresnapshot/doc.go | 5 ++- internal/cmd/manager/instance/run/cmd.go | 5 ++- internal/cmd/manager/instance/run/errors.go | 5 ++- .../cmd/manager/instance/run/errors_test.go | 5 ++- .../cmd/manager/instance/run/lifecycle/doc.go | 5 ++- .../instance/run/lifecycle/lifecycle.go | 5 ++- .../manager/instance/run/lifecycle/reaper.go | 5 ++- .../cmd/manager/instance/run/lifecycle/run.go | 5 ++- .../cmd/manager/instance/run/suite_test.go | 5 ++- internal/cmd/manager/instance/status/cmd.go | 5 ++- internal/cmd/manager/pgbouncer/cmd.go | 5 ++- internal/cmd/manager/pgbouncer/run/cmd.go | 5 ++- internal/cmd/manager/pgbouncer/run/log.go | 5 ++- .../cmd/manager/pgbouncer/run/log_test.go | 5 ++-
.../cmd/manager/pgbouncer/run/suite_test.go | 5 ++- internal/cmd/manager/show/cmd.go | 5 ++- .../cmd/manager/show/walarchivequeue/cmd.go | 5 ++- internal/cmd/manager/walarchive/cmd.go | 5 ++- internal/cmd/manager/walrestore/cmd.go | 5 ++- internal/cmd/manager/walrestore/cmd_test.go | 5 ++- internal/cmd/manager/walrestore/suite_test.go | 5 ++- internal/cmd/plugin/backup/cmd.go | 5 ++- internal/cmd/plugin/backup/doc.go | 5 ++- internal/cmd/plugin/backup/parameters.go | 5 ++- internal/cmd/plugin/backup/parameters_test.go | 5 ++- internal/cmd/plugin/backup/suite_test.go | 5 ++- .../cmd/plugin/certificate/certificate.go | 5 ++- internal/cmd/plugin/certificate/cmd.go | 5 ++- internal/cmd/plugin/color.go | 5 ++- internal/cmd/plugin/color_test.go | 5 ++- internal/cmd/plugin/destroy/cmd.go | 5 ++- internal/cmd/plugin/destroy/destroy.go | 5 ++- internal/cmd/plugin/fence/cmd.go | 5 ++- internal/cmd/plugin/fence/fence.go | 5 ++- internal/cmd/plugin/fio/cmd.go | 5 ++- internal/cmd/plugin/fio/doc.go | 5 ++- internal/cmd/plugin/fio/fio.go | 5 ++- internal/cmd/plugin/hibernate/cmd.go | 5 ++- internal/cmd/plugin/hibernate/cmd_test.go | 19 +++++++++++ internal/cmd/plugin/hibernate/doc.go | 5 ++- internal/cmd/plugin/hibernate/suite_test.go | 19 +++++++++++ internal/cmd/plugin/install/cmd.go | 5 ++- internal/cmd/plugin/install/doc.go | 5 ++- internal/cmd/plugin/install/generate.go | 5 ++- internal/cmd/plugin/logical/database.go | 5 ++- internal/cmd/plugin/logical/doc.go | 5 ++- .../cmd/plugin/logical/externalcluster.go | 5 ++- internal/cmd/plugin/logical/psql.go | 5 ++- .../cmd/plugin/logical/publication/cmd.go | 5 ++- .../plugin/logical/publication/create/cmd.go | 5 ++- .../plugin/logical/publication/create/doc.go | 5 ++- .../logical/publication/create/publication.go | 5 ++- .../publication/create/publication_test.go | 5 ++- .../logical/publication/create/suite_test.go | 5 ++- .../cmd/plugin/logical/publication/doc.go | 5 ++- .../plugin/logical/publication/drop/cmd.go | 5 ++- .../plugin/logical/publication/drop/doc.go | 5 ++- .../cmd/plugin/logical/subscription/cmd.go | 5 ++- .../plugin/logical/subscription/create/cmd.go | 5 ++- .../plugin/logical/subscription/create/doc.go | 5 ++- .../subscription/create/subscription.go | 5 ++- .../cmd/plugin/logical/subscription/doc.go | 5 ++- .../plugin/logical/subscription/drop/cmd.go | 5 ++- .../plugin/logical/subscription/drop/doc.go | 5 ++- .../logical/subscription/syncsequences/cmd.go | 5 ++- .../logical/subscription/syncsequences/doc.go | 5 ++- .../logical/subscription/syncsequences/get.go | 5 ++- .../subscription/syncsequences/update.go | 5 ++- internal/cmd/plugin/logs/cluster.go | 5 ++- internal/cmd/plugin/logs/cluster_logs.go | 5 ++- internal/cmd/plugin/logs/cluster_logs_test.go | 5 ++- internal/cmd/plugin/logs/cluster_test.go | 5 ++- internal/cmd/plugin/logs/cmd.go | 5 ++- internal/cmd/plugin/logs/cmd_test.go | 5 ++- internal/cmd/plugin/logs/doc.go | 5 ++- internal/cmd/plugin/logs/pretty/doc.go | 5 ++- internal/cmd/plugin/logs/pretty/log_level.go | 5 ++- internal/cmd/plugin/logs/pretty/log_record.go | 5 ++- internal/cmd/plugin/logs/pretty/pretty.go | 5 ++- internal/cmd/plugin/logs/suite_test.go | 5 ++- internal/cmd/plugin/maintenance/cmd.go | 5 ++- .../cmd/plugin/maintenance/maintenance.go | 5 ++- internal/cmd/plugin/output.go | 5 ++- internal/cmd/plugin/pgadmin/cmd.go | 5 ++- internal/cmd/plugin/pgadmin/doc.go | 5 ++- internal/cmd/plugin/pgadmin/pgadmin.go | 5 ++- internal/cmd/plugin/pgadmin/pgadmin_test.go | 5 ++- internal/cmd/plugin/pgadmin/suite_test.go | 5 
++- internal/cmd/plugin/pgbench/cmd.go | 5 ++- internal/cmd/plugin/pgbench/cmd_test.go | 5 ++- internal/cmd/plugin/pgbench/doc.go | 5 ++- internal/cmd/plugin/pgbench/pgbench.go | 5 ++- internal/cmd/plugin/pgbench/suite_test.go | 5 ++- internal/cmd/plugin/plugin.go | 5 ++- internal/cmd/plugin/plugin_test.go | 5 ++- internal/cmd/plugin/printer.go | 5 ++- internal/cmd/plugin/promote/cmd.go | 5 ++- internal/cmd/plugin/promote/promote.go | 5 ++- internal/cmd/plugin/promote/promote_test.go | 19 +++++++++++ internal/cmd/plugin/promote/suite_test.go | 5 ++- internal/cmd/plugin/psql/cmd.go | 5 ++- internal/cmd/plugin/psql/doc.go | 5 ++- internal/cmd/plugin/psql/psql.go | 5 ++- internal/cmd/plugin/psql/psql_test.go | 5 ++- internal/cmd/plugin/psql/suite_test.go | 5 ++- internal/cmd/plugin/reload/cmd.go | 5 ++- internal/cmd/plugin/reload/reload.go | 5 ++- internal/cmd/plugin/report/cluster.go | 5 ++- internal/cmd/plugin/report/cluster_report.go | 5 ++- internal/cmd/plugin/report/cmd.go | 5 ++- internal/cmd/plugin/report/logs.go | 5 ++- internal/cmd/plugin/report/olm.go | 5 ++- internal/cmd/plugin/report/operator.go | 5 ++- .../cmd/plugin/report/operator_objects.go | 5 ++- internal/cmd/plugin/report/operator_report.go | 5 ++- internal/cmd/plugin/report/operator_utils.go | 5 ++- internal/cmd/plugin/report/output.go | 5 ++- internal/cmd/plugin/report/redactors.go | 5 ++- internal/cmd/plugin/report/redactors_test.go | 5 ++- internal/cmd/plugin/report/suite_test.go | 5 ++- internal/cmd/plugin/restart/cmd.go | 5 ++- internal/cmd/plugin/restart/restart.go | 5 ++- internal/cmd/plugin/snapshot/cmd.go | 5 ++- internal/cmd/plugin/snapshot/doc.go | 5 ++- internal/cmd/plugin/status/cmd.go | 5 ++- internal/cmd/plugin/status/doc.go | 5 ++- internal/cmd/plugin/status/status.go | 5 ++- internal/cmd/plugin/status/status_test.go | 5 ++- internal/cmd/plugin/status/suite_test.go | 5 ++- internal/cmd/plugin/suite_test.go | 5 ++- internal/cmd/versions/cmd.go | 5 ++- internal/cnpi/plugin/client/backup.go | 5 ++- internal/cnpi/plugin/client/client.go | 7 ++-- internal/cnpi/plugin/client/cluster.go | 5 ++- internal/cnpi/plugin/client/cluster_test.go | 5 ++- internal/cnpi/plugin/client/contracts.go | 5 ++- internal/cnpi/plugin/client/doc.go | 5 ++- internal/cnpi/plugin/client/lifecycle.go | 5 ++- internal/cnpi/plugin/client/lifecycle_test.go | 5 ++- internal/cnpi/plugin/client/reconciler.go | 5 ++- internal/cnpi/plugin/client/restore_job.go | 5 ++- internal/cnpi/plugin/client/suite_test.go | 5 ++- internal/cnpi/plugin/client/wal.go | 5 ++- internal/cnpi/plugin/connection/connection.go | 5 ++- internal/cnpi/plugin/connection/doc.go | 5 ++- internal/cnpi/plugin/connection/metadata.go | 5 ++- internal/cnpi/plugin/connection/remote.go | 5 ++- internal/cnpi/plugin/connection/unix.go | 5 ++- internal/cnpi/plugin/doc.go | 5 ++- internal/cnpi/plugin/mapping.go | 5 ++- internal/cnpi/plugin/operatorclient/client.go | 5 ++- .../cnpi/plugin/operatorclient/client_test.go | 5 ++- internal/cnpi/plugin/operatorclient/doc.go | 5 ++- .../cnpi/plugin/operatorclient/suite_test.go | 5 ++- internal/cnpi/plugin/repository/connection.go | 5 ++- internal/cnpi/plugin/repository/doc.go | 5 ++- internal/cnpi/plugin/repository/errors.go | 5 ++- internal/cnpi/plugin/repository/setup.go | 5 ++- internal/cnpi/plugin/repository/setup_test.go | 19 +++++++++++ internal/cnpi/plugin/repository/suite_test.go | 19 +++++++++++ internal/configuration/configuration.go | 5 ++- internal/configuration/configuration_test.go | 5 ++- internal/configuration/suite_test.go | 5 
++- internal/controller/backup_controller.go | 5 ++- internal/controller/backup_controller_test.go | 5 ++- internal/controller/backup_predicates.go | 5 ++- internal/controller/backup_predicates_test.go | 5 ++- internal/controller/cluster_cleanup.go | 5 ++- internal/controller/cluster_cleanup_test.go | 5 ++- internal/controller/cluster_controller.go | 5 ++- .../controller/cluster_controller_test.go | 5 ++- internal/controller/cluster_create.go | 5 ++- internal/controller/cluster_create_test.go | 5 ++- internal/controller/cluster_delete.go | 5 ++- internal/controller/cluster_delete_test.go | 5 ++- internal/controller/cluster_image.go | 7 ++-- internal/controller/cluster_pki.go | 5 ++- internal/controller/cluster_plugins.go | 5 ++- internal/controller/cluster_predicates.go | 5 ++- internal/controller/cluster_restore.go | 5 ++- internal/controller/cluster_restore_test.go | 5 ++- internal/controller/cluster_scale.go | 5 ++- internal/controller/cluster_scale_test.go | 5 ++- internal/controller/cluster_status.go | 5 ++- internal/controller/cluster_status_test.go | 5 ++- internal/controller/cluster_upgrade.go | 5 ++- internal/controller/cluster_upgrade_test.go | 5 ++- internal/controller/finalizers_delete.go | 5 ++- internal/controller/finalizers_delete_test.go | 5 ++- internal/controller/plugin_controller.go | 5 ++- internal/controller/plugin_predicates.go | 5 ++- internal/controller/plugins.go | 5 ++- internal/controller/plugins_test.go | 5 ++- internal/controller/pooler_controller.go | 5 ++- internal/controller/pooler_controller_test.go | 5 ++- internal/controller/pooler_predicates.go | 5 ++- internal/controller/pooler_predicates_test.go | 5 ++- internal/controller/pooler_resources.go | 5 ++- internal/controller/pooler_resources_test.go | 5 ++- internal/controller/pooler_status.go | 5 ++- internal/controller/pooler_status_test.go | 5 ++- internal/controller/pooler_update.go | 5 ++- internal/controller/pooler_update_test.go | 5 ++- internal/controller/replicas.go | 5 ++- internal/controller/replicas_test.go | 5 ++- internal/controller/rollout/doc.go | 5 ++- internal/controller/rollout/rollout.go | 5 ++- internal/controller/rollout/rollout_test.go | 5 ++- internal/controller/rollout/suite_test.go | 5 ++- .../controller/scheduledbackup_controller.go | 5 ++- internal/controller/suite_test.go | 5 ++- internal/management/cache/cache.go | 5 ++- internal/management/cache/doc.go | 5 ++- internal/management/cache/error.go | 5 ++- internal/management/cache/keys.go | 5 ++- internal/management/controller/cache.go | 5 ++- internal/management/controller/common.go | 5 ++- internal/management/controller/common_test.go | 7 ++-- .../controller/database_controller.go | 5 ++- .../controller/database_controller_sql.go | 5 ++- .../database_controller_sql_test.go | 7 ++-- .../controller/database_controller_test.go | 5 ++- .../management/controller/database_objects.go | 5 ++- .../controller/externalservers/doc.go | 5 ++- .../controller/externalservers/manager.go | 5 ++- .../controller/externalservers/reconciler.go | 5 ++- internal/management/controller/finalizers.go | 19 +++++++++++ .../controller/instance_controller.go | 5 ++- .../management/controller/instance_startup.go | 5 ++- .../management/controller/instance_token.go | 5 ++- internal/management/controller/manager.go | 5 ++- .../controller/publication_controller.go | 5 ++- .../controller/publication_controller_sql.go | 5 ++- .../publication_controller_sql_test.go | 5 ++- .../controller/publication_controller_test.go | 5 ++- 
.../management/controller/roles/contract.go | 5 ++- .../controller/roles/contract_test.go | 5 ++- internal/management/controller/roles/doc.go | 5 ++- .../management/controller/roles/postgres.go | 5 ++- .../controller/roles/postgres_errors.go | 5 ++- .../controller/roles/postgres_test.go | 5 ++- .../management/controller/roles/reconciler.go | 5 ++- .../controller/roles/reconciler_test.go | 5 ++- internal/management/controller/roles/roles.go | 5 ++- .../management/controller/roles/runnable.go | 5 ++- .../controller/roles/runnable_test.go | 5 ++- .../management/controller/roles/suite_test.go | 5 ++- .../controller/slots/infrastructure/doc.go | 5 ++- .../slots/infrastructure/postgresmanager.go | 5 ++- .../infrastructure/postgresmanager_test.go | 5 ++- .../slots/infrastructure/replicationslot.go | 5 ++- .../infrastructure/replicationslot_test.go | 5 ++- .../slots/infrastructure/suite_test.go | 5 ++- .../controller/slots/reconciler/doc.go | 5 ++- .../slots/reconciler/replicationslot.go | 5 ++- .../slots/reconciler/replicationslot_test.go | 5 ++- .../controller/slots/reconciler/suite_test.go | 5 ++- .../management/controller/slots/runner/doc.go | 5 ++- .../controller/slots/runner/runner.go | 5 ++- .../controller/slots/runner/runner_test.go | 5 ++- .../controller/slots/runner/suite_test.go | 5 ++- .../controller/subscription_controller.go | 5 ++- .../controller/subscription_controller_sql.go | 5 ++- .../subscription_controller_sql_test.go | 5 ++- .../subscription_controller_test.go | 5 ++- internal/management/controller/suite_test.go | 5 ++- .../controller/tablespaces/actions.go | 5 ++- .../controller/tablespaces/controller_test.go | 7 ++-- .../management/controller/tablespaces/doc.go | 5 ++- .../tablespaces/infrastructure/contract.go | 5 ++- .../tablespaces/infrastructure/doc.go | 5 ++- .../tablespaces/infrastructure/postgres.go | 5 ++- .../infrastructure/postgres_test.go | 5 ++- .../tablespaces/infrastructure/suite_test.go | 5 ++- .../controller/tablespaces/manager.go | 5 ++- .../controller/tablespaces/reconciler.go | 5 ++- .../controller/tablespaces/storage.go | 5 ++- .../controller/tablespaces/suite_test.go | 5 ++- .../controller/tablespaces/tablespaces.go | 5 ++- internal/management/istio/doc.go | 5 ++- internal/management/istio/istio.go | 5 ++- internal/management/linkerd/doc.go | 5 ++- internal/management/linkerd/linkerd.go | 5 ++- internal/management/utils/secrets.go | 5 ++- internal/management/utils/secrets_test.go | 5 ++- internal/management/utils/suite_test.go | 5 ++- .../management/controller/instance.go | 5 ++- .../management/controller/instance_test.go | 5 ++- .../management/controller/manager.go | 5 ++- .../management/controller/refresh.go | 5 ++- .../management/controller/refresh_test.go | 5 ++- .../management/controller/secrets.go | 5 ++- .../management/controller/secrets_test.go | 5 ++- .../management/controller/suite_test.go | 5 ++- internal/plugin/resources/doc.go | 5 ++- internal/plugin/resources/instance.go | 5 ++- internal/scheme/doc.go | 5 ++- internal/scheme/scheme.go | 5 ++- internal/tools/tools.go | 5 ++- internal/webhook/v1/backup_webhook.go | 5 ++- internal/webhook/v1/backup_webhook_test.go | 5 ++- internal/webhook/v1/cluster_webhook.go | 5 ++- internal/webhook/v1/cluster_webhook_test.go | 5 ++- internal/webhook/v1/database_webhook.go | 5 ++- internal/webhook/v1/database_webhook_test.go | 5 ++- internal/webhook/v1/doc.go | 5 ++- internal/webhook/v1/pooler_webhook.go | 5 ++- internal/webhook/v1/pooler_webhook_test.go | 5 ++- .../webhook/v1/scheduledbackup_webhook.go | 5 
++- .../v1/scheduledbackup_webhook_test.go | 5 ++- internal/webhook/v1/suite_test.go | 5 ++- pkg/certs/certs.go | 5 ++- pkg/certs/certs_test.go | 5 ++- pkg/certs/k8s.go | 5 ++- pkg/certs/k8s_test.go | 5 ++- pkg/certs/operator_deployment.go | 5 ++- pkg/certs/operator_deployment_test.go | 5 ++- pkg/certs/suite_test.go | 5 ++- pkg/certs/tls.go | 5 ++- pkg/certs/tls_test.go | 5 ++- pkg/concurrency/doc.go | 5 ++- pkg/concurrency/executed.go | 5 ++- pkg/concurrency/executed_test.go | 5 ++- pkg/concurrency/suite_test.go | 5 ++- pkg/configfile/configfile.go | 5 ++- pkg/configfile/configfile_test.go | 5 ++- pkg/configfile/connection_string.go | 5 ++- pkg/configfile/connection_string_test.go | 5 ++- pkg/configfile/suite_test.go | 5 ++- pkg/configparser/configparser.go | 5 ++- pkg/configparser/configparser_test.go | 5 ++- pkg/configparser/suite_test.go | 5 ++- pkg/executablehash/executablehash.go | 5 ++- pkg/executablehash/executablehash_test.go | 5 ++- pkg/executablehash/suite_test.go | 5 ++- pkg/management/client.go | 5 ++- pkg/management/external/doc.go | 5 ++- pkg/management/external/external.go | 5 ++- .../external/internal/pgpass/conninfo.go | 5 ++- .../external/internal/pgpass/conninfo_test.go | 5 ++- .../external/internal/pgpass/doc.go | 5 ++- .../external/internal/pgpass/pgpass.go | 5 ++- .../external/internal/pgpass/pgpass_test.go | 5 ++- .../external/internal/pgpass/suite_test.go | 5 ++- pkg/management/external/utils.go | 5 ++- pkg/management/logtest/logtest.go | 5 ++- pkg/management/pgbouncer/config/config.go | 5 ++- pkg/management/pgbouncer/config/data.go | 5 ++- pkg/management/pgbouncer/config/secrets.go | 5 ++- .../pgbouncer/config/secrets_test.go | 5 ++- pkg/management/pgbouncer/config/strings.go | 5 ++- .../pgbouncer/config/strings_test.go | 5 ++- pkg/management/pgbouncer/config/suite_test.go | 5 ++- .../pgbouncer/metricsserver/lists.go | 5 ++- .../pgbouncer/metricsserver/metricsserver.go | 5 ++- .../metricsserver/metricsserver_test.go | 5 ++- .../metricsserver/pgbouncer_collector.go | 5 ++- .../pgbouncer/metricsserver/pools.go | 5 ++- .../pgbouncer/metricsserver/pools_test.go | 5 ++- .../pgbouncer/metricsserver/stats.go | 5 ++- .../pgbouncer/metricsserver/stats_test.go | 5 ++- .../pgbouncer/metricsserver/suite_test.go | 5 ++- pkg/management/postgres/archiver/archiver.go | 5 ++- pkg/management/postgres/archiver/doc.go | 5 ++- pkg/management/postgres/backup.go | 5 ++- pkg/management/postgres/backup_test.go | 5 ++- pkg/management/postgres/configuration.go | 5 ++- pkg/management/postgres/configuration_test.go | 5 ++- pkg/management/postgres/conninfo.go | 5 ++- .../postgres/constants/constants.go | 5 ++- pkg/management/postgres/consts.go | 5 ++- pkg/management/postgres/ident.go | 5 ++- pkg/management/postgres/initdb.go | 5 ++- pkg/management/postgres/initdb_test.go | 5 ++- pkg/management/postgres/instance.go | 5 ++- pkg/management/postgres/instance_replica.go | 5 ++- pkg/management/postgres/instance_test.go | 5 ++- pkg/management/postgres/join.go | 5 ++- .../postgres/logicalimport/constants.go | 5 ++- .../postgres/logicalimport/database.go | 5 ++- .../postgres/logicalimport/database_test.go | 5 ++- pkg/management/postgres/logicalimport/doc.go | 5 ++- .../postgres/logicalimport/microservice.go | 5 ++- .../postgres/logicalimport/monolith.go | 5 ++- pkg/management/postgres/logicalimport/role.go | 5 ++- .../postgres/logicalimport/role_test.go | 5 ++- .../postgres/logicalimport/roleinheritance.go | 5 ++- .../logicalimport/roleinheritance_test.go | 5 ++- 
.../postgres/logicalimport/suite_test.go | 5 ++- .../postgres/logpipe/CSVReadWriter.go | 5 ++- pkg/management/postgres/logpipe/error.go | 5 ++- .../postgres/logpipe/linelogpipe.go | 5 ++- .../postgres/logpipe/loggingCollector.go | 5 ++- .../postgres/logpipe/loggingCollector_test.go | 5 ++- pkg/management/postgres/logpipe/logpipe.go | 5 ++- .../postgres/logpipe/logpipe_test.go | 5 ++- pkg/management/postgres/logpipe/pgaudit.go | 5 ++- .../postgres/logpipe/pgaudit_test.go | 5 ++- pkg/management/postgres/logpipe/record.go | 5 ++- pkg/management/postgres/logpipe/suite_test.go | 5 ++- pkg/management/postgres/logpipe/writer.go | 5 ++- pkg/management/postgres/metrics/collector.go | 5 ++- .../postgres/metrics/collector_test.go | 5 ++- .../postgres/metrics/histogram/histogram.go | 5 ++- .../postgres/metrics/mapping_test.go | 5 ++- pkg/management/postgres/metrics/mappings.go | 5 ++- pkg/management/postgres/metrics/parser.go | 5 ++- .../postgres/metrics/parser_test.go | 5 ++- pkg/management/postgres/metrics/suite_test.go | 5 ++- pkg/management/postgres/pidfile.go | 5 ++- pkg/management/postgres/pidfile_test.go | 5 ++- pkg/management/postgres/pool/connection.go | 5 ++- pkg/management/postgres/pool/pool.go | 5 ++- pkg/management/postgres/pool/pool_test.go | 5 ++- pkg/management/postgres/pool/profiles.go | 5 ++- pkg/management/postgres/pool/suite_test.go | 5 ++- pkg/management/postgres/probes.go | 5 ++- pkg/management/postgres/probes_test.go | 5 ++- pkg/management/postgres/promote.go | 5 ++- pkg/management/postgres/restore.go | 5 ++- pkg/management/postgres/restore_test.go | 5 ++- pkg/management/postgres/suite_test.go | 5 ++- pkg/management/postgres/utils/doc.go | 5 ++- pkg/management/postgres/utils/roles.go | 5 ++- pkg/management/postgres/utils/roles_test.go | 5 ++- pkg/management/postgres/utils/suite_test.go | 5 ++- pkg/management/postgres/utils/utils.go | 5 ++- pkg/management/postgres/utils/version.go | 5 ++- pkg/management/postgres/utils/version_test.go | 5 ++- pkg/management/postgres/wal.go | 5 ++- pkg/management/postgres/wal_test.go | 5 ++- .../postgres/webserver/backup_connection.go | 5 ++- .../webserver/client/common/client.go | 5 ++- .../postgres/webserver/client/common/doc.go | 5 ++- .../postgres/webserver/client/local/cache.go | 5 ++- .../webserver/client/local/cluster.go | 5 ++- .../postgres/webserver/client/local/doc.go | 5 ++- .../postgres/webserver/client/local/local.go | 5 ++- .../webserver/client/remote/backup.go | 5 ++- .../postgres/webserver/client/remote/doc.go | 5 ++- .../webserver/client/remote/instance.go | 5 ++- .../webserver/client/remote/remote.go | 5 ++- .../webserver/client/remote/request.go | 5 ++- pkg/management/postgres/webserver/doc.go | 5 ++- pkg/management/postgres/webserver/local.go | 5 ++- .../postgres/webserver/metricserver/doc.go | 5 ++- .../webserver/metricserver/metrics.go | 5 ++- .../webserver/metricserver/pg_collector.go | 5 ++- .../metricserver/pg_collector_test.go | 5 ++- .../webserver/metricserver/suite_test.go | 5 ++- .../postgres/webserver/metricserver/wal.go | 5 ++- .../webserver/metricserver/wal_test.go | 5 ++- .../postgres/webserver/plugin_backup.go | 5 ++- .../postgres/webserver/probes/checker.go | 5 ++- .../postgres/webserver/probes/doc.go | 5 ++- .../postgres/webserver/probes/isready.go | 5 ++- .../postgres/webserver/probes/query.go | 5 ++- .../postgres/webserver/probes/streaming.go | 5 ++- pkg/management/postgres/webserver/remote.go | 5 ++- .../postgres/webserver/webserver.go | 5 ++- pkg/management/upgrade/suite_test.go | 5 ++- 
pkg/management/upgrade/upgrade.go | 5 ++- pkg/management/upgrade/upgrade_test.go | 5 ++- pkg/management/url/url.go | 5 ++- pkg/multicache/multinamespaced_cache.go | 5 ++- pkg/podlogs/cluster_writer.go | 5 ++- pkg/podlogs/cluster_writer_test.go | 5 ++- pkg/podlogs/suite_test.go | 5 ++- pkg/podlogs/writer.go | 5 ++- pkg/podlogs/writer_test.go | 5 ++- pkg/podspec/builder.go | 5 ++- pkg/podspec/builder_test.go | 5 ++- pkg/podspec/suite_test.go | 5 ++- pkg/postgres/booleans.go | 5 ++- pkg/postgres/booleans_test.go | 5 ++- pkg/postgres/configuration.go | 5 ++- pkg/postgres/configuration_test.go | 5 ++- pkg/postgres/identifier.go | 5 ++- pkg/postgres/identifier_test.go | 5 ++- pkg/postgres/replication/doc.go | 5 ++- pkg/postgres/replication/explicit.go | 5 ++- pkg/postgres/replication/explicit_test.go | 5 ++- pkg/postgres/replication/legacy.go | 5 ++- pkg/postgres/replication/legacy_test.go | 5 ++- pkg/postgres/replication/replication.go | 5 ++- pkg/postgres/replication/suite_test.go | 5 ++- pkg/postgres/replication/utils.go | 5 ++- pkg/postgres/roles.go | 5 ++- pkg/postgres/roles_test.go | 5 ++- pkg/postgres/status.go | 5 ++- pkg/postgres/status_test.go | 5 ++- pkg/postgres/suite_test.go | 5 ++- pkg/postgres/wal.go | 5 ++- pkg/postgres/wal_test.go | 5 ++- pkg/promotiontoken/doc.go | 5 ++- pkg/promotiontoken/promotion_token.go | 5 ++- pkg/promotiontoken/promotion_token_test.go | 7 ++-- pkg/promotiontoken/suite_test.go | 5 ++- .../backup/volumesnapshot/catalog.go | 5 ++- pkg/reconciler/backup/volumesnapshot/doc.go | 5 ++- .../backup/volumesnapshot/errors.go | 5 ++- .../backup/volumesnapshot/errors_test.go | 5 ++- .../backup/volumesnapshot/offline.go | 5 ++- .../backup/volumesnapshot/offline_test.go | 5 ++- .../backup/volumesnapshot/online.go | 5 ++- .../backup/volumesnapshot/online_test.go | 5 ++- .../backup/volumesnapshot/reconciler.go | 5 ++- .../backup/volumesnapshot/reconciler_test.go | 5 ++- .../backup/volumesnapshot/resources.go | 5 ++- .../backup/volumesnapshot/resources_test.go | 5 ++- .../backup/volumesnapshot/suite_test.go | 5 ++- pkg/reconciler/hibernation/doc.go | 5 ++- pkg/reconciler/hibernation/reconciler.go | 5 ++- pkg/reconciler/hibernation/reconciler_test.go | 5 ++- pkg/reconciler/hibernation/status.go | 5 ++- pkg/reconciler/hibernation/status_test.go | 5 ++- pkg/reconciler/hibernation/suite_test.go | 5 ++- pkg/reconciler/instance/doc.go | 5 ++- pkg/reconciler/instance/metadata.go | 5 ++- pkg/reconciler/instance/metadata_test.go | 5 ++- pkg/reconciler/instance/suite_test.go | 5 ++- pkg/reconciler/persistentvolumeclaim/build.go | 5 ++- .../persistentvolumeclaim/build_test.go | 5 ++- .../persistentvolumeclaim/calculator.go | 5 ++- .../persistentvolumeclaim/calculator_test.go | 5 ++- .../persistentvolumeclaim/create.go | 5 ++- .../persistentvolumeclaim/create_test.go | 5 ++- .../persistentvolumeclaim/delete.go | 5 ++- .../persistentvolumeclaim/delete_test.go | 5 ++- pkg/reconciler/persistentvolumeclaim/doc.go | 5 ++- .../persistentvolumeclaim/instance.go | 5 ++- .../persistentvolumeclaim/metadata.go | 5 ++- .../persistentvolumeclaim/metadata_test.go | 5 ++- .../persistentvolumeclaim/reconciler.go | 5 ++- .../persistentvolumeclaim/reconciler_test.go | 5 ++- .../persistentvolumeclaim/requests.go | 5 ++- .../persistentvolumeclaim/resources.go | 5 ++- .../persistentvolumeclaim/resources_test.go | 5 ++- .../persistentvolumeclaim/status.go | 5 ++- .../persistentvolumeclaim/storagesource.go | 5 ++- .../storagesource_test.go | 5 ++- .../persistentvolumeclaim/suite_test.go | 5 ++- 
.../persistentvolumeclaim/validation.go | 5 ++- .../persistentvolumeclaim/validation_test.go | 5 ++- .../replicaclusterswitch/conditions.go | 5 ++- pkg/reconciler/replicaclusterswitch/doc.go | 5 ++- .../replicaclusterswitch/reconciler.go | 5 ++- .../replicaclusterswitch/shutdown_wal.go | 5 ++- pkg/resources/doc.go | 5 ++- pkg/resources/labels_annotations.go | 5 ++- pkg/resources/metadatabuilder.go | 5 ++- pkg/resources/persistentvolumeclaim.go | 5 ++- pkg/resources/retry.go | 5 ++- pkg/resources/retry_test.go | 5 ++- pkg/resources/status/conditions.go | 5 ++- pkg/resources/status/doc.go | 5 ++- pkg/resources/status/patch.go | 5 ++- pkg/resources/status/transactions.go | 5 ++- pkg/resources/suite_test.go | 5 ++- pkg/servicespec/builder.go | 5 ++- pkg/servicespec/builder_test.go | 5 ++- pkg/servicespec/suite_test.go | 5 ++- pkg/specs/containers.go | 5 ++- pkg/specs/containers_test.go | 5 ++- pkg/specs/jobs.go | 5 ++- pkg/specs/jobs_test.go | 5 ++- pkg/specs/pg_pods.go | 5 ++- pkg/specs/pg_pods_test.go | 5 ++- pkg/specs/pgbouncer/deployments.go | 5 ++- pkg/specs/pgbouncer/deployments_test.go | 5 ++- pkg/specs/pgbouncer/podmonitor.go | 5 ++- pkg/specs/pgbouncer/podmonitor_test.go | 5 ++- pkg/specs/pgbouncer/rbac.go | 5 ++- pkg/specs/pgbouncer/rbac_test.go | 5 ++- pkg/specs/pgbouncer/services.go | 5 ++- pkg/specs/pgbouncer/services_test.go | 5 ++- pkg/specs/pgbouncer/suite_test.go | 5 ++- pkg/specs/poddisruptionbudget.go | 5 ++- pkg/specs/poddisruptionbudget_test.go | 5 ++- pkg/specs/podmonitor.go | 5 ++- pkg/specs/podmonitor_test.go | 5 ++- pkg/specs/pods.go | 5 ++- pkg/specs/pods_test.go | 5 ++- pkg/specs/podspec_diff.go | 5 ++- pkg/specs/podspec_diff_test.go | 5 ++- pkg/specs/rolebinding.go | 5 ++- pkg/specs/rolebinding_test.go | 5 ++- pkg/specs/roles.go | 5 ++- pkg/specs/roles_test.go | 5 ++- pkg/specs/secrets.go | 5 ++- pkg/specs/secrets_test.go | 5 ++- pkg/specs/serviceaccount.go | 5 ++- pkg/specs/serviceaccount_test.go | 5 ++- pkg/specs/services.go | 5 ++- pkg/specs/services_test.go | 5 ++- pkg/specs/suite_test.go | 5 ++- pkg/specs/volumes.go | 5 ++- pkg/specs/volumes_test.go | 5 ++- pkg/system/compatibility/darwin.go | 5 ++- pkg/system/compatibility/doc.go | 5 ++- pkg/system/compatibility/unix.go | 5 ++- pkg/system/compatibility/windows.go | 5 ++- pkg/system/suite_test.go | 5 ++- pkg/system/system.go | 5 ++- pkg/system/system_test.go | 5 ++- pkg/utils/conditions.go | 5 ++- pkg/utils/conditions_test.go | 5 ++- pkg/utils/context.go | 5 ++- pkg/utils/discovery.go | 5 ++- pkg/utils/discovery_test.go | 5 ++- pkg/utils/exec.go | 5 ++- pkg/utils/fencing.go | 5 ++- pkg/utils/fencing_test.go | 5 ++- pkg/utils/finalizers.go | 5 ++- pkg/utils/hash/doc.go | 5 ++- pkg/utils/hash/hash.go | 5 ++- pkg/utils/hash/hash_test.go | 5 ++- pkg/utils/hash/suite_test.go | 5 ++- pkg/utils/job_conditions.go | 5 ++- pkg/utils/job_conditions_test.go | 5 ++- pkg/utils/labels_annotations.go | 5 ++- pkg/utils/labels_annotations_test.go | 5 ++- pkg/utils/math.go | 5 ++- pkg/utils/operations.go | 5 ++- pkg/utils/operations_test.go | 5 ++- pkg/utils/ownership.go | 5 ++- pkg/utils/parser.go | 5 ++- pkg/utils/parser_test.go | 5 ++- pkg/utils/pod_conditions.go | 5 ++- pkg/utils/pod_conditions_test.go | 5 ++- pkg/utils/reconciliation.go | 5 ++- pkg/utils/suite_test.go | 5 ++- pkg/versions/versions.go | 5 ++- releases/operator-manifests.go | 5 ++- tests/e2e/affinity_test.go | 5 ++- tests/e2e/apparmor_test.go | 5 ++- tests/e2e/architecture_test.go | 5 ++- tests/e2e/asserts_test.go | 5 ++- tests/e2e/backup_restore_azure_test.go 
| 5 ++- tests/e2e/backup_restore_azurite_test.go | 5 ++- tests/e2e/backup_restore_minio_test.go | 5 ++- tests/e2e/certificates_test.go | 5 ++- tests/e2e/cluster_microservice_test.go | 5 ++- tests/e2e/cluster_monolithic_test.go | 5 ++- tests/e2e/cluster_setup_test.go | 5 ++- tests/e2e/commons_test.go | 5 ++- tests/e2e/config_support_test.go | 5 ++- tests/e2e/configuration_update_test.go | 5 ++- tests/e2e/connection_test.go | 5 ++- .../declarative_database_management_test.go | 5 ++- tests/e2e/declarative_hibernation_test.go | 5 ++- tests/e2e/disk_space_test.go | 5 ++- tests/e2e/drain_node_test.go | 5 ++- tests/e2e/eviction_test.go | 5 ++- tests/e2e/failover_test.go | 5 ++- tests/e2e/fastfailover_test.go | 5 ++- tests/e2e/fastswitchover_test.go | 5 ++- tests/e2e/fencing_test.go | 5 ++- tests/e2e/initdb_test.go | 5 ++- tests/e2e/logs_test.go | 5 ++- tests/e2e/managed_roles_test.go | 5 ++- tests/e2e/managed_services_test.go | 5 ++- tests/e2e/metrics_test.go | 5 ++- tests/e2e/monitoring_test.go | 5 ++- tests/e2e/nodeselector_test.go | 5 ++- tests/e2e/openshift_upgrade_test.go | 8 +++-- tests/e2e/operator_deployment_test.go | 5 ++- tests/e2e/operator_ha_test.go | 5 ++- tests/e2e/operator_unavailable_test.go | 5 ++- tests/e2e/pg_basebackup_test.go | 5 ++- tests/e2e/pg_data_corruption_test.go | 5 ++- tests/e2e/pg_wal_volume_test.go | 5 ++- tests/e2e/pgbouncer_metrics_test.go | 5 ++- tests/e2e/pgbouncer_test.go | 5 ++- tests/e2e/pgbouncer_types_test.go | 5 ++- tests/e2e/pod_patch_test.go | 5 ++- tests/e2e/probes_test.go | 5 ++- tests/e2e/publication_subscription_test.go | 5 ++- tests/e2e/pvc_deletion_test.go | 5 ++- tests/e2e/replica_mode_cluster_test.go | 5 ++- tests/e2e/replication_slot_test.go | 5 ++- tests/e2e/rolling_update_test.go | 5 ++- tests/e2e/scaling_test.go | 5 ++- tests/e2e/storage_expansion_test.go | 5 ++- tests/e2e/suite_test.go | 5 ++- tests/e2e/switchover_test.go | 5 ++- tests/e2e/syncreplicas_test.go | 5 ++- tests/e2e/tablespaces_test.go | 5 ++- tests/e2e/tolerations_test.go | 5 ++- tests/e2e/update_user_test.go | 5 ++- tests/e2e/upgrade_test.go | 5 ++- tests/e2e/volume_snapshot_test.go | 5 ++- tests/e2e/wal_restore_parallel_test.go | 5 ++- tests/e2e/webhook_test.go | 5 ++- tests/labels.go | 5 ++- tests/levels.go | 5 ++- tests/utils/backups/azurite.go | 5 ++- tests/utils/backups/backup.go | 5 ++- tests/utils/backups/doc.go | 5 ++- tests/utils/cloudvendors/cloud_vendor.go | 5 ++- tests/utils/clusterutils/cluster.go | 5 ++- tests/utils/deployments/deployment.go | 5 ++- tests/utils/doc.go | 5 ++- tests/utils/endpoints.go | 5 ++- tests/utils/environment/doc.go | 5 ++- tests/utils/environment/environment.go | 5 ++- tests/utils/environment/environment_test.go | 5 ++- tests/utils/environment/suite_test.go | 5 ++- tests/utils/envsubst/doc.go | 5 ++- tests/utils/envsubst/envsubst.go | 5 ++- tests/utils/envsubst/envsubst_test.go | 5 ++- tests/utils/envsubst/suite_test.go | 5 ++- tests/utils/exec/exec.go | 5 ++- tests/utils/fencing/fencing.go | 5 ++- tests/utils/forwardconnection/doc.go | 5 ++- .../forwardconnection/forwardconnection.go | 5 ++- tests/utils/importdb/import_db.go | 5 ++- tests/utils/logs/doc.go | 5 ++- tests/utils/logs/logs.go | 5 ++- tests/utils/logs/logs_test.go | 5 ++- tests/utils/logs/suite_test.go | 5 ++- tests/utils/minio/minio.go | 5 ++- tests/utils/namespaces/namespace.go | 5 ++- tests/utils/nodes/nodes.go | 5 ++- tests/utils/objects/objects.go | 7 ++-- tests/utils/openshift/openshift.go | 5 ++- tests/utils/operator/doc.go | 5 ++- tests/utils/operator/operator.go | 7 
++-- tests/utils/operator/release.go | 5 ++- tests/utils/operator/release_test.go | 5 ++- tests/utils/operator/suite_test.go | 5 ++- tests/utils/operator/upgrade.go | 5 ++- tests/utils/operator/webhooks.go | 5 ++- tests/utils/pods/pod.go | 5 ++- tests/utils/postgres/doc.go | 5 ++- tests/utils/postgres/postgres.go | 5 ++- tests/utils/postgres/postgres_test.go | 5 ++- tests/utils/postgres/psql_connection.go | 5 ++- tests/utils/postgres/suite_test.go | 5 ++- tests/utils/proxy/proxy.go | 5 ++- .../replicationslot/replication_slots.go | 5 ++- tests/utils/run/run.go | 5 ++- tests/utils/secrets/secrets.go | 5 ++- tests/utils/services/service.go | 5 ++- tests/utils/sternmultitailer/doc.go | 5 ++- tests/utils/sternmultitailer/multitailer.go | 5 ++- tests/utils/storage/storage.go | 5 ++- tests/utils/timeouts/timeouts.go | 5 ++- tests/utils/utils.go | 5 ++- tests/utils/yaml/yaml.go | 5 ++- 820 files changed, 3417 insertions(+), 847 deletions(-) diff --git a/.github/e2e-matrix-generator.py b/.github/e2e-matrix-generator.py index 1270ef907a..6a2ef3ff64 100644 --- a/.github/e2e-matrix-generator.py +++ b/.github/e2e-matrix-generator.py @@ -1,5 +1,6 @@ # -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# import argparse import json diff --git a/.github/generate-test-artifacts.py b/.github/generate-test-artifacts.py index 64f2579999..c81268ec9c 100644 --- a/.github/generate-test-artifacts.py +++ b/.github/generate-test-artifacts.py @@ -1,5 +1,6 @@ # -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# import argparse import json diff --git a/.github/postgres-versions-update.py b/.github/postgres-versions-update.py index 92e5219bef..7a5f6055d5 100644 --- a/.github/postgres-versions-update.py +++ b/.github/postgres-versions-update.py @@ -1,5 +1,6 @@ # -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# import re import pprint diff --git a/.github/report-failed-test.sh b/.github/report-failed-test.sh index af88177a79..791d6060cf 100644 --- a/.github/report-failed-test.sh +++ b/.github/report-failed-test.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash ## -## Copyright The CloudNativePG Contributors +## Copyright © contributors to CloudNativePG, established as +## CloudNativePG a Series of LF Projects, LLC. ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. 
@@ -14,6 +15,8 @@ ## See the License for the specific language governing permissions and ## limitations under the License. ## +## SPDX-License-Identifier: Apache-2.0 +## echo '::echo::off' diff --git a/Makefile b/Makefile index 020fd7d953..2d810755a8 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ # -# Copyright The CloudNativePG Contributors +# Copyright © contributors to CloudNativePG, established as +# CloudNativePG a Series of LF Projects, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# # Image URL to use all building/pushing image targets IMAGE_NAME ?= ghcr.io/cloudnative-pg/cloudnative-pg-testing diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go index e6264fd385..b3ba528d60 100644 --- a/api/v1/backup_funcs.go +++ b/api/v1/backup_funcs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/backup_funcs_test.go b/api/v1/backup_funcs_test.go index 3828a1eac7..d48fbc8b35 100644 --- a/api/v1/backup_funcs_test.go +++ b/api/v1/backup_funcs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go index 2c7ad3d5f8..c39ede578d 100644 --- a/api/v1/backup_types.go +++ b/api/v1/backup_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/base_funcs.go b/api/v1/base_funcs.go index e0fdf25203..abde3703d6 100644 --- a/api/v1/base_funcs.go +++ b/api/v1/base_funcs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/base_funcs_test.go b/api/v1/base_funcs_test.go index a6fa574401..b6f4f83049 100644 --- a/api/v1/base_funcs_test.go +++ b/api/v1/base_funcs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/base_types.go b/api/v1/base_types.go index 6ae61a7ec2..376b518c0f 100644 --- a/api/v1/base_types.go +++ b/api/v1/base_types.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/cluster_conditions.go b/api/v1/cluster_conditions.go index ae9844632d..597b367ebf 100644 --- a/api/v1/cluster_conditions.go +++ b/api/v1/cluster_conditions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/api/v1/cluster_defaults.go b/api/v1/cluster_defaults.go index e368655cb8..64f51e0411 100644 --- a/api/v1/cluster_defaults.go +++ b/api/v1/cluster_defaults.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/cluster_defaults_test.go b/api/v1/cluster_defaults_test.go
index 4ff95bb8ca..1c6f02e0e2 100644
--- a/api/v1/cluster_defaults_test.go
+++ b/api/v1/cluster_defaults_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go
index 9e493829bc..ab08c6eafd 100644
--- a/api/v1/cluster_funcs.go
+++ b/api/v1/cluster_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go
index 1c31cde69f..a9607b0516 100644
--- a/api/v1/cluster_funcs_test.go
+++ b/api/v1/cluster_funcs_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index 70b5c2ed3c..55a0d7cfcb 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/clusterimagecatalog_funcs.go b/api/v1/clusterimagecatalog_funcs.go
index a698a1ad8f..1c28740e31 100644
--- a/api/v1/clusterimagecatalog_funcs.go
+++ b/api/v1/clusterimagecatalog_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/clusterimagecatalog_types.go b/api/v1/clusterimagecatalog_types.go
index 7f0a7dc970..4bf405b629 100644
--- a/api/v1/clusterimagecatalog_types.go
+++ b/api/v1/clusterimagecatalog_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/common_types.go b/api/v1/common_types.go
index b87e009b23..cf15b953b1 100644
--- a/api/v1/common_types.go
+++ b/api/v1/common_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/database_funcs.go b/api/v1/database_funcs.go
index 3059538e90..4cea22a0dc 100644
--- a/api/v1/database_funcs.go
+++ b/api/v1/database_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/database_types.go b/api/v1/database_types.go
index ba5e2fd31d..0862d88dbc 100644
--- a/api/v1/database_types.go
+++ b/api/v1/database_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/doc.go b/api/v1/doc.go
index 856c513680..73b01b1476 100644
--- a/api/v1/doc.go
+++ b/api/v1/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package v1 contains API Schema definitions for the postgresql v1 API group
diff --git a/api/v1/generic_funcs.go b/api/v1/generic_funcs.go
index 3fc7e756f8..da622c8e7b 100644
--- a/api/v1/generic_funcs.go
+++ b/api/v1/generic_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/genericimagecatalog_iface.go b/api/v1/genericimagecatalog_iface.go
index 635fff02fa..2b03be377f 100644
--- a/api/v1/genericimagecatalog_iface.go
+++ b/api/v1/genericimagecatalog_iface.go
@@ -1,3 +1,22 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
 package v1
 
 import (
diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go
index ded686668d..02a19be715 100644
--- a/api/v1/groupversion_info.go
+++ b/api/v1/groupversion_info.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/imagecatalog_funcs.go b/api/v1/imagecatalog_funcs.go
index 1c4420f7b8..b3a2b69da4 100644
--- a/api/v1/imagecatalog_funcs.go
+++ b/api/v1/imagecatalog_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/imagecatalog_funcs_test.go b/api/v1/imagecatalog_funcs_test.go
index 0fcb3d1bd4..e424398070 100644
--- a/api/v1/imagecatalog_funcs_test.go
+++ b/api/v1/imagecatalog_funcs_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/imagecatalog_types.go b/api/v1/imagecatalog_types.go
index 2d5d5c13d0..76938faec6 100644
--- a/api/v1/imagecatalog_types.go
+++ b/api/v1/imagecatalog_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/pooler_funcs.go b/api/v1/pooler_funcs.go
index b3af4d83e0..807e9da8f2 100644
--- a/api/v1/pooler_funcs.go
+++ b/api/v1/pooler_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/pooler_funcs_test.go b/api/v1/pooler_funcs_test.go
index 7a0383d703..6e452aa152 100644
--- a/api/v1/pooler_funcs_test.go
+++ b/api/v1/pooler_funcs_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/pooler_types.go b/api/v1/pooler_types.go
index e81b08a50b..3dd668394b 100644
--- a/api/v1/pooler_types.go
+++ b/api/v1/pooler_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/publication_funcs.go b/api/v1/publication_funcs.go
index f6076abafc..b74947674f 100644
--- a/api/v1/publication_funcs.go
+++ b/api/v1/publication_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/publication_types.go b/api/v1/publication_types.go
index 39be47ef63..86edeb4157 100644
--- a/api/v1/publication_types.go
+++ b/api/v1/publication_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/scheduledbackup_funcs.go b/api/v1/scheduledbackup_funcs.go
index 770683cec8..84ad3de840 100644
--- a/api/v1/scheduledbackup_funcs.go
+++ b/api/v1/scheduledbackup_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/scheduledbackup_funcs_test.go b/api/v1/scheduledbackup_funcs_test.go
index ca957482dc..2f5ff2b0ab 100644
--- a/api/v1/scheduledbackup_funcs_test.go
+++ b/api/v1/scheduledbackup_funcs_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/scheduledbackup_types.go b/api/v1/scheduledbackup_types.go
index b89248c49c..b17671e367 100644
--- a/api/v1/scheduledbackup_types.go
+++ b/api/v1/scheduledbackup_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/subscription_funcs.go b/api/v1/subscription_funcs.go
index e19c7ae24f..340cbb2d07 100644
--- a/api/v1/subscription_funcs.go
+++ b/api/v1/subscription_funcs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/subscription_types.go b/api/v1/subscription_types.go
index 628ec8a4da..2a4f4f7185 100644
--- a/api/v1/subscription_types.go
+++ b/api/v1/subscription_types.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/suite_test.go b/api/v1/suite_test.go
index ef8c13c144..63bb2acfcc 100644
--- a/api/v1/suite_test.go
+++ b/api/v1/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/zz_api_repo_funcs_to_copy.go b/api/v1/zz_api_repo_funcs_to_copy.go
index f7cbea2733..bd55f68dec 100644
--- a/api/v1/zz_api_repo_funcs_to_copy.go
+++ b/api/v1/zz_api_repo_funcs_to_copy.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package v1
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index 1171af8553..b19b17b683 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -1,7 +1,8 @@
 //go:build !ignore_autogenerated
 
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,6 +15,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Code generated by controller-gen. DO NOT EDIT.
diff --git a/cmd/kubectl-cnpg/main.go b/cmd/kubectl-cnpg/main.go
index 4a8157d057..09069bad4e 100644
--- a/cmd/kubectl-cnpg/main.go
+++ b/cmd/kubectl-cnpg/main.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 /*
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
index 9feee1c6b1..86bb259a89 100644
--- a/cmd/manager/main.go
+++ b/cmd/manager/main.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 /*
diff --git a/docker-bake.hcl b/docker-bake.hcl
index 1d5fdcf47c..c30d3f2d6e 100644
--- a/docker-bake.hcl
+++ b/docker-bake.hcl
@@ -1,5 +1,6 @@
 #
-# Copyright The CloudNativePG Contributors
+# Copyright © contributors to CloudNativePG, established as
+# CloudNativePG a Series of LF Projects, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+# SPDX-License-Identifier: Apache-2.0
+#
 
 variable "environment" {
   default = "testing"
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 2d50e97023..7375d2cd2f 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -1,5 +1,6 @@
 site_name: CloudNativePG
 site_author: The CloudNativePG Contributors
+copyright: Copyright © CloudNativePG a Series of LF Projects, LLC
 
 docs_dir: src
 theme:
diff --git a/docs/src/samples/monitoring/kube-stack-config.yaml b/docs/src/samples/monitoring/kube-stack-config.yaml
index 68c0885fbb..af91202668 100644
--- a/docs/src/samples/monitoring/kube-stack-config.yaml
+++ b/docs/src/samples/monitoring/kube-stack-config.yaml
@@ -1,9 +1,6 @@
-# Default values for cnp-sandbox.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
 #
-#
-# Copyright The CloudNativePG Contributors
+# Copyright © contributors to CloudNativePG, established as
+# CloudNativePG a Series of LF Projects, LLC.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+# SPDX-License-Identifier: Apache-2.0
+#
 
 # -- here you can pass the whole values directly to the kube-prometheus-stack chart
 enabled: true
diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt
index 7e5acd9fee..5086ae6d74 100644
--- a/hack/boilerplate.go.txt
+++ b/hack/boilerplate.go.txt
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,4 +13,6 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh
index 28a2a96f94..43c9ab91cc 100755
--- a/hack/e2e/run-e2e-kind.sh
+++ b/hack/e2e/run-e2e-kind.sh
@@ -1,7 +1,8 @@
 #!/usr/bin/env bash
 ##
-## Copyright The CloudNativePG Contributors
+## Copyright © contributors to CloudNativePG, established as
+## CloudNativePG a Series of LF Projects, LLC.
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -15,6 +16,9 @@
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
 ##
+## SPDX-License-Identifier: Apache-2.0
+##
+
 # shellcheck disable=SC2317
 # standard bash error handling
 set -eEuo pipefail
diff --git a/hack/e2e/run-e2e-local.sh b/hack/e2e/run-e2e-local.sh
index 56a3d78c6c..aa404f4f53 100755
--- a/hack/e2e/run-e2e-local.sh
+++ b/hack/e2e/run-e2e-local.sh
@@ -1,7 +1,8 @@
 #!/usr/bin/env bash
 ##
-## Copyright The CloudNativePG Contributors
+## Copyright © contributors to CloudNativePG, established as
+## CloudNativePG a Series of LF Projects, LLC.
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -15,6 +16,8 @@
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
 ##
+## SPDX-License-Identifier: Apache-2.0
+##
 
 # standard bash error handling
 set -eEuo pipefail
diff --git a/hack/e2e/run-e2e-ocp.sh b/hack/e2e/run-e2e-ocp.sh
index 130e4c80b0..ec2f90db71 100755
--- a/hack/e2e/run-e2e-ocp.sh
+++ b/hack/e2e/run-e2e-ocp.sh
@@ -1,7 +1,8 @@
 #!/usr/bin/env bash
 ##
-## Copyright The CloudNativePG Contributors
+## Copyright © contributors to CloudNativePG, established as
+## CloudNativePG a Series of LF Projects, LLC.
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -15,6 +16,8 @@
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
 ##
+## SPDX-License-Identifier: Apache-2.0
+##
 
 set -eEuo pipefail
diff --git a/hack/e2e/run-e2e.sh b/hack/e2e/run-e2e.sh
index 8e0ac588d3..b4e38497ff 100755
--- a/hack/e2e/run-e2e.sh
+++ b/hack/e2e/run-e2e.sh
@@ -1,7 +1,8 @@
 #!/usr/bin/env bash
 ##
-## Copyright The CloudNativePG Contributors
+## Copyright © contributors to CloudNativePG, established as
+## CloudNativePG a Series of LF Projects, LLC.
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -15,6 +16,8 @@
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
 ##
+## SPDX-License-Identifier: Apache-2.0
+##
 
 # standard bash error handling
 set -eEuo pipefail
diff --git a/hack/install-cnpg-plugin.sh b/hack/install-cnpg-plugin.sh
index 9f7fdb80d1..8d7a987d22 100644
--- a/hack/install-cnpg-plugin.sh
+++ b/hack/install-cnpg-plugin.sh
@@ -1,6 +1,7 @@
 #!/bin/sh
 ##
-## Copyright The CloudNativePG Contributors
+## Copyright © contributors to CloudNativePG, established as
+## CloudNativePG a Series of LF Projects, LLC.
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -14,6 +15,9 @@
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
 ##
+## SPDX-License-Identifier: Apache-2.0
+##
+
 # shellcheck disable=SC2317
 
 set -e
diff --git a/hack/release.sh b/hack/release.sh
index e82aeabc73..44a104aeaf 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -1,14 +1,7 @@
 #!/usr/bin/env bash
 ##
-## CloudNativePG release script
-##
-## This shell script automates the release process for a selected
-## version of CloudNativePG. It must be invoked from a release
-## branch. For details on the release procedure, please
-## refer to the contribute/release_procedure.md file from the
-## main folder.
-##
-## Copyright The CloudNativePG Contributors
+## Copyright © contributors to CloudNativePG, established as
+## CloudNativePG a Series of LF Projects, LLC.
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -22,6 +15,17 @@
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
 ##
+## SPDX-License-Identifier: Apache-2.0
+##
+
+## CloudNativePG release script
+##
+## This shell script automates the release process for a selected
+## version of CloudNativePG. It must be invoked from a release
+## branch. For details on the release procedure, please
+## refer to the contribute/release_procedure.md file from the
+## main folder.
+##
 
 set -o errexit -o nounset -o pipefail
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index a2f28f7ac3..af435fcbc4 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -1,7 +1,8 @@
 #!/usr/bin/env bash
 ##
-## Copyright The CloudNativePG Contributors
+## Copyright © contributors to CloudNativePG, established as
+## CloudNativePG a Series of LF Projects, LLC.
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -15,6 +16,8 @@
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
 ##
+## SPDX-License-Identifier: Apache-2.0
+##
 
 # standard bash error handling
 set -eEuo pipefail
diff --git a/hack/show-release-diffs.sh b/hack/show-release-diffs.sh
index cdbacc8701..12a76801a7 100755
--- a/hack/show-release-diffs.sh
+++ b/hack/show-release-diffs.sh
@@ -1,5 +1,23 @@
 #!/usr/bin/env bash
 ##
+## Copyright © contributors to CloudNativePG, established as
+## CloudNativePG a Series of LF Projects, LLC.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+## SPDX-License-Identifier: Apache-2.0
+##
+
 ## CloudNativePG - Show diffs from main for a release branch
 ##
 ## This is a helper script that prints the GitHub pull requests
@@ -19,21 +37,6 @@
 ## This example compares the current branch with 1.15:
 ##
 ##   ./hack/show-release-diffs.sh release-1.15
-##
-## Copyright The CloudNativePG Contributors
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-##   http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
 
 set -o errexit -o nounset -o pipefail
diff --git a/internal/cmd/manager/backup/cmd.go b/internal/cmd/manager/backup/cmd.go
index f0295a4965..c39e0de0da 100644
--- a/internal/cmd/manager/backup/cmd.go
+++ b/internal/cmd/manager/backup/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package backup implement the "controller backup" command
diff --git a/internal/cmd/manager/bootstrap/cmd.go b/internal/cmd/manager/bootstrap/cmd.go
index 311064e2c5..fd85706d35 100644
--- a/internal/cmd/manager/bootstrap/cmd.go
+++ b/internal/cmd/manager/bootstrap/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package bootstrap implement the "controller bootstrap" command
diff --git a/internal/cmd/manager/controller/cmd.go b/internal/cmd/manager/controller/cmd.go
index 3a94375a80..12c5f988a5 100644
--- a/internal/cmd/manager/controller/cmd.go
+++ b/internal/cmd/manager/controller/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package controller
diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go
index 81755ca6fb..2c337d8a1d 100644
--- a/internal/cmd/manager/controller/controller.go
+++ b/internal/cmd/manager/controller/controller.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package controller implement the command used to start the operator
diff --git a/internal/cmd/manager/debug/architectures/cmd.go b/internal/cmd/manager/debug/architectures/cmd.go
index d395963610..b14c81702a 100644
--- a/internal/cmd/manager/debug/architectures/cmd.go
+++ b/internal/cmd/manager/debug/architectures/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package architectures implement the show-architectures command
diff --git a/internal/cmd/manager/debug/cmd.go b/internal/cmd/manager/debug/cmd.go
index e46fdbc735..23250fdd7b 100644
--- a/internal/cmd/manager/debug/cmd.go
+++ b/internal/cmd/manager/debug/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package debug implement the debug command subfeatures
diff --git a/internal/cmd/manager/instance/cmd.go b/internal/cmd/manager/instance/cmd.go
index dde99ee925..3a45f490ba 100644
--- a/internal/cmd/manager/instance/cmd.go
+++ b/internal/cmd/manager/instance/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package instance implements the "instance" subcommand of the operator
diff --git a/internal/cmd/manager/instance/initdb/cmd.go b/internal/cmd/manager/instance/initdb/cmd.go
index 09928c9a90..36619bc9bd 100644
--- a/internal/cmd/manager/instance/initdb/cmd.go
+++ b/internal/cmd/manager/instance/initdb/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package initdb implements the "instance init" subcommand of the operator
diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go
index d29c59f09e..e30700b170 100644
--- a/internal/cmd/manager/instance/join/cmd.go
+++ b/internal/cmd/manager/instance/join/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package join implements the "instance join" subcommand of the operator
diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go
index 57a92cd83c..8b699cf442 100644
--- a/internal/cmd/manager/instance/pgbasebackup/cmd.go
+++ b/internal/cmd/manager/instance/pgbasebackup/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package pgbasebackup implement the pgbasebackup bootstrap method
diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go
index 120dfef278..7d8bc50926 100644
--- a/internal/cmd/manager/instance/restore/cmd.go
+++ b/internal/cmd/manager/instance/restore/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package restore
diff --git a/internal/cmd/manager/instance/restore/doc.go b/internal/cmd/manager/instance/restore/doc.go
index edb70590d7..5a94c00223 100644
--- a/internal/cmd/manager/instance/restore/doc.go
+++ b/internal/cmd/manager/instance/restore/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package restore implements the "instance restore" subcommand of the operator
diff --git a/internal/cmd/manager/instance/restore/restore.go b/internal/cmd/manager/instance/restore/restore.go
index 5f2b81974f..ad9d90e33f 100644
--- a/internal/cmd/manager/instance/restore/restore.go
+++ b/internal/cmd/manager/instance/restore/restore.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package restore
diff --git a/internal/cmd/manager/instance/restoresnapshot/cmd.go b/internal/cmd/manager/instance/restoresnapshot/cmd.go
index 6d84b6f714..57a40825ea 100644
--- a/internal/cmd/manager/instance/restoresnapshot/cmd.go
+++ b/internal/cmd/manager/instance/restoresnapshot/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package restoresnapshot implements the "instance restoresnapshot" subcommand of the operator
diff --git a/internal/cmd/manager/instance/restoresnapshot/doc.go b/internal/cmd/manager/instance/restoresnapshot/doc.go
index ecd62ffa13..be6dbbe67b 100644
--- a/internal/cmd/manager/instance/restoresnapshot/doc.go
+++ b/internal/cmd/manager/instance/restoresnapshot/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package restoresnapshot implements the job command that bootstraps the snapshot volumes
diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go
index e02d06d55c..37a95d7c2e 100644
--- a/internal/cmd/manager/instance/run/cmd.go
+++ b/internal/cmd/manager/instance/run/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package run implements the "instance run" subcommand of the operator
diff --git a/internal/cmd/manager/instance/run/errors.go b/internal/cmd/manager/instance/run/errors.go
index 2b47796d8a..44900d23f2 100644
--- a/internal/cmd/manager/instance/run/errors.go
+++ b/internal/cmd/manager/instance/run/errors.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package run
diff --git a/internal/cmd/manager/instance/run/errors_test.go b/internal/cmd/manager/instance/run/errors_test.go
index 063059d2fd..548dd90e57 100644
--- a/internal/cmd/manager/instance/run/errors_test.go
+++ b/internal/cmd/manager/instance/run/errors_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package run
diff --git a/internal/cmd/manager/instance/run/lifecycle/doc.go b/internal/cmd/manager/instance/run/lifecycle/doc.go
index e092b18ce1..e00e9d7698 100644
--- a/internal/cmd/manager/instance/run/lifecycle/doc.go
+++ b/internal/cmd/manager/instance/run/lifecycle/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package lifecycle contains the code to run and manage the lifecycle of a postgres Instance
diff --git a/internal/cmd/manager/instance/run/lifecycle/lifecycle.go b/internal/cmd/manager/instance/run/lifecycle/lifecycle.go
index 1e223e3e22..cc351ff90d 100644
--- a/internal/cmd/manager/instance/run/lifecycle/lifecycle.go
+++ b/internal/cmd/manager/instance/run/lifecycle/lifecycle.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package lifecycle
diff --git a/internal/cmd/manager/instance/run/lifecycle/reaper.go b/internal/cmd/manager/instance/run/lifecycle/reaper.go
index 68f32ba3ea..55d17106d3 100644
--- a/internal/cmd/manager/instance/run/lifecycle/reaper.go
+++ b/internal/cmd/manager/instance/run/lifecycle/reaper.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package lifecycle
diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go
index 58127a192b..acce529c1c 100644
--- a/internal/cmd/manager/instance/run/lifecycle/run.go
+++ b/internal/cmd/manager/instance/run/lifecycle/run.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package lifecycle
diff --git a/internal/cmd/manager/instance/run/suite_test.go b/internal/cmd/manager/instance/run/suite_test.go
index e1d9122745..5b0258a8ea 100644
--- a/internal/cmd/manager/instance/run/suite_test.go
+++ b/internal/cmd/manager/instance/run/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package run
diff --git a/internal/cmd/manager/instance/status/cmd.go b/internal/cmd/manager/instance/status/cmd.go
index 31c8ed506d..b23f88751d 100644
--- a/internal/cmd/manager/instance/status/cmd.go
+++ b/internal/cmd/manager/instance/status/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package status implement the "instance status" subcommand of the operator
diff --git a/internal/cmd/manager/pgbouncer/cmd.go b/internal/cmd/manager/pgbouncer/cmd.go
index a511619112..93dac35669 100644
--- a/internal/cmd/manager/pgbouncer/cmd.go
+++ b/internal/cmd/manager/pgbouncer/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package pgbouncer implements the "pgbouncer" subcommand of the operator
diff --git a/internal/cmd/manager/pgbouncer/run/cmd.go b/internal/cmd/manager/pgbouncer/run/cmd.go
index 3650601e3e..cd3a77cf69 100644
--- a/internal/cmd/manager/pgbouncer/run/cmd.go
+++ b/internal/cmd/manager/pgbouncer/run/cmd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package run implements the "pgbouncer run" subcommand of the operator
diff --git a/internal/cmd/manager/pgbouncer/run/log.go b/internal/cmd/manager/pgbouncer/run/log.go
index d6022b6302..9df826cbc7 100644
--- a/internal/cmd/manager/pgbouncer/run/log.go
+++ b/internal/cmd/manager/pgbouncer/run/log.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package run
diff --git a/internal/cmd/manager/pgbouncer/run/log_test.go b/internal/cmd/manager/pgbouncer/run/log_test.go
index d1be815db8..a291a68d62 100644
--- a/internal/cmd/manager/pgbouncer/run/log_test.go
+++ b/internal/cmd/manager/pgbouncer/run/log_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package run diff --git a/internal/cmd/manager/pgbouncer/run/suite_test.go b/internal/cmd/manager/pgbouncer/run/suite_test.go index bc238656c0..febaf17610 100644 --- a/internal/cmd/manager/pgbouncer/run/suite_test.go +++ b/internal/cmd/manager/pgbouncer/run/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package run diff --git a/internal/cmd/manager/show/cmd.go b/internal/cmd/manager/show/cmd.go index b21af7f97a..1d31f7a40e 100644 --- a/internal/cmd/manager/show/cmd.go +++ b/internal/cmd/manager/show/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package show implement the show command subfeatures diff --git a/internal/cmd/manager/show/walarchivequeue/cmd.go b/internal/cmd/manager/show/walarchivequeue/cmd.go index ddec44afd6..d1f774bfe7 100644 --- a/internal/cmd/manager/show/walarchivequeue/cmd.go +++ b/internal/cmd/manager/show/walarchivequeue/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package walarchivequeue implement the wal-archive-queue command diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index 665c656c1f..f38e73ea48 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package walarchive implement the wal-archive command diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index 403dedb063..127cc90350 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package walrestore implement the walrestore command diff --git a/internal/cmd/manager/walrestore/cmd_test.go b/internal/cmd/manager/walrestore/cmd_test.go index bc776e6b05..24c884d315 100644 --- a/internal/cmd/manager/walrestore/cmd_test.go +++ b/internal/cmd/manager/walrestore/cmd_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package walrestore diff --git a/internal/cmd/manager/walrestore/suite_test.go b/internal/cmd/manager/walrestore/suite_test.go index 97a75d2422..9a424b57af 100644 --- a/internal/cmd/manager/walrestore/suite_test.go +++ b/internal/cmd/manager/walrestore/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package walrestore diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go index af8bbf8b3d..c88d689c85 100644 --- a/internal/cmd/plugin/backup/cmd.go +++ b/internal/cmd/plugin/backup/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package backup diff --git a/internal/cmd/plugin/backup/doc.go b/internal/cmd/plugin/backup/doc.go index 5672ee33a1..5e39e93408 100644 --- a/internal/cmd/plugin/backup/doc.go +++ b/internal/cmd/plugin/backup/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package backup implements a command to request an on-demand backup diff --git a/internal/cmd/plugin/backup/parameters.go b/internal/cmd/plugin/backup/parameters.go index 126031a0e3..57bae737b0 100644 --- a/internal/cmd/plugin/backup/parameters.go +++ b/internal/cmd/plugin/backup/parameters.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package backup diff --git a/internal/cmd/plugin/backup/parameters_test.go b/internal/cmd/plugin/backup/parameters_test.go index 4b4e44cc22..c83f95985d 100644 --- a/internal/cmd/plugin/backup/parameters_test.go +++ b/internal/cmd/plugin/backup/parameters_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package backup diff --git a/internal/cmd/plugin/backup/suite_test.go b/internal/cmd/plugin/backup/suite_test.go index 2dd5a10241..2ad9f64173 100644 --- a/internal/cmd/plugin/backup/suite_test.go +++ b/internal/cmd/plugin/backup/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package backup diff --git a/internal/cmd/plugin/certificate/certificate.go b/internal/cmd/plugin/certificate/certificate.go index 18bd0f4072..b4fa7e2dc1 100644 --- a/internal/cmd/plugin/certificate/certificate.go +++ b/internal/cmd/plugin/certificate/certificate.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package certificate implement the kubectl-cnpg certificate command diff --git a/internal/cmd/plugin/certificate/cmd.go b/internal/cmd/plugin/certificate/cmd.go index 6247777aec..e8930de4ef 100644 --- a/internal/cmd/plugin/certificate/cmd.go +++ b/internal/cmd/plugin/certificate/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certificate diff --git a/internal/cmd/plugin/color.go b/internal/cmd/plugin/color.go index 9d2acdc423..0fc9ebda50 100644 --- a/internal/cmd/plugin/color.go +++ b/internal/cmd/plugin/color.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/plugin/color_test.go b/internal/cmd/plugin/color_test.go index 6fabdb6e10..2da07bac4b 100644 --- a/internal/cmd/plugin/color_test.go +++ b/internal/cmd/plugin/color_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/plugin/destroy/cmd.go b/internal/cmd/plugin/destroy/cmd.go index a1bf87b665..0068b9bd01 100644 --- a/internal/cmd/plugin/destroy/cmd.go +++ b/internal/cmd/plugin/destroy/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package destroy diff --git a/internal/cmd/plugin/destroy/destroy.go b/internal/cmd/plugin/destroy/destroy.go index 11fbb036db..9f30cadcda 100644 --- a/internal/cmd/plugin/destroy/destroy.go +++ b/internal/cmd/plugin/destroy/destroy.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package destroy implements a command to destroy an instances of a cluster and its associated PVC diff --git a/internal/cmd/plugin/fence/cmd.go b/internal/cmd/plugin/fence/cmd.go index ab7bd6b8f7..455703d613 100644 --- a/internal/cmd/plugin/fence/cmd.go +++ b/internal/cmd/plugin/fence/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package fence diff --git a/internal/cmd/plugin/fence/fence.go b/internal/cmd/plugin/fence/fence.go index 33c4a1165a..880ba52961 100644 --- a/internal/cmd/plugin/fence/fence.go +++ b/internal/cmd/plugin/fence/fence.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package fence implements a command to fence instances in a cluster diff --git a/internal/cmd/plugin/fio/cmd.go b/internal/cmd/plugin/fio/cmd.go index fdb7b721d1..e064e38f47 100644 --- a/internal/cmd/plugin/fio/cmd.go +++ b/internal/cmd/plugin/fio/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package fio diff --git a/internal/cmd/plugin/fio/doc.go b/internal/cmd/plugin/fio/doc.go index 12f8769e35..c00f151dbc 100644 --- a/internal/cmd/plugin/fio/doc.go +++ b/internal/cmd/plugin/fio/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package fio implements the fio job creation via deployment diff --git a/internal/cmd/plugin/fio/fio.go b/internal/cmd/plugin/fio/fio.go index 8aa6088c67..b7460516d9 100644 --- a/internal/cmd/plugin/fio/fio.go +++ b/internal/cmd/plugin/fio/fio.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package fio implements the kubectl-cnpg fio sub-command diff --git a/internal/cmd/plugin/hibernate/cmd.go b/internal/cmd/plugin/hibernate/cmd.go index 5a2c0c9645..abe1adf243 100644 --- a/internal/cmd/plugin/hibernate/cmd.go +++ b/internal/cmd/plugin/hibernate/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package hibernate diff --git a/internal/cmd/plugin/hibernate/cmd_test.go b/internal/cmd/plugin/hibernate/cmd_test.go index 89f82bd8e8..6efbf7c3a3 100644 --- a/internal/cmd/plugin/hibernate/cmd_test.go +++ b/internal/cmd/plugin/hibernate/cmd_test.go @@ -1,3 +1,22 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + package hibernate import ( diff --git a/internal/cmd/plugin/hibernate/doc.go b/internal/cmd/plugin/hibernate/doc.go index a6373346ee..be531991fc 100644 --- a/internal/cmd/plugin/hibernate/doc.go +++ b/internal/cmd/plugin/hibernate/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package hibernate implements the hibernation feature diff --git a/internal/cmd/plugin/hibernate/suite_test.go b/internal/cmd/plugin/hibernate/suite_test.go index b87a96a22c..491346b9de 100644 --- a/internal/cmd/plugin/hibernate/suite_test.go +++ b/internal/cmd/plugin/hibernate/suite_test.go @@ -1,3 +1,22 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + package hibernate_test import ( diff --git a/internal/cmd/plugin/install/cmd.go b/internal/cmd/plugin/install/cmd.go index ac1ab16996..fa8d2a3f75 100644 --- a/internal/cmd/plugin/install/cmd.go +++ b/internal/cmd/plugin/install/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package install diff --git a/internal/cmd/plugin/install/doc.go b/internal/cmd/plugin/install/doc.go index 00c114b8ca..4124755c60 100644 --- a/internal/cmd/plugin/install/doc.go +++ b/internal/cmd/plugin/install/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package install implements the install plugin command diff --git a/internal/cmd/plugin/install/generate.go b/internal/cmd/plugin/install/generate.go index 243739e89f..ef9e969157 100644 --- a/internal/cmd/plugin/install/generate.go +++ b/internal/cmd/plugin/install/generate.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package install diff --git a/internal/cmd/plugin/logical/database.go b/internal/cmd/plugin/logical/database.go index c3b97d2d4c..2a416388eb 100644 --- a/internal/cmd/plugin/logical/database.go +++ b/internal/cmd/plugin/logical/database.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logical diff --git a/internal/cmd/plugin/logical/doc.go b/internal/cmd/plugin/logical/doc.go index e0b57900fd..be21904a27 100644 --- a/internal/cmd/plugin/logical/doc.go +++ b/internal/cmd/plugin/logical/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package logical contains the common features of the diff --git a/internal/cmd/plugin/logical/externalcluster.go b/internal/cmd/plugin/logical/externalcluster.go index 8f8bdcc54a..0304c4a81d 100644 --- a/internal/cmd/plugin/logical/externalcluster.go +++ b/internal/cmd/plugin/logical/externalcluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logical diff --git a/internal/cmd/plugin/logical/psql.go b/internal/cmd/plugin/logical/psql.go index a13d527235..c1c3af41c6 100644 --- a/internal/cmd/plugin/logical/psql.go +++ b/internal/cmd/plugin/logical/psql.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logical diff --git a/internal/cmd/plugin/logical/publication/cmd.go b/internal/cmd/plugin/logical/publication/cmd.go index 8da69947da..9ce166a5de 100644 --- a/internal/cmd/plugin/logical/publication/cmd.go +++ b/internal/cmd/plugin/logical/publication/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package publication diff --git a/internal/cmd/plugin/logical/publication/create/cmd.go b/internal/cmd/plugin/logical/publication/create/cmd.go index 3e7847e682..628dc47e1b 100644 --- a/internal/cmd/plugin/logical/publication/create/cmd.go +++ b/internal/cmd/plugin/logical/publication/create/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package create diff --git a/internal/cmd/plugin/logical/publication/create/doc.go b/internal/cmd/plugin/logical/publication/create/doc.go index 5717d3abdf..2a38546f4c 100644 --- a/internal/cmd/plugin/logical/publication/create/doc.go +++ b/internal/cmd/plugin/logical/publication/create/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package create contains the implementation of the kubectl cnpg publication create command diff --git a/internal/cmd/plugin/logical/publication/create/publication.go b/internal/cmd/plugin/logical/publication/create/publication.go index 964f35c578..66f30e69fa 100644 --- a/internal/cmd/plugin/logical/publication/create/publication.go +++ b/internal/cmd/plugin/logical/publication/create/publication.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package create diff --git a/internal/cmd/plugin/logical/publication/create/publication_test.go b/internal/cmd/plugin/logical/publication/create/publication_test.go index 1aafaa1772..a6f11ee92c 100644 --- a/internal/cmd/plugin/logical/publication/create/publication_test.go +++ b/internal/cmd/plugin/logical/publication/create/publication_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package create diff --git a/internal/cmd/plugin/logical/publication/create/suite_test.go b/internal/cmd/plugin/logical/publication/create/suite_test.go index b87263eec2..2bb5d081e4 100644 --- a/internal/cmd/plugin/logical/publication/create/suite_test.go +++ b/internal/cmd/plugin/logical/publication/create/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package create diff --git a/internal/cmd/plugin/logical/publication/doc.go b/internal/cmd/plugin/logical/publication/doc.go index f83e7f3303..d9c8f12919 100644 --- a/internal/cmd/plugin/logical/publication/doc.go +++ b/internal/cmd/plugin/logical/publication/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package publication contains the implementation of the kubectl cnpg publication command diff --git a/internal/cmd/plugin/logical/publication/drop/cmd.go b/internal/cmd/plugin/logical/publication/drop/cmd.go index b7d1166c5c..0d1c3b689c 100644 --- a/internal/cmd/plugin/logical/publication/drop/cmd.go +++ b/internal/cmd/plugin/logical/publication/drop/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package drop diff --git a/internal/cmd/plugin/logical/publication/drop/doc.go b/internal/cmd/plugin/logical/publication/drop/doc.go index 42d91045fb..517d8434f6 100644 --- a/internal/cmd/plugin/logical/publication/drop/doc.go +++ b/internal/cmd/plugin/logical/publication/drop/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package drop contains the implementation of the kubectl cnpg publication drop command diff --git a/internal/cmd/plugin/logical/subscription/cmd.go b/internal/cmd/plugin/logical/subscription/cmd.go index 706d3ff4dc..438091a0c5 100644 --- a/internal/cmd/plugin/logical/subscription/cmd.go +++ b/internal/cmd/plugin/logical/subscription/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package subscription diff --git a/internal/cmd/plugin/logical/subscription/create/cmd.go b/internal/cmd/plugin/logical/subscription/create/cmd.go index 9c234d8ddc..93b43def2f 100644 --- a/internal/cmd/plugin/logical/subscription/create/cmd.go +++ b/internal/cmd/plugin/logical/subscription/create/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package create diff --git a/internal/cmd/plugin/logical/subscription/create/doc.go b/internal/cmd/plugin/logical/subscription/create/doc.go index a2918f60e6..e5842f21d3 100644 --- a/internal/cmd/plugin/logical/subscription/create/doc.go +++ b/internal/cmd/plugin/logical/subscription/create/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package create contains the implementation of the kubectl cnpg subscription create command diff --git a/internal/cmd/plugin/logical/subscription/create/subscription.go b/internal/cmd/plugin/logical/subscription/create/subscription.go index f545364116..ede10137be 100644 --- a/internal/cmd/plugin/logical/subscription/create/subscription.go +++ b/internal/cmd/plugin/logical/subscription/create/subscription.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package create diff --git a/internal/cmd/plugin/logical/subscription/doc.go b/internal/cmd/plugin/logical/subscription/doc.go index 63b527ec2e..6185f99274 100644 --- a/internal/cmd/plugin/logical/subscription/doc.go +++ b/internal/cmd/plugin/logical/subscription/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package subscription contains the implementation of the kubectl cnpg subscription command diff --git a/internal/cmd/plugin/logical/subscription/drop/cmd.go b/internal/cmd/plugin/logical/subscription/drop/cmd.go index 1ec557ad5f..7e94df017b 100644 --- a/internal/cmd/plugin/logical/subscription/drop/cmd.go +++ b/internal/cmd/plugin/logical/subscription/drop/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package drop diff --git a/internal/cmd/plugin/logical/subscription/drop/doc.go b/internal/cmd/plugin/logical/subscription/drop/doc.go index a5af8cbcef..340d88f503 100644 --- a/internal/cmd/plugin/logical/subscription/drop/doc.go +++ b/internal/cmd/plugin/logical/subscription/drop/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package drop contains the implementatoin of the cnpg subscription drop command diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go index c88f62d0ee..2996f96d1e 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package syncsequences diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/doc.go b/internal/cmd/plugin/logical/subscription/syncsequences/doc.go index 4abc977de1..7b3f79b605 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/doc.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package syncsequences contains the implementation of the diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/get.go b/internal/cmd/plugin/logical/subscription/syncsequences/get.go index 070b45b990..460ac424a4 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/get.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/get.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package syncsequences contains the implementation of the diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/update.go b/internal/cmd/plugin/logical/subscription/syncsequences/update.go index 82d4170770..f706593a50 100644 --- a/internal/cmd/plugin/logical/subscription/syncsequences/update.go +++ b/internal/cmd/plugin/logical/subscription/syncsequences/update.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package syncsequences diff --git a/internal/cmd/plugin/logs/cluster.go b/internal/cmd/plugin/logs/cluster.go index 3ba9e7d4b2..788b1916f4 100644 --- a/internal/cmd/plugin/logs/cluster.go +++ b/internal/cmd/plugin/logs/cluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/internal/cmd/plugin/logs/cluster_logs.go b/internal/cmd/plugin/logs/cluster_logs.go index 23de7650cc..4c2d7a4978 100644 --- a/internal/cmd/plugin/logs/cluster_logs.go +++ b/internal/cmd/plugin/logs/cluster_logs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/internal/cmd/plugin/logs/cluster_logs_test.go b/internal/cmd/plugin/logs/cluster_logs_test.go index ffb9e14c0c..25c6be2d05 100644 --- a/internal/cmd/plugin/logs/cluster_logs_test.go +++ b/internal/cmd/plugin/logs/cluster_logs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/internal/cmd/plugin/logs/cluster_test.go b/internal/cmd/plugin/logs/cluster_test.go index 356d4a3ee3..b36f8ce066 100644 --- a/internal/cmd/plugin/logs/cluster_test.go +++ b/internal/cmd/plugin/logs/cluster_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/internal/cmd/plugin/logs/cmd.go b/internal/cmd/plugin/logs/cmd.go index c898ac085b..94666c66e5 100644 --- a/internal/cmd/plugin/logs/cmd.go +++ b/internal/cmd/plugin/logs/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/internal/cmd/plugin/logs/cmd_test.go b/internal/cmd/plugin/logs/cmd_test.go index 2ac6dc75f2..1be90c31ff 100644 --- a/internal/cmd/plugin/logs/cmd_test.go +++ b/internal/cmd/plugin/logs/cmd_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/internal/cmd/plugin/logs/doc.go b/internal/cmd/plugin/logs/doc.go index e4fcac3519..682102cecd 100644 --- a/internal/cmd/plugin/logs/doc.go +++ b/internal/cmd/plugin/logs/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package logs implements the kubectl-cnpg logs command diff --git a/internal/cmd/plugin/logs/pretty/doc.go b/internal/cmd/plugin/logs/pretty/doc.go index 31c5069913..12c350843c 100644 --- a/internal/cmd/plugin/logs/pretty/doc.go +++ b/internal/cmd/plugin/logs/pretty/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package pretty contains the implementation of `kubectl cnpg logs pretty` diff --git a/internal/cmd/plugin/logs/pretty/log_level.go b/internal/cmd/plugin/logs/pretty/log_level.go index 5fe3b6711b..5f825c066d 100644 --- a/internal/cmd/plugin/logs/pretty/log_level.go +++ b/internal/cmd/plugin/logs/pretty/log_level.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pretty diff --git a/internal/cmd/plugin/logs/pretty/log_record.go b/internal/cmd/plugin/logs/pretty/log_record.go index bc784e4de3..edb8bf89a4 100644 --- a/internal/cmd/plugin/logs/pretty/log_record.go +++ b/internal/cmd/plugin/logs/pretty/log_record.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pretty diff --git a/internal/cmd/plugin/logs/pretty/pretty.go b/internal/cmd/plugin/logs/pretty/pretty.go index 8950a016db..23c6636aca 100644 --- a/internal/cmd/plugin/logs/pretty/pretty.go +++ b/internal/cmd/plugin/logs/pretty/pretty.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pretty diff --git a/internal/cmd/plugin/logs/suite_test.go b/internal/cmd/plugin/logs/suite_test.go index 476d2ff84b..7bb114392f 100644 --- a/internal/cmd/plugin/logs/suite_test.go +++ b/internal/cmd/plugin/logs/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/internal/cmd/plugin/maintenance/cmd.go b/internal/cmd/plugin/maintenance/cmd.go index 39a9d735da..1d6c4f5b58 100644 --- a/internal/cmd/plugin/maintenance/cmd.go +++ b/internal/cmd/plugin/maintenance/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package maintenance diff --git a/internal/cmd/plugin/maintenance/maintenance.go b/internal/cmd/plugin/maintenance/maintenance.go index 414a43f3a2..bc5da6b797 100644 --- a/internal/cmd/plugin/maintenance/maintenance.go +++ b/internal/cmd/plugin/maintenance/maintenance.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package maintenance implements the kubectl-cnpg maintenance sub-command diff --git a/internal/cmd/plugin/output.go b/internal/cmd/plugin/output.go index 64f08f21f7..37d9c417da 100644 --- a/internal/cmd/plugin/output.go +++ b/internal/cmd/plugin/output.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/plugin/pgadmin/cmd.go b/internal/cmd/plugin/pgadmin/cmd.go index eb357c7a11..f4b85a6601 100644 --- a/internal/cmd/plugin/pgadmin/cmd.go +++ b/internal/cmd/plugin/pgadmin/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgadmin diff --git a/internal/cmd/plugin/pgadmin/doc.go b/internal/cmd/plugin/pgadmin/doc.go index 4384798775..84704a7c2f 100644 --- a/internal/cmd/plugin/pgadmin/doc.go +++ b/internal/cmd/plugin/pgadmin/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package pgadmin implements the creation of a pgadmin deployment diff --git a/internal/cmd/plugin/pgadmin/pgadmin.go b/internal/cmd/plugin/pgadmin/pgadmin.go index 078164a29e..b5e09d427e 100644 --- a/internal/cmd/plugin/pgadmin/pgadmin.go +++ b/internal/cmd/plugin/pgadmin/pgadmin.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgadmin diff --git a/internal/cmd/plugin/pgadmin/pgadmin_test.go b/internal/cmd/plugin/pgadmin/pgadmin_test.go index b1f4666b8c..0c9a4e19fd 100644 --- a/internal/cmd/plugin/pgadmin/pgadmin_test.go +++ b/internal/cmd/plugin/pgadmin/pgadmin_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgadmin diff --git a/internal/cmd/plugin/pgadmin/suite_test.go b/internal/cmd/plugin/pgadmin/suite_test.go index 9add9d7e6d..e09bd27ada 100644 --- a/internal/cmd/plugin/pgadmin/suite_test.go +++ b/internal/cmd/plugin/pgadmin/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgadmin diff --git a/internal/cmd/plugin/pgbench/cmd.go b/internal/cmd/plugin/pgbench/cmd.go index 2e697b04fe..5ace0e9487 100644 --- a/internal/cmd/plugin/pgbench/cmd.go +++ b/internal/cmd/plugin/pgbench/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package pgbench diff --git a/internal/cmd/plugin/pgbench/cmd_test.go b/internal/cmd/plugin/pgbench/cmd_test.go index d75d1e383b..db58b5be4e 100644 --- a/internal/cmd/plugin/pgbench/cmd_test.go +++ b/internal/cmd/plugin/pgbench/cmd_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgbench diff --git a/internal/cmd/plugin/pgbench/doc.go b/internal/cmd/plugin/pgbench/doc.go index 1f5dcc9c3c..a4269660f5 100644 --- a/internal/cmd/plugin/pgbench/doc.go +++ b/internal/cmd/plugin/pgbench/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package pgbench implements the pgbench job creation diff --git a/internal/cmd/plugin/pgbench/pgbench.go b/internal/cmd/plugin/pgbench/pgbench.go index 6b415ab949..6b5fbb81c0 100644 --- a/internal/cmd/plugin/pgbench/pgbench.go +++ b/internal/cmd/plugin/pgbench/pgbench.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package pgbench implements the kubectl-cnpg pgbench sub-command diff --git a/internal/cmd/plugin/pgbench/suite_test.go b/internal/cmd/plugin/pgbench/suite_test.go index 4a0960b6eb..3333764f1c 100644 --- a/internal/cmd/plugin/pgbench/suite_test.go +++ b/internal/cmd/plugin/pgbench/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package pgbench diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go index 432e347283..67be9b9c3f 100644 --- a/internal/cmd/plugin/plugin.go +++ b/internal/cmd/plugin/plugin.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package plugin contains the common behaviors of the kubectl-cnpg subcommand diff --git a/internal/cmd/plugin/plugin_test.go b/internal/cmd/plugin/plugin_test.go index 8d0ed55683..d0fa5b493c 100644 --- a/internal/cmd/plugin/plugin_test.go +++ b/internal/cmd/plugin/plugin_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/plugin/printer.go b/internal/cmd/plugin/printer.go index 47312bccd4..0b69c9d314 100644 --- a/internal/cmd/plugin/printer.go +++ b/internal/cmd/plugin/printer.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/plugin/promote/cmd.go b/internal/cmd/plugin/promote/cmd.go index c8e0db5bdc..e9217ae7f7 100644 --- a/internal/cmd/plugin/promote/cmd.go +++ b/internal/cmd/plugin/promote/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package promote diff --git a/internal/cmd/plugin/promote/promote.go b/internal/cmd/plugin/promote/promote.go index a2e160cb37..fde8a61dcf 100644 --- a/internal/cmd/plugin/promote/promote.go +++ b/internal/cmd/plugin/promote/promote.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package promote implement the kubectl-cnpg promote command diff --git a/internal/cmd/plugin/promote/promote_test.go b/internal/cmd/plugin/promote/promote_test.go index 46ba5ff1a6..cb2ba8c950 100644 --- a/internal/cmd/plugin/promote/promote_test.go +++ b/internal/cmd/plugin/promote/promote_test.go @@ -1,3 +1,22 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + package promote import ( diff --git a/internal/cmd/plugin/promote/suite_test.go b/internal/cmd/plugin/promote/suite_test.go index 3c0e363370..566770d356 100644 --- a/internal/cmd/plugin/promote/suite_test.go +++ b/internal/cmd/plugin/promote/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package promote diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go index 514c39a853..b471109e80 100644 --- a/internal/cmd/plugin/psql/cmd.go +++ b/internal/cmd/plugin/psql/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package psql diff --git a/internal/cmd/plugin/psql/doc.go b/internal/cmd/plugin/psql/doc.go index 4b723495eb..039cf73ef0 100644 --- a/internal/cmd/plugin/psql/doc.go +++ b/internal/cmd/plugin/psql/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package psql implements the `kubectl cnpg psql` command diff --git a/internal/cmd/plugin/psql/psql.go b/internal/cmd/plugin/psql/psql.go index 35d43c16ff..362f03068e 100644 --- a/internal/cmd/plugin/psql/psql.go +++ b/internal/cmd/plugin/psql/psql.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package psql diff --git a/internal/cmd/plugin/psql/psql_test.go b/internal/cmd/plugin/psql/psql_test.go index 22058e5d8b..26be3ff2ba 100644 --- a/internal/cmd/plugin/psql/psql_test.go +++ b/internal/cmd/plugin/psql/psql_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package psql diff --git a/internal/cmd/plugin/psql/suite_test.go b/internal/cmd/plugin/psql/suite_test.go index 9ffe5eb224..6b6d008a7d 100644 --- a/internal/cmd/plugin/psql/suite_test.go +++ b/internal/cmd/plugin/psql/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package psql diff --git a/internal/cmd/plugin/reload/cmd.go b/internal/cmd/plugin/reload/cmd.go index 833e1eae2b..968c8a65ca 100644 --- a/internal/cmd/plugin/reload/cmd.go +++ b/internal/cmd/plugin/reload/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package reload diff --git a/internal/cmd/plugin/reload/reload.go b/internal/cmd/plugin/reload/reload.go index 236910207f..b97c72666a 100644 --- a/internal/cmd/plugin/reload/reload.go +++ b/internal/cmd/plugin/reload/reload.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package reload implements a command to trigger a reconciliation loop for a cluster diff --git a/internal/cmd/plugin/report/cluster.go b/internal/cmd/plugin/report/cluster.go index fa76af6461..b851e55358 100644 --- a/internal/cmd/plugin/report/cluster.go +++ b/internal/cmd/plugin/report/cluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/cluster_report.go b/internal/cmd/plugin/report/cluster_report.go index 68a44d5594..d2eb5aecc5 100644 --- a/internal/cmd/plugin/report/cluster_report.go +++ b/internal/cmd/plugin/report/cluster_report.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/cmd.go b/internal/cmd/plugin/report/cmd.go index 0b746264ba..f018f57888 100644 --- a/internal/cmd/plugin/report/cmd.go +++ b/internal/cmd/plugin/report/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/logs.go b/internal/cmd/plugin/report/logs.go index 4c28a3a2e8..985b5be4d7 100644 --- a/internal/cmd/plugin/report/logs.go +++ b/internal/cmd/plugin/report/logs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/olm.go b/internal/cmd/plugin/report/olm.go index 5e179540cc..9a5c8b0548 100644 --- a/internal/cmd/plugin/report/olm.go +++ b/internal/cmd/plugin/report/olm.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/operator.go b/internal/cmd/plugin/report/operator.go index 36dac12843..d30ca842eb 100644 --- a/internal/cmd/plugin/report/operator.go +++ b/internal/cmd/plugin/report/operator.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/operator_objects.go b/internal/cmd/plugin/report/operator_objects.go index f9cb4b5ad0..e585a13bd1 100644 --- a/internal/cmd/plugin/report/operator_objects.go +++ b/internal/cmd/plugin/report/operator_objects.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/operator_report.go b/internal/cmd/plugin/report/operator_report.go index f52785854a..58af2608cb 100644 --- a/internal/cmd/plugin/report/operator_report.go +++ b/internal/cmd/plugin/report/operator_report.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package report implements the kubectl-cnpg report command diff --git a/internal/cmd/plugin/report/operator_utils.go b/internal/cmd/plugin/report/operator_utils.go index a0876ed4f1..c923e74872 100644 --- a/internal/cmd/plugin/report/operator_utils.go +++ b/internal/cmd/plugin/report/operator_utils.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/output.go b/internal/cmd/plugin/report/output.go index 3b24c5ee46..6deabf39ea 100644 --- a/internal/cmd/plugin/report/output.go +++ b/internal/cmd/plugin/report/output.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/redactors.go b/internal/cmd/plugin/report/redactors.go index bf78c1bada..7230081002 100644 --- a/internal/cmd/plugin/report/redactors.go +++ b/internal/cmd/plugin/report/redactors.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/redactors_test.go b/internal/cmd/plugin/report/redactors_test.go index 9c6a153fc7..010babb7c4 100644 --- a/internal/cmd/plugin/report/redactors_test.go +++ b/internal/cmd/plugin/report/redactors_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/report/suite_test.go b/internal/cmd/plugin/report/suite_test.go index c1b0b03b6d..f7a582ba54 100644 --- a/internal/cmd/plugin/report/suite_test.go +++ b/internal/cmd/plugin/report/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package report diff --git a/internal/cmd/plugin/restart/cmd.go b/internal/cmd/plugin/restart/cmd.go index 28ef5e31df..afbe7022aa 100644 --- a/internal/cmd/plugin/restart/cmd.go +++ b/internal/cmd/plugin/restart/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package restart diff --git a/internal/cmd/plugin/restart/restart.go b/internal/cmd/plugin/restart/restart.go index 4b863e426e..da50c19a5e 100644 --- a/internal/cmd/plugin/restart/restart.go +++ b/internal/cmd/plugin/restart/restart.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package restart implements a command to rollout restart a cluster or restart a single instance diff --git a/internal/cmd/plugin/snapshot/cmd.go b/internal/cmd/plugin/snapshot/cmd.go index 17039dcc7d..b5f3b1a35a 100644 --- a/internal/cmd/plugin/snapshot/cmd.go +++ b/internal/cmd/plugin/snapshot/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package snapshot diff --git a/internal/cmd/plugin/snapshot/doc.go b/internal/cmd/plugin/snapshot/doc.go index cf22d8168f..8ad0849052 100644 --- a/internal/cmd/plugin/snapshot/doc.go +++ b/internal/cmd/plugin/snapshot/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package snapshot implements the snapshot feature diff --git a/internal/cmd/plugin/status/cmd.go b/internal/cmd/plugin/status/cmd.go index 385f50e66f..b36560f758 100644 --- a/internal/cmd/plugin/status/cmd.go +++ b/internal/cmd/plugin/status/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package status diff --git a/internal/cmd/plugin/status/doc.go b/internal/cmd/plugin/status/doc.go index 1bc9104790..decdf55fe1 100644 --- a/internal/cmd/plugin/status/doc.go +++ b/internal/cmd/plugin/status/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package status implements the kubectl-cnpg status command diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 890362c84a..3c83061eba 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package status diff --git a/internal/cmd/plugin/status/status_test.go b/internal/cmd/plugin/status/status_test.go index d39a949924..05ae1688a0 100644 --- a/internal/cmd/plugin/status/status_test.go +++ b/internal/cmd/plugin/status/status_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package status diff --git a/internal/cmd/plugin/status/suite_test.go b/internal/cmd/plugin/status/suite_test.go index 23daf63ee5..2586fc4b6e 100644 --- a/internal/cmd/plugin/status/suite_test.go +++ b/internal/cmd/plugin/status/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package status diff --git a/internal/cmd/plugin/suite_test.go b/internal/cmd/plugin/suite_test.go index 2f0816b08b..c10994180d 100644 --- a/internal/cmd/plugin/suite_test.go +++ b/internal/cmd/plugin/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cmd/versions/cmd.go b/internal/cmd/versions/cmd.go index 498457dcd5..4fc872cb09 100644 --- a/internal/cmd/versions/cmd.go +++ b/internal/cmd/versions/cmd.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package versions builds the version subcommand for both manager and plugins diff --git a/internal/cnpi/plugin/client/backup.go b/internal/cnpi/plugin/client/backup.go index 54aab9cc4f..5178aa3a53 100644 --- a/internal/cnpi/plugin/client/backup.go +++ b/internal/cnpi/plugin/client/backup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/client.go b/internal/cnpi/plugin/client/client.go index 100a0e027b..bc14576e4a 100644 --- a/internal/cnpi/plugin/client/client.go +++ b/internal/cnpi/plugin/client/client.go @@ -1,17 +1,20 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/cluster.go b/internal/cnpi/plugin/client/cluster.go index c5ae9b4456..0742dba8ca 100644 --- a/internal/cnpi/plugin/client/cluster.go +++ b/internal/cnpi/plugin/client/cluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/cluster_test.go b/internal/cnpi/plugin/client/cluster_test.go index 2d574c70f4..01bbd4fd66 100644 --- a/internal/cnpi/plugin/client/cluster_test.go +++ b/internal/cnpi/plugin/client/cluster_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go index d136a71d08..0f23953018 100644 --- a/internal/cnpi/plugin/client/contracts.go +++ b/internal/cnpi/plugin/client/contracts.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/doc.go b/internal/cnpi/plugin/client/doc.go index 1cb0e5ee6d..e0285d00df 100644 --- a/internal/cnpi/plugin/client/doc.go +++ b/internal/cnpi/plugin/client/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package client contains a set of helper structures for CNPG to use the diff --git a/internal/cnpi/plugin/client/lifecycle.go b/internal/cnpi/plugin/client/lifecycle.go index 3e47d37ff4..98e3a0ddd7 100644 --- a/internal/cnpi/plugin/client/lifecycle.go +++ b/internal/cnpi/plugin/client/lifecycle.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/lifecycle_test.go b/internal/cnpi/plugin/client/lifecycle_test.go index dd5d38ec1a..bcff9384bd 100644 --- a/internal/cnpi/plugin/client/lifecycle_test.go +++ b/internal/cnpi/plugin/client/lifecycle_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/reconciler.go b/internal/cnpi/plugin/client/reconciler.go index c7459c158f..b234ef48fb 100644 --- a/internal/cnpi/plugin/client/reconciler.go +++ b/internal/cnpi/plugin/client/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/restore_job.go b/internal/cnpi/plugin/client/restore_job.go index 028c0d3e95..c6bc843b7e 100644 --- a/internal/cnpi/plugin/client/restore_job.go +++ b/internal/cnpi/plugin/client/restore_job.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/suite_test.go b/internal/cnpi/plugin/client/suite_test.go index ba9e9a64db..7be0300f18 100644 --- a/internal/cnpi/plugin/client/suite_test.go +++ b/internal/cnpi/plugin/client/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/client/wal.go b/internal/cnpi/plugin/client/wal.go index 81e0234848..e9d36b55b8 100644 --- a/internal/cnpi/plugin/client/wal.go +++ b/internal/cnpi/plugin/client/wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package client diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go index 34e9574d05..1eb3a79794 100644 --- a/internal/cnpi/plugin/connection/connection.go +++ b/internal/cnpi/plugin/connection/connection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package connection diff --git a/internal/cnpi/plugin/connection/doc.go b/internal/cnpi/plugin/connection/doc.go index 01cef3f037..78f836279e 100644 --- a/internal/cnpi/plugin/connection/doc.go +++ b/internal/cnpi/plugin/connection/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package connection represents a connected CNPG-i plugin diff --git a/internal/cnpi/plugin/connection/metadata.go b/internal/cnpi/plugin/connection/metadata.go index a17e4d9ae0..b8744a19e9 100644 --- a/internal/cnpi/plugin/connection/metadata.go +++ b/internal/cnpi/plugin/connection/metadata.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package connection diff --git a/internal/cnpi/plugin/connection/remote.go b/internal/cnpi/plugin/connection/remote.go index 73285c4182..159469a74e 100644 --- a/internal/cnpi/plugin/connection/remote.go +++ b/internal/cnpi/plugin/connection/remote.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package connection diff --git a/internal/cnpi/plugin/connection/unix.go b/internal/cnpi/plugin/connection/unix.go index 1a485c179e..866e62b371 100644 --- a/internal/cnpi/plugin/connection/unix.go +++ b/internal/cnpi/plugin/connection/unix.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package connection represents a connected CNPG-i plugin diff --git a/internal/cnpi/plugin/doc.go b/internal/cnpi/plugin/doc.go index c642a90b43..5b59b41c15 100644 --- a/internal/cnpi/plugin/doc.go +++ b/internal/cnpi/plugin/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package plugin contains the logics that acts as bridge between cnpg-i and the operator diff --git a/internal/cnpi/plugin/mapping.go b/internal/cnpi/plugin/mapping.go index 7b9373969b..28617ed66b 100644 --- a/internal/cnpi/plugin/mapping.go +++ b/internal/cnpi/plugin/mapping.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package plugin diff --git a/internal/cnpi/plugin/operatorclient/client.go b/internal/cnpi/plugin/operatorclient/client.go index 7a4aa84456..709c616ab4 100644 --- a/internal/cnpi/plugin/operatorclient/client.go +++ b/internal/cnpi/plugin/operatorclient/client.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package operatorclient diff --git a/internal/cnpi/plugin/operatorclient/client_test.go b/internal/cnpi/plugin/operatorclient/client_test.go index aaaa975948..d1d5a48466 100644 --- a/internal/cnpi/plugin/operatorclient/client_test.go +++ b/internal/cnpi/plugin/operatorclient/client_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package operatorclient diff --git a/internal/cnpi/plugin/operatorclient/doc.go b/internal/cnpi/plugin/operatorclient/doc.go index 89ddcf2ee0..4d339fc358 100644 --- a/internal/cnpi/plugin/operatorclient/doc.go +++ b/internal/cnpi/plugin/operatorclient/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package operatorclient contains an extended kubernetes client that supports plugin API calls diff --git a/internal/cnpi/plugin/operatorclient/suite_test.go b/internal/cnpi/plugin/operatorclient/suite_test.go index 52a34e0df9..a48d27f210 100644 --- a/internal/cnpi/plugin/operatorclient/suite_test.go +++ b/internal/cnpi/plugin/operatorclient/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package operatorclient diff --git a/internal/cnpi/plugin/repository/connection.go b/internal/cnpi/plugin/repository/connection.go index a71586f530..c7879902c1 100644 --- a/internal/cnpi/plugin/repository/connection.go +++ b/internal/cnpi/plugin/repository/connection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package repository diff --git a/internal/cnpi/plugin/repository/doc.go b/internal/cnpi/plugin/repository/doc.go index 232d249998..03b3cbeb2a 100644 --- a/internal/cnpi/plugin/repository/doc.go +++ b/internal/cnpi/plugin/repository/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package repository contains the plugin discovery and diff --git a/internal/cnpi/plugin/repository/errors.go b/internal/cnpi/plugin/repository/errors.go index 4dc180bd9c..bfe6a3e504 100644 --- a/internal/cnpi/plugin/repository/errors.go +++ b/internal/cnpi/plugin/repository/errors.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package repository diff --git a/internal/cnpi/plugin/repository/setup.go b/internal/cnpi/plugin/repository/setup.go index b47db04283..c96a824e7e 100644 --- a/internal/cnpi/plugin/repository/setup.go +++ b/internal/cnpi/plugin/repository/setup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package repository diff --git a/internal/cnpi/plugin/repository/setup_test.go b/internal/cnpi/plugin/repository/setup_test.go index d3030a5754..f680e2b48e 100644 --- a/internal/cnpi/plugin/repository/setup_test.go +++ b/internal/cnpi/plugin/repository/setup_test.go @@ -1,3 +1,22 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + package repository import ( diff --git a/internal/cnpi/plugin/repository/suite_test.go b/internal/cnpi/plugin/repository/suite_test.go index 4421aecdda..2672d79d7d 100644 --- a/internal/cnpi/plugin/repository/suite_test.go +++ b/internal/cnpi/plugin/repository/suite_test.go @@ -1,3 +1,22 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + package repository import ( diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go index 56f00fa2aa..b9ffb53847 100644 --- a/internal/configuration/configuration.go +++ b/internal/configuration/configuration.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package configuration contains the configuration of the operator, reading diff --git a/internal/configuration/configuration_test.go b/internal/configuration/configuration_test.go index 97c5250334..b58bbe3b8b 100644 --- a/internal/configuration/configuration_test.go +++ b/internal/configuration/configuration_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configuration diff --git a/internal/configuration/suite_test.go b/internal/configuration/suite_test.go index 76e06703a6..d26575cd1d 100644 --- a/internal/configuration/suite_test.go +++ b/internal/configuration/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configuration diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index a9582701f0..50866066ae 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/backup_controller_test.go b/internal/controller/backup_controller_test.go index 6890e2ad46..543bb00668 100644 --- a/internal/controller/backup_controller_test.go +++ b/internal/controller/backup_controller_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/backup_predicates.go b/internal/controller/backup_predicates.go index 5fcfd39dd9..ee5c541351 100644 --- a/internal/controller/backup_predicates.go +++ b/internal/controller/backup_predicates.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/backup_predicates_test.go b/internal/controller/backup_predicates_test.go index e7715ab37c..49ccd4edea 100644 --- a/internal/controller/backup_predicates_test.go +++ b/internal/controller/backup_predicates_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_cleanup.go b/internal/controller/cluster_cleanup.go index ccc7f5dec0..5370654e76 100644 --- a/internal/controller/cluster_cleanup.go +++ b/internal/controller/cluster_cleanup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_cleanup_test.go b/internal/controller/cluster_cleanup_test.go index 23e05d836a..c6162ce9c7 100644 --- a/internal/controller/cluster_cleanup_test.go +++ b/internal/controller/cluster_cleanup_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 036d4e4bd2..94683926fb 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the controller of the CRD diff --git a/internal/controller/cluster_controller_test.go b/internal/controller/cluster_controller_test.go index 9439f0b86b..4f51710266 100644 --- a/internal/controller/cluster_controller_test.go +++ b/internal/controller/cluster_controller_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 4feef214ce..7d4fd93279 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_create_test.go b/internal/controller/cluster_create_test.go index 9d40db537a..0e0f0a0318 100644 --- a/internal/controller/cluster_create_test.go +++ b/internal/controller/cluster_create_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_delete.go b/internal/controller/cluster_delete.go index cd775442e7..44bf9945fe 100644 --- a/internal/controller/cluster_delete.go +++ b/internal/controller/cluster_delete.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_delete_test.go b/internal/controller/cluster_delete_test.go index 5a1d6be4a3..34bbf1c95b 100644 --- a/internal/controller/cluster_delete_test.go +++ b/internal/controller/cluster_delete_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index 46f106bd53..63c16b85c3 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -1,17 +1,20 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_pki.go b/internal/controller/cluster_pki.go index 1b8813752a..38b93adfda 100644 --- a/internal/controller/cluster_pki.go +++ b/internal/controller/cluster_pki.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_plugins.go b/internal/controller/cluster_plugins.go index 845af5e6f7..468cd738d4 100644 --- a/internal/controller/cluster_plugins.go +++ b/internal/controller/cluster_plugins.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the controller of the CRD diff --git a/internal/controller/cluster_predicates.go b/internal/controller/cluster_predicates.go index a8e96dfde4..42490a2a06 100644 --- a/internal/controller/cluster_predicates.go +++ b/internal/controller/cluster_predicates.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_restore.go b/internal/controller/cluster_restore.go index 8edd37eb30..590ad4db7f 100644 --- a/internal/controller/cluster_restore.go +++ b/internal/controller/cluster_restore.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_restore_test.go b/internal/controller/cluster_restore_test.go index 364f7aa821..fc67aa18ed 100644 --- a/internal/controller/cluster_restore_test.go +++ b/internal/controller/cluster_restore_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_scale.go b/internal/controller/cluster_scale.go index d4244a3b24..0fc21229e6 100644 --- a/internal/controller/cluster_scale.go +++ b/internal/controller/cluster_scale.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_scale_test.go b/internal/controller/cluster_scale_test.go index 4ed725a82a..2c9f24e513 100644 --- a/internal/controller/cluster_scale_test.go +++ b/internal/controller/cluster_scale_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index a092f955c2..5703b18bcf 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_status_test.go b/internal/controller/cluster_status_test.go index d7e24c8489..605c0b32ea 100644 --- a/internal/controller/cluster_status_test.go +++ b/internal/controller/cluster_status_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go index 3fc56d42b1..b13c94f31d 100644 --- a/internal/controller/cluster_upgrade.go +++ b/internal/controller/cluster_upgrade.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index 9c0fea0e27..8737fb1a44 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go index e630576ca6..a791bd58b2 100644 --- a/internal/controller/finalizers_delete.go +++ b/internal/controller/finalizers_delete.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/finalizers_delete_test.go b/internal/controller/finalizers_delete_test.go index fcb7b40849..5f0e1a55b4 100644 --- a/internal/controller/finalizers_delete_test.go +++ b/internal/controller/finalizers_delete_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/plugin_controller.go b/internal/controller/plugin_controller.go index 83efdef4eb..8f0885aca9 100644 --- a/internal/controller/plugin_controller.go +++ b/internal/controller/plugin_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the controller of the CRD diff --git a/internal/controller/plugin_predicates.go b/internal/controller/plugin_predicates.go index 119eb2d2d3..dfcf0fb02a 100644 --- a/internal/controller/plugin_predicates.go +++ b/internal/controller/plugin_predicates.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/plugins.go b/internal/controller/plugins.go index 6924b83005..a63d6302d8 100644 --- a/internal/controller/plugins.go +++ b/internal/controller/plugins.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/plugins_test.go b/internal/controller/plugins_test.go index c822faa009..1cdeb8fa0f 100644 --- a/internal/controller/plugins_test.go +++ b/internal/controller/plugins_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_controller.go b/internal/controller/pooler_controller.go index 63a1651175..8f64989d08 100644 --- a/internal/controller/pooler_controller.go +++ b/internal/controller/pooler_controller.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_controller_test.go b/internal/controller/pooler_controller_test.go index 9e22fb7302..5d6f10be87 100644 --- a/internal/controller/pooler_controller_test.go +++ b/internal/controller/pooler_controller_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_predicates.go b/internal/controller/pooler_predicates.go index 9813ff59b8..ebd14931db 100644 --- a/internal/controller/pooler_predicates.go +++ b/internal/controller/pooler_predicates.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_predicates_test.go b/internal/controller/pooler_predicates_test.go index 5a324fb27d..3ea269e8be 100644 --- a/internal/controller/pooler_predicates_test.go +++ b/internal/controller/pooler_predicates_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_resources.go b/internal/controller/pooler_resources.go index 40997786ed..5fea930362 100644 --- a/internal/controller/pooler_resources.go +++ b/internal/controller/pooler_resources.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_resources_test.go b/internal/controller/pooler_resources_test.go index 14468170e7..988673c389 100644 --- a/internal/controller/pooler_resources_test.go +++ b/internal/controller/pooler_resources_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_status.go b/internal/controller/pooler_status.go index af9f9062fa..a1b06ebca6 100644 --- a/internal/controller/pooler_status.go +++ b/internal/controller/pooler_status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_status_test.go b/internal/controller/pooler_status_test.go index 18891442b2..bf8434cfed 100644 --- a/internal/controller/pooler_status_test.go +++ b/internal/controller/pooler_status_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_update.go b/internal/controller/pooler_update.go index edfa3f1927..cc34254f3e 100644 --- a/internal/controller/pooler_update.go +++ b/internal/controller/pooler_update.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/pooler_update_test.go b/internal/controller/pooler_update_test.go index 8774d1ad8b..36cd44771d 100644 --- a/internal/controller/pooler_update_test.go +++ b/internal/controller/pooler_update_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/replicas.go b/internal/controller/replicas.go index 24f3ab2c14..a4f02db0d8 100644 --- a/internal/controller/replicas.go +++ b/internal/controller/replicas.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/replicas_test.go b/internal/controller/replicas_test.go index fcba496d90..634a9dafa3 100644 --- a/internal/controller/replicas_test.go +++ b/internal/controller/replicas_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/controller/rollout/doc.go b/internal/controller/rollout/doc.go index a36ea9c5f3..04c1c9afd9 100644 --- a/internal/controller/rollout/doc.go +++ b/internal/controller/rollout/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package rollout contains the rollout manager, allowing diff --git a/internal/controller/rollout/rollout.go b/internal/controller/rollout/rollout.go index cfe930fb6b..34ac26fad0 100644 --- a/internal/controller/rollout/rollout.go +++ b/internal/controller/rollout/rollout.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package rollout diff --git a/internal/controller/rollout/rollout_test.go b/internal/controller/rollout/rollout_test.go index bafaf17666..6d4e6d0d6c 100644 --- a/internal/controller/rollout/rollout_test.go +++ b/internal/controller/rollout/rollout_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package rollout diff --git a/internal/controller/rollout/suite_test.go b/internal/controller/rollout/suite_test.go index 3a8ee45027..82c0fa602e 100644 --- a/internal/controller/rollout/suite_test.go +++ b/internal/controller/rollout/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */

 package rollout
diff --git a/internal/controller/scheduledbackup_controller.go b/internal/controller/scheduledbackup_controller.go
index 4fd0138e64..913b1fa00d 100644
--- a/internal/controller/scheduledbackup_controller.go
+++ b/internal/controller/scheduledbackup_controller.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index 8da21a71f0..8315875b88 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/cache/cache.go b/internal/management/cache/cache.go
index 4bdc0519f4..3860cbd17e 100644
--- a/internal/management/cache/cache.go
+++ b/internal/management/cache/cache.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package cache
diff --git a/internal/management/cache/doc.go b/internal/management/cache/doc.go
index 41acc1351a..b187a141c3 100644
--- a/internal/management/cache/doc.go
+++ b/internal/management/cache/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package cache contains the constants and functions for reading/writing to the process local cache
diff --git a/internal/management/cache/error.go b/internal/management/cache/error.go
index f26af13e43..5747e9e4fd 100644
--- a/internal/management/cache/error.go
+++ b/internal/management/cache/error.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package cache
diff --git a/internal/management/cache/keys.go b/internal/management/cache/keys.go
index 2792f882c3..d7259a06de 100644
--- a/internal/management/cache/keys.go
+++ b/internal/management/cache/keys.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package cache
diff --git a/internal/management/controller/cache.go b/internal/management/controller/cache.go
index 1d4f6960a4..4f6bfaf6ce 100644
--- a/internal/management/controller/cache.go
+++ b/internal/management/controller/cache.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/common.go b/internal/management/controller/common.go
index 27d54efce6..51a3d30c8a 100644
--- a/internal/management/controller/common.go
+++ b/internal/management/controller/common.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/common_test.go b/internal/management/controller/common_test.go
index 901ab93f61..4beb734e2c 100644
--- a/internal/management/controller/common_test.go
+++ b/internal/management/controller/common_test.go
@@ -1,17 +1,20 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

-  http://www.apache.org/licenses/LICENSE-2.0
+	http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go
index 55b19aeb04..8fcb2181fc 100644
--- a/internal/management/controller/database_controller.go
+++ b/internal/management/controller/database_controller.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go
index 504633aecf..c2211ce852 100644
--- a/internal/management/controller/database_controller_sql.go
+++ b/internal/management/controller/database_controller_sql.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go
index aa9b3aef0e..3949d76441 100644
--- a/internal/management/controller/database_controller_sql_test.go
+++ b/internal/management/controller/database_controller_sql_test.go
@@ -1,17 +1,20 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

-  http://www.apache.org/licenses/LICENSE-2.0
+	http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/database_controller_test.go b/internal/management/controller/database_controller_test.go
index f872322e5c..569aa87950 100644
--- a/internal/management/controller/database_controller_test.go
+++ b/internal/management/controller/database_controller_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/database_objects.go b/internal/management/controller/database_objects.go
index a0fb2e7e3c..a25dd73f28 100644
--- a/internal/management/controller/database_objects.go
+++ b/internal/management/controller/database_objects.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/externalservers/doc.go b/internal/management/controller/externalservers/doc.go
index 47362eb33d..fdf07d5b77 100644
--- a/internal/management/controller/externalservers/doc.go
+++ b/internal/management/controller/externalservers/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package externalservers contains the reconciler of external servers, taking
diff --git a/internal/management/controller/externalservers/manager.go b/internal/management/controller/externalservers/manager.go
index db488336aa..3846f8b177 100644
--- a/internal/management/controller/externalservers/manager.go
+++ b/internal/management/controller/externalservers/manager.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package externalservers
diff --git a/internal/management/controller/externalservers/reconciler.go b/internal/management/controller/externalservers/reconciler.go
index 5d246a145a..3e5a298e2e 100644
--- a/internal/management/controller/externalservers/reconciler.go
+++ b/internal/management/controller/externalservers/reconciler.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package externalservers
diff --git a/internal/management/controller/finalizers.go b/internal/management/controller/finalizers.go
index ed334d16fb..ca355d95cd 100644
--- a/internal/management/controller/finalizers.go
+++ b/internal/management/controller/finalizers.go
@@ -1,3 +1,22 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
 package controller

 import (
diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go
index a3b36a9ebb..78ee1b6be4 100644
--- a/internal/management/controller/instance_controller.go
+++ b/internal/management/controller/instance_controller.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go
index a2a593cd39..c8e06fc7bd 100644
--- a/internal/management/controller/instance_startup.go
+++ b/internal/management/controller/instance_startup.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/instance_token.go b/internal/management/controller/instance_token.go
index f1f74f52a8..564e62a7aa 100644
--- a/internal/management/controller/instance_token.go
+++ b/internal/management/controller/instance_token.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go
index 426f85fd14..f73ae7c8ad 100644
--- a/internal/management/controller/manager.go
+++ b/internal/management/controller/manager.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package controller contains the functions in PostgreSQL instance manager
diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go
index 06fb6dad6a..f1fe600164 100644
--- a/internal/management/controller/publication_controller.go
+++ b/internal/management/controller/publication_controller.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/publication_controller_sql.go b/internal/management/controller/publication_controller_sql.go
index 269b5ab04a..99c182c329 100644
--- a/internal/management/controller/publication_controller_sql.go
+++ b/internal/management/controller/publication_controller_sql.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/publication_controller_sql_test.go b/internal/management/controller/publication_controller_sql_test.go
index b993b93576..d7635e9080 100644
--- a/internal/management/controller/publication_controller_sql_test.go
+++ b/internal/management/controller/publication_controller_sql_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // nolint: dupl
diff --git a/internal/management/controller/publication_controller_test.go b/internal/management/controller/publication_controller_test.go
index cd4a051ea5..4991ba2a19 100644
--- a/internal/management/controller/publication_controller_test.go
+++ b/internal/management/controller/publication_controller_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/roles/contract.go b/internal/management/controller/roles/contract.go
index d72cf3e675..932ac2826e 100644
--- a/internal/management/controller/roles/contract.go
+++ b/internal/management/controller/roles/contract.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/contract_test.go b/internal/management/controller/roles/contract_test.go
index 10c6e5e1a9..3bdac7506d 100644
--- a/internal/management/controller/roles/contract_test.go
+++ b/internal/management/controller/roles/contract_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/doc.go b/internal/management/controller/roles/doc.go
index d2e69fec67..1d302ba207 100644
--- a/internal/management/controller/roles/doc.go
+++ b/internal/management/controller/roles/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package roles contains the code needed to reconcile roles with PostgreSQL
diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go
index b93089a381..d8d8c32ff5 100644
--- a/internal/management/controller/roles/postgres.go
+++ b/internal/management/controller/roles/postgres.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/postgres_errors.go b/internal/management/controller/roles/postgres_errors.go
index 92a0111ba5..587a46997e 100644
--- a/internal/management/controller/roles/postgres_errors.go
+++ b/internal/management/controller/roles/postgres_errors.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go
index 4357f62f0c..6a4c1fa34e 100644
--- a/internal/management/controller/roles/postgres_test.go
+++ b/internal/management/controller/roles/postgres_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/reconciler.go b/internal/management/controller/roles/reconciler.go
index af850c7f72..a3c34f4518 100644
--- a/internal/management/controller/roles/reconciler.go
+++ b/internal/management/controller/roles/reconciler.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/reconciler_test.go b/internal/management/controller/roles/reconciler_test.go
index 8e49d9a692..201b1733df 100644
--- a/internal/management/controller/roles/reconciler_test.go
+++ b/internal/management/controller/roles/reconciler_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/roles.go b/internal/management/controller/roles/roles.go
index 739eaaa158..d9df3f5792 100644
--- a/internal/management/controller/roles/roles.go
+++ b/internal/management/controller/roles/roles.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/runnable.go b/internal/management/controller/roles/runnable.go
index 585dea7ae4..79970c064f 100644
--- a/internal/management/controller/roles/runnable.go
+++ b/internal/management/controller/roles/runnable.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/runnable_test.go b/internal/management/controller/roles/runnable_test.go
index 370ac6bab2..0b57d2d71b 100644
--- a/internal/management/controller/roles/runnable_test.go
+++ b/internal/management/controller/roles/runnable_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/roles/suite_test.go b/internal/management/controller/roles/suite_test.go
index 460878349a..c7fe63ee13 100644
--- a/internal/management/controller/roles/suite_test.go
+++ b/internal/management/controller/roles/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package roles
diff --git a/internal/management/controller/slots/infrastructure/doc.go b/internal/management/controller/slots/infrastructure/doc.go
index d96244d8e9..f6d3af89c4 100644
--- a/internal/management/controller/slots/infrastructure/doc.go
+++ b/internal/management/controller/slots/infrastructure/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package infrastructure contains the structs and interfaces needed to manage replication slots
diff --git a/internal/management/controller/slots/infrastructure/postgresmanager.go b/internal/management/controller/slots/infrastructure/postgresmanager.go
index 9f192a852a..229ce2e610 100644
--- a/internal/management/controller/slots/infrastructure/postgresmanager.go
+++ b/internal/management/controller/slots/infrastructure/postgresmanager.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/slots/infrastructure/postgresmanager_test.go b/internal/management/controller/slots/infrastructure/postgresmanager_test.go
index 79352e29df..748e8c09a7 100644
--- a/internal/management/controller/slots/infrastructure/postgresmanager_test.go
+++ b/internal/management/controller/slots/infrastructure/postgresmanager_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/slots/infrastructure/replicationslot.go b/internal/management/controller/slots/infrastructure/replicationslot.go
index 9cda0b2971..a5d2e5de03 100644
--- a/internal/management/controller/slots/infrastructure/replicationslot.go
+++ b/internal/management/controller/slots/infrastructure/replicationslot.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/slots/infrastructure/replicationslot_test.go b/internal/management/controller/slots/infrastructure/replicationslot_test.go
index cd350b1828..80e94b471a 100644
--- a/internal/management/controller/slots/infrastructure/replicationslot_test.go
+++ b/internal/management/controller/slots/infrastructure/replicationslot_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/slots/infrastructure/suite_test.go b/internal/management/controller/slots/infrastructure/suite_test.go
index 30bf0edf16..5861937c63 100644
--- a/internal/management/controller/slots/infrastructure/suite_test.go
+++ b/internal/management/controller/slots/infrastructure/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/slots/reconciler/doc.go b/internal/management/controller/slots/reconciler/doc.go
index 8092314cd5..7a06a8fc94 100644
--- a/internal/management/controller/slots/reconciler/doc.go
+++ b/internal/management/controller/slots/reconciler/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package reconciler contains all the logic needed to reconcile replication slots
diff --git a/internal/management/controller/slots/reconciler/replicationslot.go b/internal/management/controller/slots/reconciler/replicationslot.go
index 6d7382330b..a1a4a2c54c 100644
--- a/internal/management/controller/slots/reconciler/replicationslot.go
+++ b/internal/management/controller/slots/reconciler/replicationslot.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package reconciler
diff --git a/internal/management/controller/slots/reconciler/replicationslot_test.go b/internal/management/controller/slots/reconciler/replicationslot_test.go
index 8179b80b03..961a639389 100644
--- a/internal/management/controller/slots/reconciler/replicationslot_test.go
+++ b/internal/management/controller/slots/reconciler/replicationslot_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package reconciler
diff --git a/internal/management/controller/slots/reconciler/suite_test.go b/internal/management/controller/slots/reconciler/suite_test.go
index e8a1f7999e..ea24d0a66d 100644
--- a/internal/management/controller/slots/reconciler/suite_test.go
+++ b/internal/management/controller/slots/reconciler/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package reconciler
diff --git a/internal/management/controller/slots/runner/doc.go b/internal/management/controller/slots/runner/doc.go
index 0a71445997..93f791917a 100644
--- a/internal/management/controller/slots/runner/doc.go
+++ b/internal/management/controller/slots/runner/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package runner contains the runner that replicates slots from the primary to the replicas
diff --git a/internal/management/controller/slots/runner/runner.go b/internal/management/controller/slots/runner/runner.go
index 9b200c76af..0ded0031de 100644
--- a/internal/management/controller/slots/runner/runner.go
+++ b/internal/management/controller/slots/runner/runner.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package runner
diff --git a/internal/management/controller/slots/runner/runner_test.go b/internal/management/controller/slots/runner/runner_test.go
index 32442d754e..34c55ab219 100644
--- a/internal/management/controller/slots/runner/runner_test.go
+++ b/internal/management/controller/slots/runner/runner_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package runner
diff --git a/internal/management/controller/slots/runner/suite_test.go b/internal/management/controller/slots/runner/suite_test.go
index 330e97976b..313a1f1ac9 100644
--- a/internal/management/controller/slots/runner/suite_test.go
+++ b/internal/management/controller/slots/runner/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package runner
diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go
index 4f8d5c7583..6056488455 100644
--- a/internal/management/controller/subscription_controller.go
+++ b/internal/management/controller/subscription_controller.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/subscription_controller_sql.go b/internal/management/controller/subscription_controller_sql.go
index c9b7e9857d..c81a0f5a9d 100644
--- a/internal/management/controller/subscription_controller_sql.go
+++ b/internal/management/controller/subscription_controller_sql.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/subscription_controller_sql_test.go b/internal/management/controller/subscription_controller_sql_test.go
index 8afe3019f6..d3628858b2 100644
--- a/internal/management/controller/subscription_controller_sql_test.go
+++ b/internal/management/controller/subscription_controller_sql_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // nolint: dupl
diff --git a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go
index 49eacfe556..99a2dd1f81 100644
--- a/internal/management/controller/subscription_controller_test.go
+++ b/internal/management/controller/subscription_controller_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/suite_test.go b/internal/management/controller/suite_test.go
index c9e4c918ec..dc97682f30 100644
--- a/internal/management/controller/suite_test.go
+++ b/internal/management/controller/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/management/controller/tablespaces/actions.go b/internal/management/controller/tablespaces/actions.go
index 64fcc059d6..7df6d9d580 100644
--- a/internal/management/controller/tablespaces/actions.go
+++ b/internal/management/controller/tablespaces/actions.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package tablespaces
diff --git a/internal/management/controller/tablespaces/controller_test.go b/internal/management/controller/tablespaces/controller_test.go
index 000bd19aad..378c8d6d61 100644
--- a/internal/management/controller/tablespaces/controller_test.go
+++ b/internal/management/controller/tablespaces/controller_test.go
@@ -1,17 +1,20 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

-  http://www.apache.org/licenses/LICENSE-2.0
+	http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package tablespaces
diff --git a/internal/management/controller/tablespaces/doc.go b/internal/management/controller/tablespaces/doc.go
index 735eab3ebb..d1d885ad9c 100644
--- a/internal/management/controller/tablespaces/doc.go
+++ b/internal/management/controller/tablespaces/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package tablespaces contains the runner to declarative tablespace
diff --git a/internal/management/controller/tablespaces/infrastructure/contract.go b/internal/management/controller/tablespaces/infrastructure/contract.go
index d9a3bd16a0..0538358da0 100644
--- a/internal/management/controller/tablespaces/infrastructure/contract.go
+++ b/internal/management/controller/tablespaces/infrastructure/contract.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/tablespaces/infrastructure/doc.go b/internal/management/controller/tablespaces/infrastructure/doc.go
index 1ecfb68d7a..32c3049a2f 100644
--- a/internal/management/controller/tablespaces/infrastructure/doc.go
+++ b/internal/management/controller/tablespaces/infrastructure/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package infrastructure contains the structs and interfaces needed to manage declarative tablespace
diff --git a/internal/management/controller/tablespaces/infrastructure/postgres.go b/internal/management/controller/tablespaces/infrastructure/postgres.go
index b83fac27b4..8b60421941 100644
--- a/internal/management/controller/tablespaces/infrastructure/postgres.go
+++ b/internal/management/controller/tablespaces/infrastructure/postgres.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/tablespaces/infrastructure/postgres_test.go b/internal/management/controller/tablespaces/infrastructure/postgres_test.go
index 4d9618bb73..9ca57cb017 100644
--- a/internal/management/controller/tablespaces/infrastructure/postgres_test.go
+++ b/internal/management/controller/tablespaces/infrastructure/postgres_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/tablespaces/infrastructure/suite_test.go b/internal/management/controller/tablespaces/infrastructure/suite_test.go
index 25c2aee4fe..9a9b2091f4 100644
--- a/internal/management/controller/tablespaces/infrastructure/suite_test.go
+++ b/internal/management/controller/tablespaces/infrastructure/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package infrastructure
diff --git a/internal/management/controller/tablespaces/manager.go b/internal/management/controller/tablespaces/manager.go
index 0ea317061f..027929c383 100644
--- a/internal/management/controller/tablespaces/manager.go
+++ b/internal/management/controller/tablespaces/manager.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package tablespaces
diff --git a/internal/management/controller/tablespaces/reconciler.go b/internal/management/controller/tablespaces/reconciler.go
index c8f10295e1..85b82d2339 100644
--- a/internal/management/controller/tablespaces/reconciler.go
+++ b/internal/management/controller/tablespaces/reconciler.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package tablespaces
diff --git a/internal/management/controller/tablespaces/storage.go b/internal/management/controller/tablespaces/storage.go
index d8ca019dff..23feffffef 100644
--- a/internal/management/controller/tablespaces/storage.go
+++ b/internal/management/controller/tablespaces/storage.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package tablespaces
diff --git a/internal/management/controller/tablespaces/suite_test.go b/internal/management/controller/tablespaces/suite_test.go
index 103b56df8e..eca0dd4266 100644
--- a/internal/management/controller/tablespaces/suite_test.go
+++ b/internal/management/controller/tablespaces/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package tablespaces
diff --git a/internal/management/controller/tablespaces/tablespaces.go b/internal/management/controller/tablespaces/tablespaces.go
index 9d1699c117..bc7ca05763 100644
--- a/internal/management/controller/tablespaces/tablespaces.go
+++ b/internal/management/controller/tablespaces/tablespaces.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package tablespaces
diff --git a/internal/management/istio/doc.go b/internal/management/istio/doc.go
index 23758bc70b..868d2fe783 100644
--- a/internal/management/istio/doc.go
+++ b/internal/management/istio/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package istio implements functions needed to integrate with istio-proxy
diff --git a/internal/management/istio/istio.go b/internal/management/istio/istio.go
index 2bd4ae9228..1698497f4c 100644
--- a/internal/management/istio/istio.go
+++ b/internal/management/istio/istio.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package istio
diff --git a/internal/management/linkerd/doc.go b/internal/management/linkerd/doc.go
index 386578fb51..58b114d430 100644
--- a/internal/management/linkerd/doc.go
+++ b/internal/management/linkerd/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package linkerd implements functions needed to integrate with linkerd-proxy
diff --git a/internal/management/linkerd/linkerd.go b/internal/management/linkerd/linkerd.go
index d600dad137..66717c0038 100644
--- a/internal/management/linkerd/linkerd.go
+++ b/internal/management/linkerd/linkerd.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package linkerd
diff --git a/internal/management/utils/secrets.go b/internal/management/utils/secrets.go
index d60a19b1da..adeabc12c8 100644
--- a/internal/management/utils/secrets.go
+++ b/internal/management/utils/secrets.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 // Package utils contains uncategorized utilities only used
diff --git a/internal/management/utils/secrets_test.go b/internal/management/utils/secrets_test.go
index 6df8d4030f..8a2545a807 100644
--- a/internal/management/utils/secrets_test.go
+++ b/internal/management/utils/secrets_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package utils
diff --git a/internal/management/utils/suite_test.go b/internal/management/utils/suite_test.go
index b9dffc58de..0eff704ab7 100644
--- a/internal/management/utils/suite_test.go
+++ b/internal/management/utils/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package utils
diff --git a/internal/pgbouncer/management/controller/instance.go b/internal/pgbouncer/management/controller/instance.go
index 461552cc0d..d0c471acaa 100644
--- a/internal/pgbouncer/management/controller/instance.go
+++ b/internal/pgbouncer/management/controller/instance.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */

 package controller
diff --git a/internal/pgbouncer/management/controller/instance_test.go b/internal/pgbouncer/management/controller/instance_test.go
index 86e0c3b379..0893587617 100644
--- a/internal/pgbouncer/management/controller/instance_test.go
+++ b/internal/pgbouncer/management/controller/instance_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/manager.go b/internal/pgbouncer/management/controller/manager.go index 634a34a9e1..c115343ed0 100644 --- a/internal/pgbouncer/management/controller/manager.go +++ b/internal/pgbouncer/management/controller/manager.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package controller contains the functions in pgbouncer instance manager diff --git a/internal/pgbouncer/management/controller/refresh.go b/internal/pgbouncer/management/controller/refresh.go index f64794d915..9c4d49a512 100644 --- a/internal/pgbouncer/management/controller/refresh.go +++ b/internal/pgbouncer/management/controller/refresh.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/refresh_test.go b/internal/pgbouncer/management/controller/refresh_test.go index 13e91aaa14..e504ec1eda 100644 --- a/internal/pgbouncer/management/controller/refresh_test.go +++ b/internal/pgbouncer/management/controller/refresh_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/secrets.go b/internal/pgbouncer/management/controller/secrets.go index e781963617..a12dd6e392 100644 --- a/internal/pgbouncer/management/controller/secrets.go +++ b/internal/pgbouncer/management/controller/secrets.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/secrets_test.go b/internal/pgbouncer/management/controller/secrets_test.go index 8da0a4be70..6f8d210b3c 100644 --- a/internal/pgbouncer/management/controller/secrets_test.go +++ b/internal/pgbouncer/management/controller/secrets_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/pgbouncer/management/controller/suite_test.go b/internal/pgbouncer/management/controller/suite_test.go index c12d956a01..c2ec695961 100644 --- a/internal/pgbouncer/management/controller/suite_test.go +++ b/internal/pgbouncer/management/controller/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package controller diff --git a/internal/plugin/resources/doc.go b/internal/plugin/resources/doc.go index a26a0e07fb..0053dc6a8b 100644 --- a/internal/plugin/resources/doc.go +++ b/internal/plugin/resources/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package resources contains reusable functions for the plugin commands diff --git a/internal/plugin/resources/instance.go b/internal/plugin/resources/instance.go index 612fb37034..c1a2400eed 100644 --- a/internal/plugin/resources/instance.go +++ b/internal/plugin/resources/instance.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package resources diff --git a/internal/scheme/doc.go b/internal/scheme/doc.go index 1f1e89c07e..b9a1289eec 100644 --- a/internal/scheme/doc.go +++ b/internal/scheme/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package scheme offers a builder capable of generating a scheme with the resources known by the CNP manager diff --git a/internal/scheme/scheme.go b/internal/scheme/scheme.go index a26858459f..adc11c16d0 100644 --- a/internal/scheme/scheme.go +++ b/internal/scheme/scheme.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package scheme diff --git a/internal/tools/tools.go b/internal/tools/tools.go index b868e6be3c..913a770727 100644 --- a/internal/tools/tools.go +++ b/internal/tools/tools.go @@ -2,7 +2,8 @@ // +build tools /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,6 +16,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package tools is used to track dependencies of tools we use in our diff --git a/internal/webhook/v1/backup_webhook.go b/internal/webhook/v1/backup_webhook.go index 681e40dc22..7fbe2d5610 100644 --- a/internal/webhook/v1/backup_webhook.go +++ b/internal/webhook/v1/backup_webhook.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/backup_webhook_test.go b/internal/webhook/v1/backup_webhook_test.go index 60c73f9eec..2cd23679f6 100644 --- a/internal/webhook/v1/backup_webhook_test.go +++ b/internal/webhook/v1/backup_webhook_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 23dc3a6faf..6ff165401e 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index 38e9b3b57c..6edc5a1694 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/database_webhook.go b/internal/webhook/v1/database_webhook.go index 797042495e..f56cb5699f 100644 --- a/internal/webhook/v1/database_webhook.go +++ b/internal/webhook/v1/database_webhook.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/database_webhook_test.go b/internal/webhook/v1/database_webhook_test.go index c50f3d681d..d0259ab149 100644 --- a/internal/webhook/v1/database_webhook_test.go +++ b/internal/webhook/v1/database_webhook_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/doc.go b/internal/webhook/v1/doc.go index 8298d1e71a..2f62eb58a8 100644 --- a/internal/webhook/v1/doc.go +++ b/internal/webhook/v1/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package v1 contains the webhooks for the postgresql v1 API group diff --git a/internal/webhook/v1/pooler_webhook.go b/internal/webhook/v1/pooler_webhook.go index 5526955a89..dfbed90bd7 100644 --- a/internal/webhook/v1/pooler_webhook.go +++ b/internal/webhook/v1/pooler_webhook.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/pooler_webhook_test.go b/internal/webhook/v1/pooler_webhook_test.go index c49da31e18..49579d2474 100644 --- a/internal/webhook/v1/pooler_webhook_test.go +++ b/internal/webhook/v1/pooler_webhook_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/scheduledbackup_webhook.go b/internal/webhook/v1/scheduledbackup_webhook.go index 4cfeb98cc7..a391af4ba0 100644 --- a/internal/webhook/v1/scheduledbackup_webhook.go +++ b/internal/webhook/v1/scheduledbackup_webhook.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/scheduledbackup_webhook_test.go b/internal/webhook/v1/scheduledbackup_webhook_test.go index 173df06ebc..80dc6a86ed 100644 --- a/internal/webhook/v1/scheduledbackup_webhook_test.go +++ b/internal/webhook/v1/scheduledbackup_webhook_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/internal/webhook/v1/suite_test.go b/internal/webhook/v1/suite_test.go index 5bd0c55f58..1fb13ba7db 100644 --- a/internal/webhook/v1/suite_test.go +++ b/internal/webhook/v1/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package v1 diff --git a/pkg/certs/certs.go b/pkg/certs/certs.go index 6efd147c01..73d3a3f13a 100644 --- a/pkg/certs/certs.go +++ b/pkg/certs/certs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package certs handle the PKI infrastructure of the operator diff --git a/pkg/certs/certs_test.go b/pkg/certs/certs_test.go index 503553552c..f1b44ae4e1 100644 --- a/pkg/certs/certs_test.go +++ b/pkg/certs/certs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/k8s.go b/pkg/certs/k8s.go index d12e3b8d03..a974927718 100644 --- a/pkg/certs/k8s.go +++ b/pkg/certs/k8s.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/k8s_test.go b/pkg/certs/k8s_test.go index 043b13be82..effa53106e 100644 --- a/pkg/certs/k8s_test.go +++ b/pkg/certs/k8s_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/operator_deployment.go b/pkg/certs/operator_deployment.go index 92c897e21f..0a294a7d63 100644 --- a/pkg/certs/operator_deployment.go +++ b/pkg/certs/operator_deployment.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/operator_deployment_test.go b/pkg/certs/operator_deployment_test.go index 96e9adc72b..6ba58ad9ec 100644 --- a/pkg/certs/operator_deployment_test.go +++ b/pkg/certs/operator_deployment_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/suite_test.go b/pkg/certs/suite_test.go index f3c6358576..fa2ded2e04 100644 --- a/pkg/certs/suite_test.go +++ b/pkg/certs/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/tls.go b/pkg/certs/tls.go index 41e72396aa..30c5b25738 100644 --- a/pkg/certs/tls.go +++ b/pkg/certs/tls.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/certs/tls_test.go b/pkg/certs/tls_test.go index 66039695eb..6c62242280 100644 --- a/pkg/certs/tls_test.go +++ b/pkg/certs/tls_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package certs diff --git a/pkg/concurrency/doc.go b/pkg/concurrency/doc.go index 7565222dab..0394bf9285 100644 --- a/pkg/concurrency/doc.go +++ b/pkg/concurrency/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package concurrency contains utilities for goroutines coordination diff --git a/pkg/concurrency/executed.go b/pkg/concurrency/executed.go index d8375986e5..bb11de807d 100644 --- a/pkg/concurrency/executed.go +++ b/pkg/concurrency/executed.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package concurrency diff --git a/pkg/concurrency/executed_test.go b/pkg/concurrency/executed_test.go index ae7dff79e4..0e6054ca2c 100644 --- a/pkg/concurrency/executed_test.go +++ b/pkg/concurrency/executed_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package concurrency diff --git a/pkg/concurrency/suite_test.go b/pkg/concurrency/suite_test.go index e75a189d0d..fb6f57c6e5 100644 --- a/pkg/concurrency/suite_test.go +++ b/pkg/concurrency/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package concurrency diff --git a/pkg/configfile/configfile.go b/pkg/configfile/configfile.go index 9b5aa1b584..28adccbd30 100644 --- a/pkg/configfile/configfile.go +++ b/pkg/configfile/configfile.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package configfile contains primitives needed to manage a configuration file diff --git a/pkg/configfile/configfile_test.go b/pkg/configfile/configfile_test.go index 3d2854ff05..e3ad955fbf 100644 --- a/pkg/configfile/configfile_test.go +++ b/pkg/configfile/configfile_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configfile diff --git a/pkg/configfile/connection_string.go b/pkg/configfile/connection_string.go index f2610cf9e1..8ac53e5e47 100644 --- a/pkg/configfile/connection_string.go +++ b/pkg/configfile/connection_string.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configfile diff --git a/pkg/configfile/connection_string_test.go b/pkg/configfile/connection_string_test.go index ef80b61c16..510e1f0367 100644 --- a/pkg/configfile/connection_string_test.go +++ b/pkg/configfile/connection_string_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configfile diff --git a/pkg/configfile/suite_test.go b/pkg/configfile/suite_test.go index 76d4a19203..47aeb5a7d8 100644 --- a/pkg/configfile/suite_test.go +++ b/pkg/configfile/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package configfile diff --git a/pkg/configparser/configparser.go b/pkg/configparser/configparser.go index e9b9580323..af87d657dd 100644 --- a/pkg/configparser/configparser.go +++ b/pkg/configparser/configparser.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ /* diff --git a/pkg/configparser/configparser_test.go b/pkg/configparser/configparser_test.go index 7c5193aae7..709d7b3b81 100644 --- a/pkg/configparser/configparser_test.go +++ b/pkg/configparser/configparser_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configparser diff --git a/pkg/configparser/suite_test.go b/pkg/configparser/suite_test.go index 89994b0c53..c058301bbd 100644 --- a/pkg/configparser/suite_test.go +++ b/pkg/configparser/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package configparser diff --git a/pkg/executablehash/executablehash.go b/pkg/executablehash/executablehash.go index 163b0f5c24..836adb0e65 100644 --- a/pkg/executablehash/executablehash.go +++ b/pkg/executablehash/executablehash.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package executablehash detect the SHA256 of the running binary diff --git a/pkg/executablehash/executablehash_test.go b/pkg/executablehash/executablehash_test.go index 7a5bdaee16..fa3935d2e1 100644 --- a/pkg/executablehash/executablehash_test.go +++ b/pkg/executablehash/executablehash_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package executablehash diff --git a/pkg/executablehash/suite_test.go b/pkg/executablehash/suite_test.go index 3263008eaf..b1d6919a48 100644 --- a/pkg/executablehash/suite_test.go +++ b/pkg/executablehash/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package executablehash diff --git a/pkg/management/client.go b/pkg/management/client.go index 51f1fc02a7..cd13f351d2 100644 --- a/pkg/management/client.go +++ b/pkg/management/client.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package management contains all the features needed by the instance diff --git a/pkg/management/external/doc.go b/pkg/management/external/doc.go index 334b5f31e1..e2bcb842c6 100644 --- a/pkg/management/external/doc.go +++ b/pkg/management/external/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package external contains the functions needed to manage servers which are external to this diff --git a/pkg/management/external/external.go b/pkg/management/external/external.go index b6413c600d..24f5c658d2 100644 --- a/pkg/management/external/external.go +++ b/pkg/management/external/external.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package external diff --git a/pkg/management/external/internal/pgpass/conninfo.go b/pkg/management/external/internal/pgpass/conninfo.go index 7c6f451972..c6484877ef 100644 --- a/pkg/management/external/internal/pgpass/conninfo.go +++ b/pkg/management/external/internal/pgpass/conninfo.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgpass diff --git a/pkg/management/external/internal/pgpass/conninfo_test.go b/pkg/management/external/internal/pgpass/conninfo_test.go index 4588707221..975b475597 100644 --- a/pkg/management/external/internal/pgpass/conninfo_test.go +++ b/pkg/management/external/internal/pgpass/conninfo_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package pgpass diff --git a/pkg/management/external/internal/pgpass/doc.go b/pkg/management/external/internal/pgpass/doc.go index bee39766a7..af2c86802e 100644 --- a/pkg/management/external/internal/pgpass/doc.go +++ b/pkg/management/external/internal/pgpass/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package pgpass allows the user to generate a PostgreSQL .pgpass file
diff --git a/pkg/management/external/internal/pgpass/pgpass.go b/pkg/management/external/internal/pgpass/pgpass.go
index 6f87a68b89..4ab49dd9a1 100644
--- a/pkg/management/external/internal/pgpass/pgpass.go
+++ b/pkg/management/external/internal/pgpass/pgpass.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgpass
diff --git a/pkg/management/external/internal/pgpass/pgpass_test.go b/pkg/management/external/internal/pgpass/pgpass_test.go
index ec15759895..27fcb6da8c 100644
--- a/pkg/management/external/internal/pgpass/pgpass_test.go
+++ b/pkg/management/external/internal/pgpass/pgpass_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgpass
diff --git a/pkg/management/external/internal/pgpass/suite_test.go b/pkg/management/external/internal/pgpass/suite_test.go
index 48bb92fff5..a09ba17025 100644
--- a/pkg/management/external/internal/pgpass/suite_test.go
+++ b/pkg/management/external/internal/pgpass/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgpass
diff --git a/pkg/management/external/utils.go b/pkg/management/external/utils.go
index 959b2a2cfc..510fd27ed6 100644
--- a/pkg/management/external/utils.go
+++ b/pkg/management/external/utils.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package external
diff --git a/pkg/management/logtest/logtest.go b/pkg/management/logtest/logtest.go
index 0a90638d5e..77b797e02a 100644
--- a/pkg/management/logtest/logtest.go
+++ b/pkg/management/logtest/logtest.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package logtest contains the testing utils for the logging subsystem of the instance manager
diff --git a/pkg/management/pgbouncer/config/config.go b/pkg/management/pgbouncer/config/config.go
index 303c5c6d96..ba74f54082 100644
--- a/pkg/management/pgbouncer/config/config.go
+++ b/pkg/management/pgbouncer/config/config.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package config
diff --git a/pkg/management/pgbouncer/config/data.go b/pkg/management/pgbouncer/config/data.go
index 64b861f39f..c0664f94ec 100644
--- a/pkg/management/pgbouncer/config/data.go
+++ b/pkg/management/pgbouncer/config/data.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package config
diff --git a/pkg/management/pgbouncer/config/secrets.go b/pkg/management/pgbouncer/config/secrets.go
index f3185db95a..09b9312018 100644
--- a/pkg/management/pgbouncer/config/secrets.go
+++ b/pkg/management/pgbouncer/config/secrets.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package config contains the code related to the generation of the PgBouncer configuration
diff --git a/pkg/management/pgbouncer/config/secrets_test.go b/pkg/management/pgbouncer/config/secrets_test.go
index 7be5e8d006..d34df9628c 100644
--- a/pkg/management/pgbouncer/config/secrets_test.go
+++ b/pkg/management/pgbouncer/config/secrets_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package config
diff --git a/pkg/management/pgbouncer/config/strings.go b/pkg/management/pgbouncer/config/strings.go
index ef01497b89..30c350bf62 100644
--- a/pkg/management/pgbouncer/config/strings.go
+++ b/pkg/management/pgbouncer/config/strings.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package config
diff --git a/pkg/management/pgbouncer/config/strings_test.go b/pkg/management/pgbouncer/config/strings_test.go
index d0b901276b..f6585584e3 100644
--- a/pkg/management/pgbouncer/config/strings_test.go
+++ b/pkg/management/pgbouncer/config/strings_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package config
diff --git a/pkg/management/pgbouncer/config/suite_test.go b/pkg/management/pgbouncer/config/suite_test.go
index 08e6f7f809..bdea666cb3 100644
--- a/pkg/management/pgbouncer/config/suite_test.go
+++ b/pkg/management/pgbouncer/config/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package config
diff --git a/pkg/management/pgbouncer/metricsserver/lists.go b/pkg/management/pgbouncer/metricsserver/lists.go
index 802a8e317a..b0b66aa4b8 100644
--- a/pkg/management/pgbouncer/metricsserver/lists.go
+++ b/pkg/management/pgbouncer/metricsserver/lists.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metricsserver
diff --git a/pkg/management/pgbouncer/metricsserver/metricsserver.go b/pkg/management/pgbouncer/metricsserver/metricsserver.go
index 61a55b8dd6..cd37f5a565 100644
--- a/pkg/management/pgbouncer/metricsserver/metricsserver.go
+++ b/pkg/management/pgbouncer/metricsserver/metricsserver.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package metricsserver contains the web server powering metrics
diff --git a/pkg/management/pgbouncer/metricsserver/metricsserver_test.go b/pkg/management/pgbouncer/metricsserver/metricsserver_test.go
index 1a80045d40..726961e8b5 100644
--- a/pkg/management/pgbouncer/metricsserver/metricsserver_test.go
+++ b/pkg/management/pgbouncer/metricsserver/metricsserver_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metricsserver
diff --git a/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go b/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go
index e498d370f1..77e12bbe1a 100644
--- a/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go
+++ b/pkg/management/pgbouncer/metricsserver/pgbouncer_collector.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package metricsserver enables to expose a set of metrics and collectors on a given postgres instance
diff --git a/pkg/management/pgbouncer/metricsserver/pools.go b/pkg/management/pgbouncer/metricsserver/pools.go
index d6edd77fd9..a77f789a81 100644
--- a/pkg/management/pgbouncer/metricsserver/pools.go
+++ b/pkg/management/pgbouncer/metricsserver/pools.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metricsserver
diff --git a/pkg/management/pgbouncer/metricsserver/pools_test.go b/pkg/management/pgbouncer/metricsserver/pools_test.go
index 8fd3e928d3..f96f318d63 100644
--- a/pkg/management/pgbouncer/metricsserver/pools_test.go
+++ b/pkg/management/pgbouncer/metricsserver/pools_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metricsserver
diff --git a/pkg/management/pgbouncer/metricsserver/stats.go b/pkg/management/pgbouncer/metricsserver/stats.go
index 1001699f79..2c46ff72e1 100644
--- a/pkg/management/pgbouncer/metricsserver/stats.go
+++ b/pkg/management/pgbouncer/metricsserver/stats.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metricsserver
diff --git a/pkg/management/pgbouncer/metricsserver/stats_test.go b/pkg/management/pgbouncer/metricsserver/stats_test.go
index ddfcbadc4f..4afa31d1ad 100644
--- a/pkg/management/pgbouncer/metricsserver/stats_test.go
+++ b/pkg/management/pgbouncer/metricsserver/stats_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metricsserver
diff --git a/pkg/management/pgbouncer/metricsserver/suite_test.go b/pkg/management/pgbouncer/metricsserver/suite_test.go
index ab7aa16d64..7038b67d87 100644
--- a/pkg/management/pgbouncer/metricsserver/suite_test.go
+++ b/pkg/management/pgbouncer/metricsserver/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metricsserver
diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go
index 5e8c193b41..61dda4fb8d 100644
--- a/pkg/management/postgres/archiver/archiver.go
+++ b/pkg/management/postgres/archiver/archiver.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package archiver
diff --git a/pkg/management/postgres/archiver/doc.go b/pkg/management/postgres/archiver/doc.go
index e29ef4fe9d..6b40b65e25 100644
--- a/pkg/management/postgres/archiver/doc.go
+++ b/pkg/management/postgres/archiver/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package archiver contains the logic of the CloudNativePG WAL archiver
diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go
index f79c4d4a9a..80b6caced3 100644
--- a/pkg/management/postgres/backup.go
+++ b/pkg/management/postgres/backup.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/backup_test.go b/pkg/management/postgres/backup_test.go
index bfd037e4a1..a8cfb1504a 100644
--- a/pkg/management/postgres/backup_test.go
+++ b/pkg/management/postgres/backup_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go
index 3a9cfa557f..9ae0787382 100644
--- a/pkg/management/postgres/configuration.go
+++ b/pkg/management/postgres/configuration.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go
index 2e3b211397..7b1dc538ac 100644
--- a/pkg/management/postgres/configuration_test.go
+++ b/pkg/management/postgres/configuration_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/conninfo.go b/pkg/management/postgres/conninfo.go
index 8b43d42c26..d82e1ce45c 100644
--- a/pkg/management/postgres/conninfo.go
+++ b/pkg/management/postgres/conninfo.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/constants/constants.go b/pkg/management/postgres/constants/constants.go
index 5f26ed85f6..b84ef3b7e9 100644
--- a/pkg/management/postgres/constants/constants.go
+++ b/pkg/management/postgres/constants/constants.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package constants provides the needed constants in the postgres package
diff --git a/pkg/management/postgres/consts.go b/pkg/management/postgres/consts.go
index 244c174d1f..e38e157a58 100644
--- a/pkg/management/postgres/consts.go
+++ b/pkg/management/postgres/consts.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/ident.go b/pkg/management/postgres/ident.go
index 9c50f96c8b..0b40640054 100644
--- a/pkg/management/postgres/ident.go
+++ b/pkg/management/postgres/ident.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go
index 6fe2d9a37f..2675271ab8 100644
--- a/pkg/management/postgres/initdb.go
+++ b/pkg/management/postgres/initdb.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package postgres contains the function about starting up,
diff --git a/pkg/management/postgres/initdb_test.go b/pkg/management/postgres/initdb_test.go
index 0df37770c9..d6a6d5d906 100644
--- a/pkg/management/postgres/initdb_test.go
+++ b/pkg/management/postgres/initdb_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go
index ced61cff6e..9076de82d6 100644
--- a/pkg/management/postgres/instance.go
+++ b/pkg/management/postgres/instance.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/instance_replica.go b/pkg/management/postgres/instance_replica.go
index 84dca8e1a0..236348dc65 100644
--- a/pkg/management/postgres/instance_replica.go
+++ b/pkg/management/postgres/instance_replica.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/instance_test.go b/pkg/management/postgres/instance_test.go
index defe47e317..8aade97331 100644
--- a/pkg/management/postgres/instance_test.go
+++ b/pkg/management/postgres/instance_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/join.go b/pkg/management/postgres/join.go
index 73212522d6..a341e54e7f 100644
--- a/pkg/management/postgres/join.go
+++ b/pkg/management/postgres/join.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/logicalimport/constants.go b/pkg/management/postgres/logicalimport/constants.go
index 19c2853538..818237c759 100644
--- a/pkg/management/postgres/logicalimport/constants.go
+++ b/pkg/management/postgres/logicalimport/constants.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/database.go b/pkg/management/postgres/logicalimport/database.go
index 82885a91c1..eb48507582 100644
--- a/pkg/management/postgres/logicalimport/database.go
+++ b/pkg/management/postgres/logicalimport/database.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/database_test.go b/pkg/management/postgres/logicalimport/database_test.go
index 945d2f78cd..7ab009240a 100644
--- a/pkg/management/postgres/logicalimport/database_test.go
+++ b/pkg/management/postgres/logicalimport/database_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/doc.go b/pkg/management/postgres/logicalimport/doc.go
index 9ba516d8be..49c11da4e5 100644
--- a/pkg/management/postgres/logicalimport/doc.go
+++ b/pkg/management/postgres/logicalimport/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package logicalimport contains the logic needed to import a logical snapshot
diff --git a/pkg/management/postgres/logicalimport/microservice.go b/pkg/management/postgres/logicalimport/microservice.go
index a34593e563..de461ba189 100644
--- a/pkg/management/postgres/logicalimport/microservice.go
+++ b/pkg/management/postgres/logicalimport/microservice.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/monolith.go b/pkg/management/postgres/logicalimport/monolith.go
index 5d95d7561c..017ef6e68d 100644
--- a/pkg/management/postgres/logicalimport/monolith.go
+++ b/pkg/management/postgres/logicalimport/monolith.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/role.go b/pkg/management/postgres/logicalimport/role.go
index 9594d320f8..e7c4f5ee8f 100644
--- a/pkg/management/postgres/logicalimport/role.go
+++ b/pkg/management/postgres/logicalimport/role.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/role_test.go b/pkg/management/postgres/logicalimport/role_test.go
index 006301bd57..5c221f1f66 100644
--- a/pkg/management/postgres/logicalimport/role_test.go
+++ b/pkg/management/postgres/logicalimport/role_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/roleinheritance.go b/pkg/management/postgres/logicalimport/roleinheritance.go
index 9866a0d3a8..88bfcf4af4 100644
--- a/pkg/management/postgres/logicalimport/roleinheritance.go
+++ b/pkg/management/postgres/logicalimport/roleinheritance.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/roleinheritance_test.go b/pkg/management/postgres/logicalimport/roleinheritance_test.go
index bcea42216f..75c22e2921 100644
--- a/pkg/management/postgres/logicalimport/roleinheritance_test.go
+++ b/pkg/management/postgres/logicalimport/roleinheritance_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logicalimport/suite_test.go b/pkg/management/postgres/logicalimport/suite_test.go
index 51acce5942..fd3f87b0af 100644
--- a/pkg/management/postgres/logicalimport/suite_test.go
+++ b/pkg/management/postgres/logicalimport/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logicalimport
diff --git a/pkg/management/postgres/logpipe/CSVReadWriter.go b/pkg/management/postgres/logpipe/CSVReadWriter.go
index 606f9b7e79..22fdae76be 100644
--- a/pkg/management/postgres/logpipe/CSVReadWriter.go
+++ b/pkg/management/postgres/logpipe/CSVReadWriter.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/error.go b/pkg/management/postgres/logpipe/error.go
index 6cb3ea9571..5321e69cac 100644
--- a/pkg/management/postgres/logpipe/error.go
+++ b/pkg/management/postgres/logpipe/error.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/linelogpipe.go b/pkg/management/postgres/logpipe/linelogpipe.go
index 1e8b716772..53bf53d2fc 100644
--- a/pkg/management/postgres/logpipe/linelogpipe.go
+++ b/pkg/management/postgres/logpipe/linelogpipe.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package logpipe implements reading csv logs from PostgreSQL logging_collector
diff --git a/pkg/management/postgres/logpipe/loggingCollector.go b/pkg/management/postgres/logpipe/loggingCollector.go
index 59f2b3d8f5..6d6fb34ae3 100644
--- a/pkg/management/postgres/logpipe/loggingCollector.go
+++ b/pkg/management/postgres/logpipe/loggingCollector.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/loggingCollector_test.go b/pkg/management/postgres/logpipe/loggingCollector_test.go
index 8d7df45ce0..85d296d741 100644
--- a/pkg/management/postgres/logpipe/loggingCollector_test.go
+++ b/pkg/management/postgres/logpipe/loggingCollector_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/logpipe.go b/pkg/management/postgres/logpipe/logpipe.go
index 81b6b2543a..0a7b63e68a 100644
--- a/pkg/management/postgres/logpipe/logpipe.go
+++ b/pkg/management/postgres/logpipe/logpipe.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package logpipe implements reading csv logs from PostgreSQL logging_collector
diff --git a/pkg/management/postgres/logpipe/logpipe_test.go b/pkg/management/postgres/logpipe/logpipe_test.go
index 70a4902872..28fc736b23 100644
--- a/pkg/management/postgres/logpipe/logpipe_test.go
+++ b/pkg/management/postgres/logpipe/logpipe_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/pgaudit.go b/pkg/management/postgres/logpipe/pgaudit.go
index 45a6bc8ed3..14a392af3f 100644
--- a/pkg/management/postgres/logpipe/pgaudit.go
+++ b/pkg/management/postgres/logpipe/pgaudit.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/pgaudit_test.go b/pkg/management/postgres/logpipe/pgaudit_test.go
index 5cf9729162..f8a8d17ef9 100644
--- a/pkg/management/postgres/logpipe/pgaudit_test.go
+++ b/pkg/management/postgres/logpipe/pgaudit_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/record.go b/pkg/management/postgres/logpipe/record.go
index 217da099d4..dcd221c7e0 100644
--- a/pkg/management/postgres/logpipe/record.go
+++ b/pkg/management/postgres/logpipe/record.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/suite_test.go b/pkg/management/postgres/logpipe/suite_test.go
index a37b1b0aca..86dee363e2 100644
--- a/pkg/management/postgres/logpipe/suite_test.go
+++ b/pkg/management/postgres/logpipe/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/logpipe/writer.go b/pkg/management/postgres/logpipe/writer.go
index ec323ebfaa..07e1f91d53 100644
--- a/pkg/management/postgres/logpipe/writer.go
+++ b/pkg/management/postgres/logpipe/writer.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package logpipe
diff --git a/pkg/management/postgres/metrics/collector.go b/pkg/management/postgres/metrics/collector.go
index 4e8fb282cb..e6910f2282 100644
--- a/pkg/management/postgres/metrics/collector.go
+++ b/pkg/management/postgres/metrics/collector.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // This code is inspired on [postgres_exporter](https://github.com/prometheus-community/postgres_exporter)
diff --git a/pkg/management/postgres/metrics/collector_test.go b/pkg/management/postgres/metrics/collector_test.go
index f8399472b1..0a29fb7a48 100644
--- a/pkg/management/postgres/metrics/collector_test.go
+++ b/pkg/management/postgres/metrics/collector_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metrics
diff --git a/pkg/management/postgres/metrics/histogram/histogram.go b/pkg/management/postgres/metrics/histogram/histogram.go
index 3223a188eb..4cb279176d 100644
--- a/pkg/management/postgres/metrics/histogram/histogram.go
+++ b/pkg/management/postgres/metrics/histogram/histogram.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // This code is inspired on [postgres_exporter](https://github.com/prometheus-community/postgres_exporter)
diff --git a/pkg/management/postgres/metrics/mapping_test.go b/pkg/management/postgres/metrics/mapping_test.go
index 4b3b51dbda..08c120b024 100644
--- a/pkg/management/postgres/metrics/mapping_test.go
+++ b/pkg/management/postgres/metrics/mapping_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metrics
diff --git a/pkg/management/postgres/metrics/mappings.go b/pkg/management/postgres/metrics/mappings.go
index 814ab520f2..20294c280f 100644
--- a/pkg/management/postgres/metrics/mappings.go
+++ b/pkg/management/postgres/metrics/mappings.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metrics
diff --git a/pkg/management/postgres/metrics/parser.go b/pkg/management/postgres/metrics/parser.go
index aa36e6454f..2d598b11e6 100644
--- a/pkg/management/postgres/metrics/parser.go
+++ b/pkg/management/postgres/metrics/parser.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package metrics enables to expose a set of metrics and collectors on a given postgres instance
diff --git a/pkg/management/postgres/metrics/parser_test.go b/pkg/management/postgres/metrics/parser_test.go
index c185aede05..1ad1d4659a 100644
--- a/pkg/management/postgres/metrics/parser_test.go
+++ b/pkg/management/postgres/metrics/parser_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metrics
diff --git a/pkg/management/postgres/metrics/suite_test.go b/pkg/management/postgres/metrics/suite_test.go
index d5b483093d..357af3edcb 100644
--- a/pkg/management/postgres/metrics/suite_test.go
+++ b/pkg/management/postgres/metrics/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package metrics
diff --git a/pkg/management/postgres/pidfile.go b/pkg/management/postgres/pidfile.go
index d5f1b4f7b2..41471e73a1 100644
--- a/pkg/management/postgres/pidfile.go
+++ b/pkg/management/postgres/pidfile.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/pidfile_test.go b/pkg/management/postgres/pidfile_test.go
index 2fddf281d9..271b00b243 100644
--- a/pkg/management/postgres/pidfile_test.go
+++ b/pkg/management/postgres/pidfile_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/pool/connection.go b/pkg/management/postgres/pool/connection.go
index ddfdb2181f..c642733ba9 100644
--- a/pkg/management/postgres/pool/connection.go
+++ b/pkg/management/postgres/pool/connection.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pool
diff --git a/pkg/management/postgres/pool/pool.go b/pkg/management/postgres/pool/pool.go
index f1ec6e5a8f..eda3064b4e 100644
--- a/pkg/management/postgres/pool/pool.go
+++ b/pkg/management/postgres/pool/pool.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package pool contain an implementation of a connection pool to multiple
diff --git a/pkg/management/postgres/pool/pool_test.go b/pkg/management/postgres/pool/pool_test.go
index 3d3016d8ed..44345be63a 100644
--- a/pkg/management/postgres/pool/pool_test.go
+++ b/pkg/management/postgres/pool/pool_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pool
diff --git a/pkg/management/postgres/pool/profiles.go b/pkg/management/postgres/pool/profiles.go
index ca441a49d0..f6b4339f12 100644
--- a/pkg/management/postgres/pool/profiles.go
+++ b/pkg/management/postgres/pool/profiles.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pool
diff --git a/pkg/management/postgres/pool/suite_test.go b/pkg/management/postgres/pool/suite_test.go
index 2a5c4479ce..16377c3b2b 100644
--- a/pkg/management/postgres/pool/suite_test.go
+++ b/pkg/management/postgres/pool/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pool
diff --git a/pkg/management/postgres/probes.go b/pkg/management/postgres/probes.go
index 84144f1645..e1d3ffe3fa 100644
--- a/pkg/management/postgres/probes.go
+++ b/pkg/management/postgres/probes.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/probes_test.go b/pkg/management/postgres/probes_test.go
index 1973f861bc..bf1abc688e 100644
--- a/pkg/management/postgres/probes_test.go
+++ b/pkg/management/postgres/probes_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/promote.go b/pkg/management/postgres/promote.go
index 7a4440f5ea..0eed1ac216 100644
--- a/pkg/management/postgres/promote.go
+++ b/pkg/management/postgres/promote.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go
index 1737646db2..58e55569ef 100644
--- a/pkg/management/postgres/restore.go
+++ b/pkg/management/postgres/restore.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/restore_test.go b/pkg/management/postgres/restore_test.go
index ad19c76453..811b1a8b8b 100644
--- a/pkg/management/postgres/restore_test.go
+++ b/pkg/management/postgres/restore_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/suite_test.go b/pkg/management/postgres/suite_test.go
index 46c0674161..1d88c1b84f 100644
--- a/pkg/management/postgres/suite_test.go
+++ b/pkg/management/postgres/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/management/postgres/utils/doc.go b/pkg/management/postgres/utils/doc.go
index cb665b51a5..d052651a77 100644
--- a/pkg/management/postgres/utils/doc.go
+++ b/pkg/management/postgres/utils/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package utils holds generic utils about postgres instances
diff --git a/pkg/management/postgres/utils/roles.go b/pkg/management/postgres/utils/roles.go
index 11dca63299..29d65a275c 100644
--- a/pkg/management/postgres/utils/roles.go
+++ b/pkg/management/postgres/utils/roles.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/management/postgres/utils/roles_test.go b/pkg/management/postgres/utils/roles_test.go
index 4355f4c900..b698d11e5e 100644
--- a/pkg/management/postgres/utils/roles_test.go
+++ b/pkg/management/postgres/utils/roles_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+ +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/utils/suite_test.go b/pkg/management/postgres/utils/suite_test.go index 0eb35b04a5..a2378b1405 100644 --- a/pkg/management/postgres/utils/suite_test.go +++ b/pkg/management/postgres/utils/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/utils/utils.go b/pkg/management/postgres/utils/utils.go index e4e7ab3f39..5093f10900 100644 --- a/pkg/management/postgres/utils/utils.go +++ b/pkg/management/postgres/utils/utils.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/utils/version.go b/pkg/management/postgres/utils/version.go index 348a132dc3..91b489f9d5 100644 --- a/pkg/management/postgres/utils/version.go +++ b/pkg/management/postgres/utils/version.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/utils/version_test.go b/pkg/management/postgres/utils/version_test.go index 52631e4ceb..69188ad9bc 100644 --- a/pkg/management/postgres/utils/version_test.go +++ b/pkg/management/postgres/utils/version_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/management/postgres/wal.go b/pkg/management/postgres/wal.go index 3a8288994e..457bfb5fec 100644 --- a/pkg/management/postgres/wal.go +++ b/pkg/management/postgres/wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/management/postgres/wal_test.go b/pkg/management/postgres/wal_test.go index 83006abbac..fd4e1b2970 100644 --- a/pkg/management/postgres/wal_test.go +++ b/pkg/management/postgres/wal_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/management/postgres/webserver/backup_connection.go b/pkg/management/postgres/webserver/backup_connection.go index 60b3559918..4e2a91f9c8 100644 --- a/pkg/management/postgres/webserver/backup_connection.go +++ b/pkg/management/postgres/webserver/backup_connection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package webserver diff --git a/pkg/management/postgres/webserver/client/common/client.go b/pkg/management/postgres/webserver/client/common/client.go index 06bee98158..1a59d735af 100644 --- a/pkg/management/postgres/webserver/client/common/client.go +++ b/pkg/management/postgres/webserver/client/common/client.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package common diff --git a/pkg/management/postgres/webserver/client/common/doc.go b/pkg/management/postgres/webserver/client/common/doc.go index 35dc461c8e..1dda96faa4 100644 --- a/pkg/management/postgres/webserver/client/common/doc.go +++ b/pkg/management/postgres/webserver/client/common/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package common provides common utilities for the webserver client. diff --git a/pkg/management/postgres/webserver/client/local/cache.go b/pkg/management/postgres/webserver/client/local/cache.go index a950018cbc..03cb8c70cb 100644 --- a/pkg/management/postgres/webserver/client/local/cache.go +++ b/pkg/management/postgres/webserver/client/local/cache.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package local diff --git a/pkg/management/postgres/webserver/client/local/cluster.go b/pkg/management/postgres/webserver/client/local/cluster.go index d1229d4f55..f04b253bd8 100644 --- a/pkg/management/postgres/webserver/client/local/cluster.go +++ b/pkg/management/postgres/webserver/client/local/cluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package local diff --git a/pkg/management/postgres/webserver/client/local/doc.go b/pkg/management/postgres/webserver/client/local/doc.go index 1fdc0bca97..25f2e67749 100644 --- a/pkg/management/postgres/webserver/client/local/doc.go +++ b/pkg/management/postgres/webserver/client/local/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package local provides a client to interact with the local webserver endpoints. diff --git a/pkg/management/postgres/webserver/client/local/local.go b/pkg/management/postgres/webserver/client/local/local.go index 3e8df28922..9a61e2e34d 100644 --- a/pkg/management/postgres/webserver/client/local/local.go +++ b/pkg/management/postgres/webserver/client/local/local.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package local diff --git a/pkg/management/postgres/webserver/client/remote/backup.go b/pkg/management/postgres/webserver/client/remote/backup.go index 0740eed40b..af8a98b92d 100644 --- a/pkg/management/postgres/webserver/client/remote/backup.go +++ b/pkg/management/postgres/webserver/client/remote/backup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package remote diff --git a/pkg/management/postgres/webserver/client/remote/doc.go b/pkg/management/postgres/webserver/client/remote/doc.go index f5a83f43e9..a49fd7381b 100644 --- a/pkg/management/postgres/webserver/client/remote/doc.go +++ b/pkg/management/postgres/webserver/client/remote/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package remote contains the client capable of querying the webserver remote endpoint. diff --git a/pkg/management/postgres/webserver/client/remote/instance.go b/pkg/management/postgres/webserver/client/remote/instance.go index 0f526251f9..4ed248df92 100644 --- a/pkg/management/postgres/webserver/client/remote/instance.go +++ b/pkg/management/postgres/webserver/client/remote/instance.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package remote diff --git a/pkg/management/postgres/webserver/client/remote/remote.go b/pkg/management/postgres/webserver/client/remote/remote.go index a70524a3fc..2a183f2bd6 100644 --- a/pkg/management/postgres/webserver/client/remote/remote.go +++ b/pkg/management/postgres/webserver/client/remote/remote.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package remote diff --git a/pkg/management/postgres/webserver/client/remote/request.go b/pkg/management/postgres/webserver/client/remote/request.go index a08e767e50..506c105ee8 100644 --- a/pkg/management/postgres/webserver/client/remote/request.go +++ b/pkg/management/postgres/webserver/client/remote/request.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package remote diff --git a/pkg/management/postgres/webserver/doc.go b/pkg/management/postgres/webserver/doc.go index 52f44cd5e7..0449655e5e 100644 --- a/pkg/management/postgres/webserver/doc.go +++ b/pkg/management/postgres/webserver/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package webserver contains the web server powering probes, backups and metrics diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go index 27b15db9e1..e2a71f9f02 100644 --- a/pkg/management/postgres/webserver/local.go +++ b/pkg/management/postgres/webserver/local.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package webserver diff --git a/pkg/management/postgres/webserver/metricserver/doc.go b/pkg/management/postgres/webserver/metricserver/doc.go index 169837dcbc..8e2cfb22fc 100644 --- a/pkg/management/postgres/webserver/metricserver/doc.go +++ b/pkg/management/postgres/webserver/metricserver/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package metricserver contains the web server powering metrics diff --git a/pkg/management/postgres/webserver/metricserver/metrics.go b/pkg/management/postgres/webserver/metricserver/metrics.go index 7e3ac339e7..9e9ce07623 100644 --- a/pkg/management/postgres/webserver/metricserver/metrics.go +++ b/pkg/management/postgres/webserver/metricserver/metrics.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index 392ce31c8a..efef347dc1 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package metricserver diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector_test.go b/pkg/management/postgres/webserver/metricserver/pg_collector_test.go index 914944403c..6bb9c5cb48 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector_test.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver diff --git a/pkg/management/postgres/webserver/metricserver/suite_test.go b/pkg/management/postgres/webserver/metricserver/suite_test.go index c4a0eab15b..c5dc36e28f 100644 --- a/pkg/management/postgres/webserver/metricserver/suite_test.go +++ b/pkg/management/postgres/webserver/metricserver/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver diff --git a/pkg/management/postgres/webserver/metricserver/wal.go b/pkg/management/postgres/webserver/metricserver/wal.go index 3f855f12a6..4ced92fd05 100644 --- a/pkg/management/postgres/webserver/metricserver/wal.go +++ b/pkg/management/postgres/webserver/metricserver/wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver diff --git a/pkg/management/postgres/webserver/metricserver/wal_test.go b/pkg/management/postgres/webserver/metricserver/wal_test.go index 9542711290..f46642caf9 100644 --- a/pkg/management/postgres/webserver/metricserver/wal_test.go +++ b/pkg/management/postgres/webserver/metricserver/wal_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package metricserver diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 3fe1292d80..e8a34ba05c 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package webserver diff --git a/pkg/management/postgres/webserver/probes/checker.go b/pkg/management/postgres/webserver/probes/checker.go index c85bba8b6b..1ee4520021 100644 --- a/pkg/management/postgres/webserver/probes/checker.go +++ b/pkg/management/postgres/webserver/probes/checker.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package probes diff --git a/pkg/management/postgres/webserver/probes/doc.go b/pkg/management/postgres/webserver/probes/doc.go index 2350a83307..778ef4aac3 100644 --- a/pkg/management/postgres/webserver/probes/doc.go +++ b/pkg/management/postgres/webserver/probes/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package probes contains the implementation of startup, liveness and diff --git a/pkg/management/postgres/webserver/probes/isready.go b/pkg/management/postgres/webserver/probes/isready.go index 35c8deb370..6a93055bf7 100644 --- a/pkg/management/postgres/webserver/probes/isready.go +++ b/pkg/management/postgres/webserver/probes/isready.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package probes diff --git a/pkg/management/postgres/webserver/probes/query.go b/pkg/management/postgres/webserver/probes/query.go index f61a433163..2e2bce5313 100644 --- a/pkg/management/postgres/webserver/probes/query.go +++ b/pkg/management/postgres/webserver/probes/query.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package probes diff --git a/pkg/management/postgres/webserver/probes/streaming.go b/pkg/management/postgres/webserver/probes/streaming.go index 99bd5cd8aa..aaeee79366 100644 --- a/pkg/management/postgres/webserver/probes/streaming.go +++ b/pkg/management/postgres/webserver/probes/streaming.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package probes diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index 4aeb7da9a1..57c814d5f6 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package webserver diff --git a/pkg/management/postgres/webserver/webserver.go b/pkg/management/postgres/webserver/webserver.go index a9ff87d4b3..5a68a30e3f 100644 --- a/pkg/management/postgres/webserver/webserver.go +++ b/pkg/management/postgres/webserver/webserver.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package webserver diff --git a/pkg/management/upgrade/suite_test.go b/pkg/management/upgrade/suite_test.go index 4b84498c71..a5ac4c0595 100644 --- a/pkg/management/upgrade/suite_test.go +++ b/pkg/management/upgrade/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package upgrade diff --git a/pkg/management/upgrade/upgrade.go b/pkg/management/upgrade/upgrade.go index 3c21d4a727..972bc03f6a 100644 --- a/pkg/management/upgrade/upgrade.go +++ b/pkg/management/upgrade/upgrade.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package upgrade manages the in-place upgrade of the instance manager diff --git a/pkg/management/upgrade/upgrade_test.go b/pkg/management/upgrade/upgrade_test.go index 6bb7e4d9ea..8a13741da4 100644 --- a/pkg/management/upgrade/upgrade_test.go +++ b/pkg/management/upgrade/upgrade_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package upgrade diff --git a/pkg/management/url/url.go b/pkg/management/url/url.go index d18d94cc58..c54cde8634 100644 --- a/pkg/management/url/url.go +++ b/pkg/management/url/url.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package url holds the constants for webserver routing diff --git a/pkg/multicache/multinamespaced_cache.go b/pkg/multicache/multinamespaced_cache.go index a3222085db..6998327db9 100644 --- a/pkg/multicache/multinamespaced_cache.go +++ b/pkg/multicache/multinamespaced_cache.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package multicache implements a cache that is able to work on multiple namespaces but also able to diff --git a/pkg/podlogs/cluster_writer.go b/pkg/podlogs/cluster_writer.go index ffabfb227c..70941b4fd6 100644 --- a/pkg/podlogs/cluster_writer.go +++ b/pkg/podlogs/cluster_writer.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package podlogs diff --git a/pkg/podlogs/cluster_writer_test.go b/pkg/podlogs/cluster_writer_test.go index 96fea0859b..982700dbec 100644 --- a/pkg/podlogs/cluster_writer_test.go +++ b/pkg/podlogs/cluster_writer_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package podlogs diff --git a/pkg/podlogs/suite_test.go b/pkg/podlogs/suite_test.go index 793e424c49..60466821b5 100644 --- a/pkg/podlogs/suite_test.go +++ b/pkg/podlogs/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package podlogs diff --git a/pkg/podlogs/writer.go b/pkg/podlogs/writer.go index 96d306a269..21bb572ebd 100644 --- a/pkg/podlogs/writer.go +++ b/pkg/podlogs/writer.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package podlogs contains code to fetch logs from Kubernetes pods diff --git a/pkg/podlogs/writer_test.go b/pkg/podlogs/writer_test.go index ff296e06e7..fa5d3d73d9 100644 --- a/pkg/podlogs/writer_test.go +++ b/pkg/podlogs/writer_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package podlogs diff --git a/pkg/podspec/builder.go b/pkg/podspec/builder.go index e7d0be01b0..b4402cf31a 100644 --- a/pkg/podspec/builder.go +++ b/pkg/podspec/builder.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package podspec contains various utilities to deal with Pod Specs diff --git a/pkg/podspec/builder_test.go b/pkg/podspec/builder_test.go index 28e8027c4b..fdc5313440 100644 --- a/pkg/podspec/builder_test.go +++ b/pkg/podspec/builder_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package podspec diff --git a/pkg/podspec/suite_test.go b/pkg/podspec/suite_test.go index 112a5c2938..c2b2a4e412 100644 --- a/pkg/podspec/suite_test.go +++ b/pkg/podspec/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package podspec diff --git a/pkg/postgres/booleans.go b/pkg/postgres/booleans.go index 365bff9a9a..8a06a01a5f 100644 --- a/pkg/postgres/booleans.go +++ b/pkg/postgres/booleans.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/booleans_test.go b/pkg/postgres/booleans_test.go index d4733355f7..a568c55166 100644 --- a/pkg/postgres/booleans_test.go +++ b/pkg/postgres/booleans_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 021ac045f5..12fabdfc20 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/configuration_test.go b/pkg/postgres/configuration_test.go
index adc3891582..8d1bacac00 100644
--- a/pkg/postgres/configuration_test.go
+++ b/pkg/postgres/configuration_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/identifier.go b/pkg/postgres/identifier.go
index 2d8f42f8b9..ca55cdc868 100644
--- a/pkg/postgres/identifier.go
+++ b/pkg/postgres/identifier.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package postgres contains the function covering the PostgreSQL
diff --git a/pkg/postgres/identifier_test.go b/pkg/postgres/identifier_test.go
index f47c8d8d0f..1f2ecbb333 100644
--- a/pkg/postgres/identifier_test.go
+++ b/pkg/postgres/identifier_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/replication/doc.go b/pkg/postgres/replication/doc.go
index cf972013c3..27a19a506c 100644
--- a/pkg/postgres/replication/doc.go
+++ b/pkg/postgres/replication/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package replication contains the code relative to the
diff --git a/pkg/postgres/replication/explicit.go b/pkg/postgres/replication/explicit.go
index 834b7d2cb6..443d633edb 100644
--- a/pkg/postgres/replication/explicit.go
+++ b/pkg/postgres/replication/explicit.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package replication
diff --git a/pkg/postgres/replication/explicit_test.go b/pkg/postgres/replication/explicit_test.go
index 4870f945dd..c2a12a9a69 100644
--- a/pkg/postgres/replication/explicit_test.go
+++ b/pkg/postgres/replication/explicit_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package replication
diff --git a/pkg/postgres/replication/legacy.go b/pkg/postgres/replication/legacy.go
index 569930732b..cd56d8c6fc 100644
--- a/pkg/postgres/replication/legacy.go
+++ b/pkg/postgres/replication/legacy.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package replication
diff --git a/pkg/postgres/replication/legacy_test.go b/pkg/postgres/replication/legacy_test.go
index 96467ab3e0..3f87f07558 100644
--- a/pkg/postgres/replication/legacy_test.go
+++ b/pkg/postgres/replication/legacy_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package replication
diff --git a/pkg/postgres/replication/replication.go b/pkg/postgres/replication/replication.go
index c3746dd091..1864f81df2 100644
--- a/pkg/postgres/replication/replication.go
+++ b/pkg/postgres/replication/replication.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package replication
diff --git a/pkg/postgres/replication/suite_test.go b/pkg/postgres/replication/suite_test.go
index 6b249bedcd..e693b884a2 100644
--- a/pkg/postgres/replication/suite_test.go
+++ b/pkg/postgres/replication/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package replication
diff --git a/pkg/postgres/replication/utils.go b/pkg/postgres/replication/utils.go
index efc4f42b20..f39c28d576 100644
--- a/pkg/postgres/replication/utils.go
+++ b/pkg/postgres/replication/utils.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package replication
diff --git a/pkg/postgres/roles.go b/pkg/postgres/roles.go
index d956e862b4..b2565cf987 100644
--- a/pkg/postgres/roles.go
+++ b/pkg/postgres/roles.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/roles_test.go b/pkg/postgres/roles_test.go
index bf4858242f..722ebd23c3 100644
--- a/pkg/postgres/roles_test.go
+++ b/pkg/postgres/roles_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/status.go b/pkg/postgres/status.go
index 3afafcdb6b..332807ce09 100644
--- a/pkg/postgres/status.go
+++ b/pkg/postgres/status.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/status_test.go b/pkg/postgres/status_test.go
index d3a06f762d..369bad982e 100644
--- a/pkg/postgres/status_test.go
+++ b/pkg/postgres/status_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/suite_test.go b/pkg/postgres/suite_test.go
index eb00961f69..9fe32b8ed4 100644
--- a/pkg/postgres/suite_test.go
+++ b/pkg/postgres/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/wal.go b/pkg/postgres/wal.go
index 1ca31f0559..bf9de3103c 100644
--- a/pkg/postgres/wal.go
+++ b/pkg/postgres/wal.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/postgres/wal_test.go b/pkg/postgres/wal_test.go
index f2631b7cbe..92a7d17cb5 100644
--- a/pkg/postgres/wal_test.go
+++ b/pkg/postgres/wal_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package postgres
diff --git a/pkg/promotiontoken/doc.go b/pkg/promotiontoken/doc.go
index d79f0e7a8e..8728b341d4 100644
--- a/pkg/promotiontoken/doc.go
+++ b/pkg/promotiontoken/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package promotiontoken : This package contains the promotion token related operations
diff --git a/pkg/promotiontoken/promotion_token.go b/pkg/promotiontoken/promotion_token.go
index d3a0051769..edd2f3fcb1 100644
--- a/pkg/promotiontoken/promotion_token.go
+++ b/pkg/promotiontoken/promotion_token.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package promotiontoken
diff --git a/pkg/promotiontoken/promotion_token_test.go b/pkg/promotiontoken/promotion_token_test.go
index cce2e91479..0291293339 100644
--- a/pkg/promotiontoken/promotion_token_test.go
+++ b/pkg/promotiontoken/promotion_token_test.go
@@ -1,17 +1,20 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-  http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package promotiontoken
diff --git a/pkg/promotiontoken/suite_test.go b/pkg/promotiontoken/suite_test.go
index a2af066a89..34e4953ba0 100644
--- a/pkg/promotiontoken/suite_test.go
+++ b/pkg/promotiontoken/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package promotiontoken
diff --git a/pkg/reconciler/backup/volumesnapshot/catalog.go b/pkg/reconciler/backup/volumesnapshot/catalog.go
index 38048b4803..6b75ac48c0 100644
--- a/pkg/reconciler/backup/volumesnapshot/catalog.go
+++ b/pkg/reconciler/backup/volumesnapshot/catalog.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package volumesnapshot
diff --git a/pkg/reconciler/backup/volumesnapshot/doc.go b/pkg/reconciler/backup/volumesnapshot/doc.go
index 1a02f8dbca..c70a9fbdca 100644
--- a/pkg/reconciler/backup/volumesnapshot/doc.go
+++ b/pkg/reconciler/backup/volumesnapshot/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package volumesnapshot contains the VolumeSnapshot reconciler
diff --git a/pkg/reconciler/backup/volumesnapshot/errors.go b/pkg/reconciler/backup/volumesnapshot/errors.go
index e5c9d4c4e9..c11421a4f0 100644
--- a/pkg/reconciler/backup/volumesnapshot/errors.go
+++ b/pkg/reconciler/backup/volumesnapshot/errors.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/errors_test.go b/pkg/reconciler/backup/volumesnapshot/errors_test.go index 81536f9371..4105512fe3 100644 --- a/pkg/reconciler/backup/volumesnapshot/errors_test.go +++ b/pkg/reconciler/backup/volumesnapshot/errors_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/offline.go b/pkg/reconciler/backup/volumesnapshot/offline.go index 826a6ef1be..11944fb8a1 100644 --- a/pkg/reconciler/backup/volumesnapshot/offline.go +++ b/pkg/reconciler/backup/volumesnapshot/offline.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/offline_test.go b/pkg/reconciler/backup/volumesnapshot/offline_test.go index ac0175ab7c..dc2fdd7296 100644 --- a/pkg/reconciler/backup/volumesnapshot/offline_test.go +++ b/pkg/reconciler/backup/volumesnapshot/offline_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/online.go b/pkg/reconciler/backup/volumesnapshot/online.go index f57c012909..c33aed8c1a 100644 --- a/pkg/reconciler/backup/volumesnapshot/online.go +++ b/pkg/reconciler/backup/volumesnapshot/online.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/online_test.go b/pkg/reconciler/backup/volumesnapshot/online_test.go index 7c00dd17f4..d3e61cf4bf 100644 --- a/pkg/reconciler/backup/volumesnapshot/online_test.go +++ b/pkg/reconciler/backup/volumesnapshot/online_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler.go b/pkg/reconciler/backup/volumesnapshot/reconciler.go index 2ff568edff..47ffb826a4 100644 --- a/pkg/reconciler/backup/volumesnapshot/reconciler.go +++ b/pkg/reconciler/backup/volumesnapshot/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go index e3950ea970..31165768f2 100644 --- a/pkg/reconciler/backup/volumesnapshot/reconciler_test.go +++ b/pkg/reconciler/backup/volumesnapshot/reconciler_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/resources.go b/pkg/reconciler/backup/volumesnapshot/resources.go index 71794502f9..2342ebc5b6 100644 --- a/pkg/reconciler/backup/volumesnapshot/resources.go +++ b/pkg/reconciler/backup/volumesnapshot/resources.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/resources_test.go b/pkg/reconciler/backup/volumesnapshot/resources_test.go index ea65460e57..b58210ef2d 100644 --- a/pkg/reconciler/backup/volumesnapshot/resources_test.go +++ b/pkg/reconciler/backup/volumesnapshot/resources_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/backup/volumesnapshot/suite_test.go b/pkg/reconciler/backup/volumesnapshot/suite_test.go index 98ea99c492..1e926187ad 100644 --- a/pkg/reconciler/backup/volumesnapshot/suite_test.go +++ b/pkg/reconciler/backup/volumesnapshot/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package volumesnapshot diff --git a/pkg/reconciler/hibernation/doc.go b/pkg/reconciler/hibernation/doc.go index 0e3392f678..2c7906f2a9 100644 --- a/pkg/reconciler/hibernation/doc.go +++ b/pkg/reconciler/hibernation/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package hibernation contains all the logic to hibernate a CNPG cluster diff --git a/pkg/reconciler/hibernation/reconciler.go b/pkg/reconciler/hibernation/reconciler.go index 4265278b2b..e24176efe0 100644 --- a/pkg/reconciler/hibernation/reconciler.go +++ b/pkg/reconciler/hibernation/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation diff --git a/pkg/reconciler/hibernation/reconciler_test.go b/pkg/reconciler/hibernation/reconciler_test.go index bd27fd90e7..8385e957af 100644 --- a/pkg/reconciler/hibernation/reconciler_test.go +++ b/pkg/reconciler/hibernation/reconciler_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation diff --git a/pkg/reconciler/hibernation/status.go b/pkg/reconciler/hibernation/status.go index 8f16caa2a3..bb07f72bd6 100644 --- a/pkg/reconciler/hibernation/status.go +++ b/pkg/reconciler/hibernation/status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation diff --git a/pkg/reconciler/hibernation/status_test.go b/pkg/reconciler/hibernation/status_test.go index ba0012451f..481753052c 100644 --- a/pkg/reconciler/hibernation/status_test.go +++ b/pkg/reconciler/hibernation/status_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation diff --git a/pkg/reconciler/hibernation/suite_test.go b/pkg/reconciler/hibernation/suite_test.go index eb2cce6cd8..74db02902a 100644 --- a/pkg/reconciler/hibernation/suite_test.go +++ b/pkg/reconciler/hibernation/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package hibernation diff --git a/pkg/reconciler/instance/doc.go b/pkg/reconciler/instance/doc.go index b410b4211b..e32da09ac2 100644 --- a/pkg/reconciler/instance/doc.go +++ b/pkg/reconciler/instance/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package instance contains all the logic to reconcile an instance pod diff --git a/pkg/reconciler/instance/metadata.go b/pkg/reconciler/instance/metadata.go index dae9f9b311..213237e8d9 100644 --- a/pkg/reconciler/instance/metadata.go +++ b/pkg/reconciler/instance/metadata.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package instance diff --git a/pkg/reconciler/instance/metadata_test.go b/pkg/reconciler/instance/metadata_test.go index 5bbcd571d1..23a469e737 100644 --- a/pkg/reconciler/instance/metadata_test.go +++ b/pkg/reconciler/instance/metadata_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package instance diff --git a/pkg/reconciler/instance/suite_test.go b/pkg/reconciler/instance/suite_test.go index c2e88914f6..5d3ff5a753 100644 --- a/pkg/reconciler/instance/suite_test.go +++ b/pkg/reconciler/instance/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package instance diff --git a/pkg/reconciler/persistentvolumeclaim/build.go b/pkg/reconciler/persistentvolumeclaim/build.go index d8c2492eb6..a47c3621fe 100644 --- a/pkg/reconciler/persistentvolumeclaim/build.go +++ b/pkg/reconciler/persistentvolumeclaim/build.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/build_test.go b/pkg/reconciler/persistentvolumeclaim/build_test.go index 71f7a6c5cf..a9747ec1ab 100644 --- a/pkg/reconciler/persistentvolumeclaim/build_test.go +++ b/pkg/reconciler/persistentvolumeclaim/build_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/calculator.go b/pkg/reconciler/persistentvolumeclaim/calculator.go index c15dbc1c1d..59920e6947 100644 --- a/pkg/reconciler/persistentvolumeclaim/calculator.go +++ b/pkg/reconciler/persistentvolumeclaim/calculator.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/calculator_test.go b/pkg/reconciler/persistentvolumeclaim/calculator_test.go index 6b06797b1d..68bd494544 100644 --- a/pkg/reconciler/persistentvolumeclaim/calculator_test.go +++ b/pkg/reconciler/persistentvolumeclaim/calculator_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/create.go b/pkg/reconciler/persistentvolumeclaim/create.go index 255a3f06fd..8885b27349 100644 --- a/pkg/reconciler/persistentvolumeclaim/create.go +++ b/pkg/reconciler/persistentvolumeclaim/create.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/create_test.go b/pkg/reconciler/persistentvolumeclaim/create_test.go index 5f8b08bc9e..6dcf250402 100644 --- a/pkg/reconciler/persistentvolumeclaim/create_test.go +++ b/pkg/reconciler/persistentvolumeclaim/create_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/delete.go b/pkg/reconciler/persistentvolumeclaim/delete.go index 9d2e51db6a..36e6f2664c 100644 --- a/pkg/reconciler/persistentvolumeclaim/delete.go +++ b/pkg/reconciler/persistentvolumeclaim/delete.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/delete_test.go b/pkg/reconciler/persistentvolumeclaim/delete_test.go index 204b251e83..e3d51b4437 100644 --- a/pkg/reconciler/persistentvolumeclaim/delete_test.go +++ b/pkg/reconciler/persistentvolumeclaim/delete_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/doc.go b/pkg/reconciler/persistentvolumeclaim/doc.go index 1bf1beecb5..09fe5b8d0d 100644 --- a/pkg/reconciler/persistentvolumeclaim/doc.go +++ b/pkg/reconciler/persistentvolumeclaim/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package persistentvolumeclaim contains all the logic to reconcile and build PVCS diff --git a/pkg/reconciler/persistentvolumeclaim/instance.go b/pkg/reconciler/persistentvolumeclaim/instance.go index 3dd3ad388f..44db15d0a0 100644 --- a/pkg/reconciler/persistentvolumeclaim/instance.go +++ b/pkg/reconciler/persistentvolumeclaim/instance.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/metadata.go b/pkg/reconciler/persistentvolumeclaim/metadata.go index 5106a65b86..85ae5b65cc 100644 --- a/pkg/reconciler/persistentvolumeclaim/metadata.go +++ b/pkg/reconciler/persistentvolumeclaim/metadata.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/metadata_test.go b/pkg/reconciler/persistentvolumeclaim/metadata_test.go index 49fa3cfe6e..d57f1072ac 100644 --- a/pkg/reconciler/persistentvolumeclaim/metadata_test.go +++ b/pkg/reconciler/persistentvolumeclaim/metadata_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler.go b/pkg/reconciler/persistentvolumeclaim/reconciler.go index 727fab4be6..111f5614fe 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go index 78e5a44e42..33d84a116a 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/requests.go b/pkg/reconciler/persistentvolumeclaim/requests.go index 71d30a1008..d43d9b9e6c 100644 --- a/pkg/reconciler/persistentvolumeclaim/requests.go +++ b/pkg/reconciler/persistentvolumeclaim/requests.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/resources.go b/pkg/reconciler/persistentvolumeclaim/resources.go index 6377ca4f85..1ca30256fc 100644 --- a/pkg/reconciler/persistentvolumeclaim/resources.go +++ b/pkg/reconciler/persistentvolumeclaim/resources.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/resources_test.go b/pkg/reconciler/persistentvolumeclaim/resources_test.go index 791f70e30a..e72894f4b2 100644 --- a/pkg/reconciler/persistentvolumeclaim/resources_test.go +++ b/pkg/reconciler/persistentvolumeclaim/resources_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/status.go b/pkg/reconciler/persistentvolumeclaim/status.go index fbef1258a1..3ca29d16dc 100644 --- a/pkg/reconciler/persistentvolumeclaim/status.go +++ b/pkg/reconciler/persistentvolumeclaim/status.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource.go b/pkg/reconciler/persistentvolumeclaim/storagesource.go index f1e625d36b..350e59f8c2 100644 --- a/pkg/reconciler/persistentvolumeclaim/storagesource.go +++ b/pkg/reconciler/persistentvolumeclaim/storagesource.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource_test.go b/pkg/reconciler/persistentvolumeclaim/storagesource_test.go index 216d3c5e5d..6e33ce34a4 100644 --- a/pkg/reconciler/persistentvolumeclaim/storagesource_test.go +++ b/pkg/reconciler/persistentvolumeclaim/storagesource_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/suite_test.go b/pkg/reconciler/persistentvolumeclaim/suite_test.go index 7af04511f7..b47104600a 100644 --- a/pkg/reconciler/persistentvolumeclaim/suite_test.go +++ b/pkg/reconciler/persistentvolumeclaim/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/validation.go b/pkg/reconciler/persistentvolumeclaim/validation.go index ac12af1f5f..7c7a1e9460 100644 --- a/pkg/reconciler/persistentvolumeclaim/validation.go +++ b/pkg/reconciler/persistentvolumeclaim/validation.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/persistentvolumeclaim/validation_test.go b/pkg/reconciler/persistentvolumeclaim/validation_test.go index e85ab56655..6b157dd4c8 100644 --- a/pkg/reconciler/persistentvolumeclaim/validation_test.go +++ b/pkg/reconciler/persistentvolumeclaim/validation_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package persistentvolumeclaim diff --git a/pkg/reconciler/replicaclusterswitch/conditions.go b/pkg/reconciler/replicaclusterswitch/conditions.go index d2ef885c82..73d89850db 100644 --- a/pkg/reconciler/replicaclusterswitch/conditions.go +++ b/pkg/reconciler/replicaclusterswitch/conditions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replicaclusterswitch diff --git a/pkg/reconciler/replicaclusterswitch/doc.go b/pkg/reconciler/replicaclusterswitch/doc.go index dc4e51bb24..7ad8598b56 100644 --- a/pkg/reconciler/replicaclusterswitch/doc.go +++ b/pkg/reconciler/replicaclusterswitch/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package replicaclusterswitch contains the logic needed to turn on the replica cluster feature on an diff --git a/pkg/reconciler/replicaclusterswitch/reconciler.go b/pkg/reconciler/replicaclusterswitch/reconciler.go index fd185a7e49..07a0f0ee7d 100644 --- a/pkg/reconciler/replicaclusterswitch/reconciler.go +++ b/pkg/reconciler/replicaclusterswitch/reconciler.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replicaclusterswitch diff --git a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go index 9b7413bf30..1a04d63280 100644 --- a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go +++ b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package replicaclusterswitch diff --git a/pkg/resources/doc.go b/pkg/resources/doc.go index 6501187d05..1936616039 100644 --- a/pkg/resources/doc.go +++ b/pkg/resources/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package resources contains a set of Kubernetes generic utilities diff --git a/pkg/resources/labels_annotations.go b/pkg/resources/labels_annotations.go index 9b2049df59..7bd17656e6 100644 --- a/pkg/resources/labels_annotations.go +++ b/pkg/resources/labels_annotations.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package resources diff --git a/pkg/resources/metadatabuilder.go b/pkg/resources/metadatabuilder.go index ed2e1902d9..300f4f9ee3 100644 --- a/pkg/resources/metadatabuilder.go +++ b/pkg/resources/metadatabuilder.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package resources diff --git a/pkg/resources/persistentvolumeclaim.go b/pkg/resources/persistentvolumeclaim.go index fd1c2c6168..4988222585 100644 --- a/pkg/resources/persistentvolumeclaim.go +++ b/pkg/resources/persistentvolumeclaim.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package resources
diff --git a/pkg/resources/retry.go b/pkg/resources/retry.go
index 4a0d13aa1b..0c38bbb234 100644
--- a/pkg/resources/retry.go
+++ b/pkg/resources/retry.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package resources
diff --git a/pkg/resources/retry_test.go b/pkg/resources/retry_test.go
index 7379c2905c..7503998c60 100644
--- a/pkg/resources/retry_test.go
+++ b/pkg/resources/retry_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package resources
diff --git a/pkg/resources/status/conditions.go b/pkg/resources/status/conditions.go
index c8b814aded..00c87d5e9a 100644
--- a/pkg/resources/status/conditions.go
+++ b/pkg/resources/status/conditions.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package status
diff --git a/pkg/resources/status/doc.go b/pkg/resources/status/doc.go
index d998bfbcbb..b830750400 100644
--- a/pkg/resources/status/doc.go
+++ b/pkg/resources/status/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package status contains all the function needed to interact properly with the resources status
diff --git a/pkg/resources/status/patch.go b/pkg/resources/status/patch.go
index 3613a46f43..51f8847e20 100644
--- a/pkg/resources/status/patch.go
+++ b/pkg/resources/status/patch.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package status
diff --git a/pkg/resources/status/transactions.go b/pkg/resources/status/transactions.go
index ca04d437a9..590352b807 100644
--- a/pkg/resources/status/transactions.go
+++ b/pkg/resources/status/transactions.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package status
diff --git a/pkg/resources/suite_test.go b/pkg/resources/suite_test.go
index 9accbbf2ed..62d320da65 100644
--- a/pkg/resources/suite_test.go
+++ b/pkg/resources/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package resources
diff --git a/pkg/servicespec/builder.go b/pkg/servicespec/builder.go
index 8f1cbaae36..1b8dbc7038 100644
--- a/pkg/servicespec/builder.go
+++ b/pkg/servicespec/builder.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package servicespec contains various utilities to deal with Service Specs
diff --git a/pkg/servicespec/builder_test.go b/pkg/servicespec/builder_test.go
index 11916a3955..9529666796 100644
--- a/pkg/servicespec/builder_test.go
+++ b/pkg/servicespec/builder_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package servicespec
diff --git a/pkg/servicespec/suite_test.go b/pkg/servicespec/suite_test.go
index 2c2460ac8d..98796d834c 100644
--- a/pkg/servicespec/suite_test.go
+++ b/pkg/servicespec/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package servicespec
diff --git a/pkg/specs/containers.go b/pkg/specs/containers.go
index 88a56af2d7..ed18f04ad6 100644
--- a/pkg/specs/containers.go
+++ b/pkg/specs/containers.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/containers_test.go b/pkg/specs/containers_test.go
index e8573c9fd3..3839a090bc 100644
--- a/pkg/specs/containers_test.go
+++ b/pkg/specs/containers_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/jobs.go b/pkg/specs/jobs.go
index bbd5cd936c..b651d5cfa1 100644
--- a/pkg/specs/jobs.go
+++ b/pkg/specs/jobs.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/jobs_test.go b/pkg/specs/jobs_test.go
index 378f1ae813..e4e7ea1daa 100644
--- a/pkg/specs/jobs_test.go
+++ b/pkg/specs/jobs_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/pg_pods.go b/pkg/specs/pg_pods.go
index a2ac41572a..1ca30e55a0 100644
--- a/pkg/specs/pg_pods.go
+++ b/pkg/specs/pg_pods.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/pg_pods_test.go b/pkg/specs/pg_pods_test.go
index 4cf2022de3..b41a3833ea 100644
--- a/pkg/specs/pg_pods_test.go
+++ b/pkg/specs/pg_pods_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go
index 78553d186a..cb25082c2f 100644
--- a/pkg/specs/pgbouncer/deployments.go
+++ b/pkg/specs/pgbouncer/deployments.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package pgbouncer contains the specification of the K8s resources
diff --git a/pkg/specs/pgbouncer/deployments_test.go b/pkg/specs/pgbouncer/deployments_test.go
index 67ba6bc306..31af3a075e 100644
--- a/pkg/specs/pgbouncer/deployments_test.go
+++ b/pkg/specs/pgbouncer/deployments_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgbouncer
diff --git a/pkg/specs/pgbouncer/podmonitor.go b/pkg/specs/pgbouncer/podmonitor.go
index 4ea6abb33b..92a64c9bb6 100644
--- a/pkg/specs/pgbouncer/podmonitor.go
+++ b/pkg/specs/pgbouncer/podmonitor.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgbouncer
diff --git a/pkg/specs/pgbouncer/podmonitor_test.go b/pkg/specs/pgbouncer/podmonitor_test.go
index 49db8ab4e0..dfceeb3a7c 100644
--- a/pkg/specs/pgbouncer/podmonitor_test.go
+++ b/pkg/specs/pgbouncer/podmonitor_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgbouncer
diff --git a/pkg/specs/pgbouncer/rbac.go b/pkg/specs/pgbouncer/rbac.go
index f134ca9149..05fb89c52f 100644
--- a/pkg/specs/pgbouncer/rbac.go
+++ b/pkg/specs/pgbouncer/rbac.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgbouncer
diff --git a/pkg/specs/pgbouncer/rbac_test.go b/pkg/specs/pgbouncer/rbac_test.go
index 0404927eb1..a4f961ed45 100644
--- a/pkg/specs/pgbouncer/rbac_test.go
+++ b/pkg/specs/pgbouncer/rbac_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgbouncer
diff --git a/pkg/specs/pgbouncer/services.go b/pkg/specs/pgbouncer/services.go
index 4b63446b04..0b8653533e 100644
--- a/pkg/specs/pgbouncer/services.go
+++ b/pkg/specs/pgbouncer/services.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgbouncer
diff --git a/pkg/specs/pgbouncer/services_test.go b/pkg/specs/pgbouncer/services_test.go
index 62d14a25c3..0163399b43 100644
--- a/pkg/specs/pgbouncer/services_test.go
+++ b/pkg/specs/pgbouncer/services_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgbouncer
diff --git a/pkg/specs/pgbouncer/suite_test.go b/pkg/specs/pgbouncer/suite_test.go
index ab693cbd2c..d8e2c982c1 100644
--- a/pkg/specs/pgbouncer/suite_test.go
+++ b/pkg/specs/pgbouncer/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package pgbouncer
diff --git a/pkg/specs/poddisruptionbudget.go b/pkg/specs/poddisruptionbudget.go
index cf88f80454..b44180c737 100644
--- a/pkg/specs/poddisruptionbudget.go
+++ b/pkg/specs/poddisruptionbudget.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/poddisruptionbudget_test.go b/pkg/specs/poddisruptionbudget_test.go
index 115ae0825b..a58132a748 100644
--- a/pkg/specs/poddisruptionbudget_test.go
+++ b/pkg/specs/poddisruptionbudget_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/podmonitor.go b/pkg/specs/podmonitor.go
index 7d1e00bfcb..dcf9ef2ee6 100644
--- a/pkg/specs/podmonitor.go
+++ b/pkg/specs/podmonitor.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/podmonitor_test.go b/pkg/specs/podmonitor_test.go
index 1673d6f28a..30215e43da 100644
--- a/pkg/specs/podmonitor_test.go
+++ b/pkg/specs/podmonitor_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go
index e9c308cb11..0146862804 100644
--- a/pkg/specs/pods.go
+++ b/pkg/specs/pods.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package specs contains the specification of the K8s resources
diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go
index fd2e7e1bbd..dcb69dc12e 100644
--- a/pkg/specs/pods_test.go
+++ b/pkg/specs/pods_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/podspec_diff.go b/pkg/specs/podspec_diff.go
index 15ab552799..38523a142a 100644
--- a/pkg/specs/podspec_diff.go
+++ b/pkg/specs/podspec_diff.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/podspec_diff_test.go b/pkg/specs/podspec_diff_test.go
index cfbd40e907..8ac5f50f3c 100644
--- a/pkg/specs/podspec_diff_test.go
+++ b/pkg/specs/podspec_diff_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/rolebinding.go b/pkg/specs/rolebinding.go
index d532e2f61c..d5968cec6f 100644
--- a/pkg/specs/rolebinding.go
+++ b/pkg/specs/rolebinding.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/rolebinding_test.go b/pkg/specs/rolebinding_test.go
index c101194b19..3ba0cfca00 100644
--- a/pkg/specs/rolebinding_test.go
+++ b/pkg/specs/rolebinding_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/roles.go b/pkg/specs/roles.go
index ac328cc66e..8b18c95e10 100644
--- a/pkg/specs/roles.go
+++ b/pkg/specs/roles.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/roles_test.go b/pkg/specs/roles_test.go
index 0d3df97d28..98ce4213fa 100644
--- a/pkg/specs/roles_test.go
+++ b/pkg/specs/roles_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/secrets.go b/pkg/specs/secrets.go
index d7503dd026..1ec994966e 100644
--- a/pkg/specs/secrets.go
+++ b/pkg/specs/secrets.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/secrets_test.go b/pkg/specs/secrets_test.go
index 6648b76294..22ab765033 100644
--- a/pkg/specs/secrets_test.go
+++ b/pkg/specs/secrets_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/serviceaccount.go b/pkg/specs/serviceaccount.go
index 0862e308ec..3e0758edbe 100644
--- a/pkg/specs/serviceaccount.go
+++ b/pkg/specs/serviceaccount.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/serviceaccount_test.go b/pkg/specs/serviceaccount_test.go
index 066df1107b..15f16abd28 100644
--- a/pkg/specs/serviceaccount_test.go
+++ b/pkg/specs/serviceaccount_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/services.go b/pkg/specs/services.go
index 454f828709..28b58e3ab9 100644
--- a/pkg/specs/services.go
+++ b/pkg/specs/services.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/services_test.go b/pkg/specs/services_test.go
index 86c108c7a6..2fc0ff0cc6 100644
--- a/pkg/specs/services_test.go
+++ b/pkg/specs/services_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/suite_test.go b/pkg/specs/suite_test.go
index 32e32787f5..ce425b74f5 100644
--- a/pkg/specs/suite_test.go
+++ b/pkg/specs/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/volumes.go b/pkg/specs/volumes.go
index ffa755054f..186040e268 100644
--- a/pkg/specs/volumes.go
+++ b/pkg/specs/volumes.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/specs/volumes_test.go b/pkg/specs/volumes_test.go
index 97cd88844e..ce2b59dda1 100644
--- a/pkg/specs/volumes_test.go
+++ b/pkg/specs/volumes_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package specs
diff --git a/pkg/system/compatibility/darwin.go b/pkg/system/compatibility/darwin.go
index 64c76e9e24..271b27a80b 100644
--- a/pkg/system/compatibility/darwin.go
+++ b/pkg/system/compatibility/darwin.go
@@ -2,7 +2,8 @@
 // +build darwin
 
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -15,6 +16,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package compatibility
diff --git a/pkg/system/compatibility/doc.go b/pkg/system/compatibility/doc.go
index 5777cdce6d..fbec0a7e3a 100644
--- a/pkg/system/compatibility/doc.go
+++ b/pkg/system/compatibility/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package compatibility provides a layer to cross-compile with other OS than Linux
diff --git a/pkg/system/compatibility/unix.go b/pkg/system/compatibility/unix.go
index 9b17bf5af5..d860d250d5 100644
--- a/pkg/system/compatibility/unix.go
+++ b/pkg/system/compatibility/unix.go
@@ -2,7 +2,8 @@
 // +build linux
 
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -15,6 +16,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package compatibility provides a layer to cross-compile with other OS than Linux
diff --git a/pkg/system/compatibility/windows.go b/pkg/system/compatibility/windows.go
index af7334301d..9077194c77 100644
--- a/pkg/system/compatibility/windows.go
+++ b/pkg/system/compatibility/windows.go
@@ -2,7 +2,8 @@
 // +build windows
 
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -15,6 +16,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package compatibility provides a layer to cross-compile with other OS than Linux
diff --git a/pkg/system/suite_test.go b/pkg/system/suite_test.go
index e68e62963b..a1806b574a 100644
--- a/pkg/system/suite_test.go
+++ b/pkg/system/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package system
diff --git a/pkg/system/system.go b/pkg/system/system.go
index dcc80e1a62..a6371ff2aa 100644
--- a/pkg/system/system.go
+++ b/pkg/system/system.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package system provides an interface with the operating system
diff --git a/pkg/system/system_test.go b/pkg/system/system_test.go
index e5682a789c..0feb9a27c1 100644
--- a/pkg/system/system_test.go
+++ b/pkg/system/system_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package system
diff --git a/pkg/utils/conditions.go b/pkg/utils/conditions.go
index fa1afcac11..918b97423c 100644
--- a/pkg/utils/conditions.go
+++ b/pkg/utils/conditions.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/conditions_test.go b/pkg/utils/conditions_test.go
index edb1ed9780..b03e92f5d2 100644
--- a/pkg/utils/conditions_test.go
+++ b/pkg/utils/conditions_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/context.go b/pkg/utils/context.go
index 1f5b25a06e..128cb28340 100644
--- a/pkg/utils/context.go
+++ b/pkg/utils/context.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/discovery.go b/pkg/utils/discovery.go
index 9f4fc4637d..2c9a602144 100644
--- a/pkg/utils/discovery.go
+++ b/pkg/utils/discovery.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/discovery_test.go b/pkg/utils/discovery_test.go
index efd23e0c2c..50a7535b2f 100644
--- a/pkg/utils/discovery_test.go
+++ b/pkg/utils/discovery_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/exec.go b/pkg/utils/exec.go
index f9844f5365..57e88e7a90 100644
--- a/pkg/utils/exec.go
+++ b/pkg/utils/exec.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package utils contains otherwise uncategorized kubernetes
diff --git a/pkg/utils/fencing.go b/pkg/utils/fencing.go
index 13e9aca7bd..83ec99599c 100644
--- a/pkg/utils/fencing.go
+++ b/pkg/utils/fencing.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/fencing_test.go b/pkg/utils/fencing_test.go
index 27b1043cca..00563141a3 100644
--- a/pkg/utils/fencing_test.go
+++ b/pkg/utils/fencing_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/finalizers.go b/pkg/utils/finalizers.go
index ba9ed64f16..a1f5da62b9 100644
--- a/pkg/utils/finalizers.go
+++ b/pkg/utils/finalizers.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/hash/doc.go b/pkg/utils/hash/doc.go
index f7f47923f9..76b99e7a53 100644
--- a/pkg/utils/hash/doc.go
+++ b/pkg/utils/hash/doc.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 // Package hash allows the user to get a hash number for a given Kubernetes
diff --git a/pkg/utils/hash/hash.go b/pkg/utils/hash/hash.go
index e3f55d62fb..3f02a37310 100644
--- a/pkg/utils/hash/hash.go
+++ b/pkg/utils/hash/hash.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package hash
diff --git a/pkg/utils/hash/hash_test.go b/pkg/utils/hash/hash_test.go
index 1dddad3614..8c5e7fa323 100644
--- a/pkg/utils/hash/hash_test.go
+++ b/pkg/utils/hash/hash_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package hash
diff --git a/pkg/utils/hash/suite_test.go b/pkg/utils/hash/suite_test.go
index 74e1f06114..fec5faaacf 100644
--- a/pkg/utils/hash/suite_test.go
+++ b/pkg/utils/hash/suite_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package hash
diff --git a/pkg/utils/job_conditions.go b/pkg/utils/job_conditions.go
index ccbe148caf..fe1864a9d1 100644
--- a/pkg/utils/job_conditions.go
+++ b/pkg/utils/job_conditions.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/job_conditions_test.go b/pkg/utils/job_conditions_test.go
index 27f91c0c18..590eb12ce2 100644
--- a/pkg/utils/job_conditions_test.go
+++ b/pkg/utils/job_conditions_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go
index 32d634962a..4023be2248 100644
--- a/pkg/utils/labels_annotations.go
+++ b/pkg/utils/labels_annotations.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/labels_annotations_test.go b/pkg/utils/labels_annotations_test.go
index 2de3e3544f..a6975fb096 100644
--- a/pkg/utils/labels_annotations_test.go
+++ b/pkg/utils/labels_annotations_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/math.go b/pkg/utils/math.go
index 7a98f3d02c..03b13ece60 100644
--- a/pkg/utils/math.go
+++ b/pkg/utils/math.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/operations.go b/pkg/utils/operations.go
index 78ae73afb8..13083e7739 100644
--- a/pkg/utils/operations.go
+++ b/pkg/utils/operations.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/operations_test.go b/pkg/utils/operations_test.go
index f619946664..fd26743532 100644
--- a/pkg/utils/operations_test.go
+++ b/pkg/utils/operations_test.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
 */
 
 package utils
diff --git a/pkg/utils/ownership.go b/pkg/utils/ownership.go
index 277a875949..dee78cd514 100644
--- a/pkg/utils/ownership.go
+++ b/pkg/utils/ownership.go
@@ -1,5 +1,6 @@
 /*
-Copyright The CloudNativePG Contributors
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/parser.go b/pkg/utils/parser.go index 6cca7e804e..a889c2cbc9 100644 --- a/pkg/utils/parser.go +++ b/pkg/utils/parser.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/parser_test.go b/pkg/utils/parser_test.go index 8c2d5b1fe7..9e34831005 100644 --- a/pkg/utils/parser_test.go +++ b/pkg/utils/parser_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/pod_conditions.go b/pkg/utils/pod_conditions.go index f206761361..dde8a688ac 100644 --- a/pkg/utils/pod_conditions.go +++ b/pkg/utils/pod_conditions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/pod_conditions_test.go b/pkg/utils/pod_conditions_test.go index 987b6e77cb..185dfc9179 100644 --- a/pkg/utils/pod_conditions_test.go +++ b/pkg/utils/pod_conditions_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/reconciliation.go b/pkg/utils/reconciliation.go index 2d0798a287..13aca8c5e5 100644 --- a/pkg/utils/reconciliation.go +++ b/pkg/utils/reconciliation.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/utils/suite_test.go b/pkg/utils/suite_test.go index 768193fc2f..e6852d10fc 100644 --- a/pkg/utils/suite_test.go +++ b/pkg/utils/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index f6c527b9ad..3c3b08c26d 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package versions contains the version of the CloudNativePG operator and the software diff --git a/releases/operator-manifests.go b/releases/operator-manifests.go index 5f7e1e90ac..eb2f7e25c6 100644 --- a/releases/operator-manifests.go +++ b/releases/operator-manifests.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package releases contains the filesystem with operator manifests with all the diff --git a/tests/e2e/affinity_test.go b/tests/e2e/affinity_test.go index 69d2ce92a8..e5e2de357a 100644 --- a/tests/e2e/affinity_test.go +++ b/tests/e2e/affinity_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/apparmor_test.go b/tests/e2e/apparmor_test.go index c38a37401c..dd2ab68201 100644 --- a/tests/e2e/apparmor_test.go +++ b/tests/e2e/apparmor_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/architecture_test.go b/tests/e2e/architecture_test.go index 4aeb3992b3..f159e8fe44 100644 --- a/tests/e2e/architecture_test.go +++ b/tests/e2e/architecture_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 45847935f3..68bd5d55b4 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go index d83e2c6a2e..a8b538c653 100644 --- a/tests/e2e/backup_restore_azure_test.go +++ b/tests/e2e/backup_restore_azure_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/backup_restore_azurite_test.go b/tests/e2e/backup_restore_azurite_test.go index c0f8281ef7..c4d6f9ddf1 100644 --- a/tests/e2e/backup_restore_azurite_test.go +++ b/tests/e2e/backup_restore_azurite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/backup_restore_minio_test.go b/tests/e2e/backup_restore_minio_test.go index 9e065f3250..96664129ca 100644 --- a/tests/e2e/backup_restore_minio_test.go +++ b/tests/e2e/backup_restore_minio_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/certificates_test.go b/tests/e2e/certificates_test.go index 26a82acaf4..6c553623f3 100644 --- a/tests/e2e/certificates_test.go +++ b/tests/e2e/certificates_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index 7db2359986..198f3278ea 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/cluster_monolithic_test.go b/tests/e2e/cluster_monolithic_test.go index 5d52f4fb94..2351278503 100644 --- a/tests/e2e/cluster_monolithic_test.go +++ b/tests/e2e/cluster_monolithic_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/cluster_setup_test.go b/tests/e2e/cluster_setup_test.go index 1e20854751..cf6672e3e4 100644 --- a/tests/e2e/cluster_setup_test.go +++ b/tests/e2e/cluster_setup_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/commons_test.go b/tests/e2e/commons_test.go index 50b12cb1f6..4315e16411 100644 --- a/tests/e2e/commons_test.go +++ b/tests/e2e/commons_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/config_support_test.go b/tests/e2e/config_support_test.go index 4f02ba0dca..92977539d7 100644 --- a/tests/e2e/config_support_test.go +++ b/tests/e2e/config_support_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go index 4690105b1f..ddf4c1001f 100644 --- a/tests/e2e/configuration_update_test.go +++ b/tests/e2e/configuration_update_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/connection_test.go b/tests/e2e/connection_test.go index 27b770922b..8eb28dd75c 100644 --- a/tests/e2e/connection_test.go +++ b/tests/e2e/connection_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index 07f87d94b4..34c12d5e43 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/declarative_hibernation_test.go b/tests/e2e/declarative_hibernation_test.go index c08333f120..b8eb7997ae 100644 --- a/tests/e2e/declarative_hibernation_test.go +++ b/tests/e2e/declarative_hibernation_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/disk_space_test.go b/tests/e2e/disk_space_test.go index 3915a66f1f..6668943610 100644 --- a/tests/e2e/disk_space_test.go +++ b/tests/e2e/disk_space_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go index a5dd5d350b..cc79f49c0e 100644 --- a/tests/e2e/drain_node_test.go +++ b/tests/e2e/drain_node_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/eviction_test.go b/tests/e2e/eviction_test.go index 54b9a6f42d..59881d4055 100644 --- a/tests/e2e/eviction_test.go +++ b/tests/e2e/eviction_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go index 23f922fca4..e88e0a291e 100644 --- a/tests/e2e/failover_test.go +++ b/tests/e2e/failover_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/fastfailover_test.go b/tests/e2e/fastfailover_test.go index 7b356c74b2..4de03448a0 100644 --- a/tests/e2e/fastfailover_test.go +++ b/tests/e2e/fastfailover_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/fastswitchover_test.go b/tests/e2e/fastswitchover_test.go index 9b96bfc6e3..e8a478e646 100644 --- a/tests/e2e/fastswitchover_test.go +++ b/tests/e2e/fastswitchover_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/fencing_test.go b/tests/e2e/fencing_test.go index 366b805dd1..407801f9c7 100644 --- a/tests/e2e/fencing_test.go +++ b/tests/e2e/fencing_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/initdb_test.go b/tests/e2e/initdb_test.go index ef9fc8e739..29a3635667 100644 --- a/tests/e2e/initdb_test.go +++ b/tests/e2e/initdb_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/logs_test.go b/tests/e2e/logs_test.go index a8da797b3e..905171b732 100644 --- a/tests/e2e/logs_test.go +++ b/tests/e2e/logs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/managed_roles_test.go b/tests/e2e/managed_roles_test.go index 748b7a0217..5e3cb25f84 100644 --- a/tests/e2e/managed_roles_test.go +++ b/tests/e2e/managed_roles_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/managed_services_test.go b/tests/e2e/managed_services_test.go index 348d051472..7409707cb6 100644 --- a/tests/e2e/managed_services_test.go +++ b/tests/e2e/managed_services_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go index 71890aa882..d44dc12a9b 100644 --- a/tests/e2e/metrics_test.go +++ b/tests/e2e/metrics_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/monitoring_test.go b/tests/e2e/monitoring_test.go index c1ef082a10..415e88694e 100644 --- a/tests/e2e/monitoring_test.go +++ b/tests/e2e/monitoring_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/nodeselector_test.go b/tests/e2e/nodeselector_test.go index e7b3d1a045..a422db46cc 100644 --- a/tests/e2e/nodeselector_test.go +++ b/tests/e2e/nodeselector_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/openshift_upgrade_test.go b/tests/e2e/openshift_upgrade_test.go index 0ba60354ab..9672475b24 100644 --- a/tests/e2e/openshift_upgrade_test.go +++ b/tests/e2e/openshift_upgrade_test.go @@ -1,18 +1,22 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ + package e2e import ( diff --git a/tests/e2e/operator_deployment_test.go b/tests/e2e/operator_deployment_test.go index d451723b8c..95263e6bc4 100644 --- a/tests/e2e/operator_deployment_test.go +++ b/tests/e2e/operator_deployment_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/operator_ha_test.go b/tests/e2e/operator_ha_test.go index 94a65fe81c..6eb1a78509 100644 --- a/tests/e2e/operator_ha_test.go +++ b/tests/e2e/operator_ha_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index 96125d7b23..2ee70fa746 100644 --- a/tests/e2e/operator_unavailable_test.go +++ b/tests/e2e/operator_unavailable_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/pg_basebackup_test.go b/tests/e2e/pg_basebackup_test.go index ea0b11806f..7a589859a6 100644 --- a/tests/e2e/pg_basebackup_test.go +++ b/tests/e2e/pg_basebackup_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/pg_data_corruption_test.go b/tests/e2e/pg_data_corruption_test.go index 44acfafed8..e70ba116b7 100644 --- a/tests/e2e/pg_data_corruption_test.go +++ b/tests/e2e/pg_data_corruption_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/pg_wal_volume_test.go b/tests/e2e/pg_wal_volume_test.go index 75c4d52a59..164ee134b8 100644 --- a/tests/e2e/pg_wal_volume_test.go +++ b/tests/e2e/pg_wal_volume_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/pgbouncer_metrics_test.go b/tests/e2e/pgbouncer_metrics_test.go index 6fbf4d4fab..bb400afced 100644 --- a/tests/e2e/pgbouncer_metrics_test.go +++ b/tests/e2e/pgbouncer_metrics_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/pgbouncer_test.go b/tests/e2e/pgbouncer_test.go index c8c21fbd69..6631e15124 100644 --- a/tests/e2e/pgbouncer_test.go +++ b/tests/e2e/pgbouncer_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/pgbouncer_types_test.go b/tests/e2e/pgbouncer_types_test.go index 5be71b0e8b..59bf929f79 100644 --- a/tests/e2e/pgbouncer_types_test.go +++ b/tests/e2e/pgbouncer_types_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/pod_patch_test.go b/tests/e2e/pod_patch_test.go index 3ad5a84eb9..f34a50bb38 100644 --- a/tests/e2e/pod_patch_test.go +++ b/tests/e2e/pod_patch_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/probes_test.go b/tests/e2e/probes_test.go index 8bf817c6cc..b43ed7b950 100644 --- a/tests/e2e/probes_test.go +++ b/tests/e2e/probes_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/publication_subscription_test.go b/tests/e2e/publication_subscription_test.go index 197b4b28ec..a3ea4166af 100644 --- a/tests/e2e/publication_subscription_test.go +++ b/tests/e2e/publication_subscription_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/pvc_deletion_test.go b/tests/e2e/pvc_deletion_test.go index 05c5747a26..8c6ac17099 100644 --- a/tests/e2e/pvc_deletion_test.go +++ b/tests/e2e/pvc_deletion_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index 6c4b942bf9..f2c2f06c3d 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/replication_slot_test.go b/tests/e2e/replication_slot_test.go index 73c4ba87d3..e858ab5378 100644 --- a/tests/e2e/replication_slot_test.go +++ b/tests/e2e/replication_slot_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go index aa4e6c9528..174591400f 100644 --- a/tests/e2e/rolling_update_test.go +++ b/tests/e2e/rolling_update_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/scaling_test.go b/tests/e2e/scaling_test.go index d47f86ed30..1dcab4cf7d 100644 --- a/tests/e2e/scaling_test.go +++ b/tests/e2e/scaling_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/storage_expansion_test.go b/tests/e2e/storage_expansion_test.go index 283a4383fd..8eb3d51a71 100644 --- a/tests/e2e/storage_expansion_test.go +++ b/tests/e2e/storage_expansion_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go index 46dbcf1491..6dd39ef207 100644 --- a/tests/e2e/suite_test.go +++ b/tests/e2e/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/switchover_test.go b/tests/e2e/switchover_test.go index dc773c16ed..5b6955173a 100644 --- a/tests/e2e/switchover_test.go +++ b/tests/e2e/switchover_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/syncreplicas_test.go b/tests/e2e/syncreplicas_test.go index 221d2c42e3..912240c543 100644 --- a/tests/e2e/syncreplicas_test.go +++ b/tests/e2e/syncreplicas_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 5a7929221b..0dfdf25237 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/tolerations_test.go b/tests/e2e/tolerations_test.go index 16f81c6d52..18f06215ae 100644 --- a/tests/e2e/tolerations_test.go +++ b/tests/e2e/tolerations_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/update_user_test.go b/tests/e2e/update_user_test.go index aec3dc1b7c..2b746bf90c 100644 --- a/tests/e2e/update_user_test.go +++ b/tests/e2e/update_user_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go index bebd63687a..7bd70acfa5 100644 --- a/tests/e2e/upgrade_test.go +++ b/tests/e2e/upgrade_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index dcc6ea808d..9c2488e1ba 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/wal_restore_parallel_test.go b/tests/e2e/wal_restore_parallel_test.go index 5314ec6b7f..b1dfde9045 100644 --- a/tests/e2e/wal_restore_parallel_test.go +++ b/tests/e2e/wal_restore_parallel_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/e2e/webhook_test.go b/tests/e2e/webhook_test.go index e0d496f0b2..0931112fe3 100644 --- a/tests/e2e/webhook_test.go +++ b/tests/e2e/webhook_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package e2e diff --git a/tests/labels.go b/tests/labels.go index ee37925343..50f7698db6 100644 --- a/tests/labels.go +++ b/tests/labels.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package tests contains the test infrastructure of the CloudNativePG operator diff --git a/tests/levels.go b/tests/levels.go index 9209f49fd2..724ff114d3 100644 --- a/tests/levels.go +++ b/tests/levels.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package tests diff --git a/tests/utils/backups/azurite.go b/tests/utils/backups/azurite.go index 47bf7e22ce..f846901006 100644 --- a/tests/utils/backups/azurite.go +++ b/tests/utils/backups/azurite.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package backups diff --git a/tests/utils/backups/backup.go b/tests/utils/backups/backup.go index 67c04f10ec..70c3904aae 100644 --- a/tests/utils/backups/backup.go +++ b/tests/utils/backups/backup.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package backups diff --git a/tests/utils/backups/doc.go b/tests/utils/backups/doc.go index 3064ea0449..2e04155498 100644 --- a/tests/utils/backups/doc.go +++ b/tests/utils/backups/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package backups provides backup utilities diff --git a/tests/utils/cloudvendors/cloud_vendor.go b/tests/utils/cloudvendors/cloud_vendor.go index 2fcc54ea1a..5619f228a8 100644 --- a/tests/utils/cloudvendors/cloud_vendor.go +++ b/tests/utils/cloudvendors/cloud_vendor.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package cloudvendors provides the variables to define on which cloud vendor the e2e test is running diff --git a/tests/utils/clusterutils/cluster.go b/tests/utils/clusterutils/cluster.go index b237a0a9c3..01ad4c421a 100644 --- a/tests/utils/clusterutils/cluster.go +++ b/tests/utils/clusterutils/cluster.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package clusterutils provides functions to handle cluster actions diff --git a/tests/utils/deployments/deployment.go b/tests/utils/deployments/deployment.go index 0f9c409136..89031c8ae3 100644 --- a/tests/utils/deployments/deployment.go +++ b/tests/utils/deployments/deployment.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package deployments contains functions to control deployments diff --git a/tests/utils/doc.go b/tests/utils/doc.go index 72e13d50e2..1ce7dbcf8a 100644 --- a/tests/utils/doc.go +++ b/tests/utils/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package utils contains helper functions/methods for e2e diff --git a/tests/utils/endpoints.go b/tests/utils/endpoints.go index 7699def778..2773ecfbfd 100644 --- a/tests/utils/endpoints.go +++ b/tests/utils/endpoints.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/tests/utils/environment/doc.go b/tests/utils/environment/doc.go index 5c0dbc857f..c0e5f380b2 100644 --- a/tests/utils/environment/doc.go +++ b/tests/utils/environment/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package environment contains functions to handle the TestingEnvironment struct diff --git a/tests/utils/environment/environment.go b/tests/utils/environment/environment.go index cbd9c4c971..f66a4944c7 100644 --- a/tests/utils/environment/environment.go +++ b/tests/utils/environment/environment.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package environment diff --git a/tests/utils/environment/environment_test.go b/tests/utils/environment/environment_test.go index 914e8386a9..ab4a341dc2 100644 --- a/tests/utils/environment/environment_test.go +++ b/tests/utils/environment/environment_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package environment diff --git a/tests/utils/environment/suite_test.go b/tests/utils/environment/suite_test.go index 61c876f728..aea6bf454d 100644 --- a/tests/utils/environment/suite_test.go +++ b/tests/utils/environment/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package environment diff --git a/tests/utils/envsubst/doc.go b/tests/utils/envsubst/doc.go index a7d2676b31..c1bc024bdc 100644 --- a/tests/utils/envsubst/doc.go +++ b/tests/utils/envsubst/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package envsubst manage the replacemes of env variables in a file diff --git a/tests/utils/envsubst/envsubst.go b/tests/utils/envsubst/envsubst.go index c4290b38b5..a9233ac824 100644 --- a/tests/utils/envsubst/envsubst.go +++ b/tests/utils/envsubst/envsubst.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package envsubst diff --git a/tests/utils/envsubst/envsubst_test.go b/tests/utils/envsubst/envsubst_test.go index da5df755e0..6b2cdc4a5b 100644 --- a/tests/utils/envsubst/envsubst_test.go +++ b/tests/utils/envsubst/envsubst_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package envsubst diff --git a/tests/utils/envsubst/suite_test.go b/tests/utils/envsubst/suite_test.go index 4ac5c2b4c5..9a0bcee5b0 100644 --- a/tests/utils/envsubst/suite_test.go +++ b/tests/utils/envsubst/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package envsubst diff --git a/tests/utils/exec/exec.go b/tests/utils/exec/exec.go index 7196a273da..789814738d 100644 --- a/tests/utils/exec/exec.go +++ b/tests/utils/exec/exec.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package exec provides functions to execute commands inside pods or from local diff --git a/tests/utils/fencing/fencing.go b/tests/utils/fencing/fencing.go index f48ffaf420..edbdf34286 100644 --- a/tests/utils/fencing/fencing.go +++ b/tests/utils/fencing/fencing.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package fencing provides functions to manage the fencing on cnpg clusters diff --git a/tests/utils/forwardconnection/doc.go b/tests/utils/forwardconnection/doc.go index 0e2f7af2bf..5292443626 100644 --- a/tests/utils/forwardconnection/doc.go +++ b/tests/utils/forwardconnection/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package forwardconnection provides an easy interface to create diff --git a/tests/utils/forwardconnection/forwardconnection.go b/tests/utils/forwardconnection/forwardconnection.go index 9b70f7c52b..8ac8441941 100644 --- a/tests/utils/forwardconnection/forwardconnection.go +++ b/tests/utils/forwardconnection/forwardconnection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package forwardconnection diff --git a/tests/utils/importdb/import_db.go b/tests/utils/importdb/import_db.go index 1316a76fab..87454d84e1 100644 --- a/tests/utils/importdb/import_db.go +++ b/tests/utils/importdb/import_db.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package importdb contains the functions to import a database diff --git a/tests/utils/logs/doc.go b/tests/utils/logs/doc.go index 4af5e3c745..9e3a7506ee 100644 --- a/tests/utils/logs/doc.go +++ b/tests/utils/logs/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package logs provides a way to parse and get the logs of a pod diff --git a/tests/utils/logs/logs.go b/tests/utils/logs/logs.go index f344713426..f37142ccf9 100644 --- a/tests/utils/logs/logs.go +++ b/tests/utils/logs/logs.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/tests/utils/logs/logs_test.go b/tests/utils/logs/logs_test.go index 9f951bc06e..460b3b082d 100644 --- a/tests/utils/logs/logs_test.go +++ b/tests/utils/logs/logs_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/tests/utils/logs/suite_test.go b/tests/utils/logs/suite_test.go index 329766c9e7..e7365f2638 100644 --- a/tests/utils/logs/suite_test.go +++ b/tests/utils/logs/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package logs diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go index 27befd0a26..2e1189f0ef 100644 --- a/tests/utils/minio/minio.go +++ b/tests/utils/minio/minio.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package minio contains all the require functions to setup a MinIO deployment and diff --git a/tests/utils/namespaces/namespace.go b/tests/utils/namespaces/namespace.go index 70717fe816..8f4a1a4114 100644 --- a/tests/utils/namespaces/namespace.go +++ b/tests/utils/namespaces/namespace.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package namespaces provides utilities to manage namespaces diff --git a/tests/utils/nodes/nodes.go b/tests/utils/nodes/nodes.go index 95d4bd3122..faabde4c13 100644 --- a/tests/utils/nodes/nodes.go +++ b/tests/utils/nodes/nodes.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package nodes contains the helper methods/functions for nodes diff --git a/tests/utils/objects/objects.go b/tests/utils/objects/objects.go index af956106be..3f08903fd5 100644 --- a/tests/utils/objects/objects.go +++ b/tests/utils/objects/objects.go @@ -1,17 +1,20 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package objects provides functions to manage pure objects in Kubernetes diff --git a/tests/utils/openshift/openshift.go b/tests/utils/openshift/openshift.go index 2901962e83..be3d35a150 100644 --- a/tests/utils/openshift/openshift.go +++ b/tests/utils/openshift/openshift.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package openshift provides functions to work with OLM CRDs diff --git a/tests/utils/operator/doc.go b/tests/utils/operator/doc.go index a4e7050ee6..7680695d8c 100644 --- a/tests/utils/operator/doc.go +++ b/tests/utils/operator/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package operator provides functions to handle and manage the operator diff --git a/tests/utils/operator/operator.go b/tests/utils/operator/operator.go index ca705e9e41..f5f333eb26 100644 --- a/tests/utils/operator/operator.go +++ b/tests/utils/operator/operator.go @@ -1,17 +1,20 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package operator diff --git a/tests/utils/operator/release.go b/tests/utils/operator/release.go index af372f0ffb..10c9a4cb86 100644 --- a/tests/utils/operator/release.go +++ b/tests/utils/operator/release.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package operator diff --git a/tests/utils/operator/release_test.go b/tests/utils/operator/release_test.go index 611141572e..090a91c2eb 100644 --- a/tests/utils/operator/release_test.go +++ b/tests/utils/operator/release_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package operator diff --git a/tests/utils/operator/suite_test.go b/tests/utils/operator/suite_test.go index b49f44d833..b2d40da5d8 100644 --- a/tests/utils/operator/suite_test.go +++ b/tests/utils/operator/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package operator diff --git a/tests/utils/operator/upgrade.go b/tests/utils/operator/upgrade.go index 1959c2d557..798a7dc315 100644 --- a/tests/utils/operator/upgrade.go +++ b/tests/utils/operator/upgrade.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package operator provide functions to handle operator install/uninstall process diff --git a/tests/utils/operator/webhooks.go b/tests/utils/operator/webhooks.go index b4d94462a5..351606c58c 100644 --- a/tests/utils/operator/webhooks.go +++ b/tests/utils/operator/webhooks.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package operator diff --git a/tests/utils/pods/pod.go b/tests/utils/pods/pod.go index 1db187e89d..1021655e54 100644 --- a/tests/utils/pods/pod.go +++ b/tests/utils/pods/pod.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package pods provides pod utilities to manage pods inside K8s diff --git a/tests/utils/postgres/doc.go b/tests/utils/postgres/doc.go index f394238a09..622dd658f3 100644 --- a/tests/utils/postgres/doc.go +++ b/tests/utils/postgres/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package postgres provides functions to handle postgres in cnpg clusters diff --git a/tests/utils/postgres/postgres.go b/tests/utils/postgres/postgres.go index e06de1e5b6..54f0327bf6 100644 --- a/tests/utils/postgres/postgres.go +++ b/tests/utils/postgres/postgres.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/tests/utils/postgres/postgres_test.go b/tests/utils/postgres/postgres_test.go index bc449cd4e5..db6f72b272 100644 --- a/tests/utils/postgres/postgres_test.go +++ b/tests/utils/postgres/postgres_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/tests/utils/postgres/psql_connection.go b/tests/utils/postgres/psql_connection.go index 051a9e234e..ecd7980899 100644 --- a/tests/utils/postgres/psql_connection.go +++ b/tests/utils/postgres/psql_connection.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/tests/utils/postgres/suite_test.go b/tests/utils/postgres/suite_test.go index 70d4a52fcb..703df68d4c 100644 --- a/tests/utils/postgres/suite_test.go +++ b/tests/utils/postgres/suite_test.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package postgres diff --git a/tests/utils/proxy/proxy.go b/tests/utils/proxy/proxy.go index f4e0aded9f..0a303d2e61 100644 --- a/tests/utils/proxy/proxy.go +++ b/tests/utils/proxy/proxy.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package proxy provides functions to use the proxy subresource to call a pod diff --git a/tests/utils/replicationslot/replication_slots.go b/tests/utils/replicationslot/replication_slots.go index f1913ffdb1..ad78f4a04c 100644 --- a/tests/utils/replicationslot/replication_slots.go +++ b/tests/utils/replicationslot/replication_slots.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package replicationslot provides functions to manage the replication slot of a diff --git a/tests/utils/run/run.go b/tests/utils/run/run.go index d1baa7d80e..dd57c64498 100644 --- a/tests/utils/run/run.go +++ b/tests/utils/run/run.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package run contains functions to execute commands locally diff --git a/tests/utils/secrets/secrets.go b/tests/utils/secrets/secrets.go index 856e0d1ff2..7cda99e028 100644 --- a/tests/utils/secrets/secrets.go +++ b/tests/utils/secrets/secrets.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package secrets provides functions to manage and handle secrets diff --git a/tests/utils/services/service.go b/tests/utils/services/service.go index 32acea4382..b7bfda0f2c 100644 --- a/tests/utils/services/service.go +++ b/tests/utils/services/service.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package services provides functions tomanage services inside K8s diff --git a/tests/utils/sternmultitailer/doc.go b/tests/utils/sternmultitailer/doc.go index fa5b323e49..854aac8bb9 100644 --- a/tests/utils/sternmultitailer/doc.go +++ b/tests/utils/sternmultitailer/doc.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ // Package sternmultitailer handle the logs of every pod in the tests diff --git a/tests/utils/sternmultitailer/multitailer.go b/tests/utils/sternmultitailer/multitailer.go index 9218ae1949..b6e06baa0f 100644 --- a/tests/utils/sternmultitailer/multitailer.go +++ b/tests/utils/sternmultitailer/multitailer.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ package sternmultitailer diff --git a/tests/utils/storage/storage.go b/tests/utils/storage/storage.go index 3bc2eab59d..afb47c8e28 100644 --- a/tests/utils/storage/storage.go +++ b/tests/utils/storage/storage.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package storage provides functions to manage enything related to storage diff --git a/tests/utils/timeouts/timeouts.go b/tests/utils/timeouts/timeouts.go index 860a5e9df0..6eb1c2dc36 100644 --- a/tests/utils/timeouts/timeouts.go +++ b/tests/utils/timeouts/timeouts.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package timeouts contains the timeouts for the E2E test suite diff --git a/tests/utils/utils.go b/tests/utils/utils.go index 58a81c8894..18ac87ada7 100644 --- a/tests/utils/utils.go +++ b/tests/utils/utils.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 */ package utils diff --git a/tests/utils/yaml/yaml.go b/tests/utils/yaml/yaml.go index c04f844978..f5601a6322 100644 --- a/tests/utils/yaml/yaml.go +++ b/tests/utils/yaml/yaml.go @@ -1,5 +1,6 @@ /* -Copyright The CloudNativePG Contributors +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,6 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +SPDX-License-Identifier: Apache-2.0 */ // Package yaml provides functions to handle yaml files From ebc084fc03b7ff627129d319f39c170a115f892c Mon Sep 17 00:00:00 2001 From: Marcus Dunn <51931484+MarcusDunn@users.noreply.github.com> Date: Wed, 26 Mar 2025 10:46:20 -0700 Subject: [PATCH 480/836] feat: allow customization of taints for node drain detection (#6928) Introduce the `DRAIN_TAINTS` operator configuration option, enabling users to specify which taint keys should be recognized as indicators of node drain. By default, this includes commonly used taints from `kubectl`, Cluster Autoscaler, and Karpenter: - `node.kubernetes.io/unschedulable` - `ToBeDeletedByClusterAutoscaler` - `karpenter.sh/disrupted` - `karpenter.sh/disruption` This enhancement provides greater flexibility in handling node drain scenarios, allowing adaptation to different cluster configurations and scheduling strategies. Fixes: #5299 Signed-off-by: Marcus Dunn Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Leonardo Cecchi Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- docs/src/operator_conf.md | 1 + internal/cmd/manager/controller/controller.go | 1 + internal/configuration/configuration.go | 25 +++ internal/controller/cluster_controller.go | 9 +- .../controller/cluster_controller_test.go | 41 +++++ internal/controller/cluster_predicates.go | 38 ++++- .../controller/cluster_predicates_test.go | 149 ++++++++++++++++++ internal/controller/replicas.go | 27 +++- internal/webhook/v1/cluster_webhook.go | 2 +- tests/e2e/drain_node_test.go | 74 +++++++++ ...cluster-drain-node-karpenter.yaml.template | 33 ++++ 11 files changed, 389 insertions(+), 11 deletions(-) create mode 100644 internal/controller/cluster_predicates_test.go create mode 100644 tests/e2e/fixtures/drain-node/cluster-drain-node-karpenter.yaml.template diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index 7686a87a8a..b7df8ff003 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -53,6 +53,7 @@ Name | Description `POSTGRES_IMAGE_NAME` | The name of the PostgreSQL image used by default for new clusters. Defaults to the version specified in the operator. `PULL_SECRET_NAME` | Name of an additional pull secret to be defined in the operator's namespace and to be used to download images `STANDBY_TCP_USER_TIMEOUT` | Defines the [`TCP_USER_TIMEOUT` socket option](https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-TCP-USER-TIMEOUT) for replication connections from standby instances to the primary. Default is 0 (system's default). +`DRAIN_TAINTS` | Specifies the taint keys that should be interpreted as indicators of node drain. 
By default, it includes the taints commonly applied by [kubectl](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/), [Cluster Autoscaler](https://github.com/kubernetes/autoscaler), and [Karpenter](https://github.com/aws/karpenter-provider-aws): `node.kubernetes.io/unschedulable`, `ToBeDeletedByClusterAutoscaler`, `karpenter.sh/disrupted`, `karpenter.sh/disruption`.
 
 Values in `INHERITED_ANNOTATIONS` and `INHERITED_LABELS` support path-like wildcards. For example, the value `example.com/*` will match
 both the value `example.com/one` and `example.com/two`.
diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go
index 2c337d8a1d..4da98cffdd 100644
--- a/internal/cmd/manager/controller/controller.go
+++ b/internal/cmd/manager/controller/controller.go
@@ -226,6 +226,7 @@ func RunController(
 		mgr,
 		discoveryClient,
 		pluginRepository,
+		conf.DrainTaints,
 	).SetupWithManager(ctx, mgr, maxConcurrentReconciles); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "Cluster")
 		return err
diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go
index b9ffb53847..28192b57df 100644
--- a/internal/configuration/configuration.go
+++ b/internal/configuration/configuration.go
@@ -49,6 +49,27 @@ const (
 	DefaultKubernetesClusterDomain = "cluster.local"
 )
 
+// DefaultDrainTaints is the default list of taints the operator will watch and treat
+// as unschedulable
+var DefaultDrainTaints = []string{
+	// Kubernetes well-known unschedulable taint
+	// See: https://kubernetes.io/docs/reference/labels-annotations-taints/#node-kubernetes-io-unschedulable
+	"node.kubernetes.io/unschedulable",
+
+	// Used by the Kubernetes Cluster Autoscaler
+	// nolint: lll
+	// See: https://github.com/kubernetes/autoscaler/blob/aa1d413ea3bf319b56c7b2e65ade1a028e149439/cluster-autoscaler/cloudprovider/oci/nodepools/consts/annotations.go#L27
+	"ToBeDeletedByClusterAutoscaler",
+
+	// Used by Karpenter termination controller
+	// See: https://karpenter.sh/docs/concepts/disruption/#termination-controller
+	"karpenter.sh/disrupted",
+
+	// Used by Karpenter disruption controller
+	// See: https://karpenter.sh/v0.32/concepts/disruption/#disruption-controller
+	"karpenter.sh/disruption",
+}
+
 // DefaultPluginSocketDir is the default directory where the plugin sockets are located.
 const DefaultPluginSocketDir = "/plugins"
 
@@ -144,6 +165,9 @@ type Data struct {
 	// KubernetesClusterDomain defines the domain suffix for service FQDNs
 	// within the Kubernetes cluster. If left unset, it defaults to `cluster.local`.
 	KubernetesClusterDomain string `json:"kubernetesClusterDomain" env:"KUBERNETES_CLUSTER_DOMAIN"`
+
+	// DrainTaints is a list of taints the operator will watch and treat as unschedulable
+	DrainTaints []string `json:"drainTaints" env:"DRAIN_TAINTS"`
 }
 
 // Current is the configuration used by the operator
@@ -161,6 +185,7 @@ func newDefaultConfig() *Data {
 		ExpiringCheckThreshold:   ExpiringCheckThreshold,
 		StandbyTCPUserTimeout:    0,
 		KubernetesClusterDomain:  DefaultKubernetesClusterDomain,
+		DrainTaints:              DefaultDrainTaints,
 	}
 }
 
diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go
index 94683926fb..c5613d33ad 100644
--- a/internal/controller/cluster_controller.go
+++ b/internal/controller/cluster_controller.go
@@ -89,6 +89,7 @@ type ClusterReconciler struct {
 	InstanceClient remote.InstanceClient
 	Plugins        repository.Interface
 
+	drainTaints    []string
 	rolloutManager *rolloutManager.Manager
 }
 
@@ -97,6 +98,7 @@ func NewClusterReconciler(
 	mgr manager.Manager,
 	discoveryClient *discovery.DiscoveryClient,
 	plugins repository.Interface,
+	drainTaints []string,
 ) *ClusterReconciler {
 	return &ClusterReconciler{
 		InstanceClient: remote.NewClient().Instance(),
@@ -109,6 +111,7 @@ func NewClusterReconciler(
 			configuration.Current.GetClustersRolloutDelay(),
 			configuration.Current.GetInstancesRolloutDelay(),
 		),
+		drainTaints: drainTaints,
 	}
 }
 
@@ -1081,7 +1084,7 @@ func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag
 		Watches(
 			&corev1.Node{},
 			handler.EnqueueRequestsFromMapFunc(r.mapNodeToClusters()),
-			builder.WithPredicates(nodesPredicate),
+			builder.WithPredicates(r.nodesPredicate()),
 		).
 		Watches(
 			&apiv1.ImageCatalog{},
@@ -1369,11 +1372,13 @@ func filterClustersUsingConfigMap(
 func (r *ClusterReconciler) mapNodeToClusters() handler.MapFunc {
 	return func(ctx context.Context, obj client.Object) []reconcile.Request {
 		node := obj.(*corev1.Node)
+
 		// exit if the node is schedulable (e.g. not cordoned)
 		// could be expanded here with other conditions (e.g. 
pressure or issues) - if !node.Spec.Unschedulable { + if !isNodeUnschedulableOrBeingDrained(node, r.drainTaints) { return nil } + var childPods corev1.PodList // get all the pods handled by the operator on that node err := r.List(ctx, &childPods, diff --git a/internal/controller/cluster_controller_test.go b/internal/controller/cluster_controller_test.go index 4f51710266..ec4c0214c1 100644 --- a/internal/controller/cluster_controller_test.go +++ b/internal/controller/cluster_controller_test.go @@ -29,6 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" @@ -273,3 +274,43 @@ var _ = Describe("Updating target primary", func() { }) }) }) + +var _ = Describe("isNodeUnschedulableOrBeingDrained", func() { + node := &corev1.Node{} + nodeUnschedulable := &corev1.Node{ + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + } + nodeTainted := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "karpenter.sh/disrupted", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + } + nodeWithUnknownTaint := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "unknown.io/taint", + Effect: corev1.TaintEffectPreferNoSchedule, + }, + }, + }, + } + + DescribeTable( + "it detects nodes that are unschedulable or being drained", + func(node *corev1.Node, expected bool) { + Expect(isNodeUnschedulableOrBeingDrained(node, configuration.DefaultDrainTaints)).To(Equal(expected)) + }, + Entry("plain node", node, false), + Entry("node is unschedulable", nodeUnschedulable, true), + Entry("node is tainted", nodeTainted, true), + Entry("node has an unknown taint", nodeWithUnknownTaint, false), + ) +}) diff --git a/internal/controller/cluster_predicates.go b/internal/controller/cluster_predicates.go index 42490a2a06..8647337f43 100644 --- a/internal/controller/cluster_predicates.go +++ b/internal/controller/cluster_predicates.go @@ -20,6 +20,8 @@ SPDX-License-Identifier: Apache-2.0 package controller import ( + "slices" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" @@ -72,12 +74,42 @@ var ( return isUsefulClusterSecret(e.ObjectNew) }, } +) - nodesPredicate = predicate.Funcs{ +func (r *ClusterReconciler) nodesPredicate() predicate.Funcs { + return predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { oldNode, oldOk := e.ObjectOld.(*corev1.Node) newNode, newOk := e.ObjectNew.(*corev1.Node) - return oldOk && newOk && oldNode.Spec.Unschedulable != newNode.Spec.Unschedulable + if !oldOk || !newOk { + return false + } + + if oldNode.Spec.Unschedulable != newNode.Spec.Unschedulable { + return true + } + + // check if any of the watched drain taints have changed. 
+ for _, taint := range r.drainTaints { + oldTaintIndex := slices.IndexFunc(oldNode.Spec.Taints, func(t corev1.Taint) bool { return t.Key == taint }) + newTaintIndex := slices.IndexFunc(newNode.Spec.Taints, func(t corev1.Taint) bool { return t.Key == taint }) + + switch { + case oldTaintIndex == -1 && newTaintIndex == -1: + continue + case oldTaintIndex == -1 || newTaintIndex == -1: + return true + } + + // exists in both - check if value or effect is different + oldTaint := oldNode.Spec.Taints[oldTaintIndex] + newTaint := newNode.Spec.Taints[newTaintIndex] + if oldTaint.Value != newTaint.Value || oldTaint.Effect != newTaint.Effect { + return true + } + } + + return false }, CreateFunc: func(_ event.CreateEvent) bool { return false @@ -89,7 +121,7 @@ var ( return false }, } -) +} func isOwnedByClusterOrSatisfiesPredicate( object client.Object, diff --git a/internal/controller/cluster_predicates_test.go b/internal/controller/cluster_predicates_test.go new file mode 100644 index 0000000000..6eb4553696 --- /dev/null +++ b/internal/controller/cluster_predicates_test.go @@ -0,0 +1,149 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("nodesPredicate", func() { + fakeReconciler := &ClusterReconciler{ + drainTaints: configuration.DefaultDrainTaints, + } + nodesPredicateFunctions := fakeReconciler.nodesPredicate() + + pod := &corev1.Pod{} + nodeWithNoTaints := &corev1.Node{} + unschedulableNode := &corev1.Node{ + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + } + nodeWithKarpenterNoSchedulableTaint := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "karpenter.sh/disrupted", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + } + nodeWithKarpenterNoExecuteTaint := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "karpenter.sh/disrupted", + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + } + nodeWithAutoscalerTaint := &corev1.Node{ + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + { + Key: "ToBeDeletedByClusterAutoscaler", + }, + }, + }, + } + + DescribeTable( + "always skips node creation", + func(node client.Object, expectedResult bool) { + createEvent := event.CreateEvent{ + Object: node, + } + + result := nodesPredicateFunctions.Create(createEvent) + Expect(result).To(Equal(expectedResult)) + }, + Entry("with a node", nodeWithNoTaints, false), + Entry("with a pod", pod, false), + ) + + DescribeTable( + "always skips node delete", + func(node client.Object, expectedResult bool) { + deleteEvent := event.DeleteEvent{ + Object: node, + } + + result := nodesPredicateFunctions.Delete(deleteEvent) + Expect(result).To(Equal(expectedResult)) + }, + Entry("with a node", nodeWithNoTaints, false), + Entry("with a pod", pod, false), + ) + + DescribeTable( + "always skips generic events", + func(node client.Object, expectedResult bool) { + genericEvent := event.GenericEvent{ + Object: node, + } + + result := nodesPredicateFunctions.Generic(genericEvent) + Expect(result).To(Equal(expectedResult)) + }, + Entry("with a node", nodeWithNoTaints, false), + Entry("with a pod", pod, false), + ) + + DescribeTable( + "node updates", + func(objectOld, objectNew client.Object, expectedResult bool) { + updateEventOldToNew := event.UpdateEvent{ + ObjectOld: objectOld, + ObjectNew: objectNew, + } + updateEventNewToOld := event.UpdateEvent{ + ObjectOld: objectOld, + ObjectNew: objectNew, + } + + result := nodesPredicateFunctions.Update(updateEventOldToNew) + Expect(result).To(Equal(expectedResult)) + + result = nodesPredicateFunctions.Update(updateEventNewToOld) + Expect(result).To(Equal(expectedResult)) + }, + Entry("with the same node", + nodeWithNoTaints, nodeWithNoTaints, false), + Entry("with the same tainted node", + nodeWithKarpenterNoSchedulableTaint, nodeWithKarpenterNoSchedulableTaint, false), + Entry("when a node becomes unschedulable", + nodeWithNoTaints, unschedulableNode, true), + Entry("when a node gets the karpenter disruption taint", + nodeWithNoTaints, nodeWithKarpenterNoSchedulableTaint, true), + Entry("when a node gets the karpenter disruption taint value changed", + nodeWithKarpenterNoSchedulableTaint, nodeWithKarpenterNoExecuteTaint, true), + Entry("when a node taints changed", + nodeWithKarpenterNoSchedulableTaint, nodeWithAutoscalerTaint, true), + ) +}) diff --git a/internal/controller/replicas.go b/internal/controller/replicas.go index a4f02db0d8..5ab1ab6a08 100644 --- a/internal/controller/replicas.go +++ b/internal/controller/replicas.go @@ -67,7 +67,7 @@ func (r *ClusterReconciler) reconcileTargetPrimaryFromPods( if primary := status.Items[0]; (primary.IsPrimary || (cluster.IsReplica() && 
primary.IsPodReady)) && primary.Pod.Name == cluster.Status.CurrentPrimary && cluster.Status.TargetPrimary == cluster.Status.CurrentPrimary { - isPrimaryOnUnschedulableNode, err := r.isNodeUnschedulable(ctx, primary.Node) + isPrimaryOnUnschedulableNode, err := r.isNodeUnschedulableOrBeingDrained(ctx, primary.Node) if err != nil { contextLogger.Error(err, "while checking if current primary is on an unschedulable node") // in case of error it's better to proceed with the normal target primary reconciliation @@ -169,14 +169,31 @@ func (r *ClusterReconciler) reconcileTargetPrimaryForNonReplicaCluster( return mostAdvancedInstance.Pod.Name, r.setPrimaryInstance(ctx, cluster, mostAdvancedInstance.Pod.Name) } -// isNodeUnschedulable checks whether a node is set to unschedulable -func (r *ClusterReconciler) isNodeUnschedulable(ctx context.Context, nodeName string) (bool, error) { +// isNodeUnschedulableOrBeingDrained checks if a node is currently being drained. +// nolint: lll +// Copied from https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/7bacf2d36f397bd098b3388403e8759c480be7e5/cmd/hooks/prestop.go#L91 +func isNodeUnschedulableOrBeingDrained(node *corev1.Node, drainTaints []string) bool { + for _, taint := range node.Spec.Taints { + if slices.Contains(drainTaints, taint.Key) { + return true + } + } + + return node.Spec.Unschedulable +} + +// isNodeUnschedulableOrBeingDrained checks whether a node is set to unschedulable +func (r *ClusterReconciler) isNodeUnschedulableOrBeingDrained( + ctx context.Context, + nodeName string, +) (bool, error) { var node corev1.Node err := r.Get(ctx, client.ObjectKey{Name: nodeName}, &node) if err != nil { return false, err } - return node.Spec.Unschedulable, nil + + return isNodeUnschedulableOrBeingDrained(&node, r.drainTaints), nil } // Pick the next primary on a schedulable node, if the current is running on an unschedulable one, @@ -220,7 +237,7 @@ func (r *ClusterReconciler) setPrimaryOnSchedulableNode( // Start looking for the next primary among the pods for _, candidate := range podsOnOtherNodes.Items { // If candidate on an unschedulable node too, skip it - if unschedulable, _ := r.isNodeUnschedulable(ctx, candidate.Node); unschedulable { + if status, _ := r.isNodeUnschedulableOrBeingDrained(ctx, candidate.Node); status { continue } diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 6ff165401e..2e7f79aad7 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -1969,7 +1969,7 @@ func (v *ClusterCustomValidator) validateTolerations(r *apiv1.Cluster) field.Err return allErrors } -// validateTaintEffect is used from validateTollerations and is a verbatim copy of the code +// validateTaintEffect is used from validateToleration and is a verbatim copy of the code // at https://github.com/kubernetes/kubernetes/blob/4d38d21/pkg/apis/core/validation/validation.go#L3087 func validateTaintEffect(effect *corev1.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { if !allowEmpty && len(*effect) == 0 { diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go index cc79f49c0e..4cd2ff25a2 100644 --- a/tests/e2e/drain_node_test.go +++ b/tests/e2e/drain_node_test.go @@ -84,6 +84,80 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La nodesWithLabels = nil }) + Context("Default maintenance and pvc", func() { + const sampleFile = fixturesDir + "/drain-node/cluster-drain-node-karpenter.yaml.template" + const 
clusterName = "cluster-drain-node-karpenter" + + It("will remove the pod from a node tainted by karpenter", func() { + const namespacePrefix = "drain-node-e2e-karpeter-initiated" + + var namespace string + + By("creating the namespace and the cluster", func() { + var err error + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, sampleFile, env) + }) + + By("waiting for the jobs to be removed", func() { + timeout := 180 + Eventually(func() (int, error) { + podList, err := pods.List(env.Ctx, env.Client, namespace) + if err != nil { + return 0, err + } + return len(podList.Items), err + }, timeout).Should(BeEquivalentTo(3)) + }) + + tableLocator := TableLocator{ + Namespace: namespace, + ClusterName: clusterName, + DatabaseName: postgres.AppDBName, + TableName: "test", + } + + By("loading test data", func() { + AssertCreateTestData(env, tableLocator) + }) + + oldPrimary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + By("adding a taint from karpenter to the node containing the primary", func() { + cmd := fmt.Sprintf("kubectl taint nodes %v karpenter.sh/disruption:NoSchedule", oldPrimary.Spec.NodeName) + _, _, err := run.Run(cmd) + Expect(err).ToNot(HaveOccurred()) + }) + + By("verifying failover after drain", func() { + timeout := 180 + Eventually(func() (string, error) { + pod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + if err != nil { + return "", err + } + return pod.Name, err + }, timeout).ShouldNot(BeEquivalentTo(oldPrimary.Name)) + }) + + By("removing karpenter taint from node", func() { + cmd := fmt.Sprintf( + "kubectl taint nodes %v karpenter.sh/disruption=NoSchedule:NoSchedule-", + oldPrimary.Spec.NodeName, + ) + _, _, err := run.Run(cmd) + Expect(err).ToNot(HaveOccurred()) + }) + + By("data is present and standbys are streaming", func() { + AssertDataExpectedCount(env, tableLocator, 2) + AssertClusterStandbysAreStreaming(namespace, clusterName, 140) + }) + }) + }) + Context("Maintenance on, reuse pvc on", func() { // Initialize empty global namespace variable var namespace string diff --git a/tests/e2e/fixtures/drain-node/cluster-drain-node-karpenter.yaml.template b/tests/e2e/fixtures/drain-node/cluster-drain-node-karpenter.yaml.template new file mode 100644 index 0000000000..e268530882 --- /dev/null +++ b/tests/e2e/fixtures/drain-node/cluster-drain-node-karpenter.yaml.template @@ -0,0 +1,33 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-drain-node-karpenter +spec: + instances: 3 + + affinity: + nodeSelector: + drain: 'drain' + + postgresql: + parameters: + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + wal_receiver_timeout: '2s' + + bootstrap: + initdb: + database: app + owner: appuser + + storage: + size: 1Gi + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi From 46597ecc77e216a9bf52ded75ef6621101fd2ec8 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 26 Mar 2025 19:12:14 +0100 Subject: [PATCH 481/836] docs: fix spellcheck error (#7234) The issue has been introduced by merging #6928 Signed-off-by: Marco Nenciarini --- .wordlist-en-custom.txt | 1 + 1 file changed, 1 insertion(+) diff --git 
a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index eb470824da..40b69b523c 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -210,6 +210,7 @@ Istio's JSON Jihyuk Jitendra +Karpenter KinD Krew KubeCon From 19e0a641908bdde605bea8c8ba32d992e3bb6e84 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Wed, 26 Mar 2025 19:29:15 +0100 Subject: [PATCH 482/836] test: update csi-driver-host-path to v1.16.1 (#7228) Change the image and the URL of the YAML files. Closes #6886 Signed-off-by: Francesco Canovai --- hack/setup-cluster.sh | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index af435fcbc4..93b6cde34d 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -28,7 +28,7 @@ fi # Defaults KIND_NODE_DEFAULT_VERSION=v1.32.3 -CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 +CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.16.1 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 EXTERNAL_PROVISIONER_VERSION=v5.2.0 EXTERNAL_RESIZER_VERSION=v1.13.1 @@ -300,11 +300,18 @@ deploy_csi_host_path() { kubectl apply -f "${CSI_BASE_URL}"/external-resizer/"${EXTERNAL_RESIZER_VERSION}"/deploy/kubernetes/rbac.yaml ## Install driver and plugin - kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.27/hostpath/csi-hostpath-driverinfo.yaml - kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.27/hostpath/csi-hostpath-plugin.yaml + ## Create a temporary file for the modified plugin deployment. This is needed + ## because csi-driver-host-path plugin yaml tends to lag behind a few versions. + plugin_file="${TEMP_DIR}/csi-hostpath-plugin.yaml" + curl -sSL "${CSI_BASE_URL}/csi-driver-host-path/${CSI_DRIVER_HOST_PATH_VERSION}/deploy/kubernetes-1.30/hostpath/csi-hostpath-plugin.yaml" | + sed "s|registry.k8s.io/sig-storage/hostpathplugin:.*|registry.k8s.io/sig-storage/hostpathplugin:${CSI_DRIVER_HOST_PATH_VERSION}|g" > "${plugin_file}" + + kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.30/hostpath/csi-hostpath-driverinfo.yaml + kubectl apply -f "${plugin_file}" + rm "${plugin_file}" ## create volumesnapshotclass - kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.27/hostpath/csi-hostpath-snapshotclass.yaml + kubectl apply -f "${CSI_BASE_URL}"/csi-driver-host-path/"${CSI_DRIVER_HOST_PATH_VERSION}"/deploy/kubernetes-1.30/hostpath/csi-hostpath-snapshotclass.yaml ## Prevent VolumeSnapshot E2e test to fail when taking a ## snapshot of a running PostgreSQL instance From a2373e5ebd1bae52ad687a976daaee8811554da3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Wed, 26 Mar 2025 21:00:10 +0100 Subject: [PATCH 483/836] test: cleanup leftover logs from db management E2Es (#7217) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #6411 Usually, at the end of a green test, `cleanupNamespace()` will take care of deleting the testing logs captured and issuing an `objects.Delete` request (we do not actually wait for the namespace to be gone, though). This Database management E2E is an exception to the usual way we create/destroy testing namespaces. Here we want to test that Database object finalizers do not prevent the deletion of a namespace, hence we need to ensure the namespace is properly gone before declaring that the test succeeded. 
After doing that, we now clean up the testing logs. Signed-off-by: Niccolò Fei --- .../declarative_database_management_test.go | 3 +++ tests/utils/namespaces/namespace.go | 21 +++++++++++++------ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index 34c12d5e43..d896b7986f 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -226,6 +226,9 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test By("deleting the namespace and making sure it succeeds before timeout", func() { err := namespaces.DeleteNamespaceAndWait(env.Ctx, env.Client, namespace, 120) Expect(err).ToNot(HaveOccurred()) + // we need to cleanup testing logs adhoc since we are not using a testingNamespace for this test + err = namespaces.CleanupClusterLogs(namespace, CurrentSpecReport().Failed()) + Expect(err).ToNot(HaveOccurred()) }) }) }) diff --git a/tests/utils/namespaces/namespace.go b/tests/utils/namespaces/namespace.go index 8f4a1a4114..b2d33756b3 100644 --- a/tests/utils/namespaces/namespace.go +++ b/tests/utils/namespaces/namespace.go @@ -64,6 +64,18 @@ func getPreserveNamespaces() []string { return preserveNamespacesList } +// CleanupClusterLogs cleans up the cluster logs of a given namespace +func CleanupClusterLogs(namespace string, testFailed bool) error { + exists, _ := fileutils.FileExists(path.Join(SternLogDirectory, namespace)) + if exists && !testFailed { + if err := fileutils.RemoveDirectory(path.Join(SternLogDirectory, namespace)); err != nil { + return err + } + } + + return nil +} + // cleanupNamespace does cleanup duty related to the tear-down of a namespace, // and is intended to be called in a DeferCleanup clause func cleanupNamespace( @@ -79,12 +91,9 @@ func cleanupNamespace( if len(namespace) == 0 { return fmt.Errorf("namespace is empty") } - exists, _ := fileutils.FileExists(path.Join(SternLogDirectory, namespace)) - if exists && !testFailed { - err := fileutils.RemoveDirectory(path.Join(SternLogDirectory, namespace)) - if err != nil { - return err - } + + if err := CleanupClusterLogs(namespace, testFailed); err != nil { + return err } return deleteNamespace(ctx, crudClient, namespace) From c037ca30e558b45d7b9a7b9fe61589ec2f26a1ad Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 26 Mar 2025 23:15:06 +0100 Subject: [PATCH 484/836] feat: allow bypassing the validation webhook (#7196) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch introduces the `cnpg.io/validation` annotation, enabling users to disable the validation webhook on CloudNativePG-managed resources. This capability is essential for making unrestricted modifications to the cluster definition, allowing operations that the operator would typically prevent—such as reducing volume sizes and rebuilding them individually. However, this increased flexibility comes with the risk of executing potentially harmful operations, so it should be used with caution. 
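For example, a hypothetical cluster that opts out of validation would carry the annotation as follows; the resource name and spec below are placeholders, while the annotation key and its `disabled` value are the ones introduced by this patch:

    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: cluster-example  # placeholder name
      annotations:
        # "enabled" (the default when the annotation is absent) keeps the
        # validation webhook active; "disabled" bypasses it entirely.
        # Any other value keeps validation active and emits a warning.
        cnpg.io/validation: disabled
    spec:
      instances: 3
      storage:
        size: 1Gi

Removing the annotation, or setting it back to `enabled`, restores the standard webhook behavior.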
Fixes: #7117 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- docs/src/labels_annotations.md | 7 + docs/src/release_notes/v1.26.md | 4 + internal/webhook/v1/backup_webhook.go | 2 +- internal/webhook/v1/cluster_webhook.go | 2 +- internal/webhook/v1/common.go | 133 ++++++++++++++++++ internal/webhook/v1/common_test.go | 172 ++++++++++++++++++++++++ internal/webhook/v1/database_webhook.go | 2 +- internal/webhook/v1/pooler_webhook.go | 2 +- pkg/utils/labels_annotations.go | 4 + 9 files changed, 324 insertions(+), 4 deletions(-) create mode 100644 internal/webhook/v1/common.go create mode 100644 internal/webhook/v1/common_test.go diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 69c48d6c2b..072a8f2d70 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -228,6 +228,13 @@ CloudNativePG manages the following predefined annotations: `cnpg.io/snapshotEndTime` : The time a snapshot was marked as ready to use. +`cnpg.io/validation` +: When set to `disabled` on a CloudNativePG-managed custom resource, the + validation webhook allows all changes without restriction. + + **⚠️ WARNING:** Disabling validation may permit unsafe or destructive + operations. Use this setting with caution and at your own risk. + `cnpg.io/volumeSnapshotDeadline` : Applied to `Backup` and `ScheduledBackup` resources, allows you to control how long the operator should retry recoverable errors before considering the diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 92409602a1..0f0e517c44 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -36,6 +36,10 @@ on the release branch in GitHub. ### Enhancements: +- Implemented the `cnpg.io/validation` annotation, allowing users to disable + the validation webhook on CloudNativePG-managed resources. Use with caution, + as this can permit unrestricted changes. (#7196) + - Introduced the `STANDBY_TCP_USER_TIMEOUT` operator configuration setting, which, if specified, sets the `tcp_user_timeout` parameter on all standby instances managed by the operator. diff --git a/internal/webhook/v1/backup_webhook.go b/internal/webhook/v1/backup_webhook.go index 7fbe2d5610..b9582a81ef 100644 --- a/internal/webhook/v1/backup_webhook.go +++ b/internal/webhook/v1/backup_webhook.go @@ -43,7 +43,7 @@ var backupLog = log.WithName("backup-resource").WithValues("version", "v1") // SetupBackupWebhookWithManager registers the webhook for Backup in the manager. func SetupBackupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Backup{}). - WithValidator(&BackupCustomValidator{}). + WithValidator(newBypassableValidator(&BackupCustomValidator{})). WithDefaulter(&BackupCustomDefaulter{}). Complete() } diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 2e7f79aad7..1671ce99ba 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -62,7 +62,7 @@ var clusterLog = log.WithName("cluster-resource").WithValues("version", "v1") // SetupClusterWebhookWithManager registers the webhook for Cluster in the manager. func SetupClusterWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Cluster{}). - WithValidator(&ClusterCustomValidator{}). 
+ WithValidator(newBypassableValidator(&ClusterCustomValidator{})). WithDefaulter(&ClusterCustomDefaulter{}). Complete() } diff --git a/internal/webhook/v1/common.go b/internal/webhook/v1/common.go new file mode 100644 index 0000000000..6cac64f987 --- /dev/null +++ b/internal/webhook/v1/common.go @@ -0,0 +1,133 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +const ( + // validationEnabledAnnotationValue is the value of that "validation" + // annotation that is set when the validation is enabled + validationEnabledAnnotationValue = "enabled" + + // validationDisabledAnnotationValue is the value of that "validation" + // annotation that is set when the validation is disabled + validationDisabledAnnotationValue = "disabled" +) + +// isValidationEnabled checks whether validation webhooks are +// enabled or disabled +func isValidationEnabled(obj client.Object) (bool, error) { + value := obj.GetAnnotations()[utils.WebhookValidationAnnotationName] + switch value { + case validationEnabledAnnotationValue, "": + return true, nil + + case validationDisabledAnnotationValue: + return false, nil + + default: + return true, fmt.Errorf( + `invalid %q annotation: %q (expected "enabled" or "disabled")`, + utils.WebhookValidationAnnotationName, value) + } +} + +// bypassableValidator implements a custom validator that enables an +// existing custom validator to be enabled or disabled via an annotation. +type bypassableValidator struct { + validator admission.CustomValidator +} + +// newBypassableValidator creates a new custom validator that enables an +// existing custom validator to be enabled or disabled via an annotation. +func newBypassableValidator(validator admission.CustomValidator) *bypassableValidator { + return &bypassableValidator{ + validator: validator, + } +} + +// ValidateCreate validates the object on creation. +// The optional warnings will be added to the response as warning messages. +// Return an error if the object is invalid. +func (b bypassableValidator) ValidateCreate( + ctx context.Context, + obj runtime.Object, +) (admission.Warnings, error) { + return validate(obj, func() (admission.Warnings, error) { + return b.validator.ValidateCreate(ctx, obj) + }) +} + +// ValidateUpdate validates the object on update. +// The optional warnings will be added to the response as warning messages. +// Return an error if the object is invalid. 
+func (b bypassableValidator) ValidateUpdate( + ctx context.Context, + oldObj runtime.Object, + newObj runtime.Object, +) (admission.Warnings, error) { + return validate(newObj, func() (admission.Warnings, error) { + return b.validator.ValidateUpdate(ctx, oldObj, newObj) + }) +} + +// ValidateDelete validates the object on deletion. +// The optional warnings will be added to the response as warning messages. +// Return an error if the object is invalid. +func (b bypassableValidator) ValidateDelete( + ctx context.Context, + obj runtime.Object, +) (admission.Warnings, error) { + return validate(obj, func() (admission.Warnings, error) { + return b.validator.ValidateDelete(ctx, obj) + }) +} + +const validationDisabledWarning = "validation webhook is disabled — all changes are accepted without validation. " + + "This may lead to unsafe or destructive operations. Proceed with extreme caution." + +func validate(obj runtime.Object, validator func() (admission.Warnings, error)) (admission.Warnings, error) { + var warnings admission.Warnings + + validationEnabled, err := isValidationEnabled(obj.(client.Object)) + if err != nil { + // If the validation annotation value is unexpected, we continue validating + // the object but we warn the user that the value was wrong + warnings = append(warnings, err.Error()) + } + + if !validationEnabled { + warnings = append(warnings, validationDisabledWarning) + return warnings, nil + } + + validationWarnings, err := validator() + warnings = append(warnings, validationWarnings...) + return warnings, err +} diff --git a/internal/webhook/v1/common_test.go b/internal/webhook/v1/common_test.go new file mode 100644 index 0000000000..126877e215 --- /dev/null +++ b/internal/webhook/v1/common_test.go @@ -0,0 +1,172 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func newClusterWithValidationAnnotation(value string) *apiv1.Cluster { + return &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.WebhookValidationAnnotationName: value, + }, + }, + } +} + +var _ = Describe("Validation webhook validation parser", func() { + It("ensures that with no annotations the validation checking is enabled", func() { + cluster := &apiv1.Cluster{} + Expect(isValidationEnabled(cluster)).To(BeTrue()) + }) + + It("ensures that with validation can be explicitly enabled", func() { + cluster := newClusterWithValidationAnnotation(validationEnabledAnnotationValue) + Expect(isValidationEnabled(cluster)).To(BeTrue()) + }) + + It("ensures that with validation can be explicitly disabled", func() { + cluster := newClusterWithValidationAnnotation(validationDisabledAnnotationValue) + Expect(isValidationEnabled(cluster)).To(BeFalse()) + }) + + It("ensures that with validation is enabled when the annotation value is unknown", func() { + cluster := newClusterWithValidationAnnotation("idontknow") + status, err := isValidationEnabled(cluster) + Expect(err).To(HaveOccurred()) + Expect(status).To(BeTrue()) + }) +}) + +type fakeCustomValidator struct { + calls []string + + createWarnings admission.Warnings + createError error + + updateWarnings admission.Warnings + updateError error + + deleteWarnings admission.Warnings + deleteError error +} + +func (f *fakeCustomValidator) ValidateCreate( + _ context.Context, + _ runtime.Object, +) (admission.Warnings, error) { + f.calls = append(f.calls, "create") + return f.createWarnings, f.createError +} + +func (f *fakeCustomValidator) ValidateUpdate( + _ context.Context, + _ runtime.Object, + _ runtime.Object, +) (admission.Warnings, error) { + f.calls = append(f.calls, "update") + return f.updateWarnings, f.updateError +} + +func (f *fakeCustomValidator) ValidateDelete( + _ context.Context, + _ runtime.Object, +) (admission.Warnings, error) { + f.calls = append(f.calls, "delete") + return f.deleteWarnings, f.deleteError +} + +var _ = Describe("Bypassable validator", func() { + fakeCreateError := fmt.Errorf("fake error") + fakeUpdateError := fmt.Errorf("fake error") + fakeDeleteError := fmt.Errorf("fake error") + + disabledCluster := newClusterWithValidationAnnotation(validationDisabledAnnotationValue) + enabledCluster := newClusterWithValidationAnnotation(validationEnabledAnnotationValue) + wrongCluster := newClusterWithValidationAnnotation("dontknow") + + fakeErrorValidator := &fakeCustomValidator{ + createError: fakeCreateError, + deleteError: fakeDeleteError, + updateError: fakeUpdateError, + } + + DescribeTable( + "validator callbacks", + func(ctx SpecContext, c *apiv1.Cluster, expectedError, withWarnings bool) { + b := newBypassableValidator(fakeErrorValidator) + + By("creation entrypoint", func() { + result, err := b.ValidateCreate(ctx, c) + if expectedError { + Expect(err).To(Equal(fakeCreateError)) + } else { + Expect(err).ToNot(HaveOccurred()) + } + + if withWarnings { + Expect(result).To(HaveLen(1)) + } + }) + + By("update entrypoint", func() { + result, err := b.ValidateUpdate(ctx, enabledCluster, c) + if expectedError { + Expect(err).To(Equal(fakeUpdateError)) + } else { + Expect(err).ToNot(HaveOccurred()) + } + + if withWarnings { + Expect(result).To(HaveLen(1)) + } + }) + + By("delete entrypoint", func() { + result, err := b.ValidateDelete(ctx, c) + if expectedError { + Expect(err).To(Equal(fakeDeleteError)) + } else { + Expect(err).ToNot(HaveOccurred()) + } + + if 
withWarnings { + Expect(result).To(HaveLen(1)) + } + }) + }, + Entry("validation is disabled", disabledCluster, false, true), + Entry("validation is enabled", enabledCluster, true, false), + Entry("validation value is not expected", wrongCluster, true, true), + ) +}) diff --git a/internal/webhook/v1/database_webhook.go b/internal/webhook/v1/database_webhook.go index f56cb5699f..34249556b7 100644 --- a/internal/webhook/v1/database_webhook.go +++ b/internal/webhook/v1/database_webhook.go @@ -41,7 +41,7 @@ var databaseLog = log.WithName("database-resource").WithValues("version", "v1") // SetupDatabaseWebhookWithManager registers the webhook for Database in the manager. func SetupDatabaseWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Database{}). - WithValidator(&DatabaseCustomValidator{}). + WithValidator(newBypassableValidator(&DatabaseCustomValidator{})). WithDefaulter(&DatabaseCustomDefaulter{}). Complete() } diff --git a/internal/webhook/v1/pooler_webhook.go b/internal/webhook/v1/pooler_webhook.go index dfbed90bd7..34594b0618 100644 --- a/internal/webhook/v1/pooler_webhook.go +++ b/internal/webhook/v1/pooler_webhook.go @@ -97,7 +97,7 @@ var poolerLog = log.WithName("pooler-resource").WithValues("version", "v1") // SetupPoolerWebhookWithManager registers the webhook for Pooler in the manager. func SetupPoolerWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr).For(&apiv1.Pooler{}). - WithValidator(&PoolerCustomValidator{}). + WithValidator(newBypassableValidator(&PoolerCustomValidator{})). Complete() } diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index 4023be2248..37143c54bb 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -246,6 +246,10 @@ const ( // PodPatchAnnotationName is the name of the annotation containing the // patch to apply to the pod PodPatchAnnotationName = MetadataNamespace + "/podPatch" + + // WebhookValidationAnnotationName is the name of the annotation describing if + // the validation webhook should be enabled or disabled + WebhookValidationAnnotationName = MetadataNamespace + "/validation" ) type annotationStatus string From ffd2141152bdfaf3eefd6998379265b64e767ac0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 08:05:00 +0100 Subject: [PATCH 485/836] fix(deps): update kubernetes patches (main) (#7219) This PR contains the following updates: https://github.com/kubernetes/api `v0.32.2` -> `v0.32.3` https://github.com/kubernetes/apiextensions-apiserver `v0.32.2` -> `v0.32.3` https://github.com/kubernetes/apimachinery `v0.32.2` -> `v0.32.3` https://github.com/kubernetes/cli-runtime `v0.32.2` -> `v0.32.3` https://github.com/kubernetes/client-go `v0.32.2` -> `v0.32.3` https://github.com/kubernetes/utils `24370be` -> `1f6e0b7` https://github.com/kubernetes-sigs/controller-runtime `v0.20.3` -> `v0.20.4` --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index df8e4f363d..64f7716b6e 100644 --- a/go.mod +++ b/go.mod @@ -38,13 +38,13 @@ require ( golang.org/x/term v0.30.0 google.golang.org/grpc v1.71.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.2 - k8s.io/apiextensions-apiserver v0.32.2 - k8s.io/apimachinery v0.32.2 - k8s.io/cli-runtime v0.32.2 - k8s.io/client-go v0.32.2 - k8s.io/utils v0.0.0-20241210054802-24370beab758 - sigs.k8s.io/controller-runtime v0.20.3 + 
k8s.io/api v0.32.3 + k8s.io/apiextensions-apiserver v0.32.3 + k8s.io/apimachinery v0.32.3 + k8s.io/cli-runtime v0.32.3 + k8s.io/client-go v0.32.3 + k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e + sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index d841fae2ac..69d7b623dd 100644 --- a/go.sum +++ b/go.sum @@ -279,24 +279,24 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw= -k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y= -k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= -k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= -k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ= -k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/cli-runtime v0.32.2 h1:aKQR4foh9qeyckKRkNXUccP9moxzffyndZAvr+IXMks= -k8s.io/cli-runtime v0.32.2/go.mod h1:a/JpeMztz3xDa7GCyyShcwe55p8pbcCVQxvqZnIwXN8= -k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA= -k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= +k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/cli-runtime v0.32.3 h1:khLF2ivU2T6Q77H97atx3REY9tXiA3OLOjWJxUrdvss= +k8s.io/cli-runtime v0.32.3/go.mod h1:vZT6dZq7mZAca53rwUfdFSZjdtLyfF61mkf/8q+Xjak= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= -k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= -k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.20.3 h1:I6Ln8JfQjHH7JbtCD2HCYHoIzajoRxPNuvhvcDbZgkI= -sigs.k8s.io/controller-runtime v0.20.3/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro= +k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= 
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= From e30ba9853eaa29d53e7b2e8134182e836ed63a3b Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Thu, 27 Mar 2025 10:39:06 +0100 Subject: [PATCH 486/836] docs: fix titles in nav menu (#7232) Move the license comment in the documentation markdown files to the second line, as it was messing with the title detection. Closes #7231 Signed-off-by: Francesco Canovai Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- docs/markdown/pkg.tpl | 3 +-- docs/src/appendixes/object_stores.md | 2 +- docs/src/applications.md | 3 +-- docs/src/architecture.md | 2 +- docs/src/backup.md | 3 +-- docs/src/backup_barmanobjectstore.md | 2 +- docs/src/backup_recovery.md | 2 +- docs/src/backup_volumesnapshot.md | 2 +- docs/src/before_you_start.md | 2 +- docs/src/benchmarking.md | 2 +- docs/src/bootstrap.md | 2 +- docs/src/certificates.md | 2 +- docs/src/cloudnative-pg.v1.md | 3 +-- docs/src/cluster_conf.md | 2 +- docs/src/connection_pooling.md | 2 +- docs/src/container_images.md | 2 +- docs/src/controller.md | 2 +- docs/src/database_import.md | 2 +- docs/src/declarative_database_management.md | 2 +- docs/src/declarative_hibernation.md | 2 +- docs/src/declarative_role_management.md | 2 +- docs/src/e2e.md | 2 +- docs/src/failover.md | 2 +- docs/src/failure_modes.md | 2 +- docs/src/faq.md | 2 +- docs/src/fencing.md | 2 +- docs/src/image_catalog.md | 2 +- docs/src/index.md | 2 +- docs/src/installation_upgrade.md | 2 +- docs/src/instance_manager.md | 2 +- docs/src/kubectl-plugin.md | 2 +- docs/src/kubernetes_upgrade.md | 2 +- docs/src/labels_annotations.md | 2 +- docs/src/logging.md | 2 +- docs/src/logical_replication.md | 2 +- docs/src/monitoring.md | 2 +- docs/src/networking.md | 2 +- docs/src/operator_capability_levels.md | 2 +- docs/src/operator_conf.md | 2 +- docs/src/postgis.md | 2 +- docs/src/postgresql_conf.md | 2 +- docs/src/preview_version.md | 2 +- docs/src/quickstart.md | 2 +- docs/src/recovery.md | 2 +- docs/src/release_notes.md | 2 +- docs/src/release_notes/edb-cloud-native-postgresql.md | 2 +- docs/src/release_notes/v1.24.md | 2 +- docs/src/release_notes/v1.25.md | 2 +- docs/src/release_notes/v1.26.md | 2 +- docs/src/replica_cluster.md | 2 +- docs/src/replication.md | 2 +- docs/src/resource_management.md | 2 +- docs/src/rolling_update.md | 2 +- docs/src/samples.md | 2 +- docs/src/scheduling.md | 2 +- docs/src/security.md | 2 +- docs/src/service_management.md | 2 +- docs/src/ssl_connections.md | 2 +- docs/src/storage.md | 2 +- docs/src/supported_releases.md | 2 +- docs/src/tablespaces.md | 2 +- docs/src/troubleshooting.md | 2 +- docs/src/use_cases.md | 2 +- docs/src/wal_archiving.md | 2 +- 64 files changed, 64 insertions(+), 68 deletions(-) diff --git a/docs/markdown/pkg.tpl b/docs/markdown/pkg.tpl index 542b5bd178..ed4875592b 100644 --- a/docs/markdown/pkg.tpl +++ b/docs/markdown/pkg.tpl @@ -1,7 +1,6 @@ {{ define "packages" -}} - - # API Reference + {{ $grpname := "" -}} {{- range $idx, $val := .packages -}} diff --git a/docs/src/appendixes/object_stores.md b/docs/src/appendixes/object_stores.md index 59d175244b..017011bf1a 100644 --- a/docs/src/appendixes/object_stores.md +++ b/docs/src/appendixes/object_stores.md @@ -1,5 +1,5 @@ - # Appendix A - Common object stores for backups + You can store the [backup](../backup.md) files in any service that is supported by the Barman Cloud infrastructure. 
That is: diff --git a/docs/src/applications.md b/docs/src/applications.md index bc3db66533..5828e99e17 100644 --- a/docs/src/applications.md +++ b/docs/src/applications.md @@ -1,5 +1,5 @@ - # Connecting from an application + Applications are supposed to work with the services created by CloudNativePG in the same Kubernetes cluster. @@ -82,4 +82,3 @@ and correspond to the `postgres` user. !!! Important Superuser access over the network is disabled by default. - diff --git a/docs/src/architecture.md b/docs/src/architecture.md index 577397a4fa..bc12515094 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -1,5 +1,5 @@ - # Architecture + !!! Hint For a deeper understanding, we recommend reading our article on the CNCF diff --git a/docs/src/backup.md b/docs/src/backup.md index eddeb28b39..0f01434a0e 100644 --- a/docs/src/backup.md +++ b/docs/src/backup.md @@ -1,5 +1,5 @@ - # Backup + PostgreSQL natively provides first class backup and recovery capabilities based on file system level (physical) copy. These have been successfully used for @@ -378,4 +378,3 @@ spec: In the previous example, CloudNativePG will invariably choose the primary instance even if the `Cluster` is set to prefer replicas. - diff --git a/docs/src/backup_barmanobjectstore.md b/docs/src/backup_barmanobjectstore.md index fb1f1f29e3..5583736a69 100644 --- a/docs/src/backup_barmanobjectstore.md +++ b/docs/src/backup_barmanobjectstore.md @@ -1,5 +1,5 @@ - # Backup on object stores + CloudNativePG natively supports **online/hot backup** of PostgreSQL clusters through continuous physical backup and WAL archiving on an object diff --git a/docs/src/backup_recovery.md b/docs/src/backup_recovery.md index ac1e0915ac..742ceb403b 100644 --- a/docs/src/backup_recovery.md +++ b/docs/src/backup_recovery.md @@ -1,4 +1,4 @@ - # Backup and Recovery + [Backup](backup.md) and [recovery](recovery.md) are in two separate sections. diff --git a/docs/src/backup_volumesnapshot.md b/docs/src/backup_volumesnapshot.md index aedd03654b..08da92baf6 100644 --- a/docs/src/backup_volumesnapshot.md +++ b/docs/src/backup_volumesnapshot.md @@ -1,5 +1,5 @@ - # Backup on volume snapshots + !!! Warning As noted in the [backup document](backup.md), a cold snapshot explicitly diff --git a/docs/src/before_you_start.md b/docs/src/before_you_start.md index 36d2f9a7a3..4d6935631a 100644 --- a/docs/src/before_you_start.md +++ b/docs/src/before_you_start.md @@ -1,5 +1,5 @@ - # Before You Start + Before we get started, it is essential to go over some terminology that is specific to Kubernetes and PostgreSQL. diff --git a/docs/src/benchmarking.md b/docs/src/benchmarking.md index 129c75cd19..189545c938 100644 --- a/docs/src/benchmarking.md +++ b/docs/src/benchmarking.md @@ -1,5 +1,5 @@ - # Benchmarking + The CNPG kubectl plugin provides an easy way for benchmarking a PostgreSQL deployment in Kubernetes using CloudNativePG. diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 860a471bda..342ccf08e8 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -1,5 +1,5 @@ - # Bootstrap + This section describes the options available to create a new PostgreSQL cluster and the design rationale behind them. diff --git a/docs/src/certificates.md b/docs/src/certificates.md index 7ec3f45784..96e764bbf3 100644 --- a/docs/src/certificates.md +++ b/docs/src/certificates.md @@ -1,5 +1,5 @@ - # Certificates + CloudNativePG was designed to natively support TLS certificates. 
To set up a cluster, the operator requires: diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 416b7c7101..afd71dee01 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -1,6 +1,5 @@ - - # API Reference +

Package v1 contains API Schema definitions for the postgresql v1 API group

diff --git a/docs/src/cluster_conf.md b/docs/src/cluster_conf.md index 9e34c4822e..2ecbf0f1ad 100644 --- a/docs/src/cluster_conf.md +++ b/docs/src/cluster_conf.md @@ -1,5 +1,5 @@ - # Instance pod configuration + ## Projected volumes diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md index db4863dade..57d961835c 100644 --- a/docs/src/connection_pooling.md +++ b/docs/src/connection_pooling.md @@ -1,5 +1,5 @@ - # Connection pooling + CloudNativePG provides native support for connection pooling with [PgBouncer](https://www.pgbouncer.org/), one of the most popular open source diff --git a/docs/src/container_images.md b/docs/src/container_images.md index 79e6b0ba76..248cf1e02e 100644 --- a/docs/src/container_images.md +++ b/docs/src/container_images.md @@ -1,5 +1,5 @@ - # Container Image Requirements + The CloudNativePG operator for Kubernetes is designed to work with any compatible container image of PostgreSQL that complies diff --git a/docs/src/controller.md b/docs/src/controller.md index 00f49595a0..b90e1ae1db 100644 --- a/docs/src/controller.md +++ b/docs/src/controller.md @@ -1,5 +1,5 @@ - # Custom Pod Controller + Kubernetes uses the [Controller pattern](https://kubernetes.io/docs/concepts/architecture/controller/) diff --git a/docs/src/database_import.md b/docs/src/database_import.md index f6507f1cd1..1c19a501ff 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -1,5 +1,5 @@ - # Importing Postgres databases + This section describes how to import one or more existing PostgreSQL databases inside a brand new CloudNativePG cluster. diff --git a/docs/src/declarative_database_management.md b/docs/src/declarative_database_management.md index b4f83e582c..85bc01c66a 100644 --- a/docs/src/declarative_database_management.md +++ b/docs/src/declarative_database_management.md @@ -1,5 +1,5 @@ - # PostgreSQL Database Management + CloudNativePG simplifies PostgreSQL database provisioning by automatically creating an application database named `app` by default. This default behavior diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md index c9116bca5b..dba076d8f3 100644 --- a/docs/src/declarative_hibernation.md +++ b/docs/src/declarative_hibernation.md @@ -1,5 +1,5 @@ - # Declarative hibernation + CloudNativePG is designed to keep PostgreSQL clusters up, running and available anytime. 
diff --git a/docs/src/declarative_role_management.md b/docs/src/declarative_role_management.md index 652be3ff5e..5c177ca9a7 100644 --- a/docs/src/declarative_role_management.md +++ b/docs/src/declarative_role_management.md @@ -1,5 +1,5 @@ - # PostgreSQL Role Management + From its inception, CloudNativePG has managed the creation of specific roles required in PostgreSQL instances: diff --git a/docs/src/e2e.md b/docs/src/e2e.md index 2122227ede..0b175ddfb2 100644 --- a/docs/src/e2e.md +++ b/docs/src/e2e.md @@ -1,5 +1,5 @@ - # End-to-End Tests + CloudNativePG is automatically tested after each commit via a suite of **End-to-end (E2E) tests** (or integration tests) diff --git a/docs/src/failover.md b/docs/src/failover.md index 7a922890cf..e28da3d933 100644 --- a/docs/src/failover.md +++ b/docs/src/failover.md @@ -1,5 +1,5 @@ - # Automated failover + In the case of unexpected errors on the primary for longer than the `.spec.failoverDelay` (by default `0` seconds), the cluster will go into diff --git a/docs/src/failure_modes.md b/docs/src/failure_modes.md index 5b887be256..c26d180235 100644 --- a/docs/src/failure_modes.md +++ b/docs/src/failure_modes.md @@ -1,5 +1,5 @@ - # Failure Modes + !!! Note In previous versions of CloudNativePG, this page included specific failure diff --git a/docs/src/faq.md b/docs/src/faq.md index dd6d38c96a..b80134c895 100644 --- a/docs/src/faq.md +++ b/docs/src/faq.md @@ -1,5 +1,5 @@ - # Frequently Asked Questions (FAQ) + ## Running PostgreSQL in Kubernetes diff --git a/docs/src/fencing.md b/docs/src/fencing.md index 7195453cf7..25e21b1462 100644 --- a/docs/src/fencing.md +++ b/docs/src/fencing.md @@ -1,5 +1,5 @@ - # Fencing + Fencing in CloudNativePG is the ultimate process of protecting the data in one, more, or even all instances of a PostgreSQL cluster when they diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index 0753a46676..4d19ecafe1 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -1,5 +1,5 @@ - # Image Catalog + `ImageCatalog` and `ClusterImageCatalog` are essential resources that empower you to define images for creating a `Cluster`. diff --git a/docs/src/index.md b/docs/src/index.md index 33df61a23e..705697e0c3 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,5 +1,5 @@ - # CloudNativePG + **CloudNativePG** is an open-source [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 3525ca0560..cd03af6e27 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -1,5 +1,5 @@ - # Installation and upgrades + ## Installation on Kubernetes diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index b16d399389..0e1641d313 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -1,5 +1,5 @@ - # Postgres instance manager + CloudNativePG does not rely on an external tool for failover management. It simply relies on the Kubernetes API server and a native key component called: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 3aab8c4ee3..9c37c4ceb9 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -1,5 +1,5 @@ - # Kubectl Plugin + CloudNativePG provides a plugin for `kubectl` to manage a cluster in Kubernetes. 
diff --git a/docs/src/kubernetes_upgrade.md b/docs/src/kubernetes_upgrade.md index 8ad3771f40..5cab4f8f84 100644 --- a/docs/src/kubernetes_upgrade.md +++ b/docs/src/kubernetes_upgrade.md @@ -1,5 +1,5 @@ - # Kubernetes Upgrade and Maintenance + Maintaining an up-to-date Kubernetes cluster is crucial for ensuring optimal performance and security, particularly for self-managed clusters, especially diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 072a8f2d70..30c0a2e963 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -1,5 +1,5 @@ - # Labels and annotations + Resources in Kubernetes are organized in a flat structure, with no hierarchical information or relationship between them. However, such resources and objects diff --git a/docs/src/logging.md b/docs/src/logging.md index 4535200897..a543a5fd25 100644 --- a/docs/src/logging.md +++ b/docs/src/logging.md @@ -1,5 +1,5 @@ - # Logging + CloudNativePG outputs logs in JSON format directly to standard output, including PostgreSQL logs, without persisting them to storage for security reasons. This diff --git a/docs/src/logical_replication.md b/docs/src/logical_replication.md index bb89f04270..5bd36027b9 100644 --- a/docs/src/logical_replication.md +++ b/docs/src/logical_replication.md @@ -1,5 +1,5 @@ - # Logical Replication + PostgreSQL extends its replication capabilities beyond physical replication, which operates at the level of exact block addresses and byte-by-byte copying, diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md index eeb40511d6..9c72548899 100644 --- a/docs/src/monitoring.md +++ b/docs/src/monitoring.md @@ -1,5 +1,5 @@ - # Monitoring + !!! Important Installing Prometheus and Grafana is beyond the scope of this project. diff --git a/docs/src/networking.md b/docs/src/networking.md index d46af9f267..3840550433 100644 --- a/docs/src/networking.md +++ b/docs/src/networking.md @@ -1,5 +1,5 @@ - # Networking + CloudNativePG assumes the underlying Kubernetes cluster has the required connectivity already set up. diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index 3d1ca43f43..b261378490 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -1,5 +1,5 @@ - # Operator capability levels + These capabilities were implemented by CloudNativePG, classified using the diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index b7df8ff003..92ea33a47c 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -1,5 +1,5 @@ - # Operator configuration + The operator for CloudNativePG is installed from a standard deployment manifest and follows the convention over configuration paradigm. 
diff --git a/docs/src/postgis.md b/docs/src/postgis.md index bcab823f6f..cfbcc3b976 100644 --- a/docs/src/postgis.md +++ b/docs/src/postgis.md @@ -1,5 +1,5 @@ - # PostGIS + [PostGIS](https://postgis.net/) is a very popular open source extension for PostgreSQL that introduces support for storing GIS (Geographic Information diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index b4bd3eb388..6c1660ac86 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -1,5 +1,5 @@ - # PostgreSQL Configuration + Users that are familiar with PostgreSQL are aware of the existence of the following three files to configure an instance: diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index 758427fe46..adaf7032a3 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -1,5 +1,5 @@ - # Preview Versions + CloudNativePG candidate releases are pre-release versions made available for testing before the community issues a new generally available (GA) release. diff --git a/docs/src/quickstart.md b/docs/src/quickstart.md index d91fef3255..79e4a35fac 100644 --- a/docs/src/quickstart.md +++ b/docs/src/quickstart.md @@ -1,5 +1,5 @@ - # Quickstart + This section guides you through testing a PostgreSQL cluster on your local machine by deploying CloudNativePG on a local Kubernetes cluster diff --git a/docs/src/recovery.md b/docs/src/recovery.md index 06715663e7..a78f3e240f 100644 --- a/docs/src/recovery.md +++ b/docs/src/recovery.md @@ -1,5 +1,5 @@ - # Recovery + In PostgreSQL terminology, recovery is the process of starting a PostgreSQL instance using an existing backup. The PostgreSQL recovery mechanism diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md index 0df52fe60b..5077b9e604 100644 --- a/docs/src/release_notes.md +++ b/docs/src/release_notes.md @@ -1,5 +1,5 @@ - # Release notes + History of user-visible changes for CloudNativePG, classified for each minor release. diff --git a/docs/src/release_notes/edb-cloud-native-postgresql.md b/docs/src/release_notes/edb-cloud-native-postgresql.md index e68113133c..26fded50ae 100644 --- a/docs/src/release_notes/edb-cloud-native-postgresql.md +++ b/docs/src/release_notes/edb-cloud-native-postgresql.md @@ -1,5 +1,5 @@ - # Release notes for 1.14.0 and earlier + The first public release of CloudNativePG is version 1.15.0. Before that, the product was entirely owned by EDB and distributed under the name of diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md index 315bfcc28d..96c103fee0 100644 --- a/docs/src/release_notes/v1.24.md +++ b/docs/src/release_notes/v1.24.md @@ -1,5 +1,5 @@ - # Release notes for CloudNativePG 1.24 + History of user-visible changes in the 1.24 minor release of CloudNativePG. diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md index c53c343f08..550c684160 100644 --- a/docs/src/release_notes/v1.25.md +++ b/docs/src/release_notes/v1.25.md @@ -1,5 +1,5 @@ - # Release notes for CloudNativePG 1.25 + History of user-visible changes in the 1.25 minor release of CloudNativePG. diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 0f0e517c44..69e24971ad 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -1,5 +1,5 @@ - # Release notes for CloudNativePG 1.26 + History of user-visible changes in the 1.26 minor release of CloudNativePG. 
diff --git a/docs/src/replica_cluster.md b/docs/src/replica_cluster.md index ba2b66dffc..37654f72af 100644 --- a/docs/src/replica_cluster.md +++ b/docs/src/replica_cluster.md @@ -1,5 +1,5 @@ - # Replica clusters + A replica cluster is a CloudNativePG `Cluster` resource designed to replicate data from another PostgreSQL instance, ideally also managed by diff --git a/docs/src/replication.md b/docs/src/replication.md index fc47f2784e..642048cb2b 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -1,5 +1,5 @@ - # Replication + Physical replication is one of the strengths of PostgreSQL and one of the reasons why some of the largest organizations in the world have chosen it for diff --git a/docs/src/resource_management.md b/docs/src/resource_management.md index 36e39b33b1..fef4e68bbd 100644 --- a/docs/src/resource_management.md +++ b/docs/src/resource_management.md @@ -1,5 +1,5 @@ - # Resource management + In a typical Kubernetes cluster, pods run with unlimited resources. By default, they might be allowed to use as much CPU and RAM as needed. diff --git a/docs/src/rolling_update.md b/docs/src/rolling_update.md index e0c9c74764..1d04a1e2ee 100644 --- a/docs/src/rolling_update.md +++ b/docs/src/rolling_update.md @@ -1,5 +1,5 @@ - # Rolling Updates + The operator allows changing the PostgreSQL version used in a cluster while applications are running against it. diff --git a/docs/src/samples.md b/docs/src/samples.md index f4d5945cdc..1539e118f3 100644 --- a/docs/src/samples.md +++ b/docs/src/samples.md @@ -1,5 +1,5 @@ - # Examples + The examples show configuration files for setting up your PostgreSQL cluster. diff --git a/docs/src/scheduling.md b/docs/src/scheduling.md index 225d3e0ff7..abe3594a78 100644 --- a/docs/src/scheduling.md +++ b/docs/src/scheduling.md @@ -1,5 +1,5 @@ - # Scheduling + Scheduling, in Kubernetes, is the process responsible for placing a new pod on the best node possible, based on several criteria. diff --git a/docs/src/security.md b/docs/src/security.md index 266324d9ad..7b993c94e5 100644 --- a/docs/src/security.md +++ b/docs/src/security.md @@ -1,5 +1,5 @@ - # Security + This section contains information about security for CloudNativePG, that are analyzed at 3 different layers: Code, Container and Cluster. diff --git a/docs/src/service_management.md b/docs/src/service_management.md index 92c378ebc5..c9d815e9bb 100644 --- a/docs/src/service_management.md +++ b/docs/src/service_management.md @@ -1,5 +1,5 @@ - # Service Management + A PostgreSQL cluster should only be accessed via standard Kubernetes network services directly managed by CloudNativePG. For more details, refer to the diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index b8e54a3150..a9f910e4f2 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -1,5 +1,5 @@ - # Client TLS/SSL connections + !!! Seealso "Certificates" See [Certificates](certificates.md) diff --git a/docs/src/storage.md b/docs/src/storage.md index ad99a2ca21..1a25c09e3f 100644 --- a/docs/src/storage.md +++ b/docs/src/storage.md @@ -1,5 +1,5 @@ - # Storage + Storage is the most critical component in a database workload. 
Storage must always be available, scale, perform well, diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 53e2d09813..d7f9af333c 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -1,5 +1,5 @@ - # Supported releases + diff --git a/docs/src/tablespaces.md b/docs/src/tablespaces.md index c7f47dc43d..86e5c0847b 100644 --- a/docs/src/tablespaces.md +++ b/docs/src/tablespaces.md @@ -1,5 +1,5 @@ - # Tablespaces + A tablespace is a robust and widely embraced feature in database management systems. It offers a powerful means to enhance the vertical diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index f5e37bec96..ebcbf1045f 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -1,5 +1,5 @@ - # Troubleshooting + In this page, you can find some basic information on how to troubleshoot CloudNativePG in your Kubernetes cluster deployment. diff --git a/docs/src/use_cases.md b/docs/src/use_cases.md index d09f6383c6..d0369b88e5 100644 --- a/docs/src/use_cases.md +++ b/docs/src/use_cases.md @@ -1,5 +1,5 @@ - # Use cases + CloudNativePG has been designed to work with applications that reside in the same Kubernetes cluster, for a full cloud native diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md index 3522d73060..d9e34485bf 100644 --- a/docs/src/wal_archiving.md +++ b/docs/src/wal_archiving.md @@ -1,5 +1,5 @@ - # WAL archiving + WAL archiving is the process that feeds a [WAL archive](backup.md#wal-archive) in CloudNativePG. From aa81a1022c1ce8a507fc3eb8c68d62de8850140c Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Thu, 27 Mar 2025 11:03:10 +0100 Subject: [PATCH 487/836] docs: various updates (#7143) Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- README.md | 202 +++++++++--------- .../cloudnative-pg.clusterserviceversion.yaml | 106 +++++---- docs/src/architecture.md | 7 +- docs/src/e2e.md | 2 +- docs/src/index.md | 202 ++++++++++-------- docs/src/installation_upgrade.md | 3 +- docs/src/replication.md | 6 +- 7 files changed, 289 insertions(+), 239 deletions(-) diff --git a/README.md b/README.md index 4290915a59..9b9b956b2d 100644 --- a/README.md +++ b/README.md @@ -5,20 +5,17 @@ [![Documentation][documentation-badge]][documentation] [![Stack Overflow](https://img.shields.io/badge/stackoverflow-cloudnative--pg-blue?logo=stackoverflow&logoColor=%23F48024&link=https%3A%2F%2Fstackoverflow.com%2Fquestions%2Ftagged%2Fcloudnative-pg)][stackoverflow] -# Welcome to the CloudNativePG project! +# Welcome to the CloudNativePG Project! -**CloudNativePG** is a comprehensive open source platform designed to -seamlessly manage [PostgreSQL](https://www.postgresql.org/) databases within -Kubernetes environments, covering the entire operational lifecycle from initial -deployment to ongoing maintenance. The main component is the CloudNativePG -operator. +**CloudNativePG (CNPG)** is an open-source platform designed to seamlessly +manage [PostgreSQL](https://www.postgresql.org/) databases in Kubernetes +environments. It covers the entire operational lifecycle—from deployment to +ongoing maintenance—through its core component, the CloudNativePG operator. -CloudNativePG was originally built and sponsored by [EDB](https://www.enterprisedb.com). 
+## Table of Contents -## Table of content - -- [Code of conduct](CODE_OF_CONDUCT.md) -- [Governance policies](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md) +- [Code of Conduct](CODE_OF_CONDUCT.md) +- [Governance Policies](https://github.com/cloudnative-pg/governance/blob/main/GOVERNANCE.md) - [Contributing](CONTRIBUTING.md) - [Adopters](ADOPTERS.md) - [Commercial Support](https://cloudnative-pg.io/support/) @@ -26,96 +23,74 @@ CloudNativePG was originally built and sponsored by [EDB](https://www.enterprise ## Getting Started -The best way to get started is with the ["Quickstart"](docs/src/quickstart.md) -section in the documentation. +The best way to get started is the [Quickstart Guide](https://cloudnative-pg.io/documentation/current/quickstart/). ## Scope -The goal of CloudNativePG is to increase the adoption of PostgreSQL, one of the -most loved DBMS in traditional VM and bare metal environments, inside -Kubernetes, thus making the database an integral part of the development -process and GitOps CI/CD automated pipelines. - -### In scope - -CloudNativePG has been designed by Postgres experts with Kubernetes -administrators in mind. Put simply, it leverages Kubernetes by extending its -controller and by defining, in a programmatic way, all the actions that a good -DBA would normally do when managing a highly available PostgreSQL database -cluster. - -Since the inception, our philosophy has been to adopt a Kubernetes native -approach to PostgreSQL cluster management, making incremental decisions that -would answer the fundamental question: "What would a Kubernetes user expect -from a Postgres operator?". - -The most important decision we made is to have the status of a PostgreSQL -cluster directly available in the `Cluster` resource, so to inspect it through -the Kubernetes API. We've fully embraced the operator pattern and eventual -consistency, two of the core principles upon which Kubernetes is built for -managing complex applications. - -As a result, the operator is responsible for managing the status of the -`Cluster` resource, keeping it up to date with the information that each -PostgreSQL instance manager regularly reports back through the API server. 
-Changes to the cluster status might trigger, for example, actions like: - -* a PostgreSQL failover where, after an unexpected failure of a cluster's - primary instance, the operator itself elects the new primary, updates the - status, and directly coordinates the operation through the reconciliation - loop, by relying on the instance managers - -* scaling up or down the number of read-only replicas, based on a positive or - negative variation in the number of desired instances in the cluster, so that - the operator creates or removes the required resources to run PostgreSQL, - such as persistent volumes, persistent volume claims, pods, secrets, config - maps, and then coordinates cloning and streaming replication tasks - -* updates of the endpoints of the PostgreSQL services that applications rely on - to interact with the database, as Kubernetes represents the single source of - truth and authority - -* updates of container images in a rolling fashion, following a change in the - image name, by first updating the pods where replicas are running, and then - the primary, issuing a switchover first - -The latter example is based on another pillar of CloudNativePG: -immutable application containers - as explained in the -[blog article "Why EDB Chose Immutable Application Containers"](https://www.enterprisedb.com/blog/why-edb-chose-immutable-application-containers). - -The above list can be extended. However, the gist is that CloudNativePG -exclusively relies on the Kubernetes API server and the instance manager to -coordinate the complex operations that need to take place in a business -continuity PostgreSQL cluster, without requiring any assistance from an -intermediate management tool responsible for high availability and failover -management like similar open source operators. - -CloudNativePG also manages additional resources to help the `Cluster` resource -manage PostgreSQL - currently `Backup`, `ClusterImageCatalog`, `ImageCatalog`, -`Pooler`, and `ScheduledBackup`. - -Fully embracing Kubernetes means adopting a hands-off approach during temporary -failures of the Kubernetes API server. In such instances, the operator refrains -from taking action, deferring decisions until the API server is operational -again. Meanwhile, Postgres instances persist, maintaining operations based on -the latest known state of the cluster. - -### Out of scope - -CloudNativePG is exclusively focused on the PostgreSQL database management -system maintained by the PostgreSQL Global Development Group (PGDG). We are not -currently considering adding to CloudNativePG extensions or capabilities that -are included in forks of the PostgreSQL database management system, unless in -the form of extensible or pluggable frameworks. [The operator itself can be extended -via a plugin interface called CNPG-I](https://github.com/cloudnative-pg/cnpg-i). - -CloudNativePG doesn't intend to pursue database independence (e.g. control a -MariaDB cluster). +### Mission + +CloudNativePG aims to increase PostgreSQL adoption within Kubernetes by making +it an integral part of the development process and GitOps-driven CI/CD +automation. + +### Core Principles & Features + +Designed by PostgreSQL experts for Kubernetes administrators, CloudNativePG +follows a Kubernetes-native approach to PostgreSQL primary/standby cluster +management. 
Instead of relying on external high-availability tools (like +Patroni, repmgr, or Stolon), it integrates directly with the Kubernetes API to +automate database operations that a skilled DBA would perform manually. + +Key design decisions include: + +- Direct integration with Kubernetes API: The PostgreSQL cluster’s status is + available directly in the `Cluster` resource, allowing users to inspect it + via the Kubernetes API. +- Operator pattern: The operator ensures that the desired PostgreSQL state is + reconciled automatically, following Kubernetes best practices. +- Immutable application containers: Updates follow an immutable infrastructure + model, as explained in + ["Why EDB Chose Immutable Application Containers"](https://www.enterprisedb.com/blog/why-edb-chose-immutable-application-containers). + +### How CloudNativePG Works + +The operator continuously monitors and updates the PostgreSQL cluster state. +Examples of automated actions include: + +- Failover management: If the primary instance fails, the operator elects a new + primary, updates the cluster status, and orchestrates the transition. +- Scaling read replicas: When the number of desired replicas changes, the + operator provisions or removes resources such as persistent volumes, secrets, + and config maps while managing streaming replication. +- Service updates: Kubernetes remains the single source of truth, ensuring + that PostgreSQL service endpoints are always up to date. +- Rolling updates: When an image is updated, the operator follows a rolling + strategy—first updating replica pods before performing a controlled + switchover for the primary. + +CloudNativePG manages additional Kubernetes resources to enhance PostgreSQL +management, including: `Backup`, `ClusterImageCatalog`, `Database`, +`ImageCatalog`, `Pooler`, `Publication`, `ScheduledBackup`, and `Subscription`. + +## Out of Scope + +- **Kubernetes only:** CloudNativePG is dedicated to vanilla Kubernetes + maintained by the [Cloud Native Computing Foundation + (CNCF)](https://kubernetes.io/). +- **PostgreSQL only:** CloudNativePG is dedicated to vanilla PostgreSQL + maintained by the [PostgreSQL Global Development Group + (PGDG)](https://www.postgresql.org/about/). +- **No support for forks:** Features from PostgreSQL forks will only be + considered if they can be integrated as extensions or pluggable frameworks. +- **Not a general-purpose database operator:** CloudNativePG does not support + other databases (e.g., MariaDB). + +CloudNativePG can be extended via the [CNPG-I plugin interface](https://github.com/cloudnative-pg/cnpg-i). ## Communications -- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-30a6l6bp3-u1lNAmh~N02Cfiv2utKTFg) - [Github Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions) +- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-30a6l6bp3-u1lNAmh~N02Cfiv2utKTFg) - [Twitter](https://twitter.com/CloudNativePg) - [Mastodon](https://mastodon.social/@CloudNativePG) - [Bluesky](https://bsky.app/profile/cloudnativepg.bsky.social) @@ -144,6 +119,7 @@ organization to this list! 
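To make the reconciliation flow described in the "How CloudNativePG Works" list above concrete, here is a minimal `Cluster` manifest. This is an illustrative sketch only; the resource name and storage size are hypothetical and not taken from the patch:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example   # hypothetical name
spec:
  # One primary plus two replicas; changing this number triggers the
  # scaling reconciliation described above.
  instances: 3
  storage:
    size: 1Gi             # hypothetical size
```

Failover, scaling, and rolling updates are then driven purely by edits to this manifest: the operator reconciles pods, PVCs, secrets, and services to match the declared state.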
### Useful links - [Data on Kubernetes (DoK) Community](https://dok.community/) +- ["Cloud Neutral Postgres Databases with Kubernetes and CloudNativePG" by Gabriele Bartolini](https://www.cncf.io/blog/2024/11/20/cloud-neutral-postgres-databases-with-kubernetes-and-cloudnativepg/) (November 2024) - ["How to migrate your PostgreSQL database in Kubernetes with ~0 downtime from anywhere" by Gabriele Bartolini](https://gabrielebartolini.it/articles/2024/03/cloudnativepg-recipe-5-how-to-migrate-your-postgresql-database-in-kubernetes-with-~0-downtime-from-anywhere/) (March 2024) - ["Maximizing Microservice Databases with Kubernetes, Postgres, and CloudNativePG" by Gabriele Bartolini](https://gabrielebartolini.it/articles/2024/02/maximizing-microservice-databases-with-kubernetes-postgres-and-cloudnativepg/) (February 2024) - ["Recommended Architectures for PostgreSQL in Kubernetes" by Gabriele Bartolini](https://www.cncf.io/blog/2023/09/29/recommended-architectures-for-postgresql-in-kubernetes/) (September 2023) @@ -153,15 +129,43 @@ organization to this list! - ["Shift-Left Security: The Path To PostgreSQL On Kubernetes" by Gabriele Bartolini](https://www.tfir.io/shift-left-security-the-path-to-postgresql-on-kubernetes/) (April 2021) - ["Local Persistent Volumes and PostgreSQL usage in Kubernetes" by Gabriele Bartolini](https://www.2ndquadrant.com/en/blog/local-persistent-volumes-and-postgresql-usage-in-kubernetes/) (June 2020) -## Star History +--- + +
+We are a Cloud Native Computing Foundation Sandbox project.
+</p>

-[![Star History Chart](https://api.star-history.com/svg?repos=cloudnative-pg/cloudnative-pg&type=Date)](https://star-history.com/#cloudnative-pg/cloudnative-pg&Date)
+
+<p align="center">
+CNCF logo
+</p>

-## Trademarks
+---

-*[Postgres, PostgreSQL and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/)
+
+CloudNativePG was originally built and sponsored by EDB.
+
+<p align="center">
+EDB logo
+</p>
+
+---
+
+Postgres, PostgreSQL, and the Slonik Logo
 are trademarks or registered trademarks of the PostgreSQL Community Association
-of Canada, and used with their permission.*
+of Canada, and used with their permission.
+ +--- [cncf-landscape]: https://landscape.cncf.io/?item=app-definition-and-development--database--cloudnativepg [stackoverflow]: https://stackoverflow.com/questions/tagged/cloudnative-pg diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index cf046f2cbb..1a5de3dca5 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -28,50 +28,68 @@ spec: description: | Main features: - * Direct integration with Kubernetes API server for High Availability, - without requiring an external tool - * Self-Healing capability, through: - * failover of the primary instance by promoting the most aligned replica - * automated recreation of a replica - * Planned switchover of the primary instance by promoting a selected replica - * Scale up/down capabilities - * Definition of an arbitrary number of instances (minimum 1 - one primary server) - * Definition of the *read-write* service, to connect your applications to the only primary server of the cluster - * Definition of the *read-only* service, to connect your applications to any of the instances for reading workloads - * Declarative management of PostgreSQL configuration - * Declarative management of Postgres roles, users and groups - * Support for Local Persistent Volumes with PVC templates - * Reuse of Persistent Volumes storage in Pods - * Separate volume for WAL files - * Rolling updates for PostgreSQL minor versions - * In-place or rolling updates for operator upgrades - * TLS connections and client certificate authentication - * Support for custom TLS certificates (including integration with cert-manager) - * Continuous WAL archiving to an object store (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) - * Backups on volume snapshots (where supported by the underlying storage classes) - * Backups on object stores (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) - * Full recovery and Point-In-Time recovery from an existing backup on volume snapshots or object stores - * Offline import of existing PostgreSQL databases, including major upgrades of PostgreSQL - * Fencing of an entire PostgreSQL cluster, or a subset of the instances in a declarative way - * Hibernation of a PostgreSQL cluster in a declarative way - * Support for Synchronous Replicas - * Support for HA physical replication slots at cluster level - * Backup from a standby - * Backup retention policies (based on recovery window, only on object stores) - * Parallel WAL archiving and restore to allow the database to keep up with WAL - generation on high write systems - * Support tagging backup files uploaded to an object store to enable optional - retention management at the object store layer Replica clusters for - * PostgreSQL deployments across multiple Kubernetes - clusters, enabling private, public, hybrid, and multi-cloud architectures - * Connection pooling with PgBouncer - * Support for node affinity via `nodeSelector` - * Native customizable exporter of user defined metrics for Prometheus through the `metrics` port (9187) - * Standard output logging of PostgreSQL error messages in JSON format - * Automatically set `readOnlyRootFilesystem` security context for pods - * `cnpg` plugin for `kubectl` - * Simple bind and search+bind LDAP client authentication - * Multi-arch format container images + - Direct integration with the Kubernetes API 
server for High Availability, + eliminating the need for external tools. + - Self-healing capabilities, including: + - Automated failover by promoting the most aligned replica. + - Automatic recreation of failed replicas. + - Planned switchover of the primary instance by promoting a selected replica. + - Declarative management of key PostgreSQL configurations, including: + - PostgreSQL settings. + - Roles, users, and groups. + - Databases, extensions, and schemas. + - Tablespaces (including temporary tablespaces). + - Flexible instance definition, supporting any number of instances (minimum 1 + primary server). + - Scale-up/down capabilities to dynamically adjust cluster size. + - Read-Write and Read-Only Services, ensuring applications connect correctly: + - *Read-Write Service*: Routes connections to the primary server. + - *Read-Only Service*: Distributes connections among replicas for read workloads. + - Support for quorum-based and priority-based PostgreSQL Synchronous + Replication. + - Replica clusters enabling PostgreSQL distributed topologies across multiple + Kubernetes clusters (private, public, hybrid, and multi-cloud). + - Delayed Replica clusters for point-in-time access to historical data. + - Persistent volume management, including: + - Support for Local Persistent Volumes with PVC templates. + - Reuse of Persistent Volumes storage in Pods. + - Separate volumes for WAL files and tablespaces. + - Backup and recovery options, including: + - Integration with the [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) + for continuous online backup via WAL archiving to AWS S3, S3-compatible + services, Azure Blob Storage, and Google Cloud Storage, with support for + retention policies based on a configurable recovery window. + - Backups using volume snapshots (where supported by storage classes). + - Full and Point-In-Time recovery from volume snapshots or object stores (via Barman Cloud plugin). + - Backup from standby replicas to reduce primary workload impact. + - Offline and online import of PostgreSQL databases, including major upgrades: + - *Offline Import*: Direct restore from existing databases. + - *Online Import*: PostgreSQL native logical replication via the `Subscription` resource. + - High Availability physical replication slots, including synchronization of + user-defined replication slots. + - Parallel WAL archiving and restore, ensuring high-performance data + synchronization in high-write environments. + - TLS support, including: + - Secure connections and client certificate authentication. + - Custom TLS certificates (integrated with `cert-manager`). + - Startup and readiness probes, including replica probes based on desired lag + from the primary. + - Declarative rolling updates for: + - PostgreSQL minor versions. + - Operator upgrades (in-place or rolling updates). + - Standard output logging of PostgreSQL error messages in JSON format for + easier integration with log aggregation tools. + - Prometheus-compatible metrics exporter (`metrics` port 9187) for custom + monitoring. + - `cnpg` plugin for `kubectl` to simplify cluster operations. + - Cluster hibernation for resource efficiency in inactive states. + - Fencing of PostgreSQL clusters (full cluster or subset) to isolate instances + when needed. + - Connection pooling with PgBouncer for improved database efficiency. + - OLM (Operator Lifecycle Manager) installation support for streamlined + deployments. 
+ - Multi-arch container images, including Software Bill of Materials (SBOM) and + provenance attestations for security compliance. install: spec: diff --git a/docs/src/architecture.md b/docs/src/architecture.md index bc12515094..74fddca5e7 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -411,9 +411,10 @@ This is typically triggered by: declarative configuration, enabling you to automate these procedures as part of your Infrastructure as Code (IaC) process, including GitOps. -The designated primary in the above example is fed via WAL streaming -(`primary_conninfo`), with fallback option for file-based WAL shipping through -the `restore_command` and `barman-cloud-wal-restore`. +In the example above, the designated primary receives WAL updates via streaming +replication (`primary_conninfo`). As a fallback, it can retrieve WAL segments +from an object store using file-based WAL shipping—for instance, with the +Barman Cloud plugin through `restore_command` and `barman-cloud-wal-restore`. CloudNativePG allows you to define topologies with multiple replica clusters. You can also define replica clusters with a lower number of replicas, and then diff --git a/docs/src/e2e.md b/docs/src/e2e.md index 0b175ddfb2..be32a8a178 100644 --- a/docs/src/e2e.md +++ b/docs/src/e2e.md @@ -6,7 +6,7 @@ commit via a suite of **End-to-end (E2E) tests** (or integration tests) which ensure that the operator correctly deploys and manages PostgreSQL clusters. -Kubernetes versions 1.25 through 1.29, and PostgreSQL versions 12 through 16, +Kubernetes versions 1.27 through 1.32, and PostgreSQL versions 13 through 17, are tested for each commit, helping detect bugs at an early stage of the development process. diff --git a/docs/src/index.md b/docs/src/index.md index 705697e0c3..5225000f40 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,38 +1,43 @@ # CloudNativePG -**CloudNativePG** is an open-source +CloudNativePG (CNPG) is an open-source [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) designed to manage [PostgreSQL](https://www.postgresql.org/) workloads on any -supported [Kubernetes](https://kubernetes.io) cluster. It supports deployment in -private, public, hybrid, and multi-cloud environments, thanks to -its [distributed topology](replica_cluster.md#distributed-topology) -feature. - -CloudNativePG adheres to DevOps principles and concepts such as declarative -configuration and immutable infrastructure. - -It defines a new Kubernetes resource called `Cluster` representing a PostgreSQL -cluster made up of a single primary and an optional number of replicas that co-exist -in a chosen Kubernetes namespace for High Availability and offloading of -read-only queries. - -Applications that reside in the same Kubernetes cluster can access the -PostgreSQL database using a service solely managed by the operator, without -needing to worry about changes in the primary role following a failover or -switchover. Applications that reside outside the Kubernetes cluster can -leverage the service template capability and a `LoadBalancer` service to expose -PostgreSQL via TCP. Additionally, web applications can take advantage of the -native connection pooler based on PgBouncer. +supported [Kubernetes](https://kubernetes.io) cluster. +It fosters cloud-neutrality through seamless deployment in private, public, +hybrid, and multi-cloud environments via its +[distributed topology](replica_cluster.md#distributed-topology) feature. 
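The designated-primary behaviour described in the `architecture.md` hunk above, streaming via `primary_conninfo` with an object-store fallback through `restore_command`, can be sketched as a replica `Cluster`. All names and the bucket path below are hypothetical illustrations, not taken from the patch:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-dc-b          # hypothetical replica cluster
spec:
  instances: 3
  replica:
    enabled: true             # this cluster hosts the designated primary
    source: cluster-dc-a
  bootstrap:
    recovery:
      source: cluster-dc-a
  externalClusters:
    - name: cluster-dc-a
      connectionParameters:   # WAL streaming (primary_conninfo)
        host: cluster-dc-a-rw
        user: streaming_replica
        sslmode: verify-full
      barmanObjectStore:      # file-based WAL shipping fallback
        destinationPath: s3://backups/dc-a   # hypothetical bucket
```

In a real deployment the `barmanObjectStore` stanza would also carry credentials; they are omitted here to keep the sketch focused on the streaming-plus-fallback wiring.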
+ +Built around DevOps principles, CloudNativePG embraces declarative +configuration and immutable infrastructure, ensuring reliability and automation +in database management. + +At its core, CloudNativePG introduces a custom Kubernetes resource called +`Cluster`, representing a PostgreSQL cluster with: + +- A single primary instance for write operations. +- Optional replicas for High Availability and read scaling. + +These instances reside within a Kubernetes namespace, allowing applications to +connect seamlessly using operator-managed services. Failovers and switchovers +occur transparently, eliminating the need for manual intervention. + +For applications inside the Kubernetes cluster, CNPG provides a microservice +database approach, enabling co-location of PostgreSQL clusters and applications +in the same namespace for optimized access. +For applications outside the cluster, CNPG offers flexible connectivity through +service templates and `LoadBalancer` services for direct TCP exposure. +Additionally, web applications can take advantage of the native connection +pooler based on PgBouncer. CloudNativePG was originally built by [EDB](https://www.enterprisedb.com), then released open source under Apache License 2.0. -It has been submitted for the [CNCF Sandbox in September 2024](https://github.com/cncf/sandbox/issues/128). -The [source code repository is in Github](https://github.com/cloudnative-pg/cloudnative-pg). +The [source code repository is in GitHub](https://github.com/cloudnative-pg/cloudnative-pg). !!! Note Based on the [Operator Capability Levels model](operator_capability_levels.md), - users can expect a **"Level V - Auto Pilot"** subset of capabilities from the + users can expect a "Level V - Auto Pilot" subset of capabilities from the CloudNativePG Operator. ## Supported Kubernetes distributions @@ -59,6 +64,9 @@ in three different flavors: Red Hat UBI images are primarily intended for OLM consumption. +All container images are signed and include SBOM and provenance attestations, +provided separately for each architecture. + ### Operands The PostgreSQL operand container images are available for all @@ -66,86 +74,104 @@ The PostgreSQL operand container images are available for all across multiple architectures, directly from the [`postgres-containers` project's GitHub Container Registry](https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql). -Daily jobs ensure that critical vulnerabilities (CVEs) in the entire stack are +All container images are signed and include SBOM and provenance attestations, +provided separately for each architecture. + +Weekly jobs ensure that critical vulnerabilities (CVEs) in the entire stack are promptly addressed. Additionally, the community provides images for the [PostGIS extension](postgis.md). 
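The native PgBouncer-based pooler mentioned in the `index.md` text above is driven by a dedicated `Pooler` resource. A minimal sketch, with hypothetical names:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example-rw    # hypothetical name
spec:
  cluster:
    name: cluster-example    # must reference an existing Cluster
  instances: 1
  type: rw                   # pool connections towards the primary
  pgbouncer:
    poolMode: session
```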
## Main features -* Direct integration with Kubernetes API server for High Availability, - without requiring an external tool -* Self-Healing capability, through: - * failover of the primary instance by promoting the most aligned replica - * automated recreation of a replica -* Planned switchover of the primary instance by promoting a selected replica -* Scale up/down capabilities -* Definition of an arbitrary number of instances (minimum 1 - one primary server) -* Definition of the *read-write* service, to connect your applications to the only primary server of the cluster -* Definition of the *read-only* service, to connect your applications to any of the instances for reading workloads -* Declarative management of PostgreSQL configuration, including certain popular - Postgres extensions through the cluster `spec`: `pgaudit`, `auto_explain`, - `pg_stat_statements`, and `pg_failover_slots` -* Declarative management of Postgres roles, users and groups -* Declarative management of Postgres databases, including extensions and schemas -* Support for Local Persistent Volumes with PVC templates -* Reuse of Persistent Volumes storage in Pods -* Separate volumes for WAL files and tablespaces -* Declarative management of Postgres tablespaces, including temporary tablespaces -* Rolling updates for PostgreSQL minor versions -* In-place or rolling updates for operator upgrades -* TLS connections and client certificate authentication -* Support for custom TLS certificates (including integration with cert-manager) -* Continuous WAL archiving to an object store (AWS S3 and S3-compatible, Azure - Blob Storage, and Google Cloud Storage) -* Backups on volume snapshots (where supported by the underlying storage classes) -* Backups on object stores (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage) -* Full recovery and Point-In-Time recovery from an existing backup on volume snapshots or object stores -* Offline import of existing PostgreSQL databases, including major upgrades of PostgreSQL -* Online import of existing PostgreSQL databases, including major upgrades of - PostgreSQL, through PostgreSQL native logical replication (declarative, via - the `Subscription` resource) -* Fencing of an entire PostgreSQL cluster, or a subset of the instances in a declarative way -* Hibernation of a PostgreSQL cluster in a declarative way -* Support for quorum-based and priority-based Synchronous Replication -* Support for HA physical replication slots at cluster level -* Synchronization of user defined physical replication slots -* Backup from a standby -* Backup retention policies (based on recovery window, only on object stores) -* Parallel WAL archiving and restore to allow the database to keep up with WAL - generation on high write systems -* Support tagging backup files uploaded to an object store to enable optional - retention management at the object store layer -* Replica clusters for PostgreSQL distributed topologies spanning multiple - Kubernetes clusters, enabling private, public, hybrid, and multi-cloud - architectures with support for controlled switchover. 
-* Delayed Replica clusters -* Connection pooling with PgBouncer -* Support for node affinity via `nodeSelector` -* Native customizable exporter of user defined metrics for Prometheus through the `metrics` port (9187) -* Standard output logging of PostgreSQL error messages in JSON format -* Automatically set `readOnlyRootFilesystem` security context for pods -* `cnpg` plugin for `kubectl` -* Simple bind and search+bind LDAP client authentication -* Multi-arch format container images -* OLM installation +- Direct integration with the Kubernetes API server for High Availability, + eliminating the need for external tools. +- Self-healing capabilities, including: + - Automated failover by promoting the most aligned replica. + - Automatic recreation of failed replicas. +- Planned switchover of the primary instance by promoting a selected replica. +- Declarative management of key PostgreSQL configurations, including: + - PostgreSQL settings. + - Roles, users, and groups. + - Databases, extensions, and schemas. + - Tablespaces (including temporary tablespaces). +- Flexible instance definition, supporting any number of instances (minimum 1 + primary server). +- Scale-up/down capabilities to dynamically adjust cluster size. +- Read-Write and Read-Only Services, ensuring applications connect correctly: + - *Read-Write Service*: Routes connections to the primary server. + - *Read-Only Service*: Distributes connections among replicas for read workloads. +- Support for quorum-based and priority-based PostgreSQL Synchronous + Replication. +- Replica clusters enabling PostgreSQL distributed topologies across multiple + Kubernetes clusters (private, public, hybrid, and multi-cloud). +- Delayed Replica clusters for point-in-time access to historical data. +- Persistent volume management, including: + - Support for Local Persistent Volumes with PVC templates. + - Reuse of Persistent Volumes storage in Pods. + - Separate volumes for WAL files and tablespaces. +- Backup and recovery options, including: + - Integration with the [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) + for continuous online backup via WAL archiving to AWS S3, S3-compatible + services, Azure Blob Storage, and Google Cloud Storage, with support for + retention policies based on a configurable recovery window. + - Backups using volume snapshots (where supported by storage classes). + - Full and Point-In-Time recovery from volume snapshots or object stores (via Barman Cloud plugin). + - Backup from standby replicas to reduce primary workload impact. +- Offline and online import of PostgreSQL databases, including major upgrades: + - *Offline Import*: Direct restore from existing databases. + - *Online Import*: PostgreSQL native logical replication via the `Subscription` resource. +- High Availability physical replication slots, including synchronization of + user-defined replication slots. +- Parallel WAL archiving and restore, ensuring high-performance data + synchronization in high-write environments. +- TLS support, including: + - Secure connections and client certificate authentication. + - Custom TLS certificates (integrated with `cert-manager`). +- Startup and readiness probes, including replica probes based on desired lag + from the primary. +- Declarative rolling updates for: + - PostgreSQL minor versions. + - Operator upgrades (in-place or rolling updates). +- Standard output logging of PostgreSQL error messages in JSON format for + easier integration with log aggregation tools. 
+- Prometheus-compatible metrics exporter (`metrics` port 9187) for custom + monitoring. +- `cnpg` plugin for `kubectl` to simplify cluster operations. +- Cluster hibernation for resource efficiency in inactive states. +- Fencing of PostgreSQL clusters (full cluster or subset) to isolate instances + when needed. +- Connection pooling with PgBouncer for improved database efficiency. +- OLM (Operator Lifecycle Manager) installation support for streamlined + deployments. +- Multi-arch container images, including Software Bill of Materials (SBOM) and + provenance attestations for security compliance. !!! Info CloudNativePG does not use `StatefulSet`s for managing data persistence. - Rather, it manages persistent volume claims (PVCs) directly. If you are - curious, read ["Custom Pod Controller"](controller.md) to know more. + Instead, it directly manages Persistent Volume Claims (PVCs). + See ["Custom Pod Controller"](controller.md) for more details. ## About this guide -Follow the instructions in the ["Quickstart"](quickstart.md) to test CloudNativePG -on a local Kubernetes cluster using Kind, or Minikube. +Follow the instructions in the ["Quickstart"](quickstart.md) to test +CloudNativePG on a local Kubernetes cluster using Kind, or Minikube. In case you are not familiar with some basic terminology on Kubernetes and PostgreSQL, please consult the ["Before you start" section](before_you_start.md). -*[Postgres, PostgreSQL and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/) +The CloudNativePG documentation is licensed under a Creative Commons +Attribution 4.0 International License. + +--- + +*[Postgres, PostgreSQL, and the Slonik Logo](https://www.postgresql.org/about/policies/trademarks/) are trademarks or registered trademarks of the PostgreSQL Community Association of Canada, and used with their permission.* -The CloudNativePG documentation is licensed under a Creative Commons -Attribution 4.0 International License. +--- + +CloudNativePG is a +[Cloud Native Computing Foundation Sandbox project](https://www.cncf.io/sandbox-projects/). + +![](https://github.com/cncf/artwork/blob/main/other/cncf/horizontal/color/cncf-color.png?raw=true) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index cd03af6e27..62a4f719a5 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -19,7 +19,8 @@ kubectl apply --server-side -f \ You can verify that with: ```sh -kubectl get deployment -n cnpg-system cnpg-controller-manager +kubectl rollout status deployment \ + -n cnpg-system cnpg-controller-manager ``` ### Using the `cnpg` plugin for `kubectl` diff --git a/docs/src/replication.md b/docs/src/replication.md index 642048cb2b..f992796c38 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -395,9 +395,9 @@ number of standbys are available. Make sure you have a clear understanding of what *ready/available* means for a replica and set your expectations accordingly. By default, a replica is considered ready when it has successfully connected to the source at least - once. However, CloudNativePG allows you to configure startup probes for - replicas. For more details, please refer to the - ["Advanced Startup Probe" section](instance_manager.md#advanced-startup-probe). + once. However, CloudNativePG allows you to configure startup and readiness + probes for replicas based on maximum lag. For more details, please refer to + the ["Postgres instance manager" section](instance_manager.md). 
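The lag-based startup and readiness probes referenced in the `replication.md` change above are configured on the `Cluster` spec. A sketch assuming the `streaming` probe type and an arbitrary lag threshold; the field names are as introduced in recent releases and should be verified against the instance manager documentation:

```yaml
spec:
  probes:
    readiness:
      type: streaming    # replica is ready only while actively streaming...
      maximumLag: 32Mi   # ...within this lag from the primary (hypothetical value)
```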
This setting balances data safety with availability, enabling applications to continue writing during temporary standby unavailability—hence, it’s also known From 50e28eae8f950eb070812f07cef0075755bcf605 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 14:37:21 +0100 Subject: [PATCH 488/836] fix(deps): update all non-major go dependencies (main) (#7237) This PR contains the following updates: https://github.com/goreleaser/goreleaser `v2.7.0` -> `v2.8.1` https://github.com/jackc/pgx `v5.7.2` -> `v5.7.4` https://github.com/onsi/ginkgo `v2.23.0` -> `v2.23.3` https://github.com/onsi/gomega `v1.36.2` -> `v1.36.3` github.com/google/go-cmp `v0.6.0` -> `v0.7.0` golang.org/x/crypto` `v0.35.0` -> `v0.36.0` golang.org/x/net` `v0.36.0` -> `v0.37.0` golang.org/x/sync` `v0.11.0` -> `v0.12.0` golang.org/x/text` `v0.22.0` -> `v0.23.0` --- Makefile | 2 +- go.mod | 16 ++++++++-------- go.sum | 32 ++++++++++++++++---------------- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Makefile b/Makefile index 2d810755a8..133599d1e1 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.17.2 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca -GORELEASER_VERSION ?= v2.7.0 +GORELEASER_VERSION ?= v2.8.1 SPELLCHECK_VERSION ?= 0.47.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 diff --git a/go.mod b/go.mod index 64f7716b6e..dd710e3a5c 100644 --- a/go.mod +++ b/go.mod @@ -16,15 +16,15 @@ require ( github.com/go-logr/logr v1.4.2 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 - github.com/jackc/pgx/v5 v5.7.2 + github.com/jackc/pgx/v5 v5.7.4 github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.23.0 - github.com/onsi/gomega v1.36.2 + github.com/onsi/ginkgo/v2 v2.23.3 + github.com/onsi/gomega v1.36.3 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 github.com/prometheus/client_golang v1.21.1 github.com/robfig/cron v1.2.0 @@ -67,7 +67,7 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.9 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/uuid v1.6.0 // indirect @@ -98,12 +98,12 @@ require ( github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/net v0.36.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/net v0.37.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.11.0 // indirect + golang.org/x/sync v0.12.0 // indirect golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.30.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 
69d7b623dd..cb017ef1d4 100644 --- a/go.sum +++ b/go.sum @@ -68,8 +68,8 @@ github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl76 github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -91,8 +91,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= -github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg= +github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -143,10 +143,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ= -github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= +github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -218,23 +218,23 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= -golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -246,8 +246,8 @@ golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 78cec2769283e753342d913a481ba91c3f51c0d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Thu, 27 Mar 2025 17:04:43 +0100 Subject: [PATCH 489/836] ci: add workflow to automate the refresh of the licenses directory (#7214) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #5393 Signed-off-by: Niccolò Fei Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. --- .github/workflows/refresh-licenses.yml | 41 ++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 .github/workflows/refresh-licenses.yml diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml new file mode 100644 index 0000000000..4194df5fcc --- /dev/null +++ b/.github/workflows/refresh-licenses.yml @@ -0,0 +1,41 @@ +# Refresh the "licenses" directory and create a PR if there are any changes + +name: Refresh licenses directory +on: + workflow_dispatch: + schedule: + - cron: "30 0 * * 1" + +env: + GOLANG_VERSION: "1.24.x" + +jobs: + licenses: + name: Refresh licenses + runs-on: ubuntu-24.04 + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GOLANG_VERSION }} + check-latest: true + + - name: Generate licenses + run: | + make licenses + + - name: Create Pull Request if licenses have been updated + uses: peter-evans/create-pull-request@v7 + with: + token: ${{ secrets.REPO_GHA_PAT }} + title: "chore: refresh licenses directory" + body: "Refresh the licenses directory" + branch: "license-updater" + author: "license-updater " + add-paths: | + licenses/** + commit-message: "chore: refresh licenses directory" + signoff: true From a6747c96514e6d2110111b75090e80ee79c0448f Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 27 Mar 2025 22:31:01 +0100 Subject: [PATCH 490/836] feat(barman-cloud): remove support for Barman <= 3.4 (#7220) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch removes compatibility with Barman versions 3.4 and earlier, which have been deprecated. These versions, released before April 2023, are no longer maintained and require specific workarounds that increase code complexity. Removing support simplifies the backup integration logic and aligns the operator with recent, supported versions of Barman (≥ 3.5). We have simultaneously reduced technical debt by eliminating the capability detection framework that required checking the version of Barman Cloud before every execution of the program. Users running an operand version released before January 2023 must upgrade to a more recent version before updating the operator, as older operand images may not be compatible with the updated backup mechanisms introduced in this patch. This commit represents progress in transitioning users to the recommended Barman Cloud plugin. 
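Against the narrowed `compression` enum in the CRD diff below (only `bzip2`, `gzip`, and `snappy` remain for data compression), a still-valid backup configuration would look like this sketch; the bucket path is a hypothetical illustration:

```yaml
spec:
  backup:
    barmanObjectStore:
      destinationPath: s3://backups/example   # hypothetical bucket
      data:
        compression: snappy   # lz4, xz, and zstd are dropped by this patch
```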
Closes #3954 Signed-off-by: Marco Nenciarini Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- .../bases/postgresql.cnpg.io_clusters.yaml | 10 ++----- go.mod | 4 +-- go.sum | 8 ++--- pkg/management/postgres/backup.go | 29 ++----------------- pkg/management/postgres/backup_test.go | 25 +++++----------- pkg/management/postgres/restore.go | 10 +++---- 6 files changed, 23 insertions(+), 63 deletions(-) diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 4e27dd2999..38de43f033 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -1152,14 +1152,11 @@ spec: description: |- Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2`, `lz4`, `snappy`, `xz`, and `zstd`. + compression, default), `gzip`, `bzip2`, and `snappy`. enum: - bzip2 - gzip - - lz4 - snappy - - xz - - zstd type: string encryption: description: |- @@ -2661,14 +2658,11 @@ spec: description: |- Compress a backup file (a tar file per tablespace) while streaming it to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2`, `lz4`, `snappy`, `xz`, and `zstd`. + compression, default), `gzip`, `bzip2`, and `snappy`. enum: - bzip2 - gzip - - lz4 - snappy - - xz - - zstd type: string encryption: description: |- diff --git a/go.mod b/go.mod index dd710e3a5c..3a7aff37b6 100644 --- a/go.mod +++ b/go.mod @@ -8,9 +8,9 @@ require ( github.com/avast/retry-go/v4 v4.6.1 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.3.0 + github.com/cloudnative-pg/barman-cloud v0.3.1 github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb - github.com/cloudnative-pg/machinery v0.1.0 + github.com/cloudnative-pg/machinery v0.2.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index cb017ef1d4..b37382fcc2 100644 --- a/go.sum +++ b/go.sum @@ -18,12 +18,12 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.3.0 h1:tCtIF7nsHDH5X7nAXXd7VqNKKNGHrycXAyyKKKpdGS4= -github.com/cloudnative-pg/barman-cloud v0.3.0/go.mod h1:8m6W117343zT28ctcskUYEu/dy+MX3hUUW4DynH8MLI= +github.com/cloudnative-pg/barman-cloud v0.3.1 h1:kzkY77k2lN/caoyh7ibXDSZjJeSJTNvnVt6Gfa8Iq5M= +github.com/cloudnative-pg/barman-cloud v0.3.1/go.mod h1:4HL3AjY9oEl2Ed0HSkyvTZEQPhwyFOaAnuCz9lfVeYQ= github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb h1:FPORwCxjZwlnKnF7dOkuOAz0GBSQ3Hrn+8lm4uMiWeM= github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb/go.mod h1:n+kbHm3rzRCY5IJKuE1tGMbG6JaeYz8yycYoLt7BeKo= -github.com/cloudnative-pg/machinery v0.1.0 h1:tjRmsqQmsO/OlaT0uFmkEtVqgr+SGPM88cKZOHYKLBo= -github.com/cloudnative-pg/machinery v0.1.0/go.mod h1:0V3vm44FaIsY+x4pm8ORry7xCC3AJiO+ebfPNxeP5Ck= +github.com/cloudnative-pg/machinery v0.2.0 h1:x8OAwxdeL/6wkbxqorz+nX6UovTyx7/TBeCfiRebR2o= +github.com/cloudnative-pg/machinery v0.2.0/go.mod 
h1:Kg8W8Tb/1UFGGtw3hR8S5SytSWddlHaCnJSgBo4x/nc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go index 80b6caced3..46648c4abb 100644 --- a/pkg/management/postgres/backup.go +++ b/pkg/management/postgres/backup.go @@ -28,7 +28,6 @@ import ( "time" barmanBackup "github.com/cloudnative-pg/barman-cloud/pkg/backup" - barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities" barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog" barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials" @@ -68,7 +67,6 @@ type BackupCommand struct { Env []string Log log.Logger Instance *Instance - Capabilities *barmanCapabilities.Capabilities barmanBackup *barmanBackup.Command } @@ -82,11 +80,6 @@ func NewBarmanBackupCommand( instance *Instance, log log.Logger, ) (*BackupCommand, error) { - capabilities, err := barmanCapabilities.CurrentCapabilities() - if err != nil { - return nil, err - } - return &BackupCommand{ Cluster: cluster, Backup: backup, @@ -95,8 +88,7 @@ func NewBarmanBackupCommand( Env: os.Environ(), Instance: instance, Log: log, - Capabilities: capabilities, - barmanBackup: barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, capabilities), + barmanBackup: barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore), }, nil } @@ -104,9 +96,6 @@ func NewBarmanBackupCommand( // barman-cloud-backup func (b *BackupCommand) Start(ctx context.Context) error { contextLogger := log.FromContext(ctx) - if err := b.ensureCompatibility(); err != nil { - return err - } b.setupBackupStatus() @@ -145,15 +134,6 @@ func (b *BackupCommand) Start(ctx context.Context) error { return nil } -func (b *BackupCommand) ensureCompatibility() error { - postgresVers, err := b.Instance.GetPgVersion() - if err != nil { - return err - } - - return b.barmanBackup.IsCompatible(postgresVers) -} - func (b *BackupCommand) retryWithRefreshedCluster( ctx context.Context, cb func() error, @@ -234,7 +214,6 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { b.Backup.Status.BackupName, backupStatus.ServerName, b.Env, - b.Cluster, postgres.BackupTemporaryDirectory, ) if err != nil { @@ -249,7 +228,7 @@ func (b *BackupCommand) takeBackup(ctx context.Context) error { b.Backup.Status.SetAsCompleted() barmanBackup, err := b.barmanBackup.GetExecutedBackupInfo( - ctx, b.Backup.Status.BackupName, backupStatus.ServerName, b.Cluster, b.Env) + ctx, b.Backup.Status.BackupName, backupStatus.ServerName, b.Env) if err != nil { return err } @@ -348,9 +327,7 @@ func (b *BackupCommand) setupBackupStatus() { barmanConfiguration := b.Cluster.Spec.Backup.BarmanObjectStore backupStatus := b.Backup.GetStatus() - if b.Capabilities.ShouldExecuteBackupWithName(b.Cluster) { - backupStatus.BackupName = fmt.Sprintf("backup-%v", pgTime.ToCompactISO8601(time.Now())) - } + backupStatus.BackupName = fmt.Sprintf("backup-%v", pgTime.ToCompactISO8601(time.Now())) backupStatus.BarmanCredentials = barmanConfiguration.BarmanCredentials backupStatus.EndpointCA = barmanConfiguration.EndpointCA backupStatus.EndpointURL = barmanConfiguration.EndpointURL diff --git a/pkg/management/postgres/backup_test.go b/pkg/management/postgres/backup_test.go 
index a8cfb1504a..87d654c81c 100644 --- a/pkg/management/postgres/backup_test.go +++ b/pkg/management/postgres/backup_test.go @@ -25,7 +25,6 @@ import ( "strings" barmanBackup "github.com/cloudnative-pg/barman-cloud/pkg/backup" - barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities" "github.com/cloudnative-pg/machinery/pkg/log" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -120,8 +119,6 @@ var _ = Describe("testing backup command", func() { }, }, } - capabilities, err := barmanCapabilities.CurrentCapabilities() - Expect(err).ShouldNot(HaveOccurred()) backupCommand = BackupCommand{ Cluster: cluster, Backup: backup, @@ -130,11 +127,10 @@ var _ = Describe("testing backup command", func() { WithObjects(cluster, backup). WithStatusSubresource(cluster, backup). Build(), - Recorder: &record.FakeRecorder{}, - Env: os.Environ(), - Log: log.FromContext(context.Background()), - Instance: &Instance{}, - Capabilities: capabilities, + Recorder: &record.FakeRecorder{}, + Env: os.Environ(), + Log: log.FromContext(context.Background()), + Instance: &Instance{}, } }) @@ -154,16 +150,9 @@ var _ = Describe("testing backup command", func() { var _ = Describe("generate backup options", func() { const namespace = "test" - var ( - capabilities *barmanCapabilities.Capabilities - cluster *apiv1.Cluster - ) + var cluster *apiv1.Cluster BeforeEach(func() { - var err error - capabilities, err = barmanCapabilities.CurrentCapabilities() - Expect(err).ShouldNot(HaveOccurred()) - cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: namespace}, Spec: apiv1.ClusterSpec{ @@ -185,7 +174,7 @@ var _ = Describe("generate backup options", func() { extraOptions := []string{"--min-chunk-size=5MB", "--read-timeout=60", "-vv"} cluster.Spec.Backup.BarmanObjectStore.Data.AdditionalCommandArgs = extraOptions - cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, capabilities) + cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore) options, err := cmd.GetDataConfiguration([]string{}) Expect(err).ToNot(HaveOccurred()) @@ -206,7 +195,7 @@ var _ = Describe("generate backup options", func() { "--encryption=aes256", } cluster.Spec.Backup.BarmanObjectStore.Data.AdditionalCommandArgs = extraOptions - cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore, capabilities) + cmd := barmanBackup.NewBackupCommand(cluster.Spec.Backup.BarmanObjectStore) options, err := cmd.GetDataConfiguration([]string{}) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 58e55569ef..ee0c63acb5 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -35,11 +35,11 @@ import ( "time" barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver" - barmanCapabilities "github.com/cloudnative-pg/barman-cloud/pkg/capabilities" barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog" barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials" barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer" + barmanUtils "github.com/cloudnative-pg/barman-cloud/pkg/utils" restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/machinery/pkg/envmap" "github.com/cloudnative-pg/machinery/pkg/execlog" @@ -452,13 +452,13 @@ func (info InitInfo) restoreDataDir(ctx context.Context, backup 
*apiv1.Backup, e contextLogger.Info("Starting barman-cloud-restore", "options", options) - cmd := exec.Command(barmanCapabilities.BarmanCloudRestore, options...) // #nosec G204 + cmd := exec.Command(barmanUtils.BarmanCloudRestore, options...) // #nosec G204 cmd.Env = env - err = execlog.RunStreaming(cmd, barmanCapabilities.BarmanCloudRestore) + err = execlog.RunStreaming(cmd, barmanUtils.BarmanCloudRestore) if err != nil { var exitError *exec.ExitError if errors.As(err, &exitError) { - err = barmanCommand.UnmarshalBarmanCloudRestoreExitCode(ctx, exitError.ExitCode()) + err = barmanCommand.UnmarshalBarmanCloudRestoreExitCode(exitError.ExitCode()) } contextLogger.Error(err, "Can't restore backup") @@ -655,7 +655,7 @@ func (info InitInfo) writeCustomRestoreWalConfig(cluster *apiv1.Cluster, conf st func getRestoreWalConfig(ctx context.Context, backup *apiv1.Backup) (string, error) { var err error - cmd := []string{barmanCapabilities.BarmanCloudWalRestore} + cmd := []string{barmanUtils.BarmanCloudWalRestore} if backup.Status.EndpointURL != "" { cmd = append(cmd, "--endpoint-url", backup.Status.EndpointURL) } From 7a0963d9a6c89e45dafd24d3cf69750759ed5e26 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 28 Mar 2025 05:46:19 +0100 Subject: [PATCH 491/836] feat: declarative offline in-place major upgrades of PostgreSQL (#6664) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR introduces the ability to upgrade PostgreSQL clusters to a higher major version, enabling a fully declarative approach for major version upgrades within CloudNativePG. **IMPORTANT:** While the upgrade job area still requires refactoring, we’ve decided to merge this patch into the main branch and release version 1.26.0-rc1 to gather early community feedback at KubeCon Europe 2025 in London. The refactoring will likely require a second release candidate before the final 1.26.0 release. With this update, users can specify a new image with a higher major version using any supported method. The operator tracks the previous image version in `.status.majorVersionUpgradeFromImage`, which also serves as an indicator that a major version upgrade is in progress. The upgrade process starts by deleting all pods and jobs to ensure the data directory is inaccessible. A dedicated upgrade job is created for the primary instance, using an `initContainer` to copy the old PostgreSQL installation and running `pg_upgrade --link` for an efficient migration. Once the upgrade completes, all non-primary instances are removed, and new standbys are cloned to restore the desired number of instances. 
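As a minimal illustration, a major upgrade is requested by simply bumping
the major version of the operand image (the image names below are examples
only):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  # Previously: ghcr.io/cloudnative-pg/postgresql:16
  imageName: ghcr.io/cloudnative-pg/postgresql:17
  instances: 3
  storage:
    size: 1Gi
```

The same applies when the major version is selected through an image
catalog instead of an explicit image name.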
Closes #4682 Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Signed-off-by: Niccolò Fei Signed-off-by: Gabriele Bartolini Signed-off-by: Francesco Canovai Co-authored-by: Armando Ruocco Co-authored-by: Leonardo Cecchi Co-authored-by: Niccolò Fei Co-authored-by: Gabriele Bartolini Co-authored-by: Francesco Canovai --- .github/workflows/continuous-delivery.yml | 4 +- .wordlist-en-custom.txt | 1 + api/v1/cluster_funcs.go | 33 +- api/v1/cluster_types.go | 11 +- api/v1/zz_generated.deepcopy.go | 5 + .../bases/postgresql.cnpg.io_clusters.yaml | 8 +- contribute/e2e_testing_environment/README.md | 1 + docs/mkdocs.yml | 1 + docs/src/cloudnative-pg.v1.md | 8 + docs/src/e2e.md | 3 + docs/src/index.md | 1 + docs/src/operator_capability_levels.md | 22 +- docs/src/postgres_upgrades.md | 204 ++++++++ docs/src/release_notes/v1.26.md | 12 +- docs/src/samples/cluster-example.yaml | 4 +- internal/cmd/manager/instance/cmd.go | 2 + .../cmd/manager/instance/pgbasebackup/cmd.go | 2 +- internal/cmd/manager/instance/upgrade/cmd.go | 40 ++ .../manager/instance/upgrade/execute/cmd.go | 434 ++++++++++++++++++ .../manager/instance/upgrade/prepare/cmd.go | 126 +++++ internal/cmd/plugin/pgbench/pgbench.go | 2 +- internal/cmd/plugin/promote/promote.go | 2 +- internal/cmd/plugin/restart/restart.go | 4 +- internal/cmd/plugin/status/status.go | 2 +- internal/controller/cluster_controller.go | 38 +- internal/controller/cluster_image.go | 121 +++-- internal/controller/cluster_image_test.go | 214 +++++++++ internal/controller/cluster_scale.go | 28 +- internal/controller/cluster_scale_test.go | 35 +- internal/controller/cluster_status.go | 4 +- internal/controller/cluster_upgrade.go | 4 +- internal/controller/cluster_upgrade_test.go | 3 + internal/controller/suite_test.go | 1 + .../controller/instance_controller.go | 12 +- internal/webhook/v1/cluster_webhook.go | 39 +- internal/webhook/v1/cluster_webhook_test.go | 22 +- pkg/management/postgres/initdb.go | 2 +- pkg/management/postgres/join.go | 2 +- pkg/reconciler/majorupgrade/doc.go | 30 ++ pkg/reconciler/majorupgrade/job.go | 87 ++++ pkg/reconciler/majorupgrade/job_test.go | 72 +++ pkg/reconciler/majorupgrade/reconciler.go | 286 ++++++++++++ .../majorupgrade/reconciler_test.go | 229 +++++++++ pkg/reconciler/majorupgrade/suite_test.go | 32 ++ pkg/resources/status/patch.go | 2 +- pkg/resources/status/transactions.go | 26 +- pkg/specs/containers.go | 2 +- pkg/specs/jobs.go | 34 +- pkg/specs/pg_pods_test.go | 3 + pkg/specs/pods.go | 4 +- pkg/specs/pods_test.go | 3 + pkg/specs/volumes.go | 4 +- pkg/specs/volumes_test.go | 2 +- tests/e2e/cluster_major_upgrade_test.go | 356 ++++++++++++++ tests/e2e/cluster_microservice_test.go | 19 +- tests/labels.go | 3 + tests/utils/postgres/postgres.go | 18 + 57 files changed, 2475 insertions(+), 194 deletions(-) create mode 100644 docs/src/postgres_upgrades.md create mode 100644 internal/cmd/manager/instance/upgrade/cmd.go create mode 100644 internal/cmd/manager/instance/upgrade/execute/cmd.go create mode 100644 internal/cmd/manager/instance/upgrade/prepare/cmd.go create mode 100644 internal/controller/cluster_image_test.go create mode 100644 pkg/reconciler/majorupgrade/doc.go create mode 100644 pkg/reconciler/majorupgrade/job.go create mode 100644 pkg/reconciler/majorupgrade/job_test.go create mode 100644 pkg/reconciler/majorupgrade/reconciler.go create mode 100644 pkg/reconciler/majorupgrade/reconciler_test.go create mode 100644 pkg/reconciler/majorupgrade/suite_test.go create mode 100644 
tests/e2e/cluster_major_upgrade_test.go diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f296d9fe6b..9f6d940f9d 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -22,8 +22,8 @@ on: description: > Feature Type (backup-restore, basic, cluster-metadata, declarative-databases, disruptive, importing-databases, maintenance, no-openshift, observability, operator, performance, plugin, - pod-scheduling, postgres-configuration, publication-subscription, recovery, replication, - security, self-healing, service-connectivity, smoke, snapshot, storage, tablespaces, upgrade) + pod-scheduling, postgres-configuration, postgres-major-upgrade, publication-subscription, recovery, + replication, security, self-healing, service-connectivity, smoke, snapshot, storage, tablespaces, upgrade) required: false log_level: description: 'Log level for operator (error, warning, info, debug(default), trace)' diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 40b69b523c..268856eaa1 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -961,6 +961,7 @@ lsn lt lz macOS +majorVersionUpgradeFromImage malcolm mallocs managedRoleSecretVersion diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index ab08c6eafd..0f36bcdb1f 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -394,26 +394,6 @@ func (cluster *Cluster) SetInContext(ctx context.Context) context.Context { return context.WithValue(ctx, utils.ContextKeyCluster, cluster) } -// GetImageName get the name of the image that should be used -// to create the pods -func (cluster *Cluster) GetImageName() string { - // If the image is specified in the status, use that one - // It should be there since the first reconciliation - if len(cluster.Status.Image) > 0 { - return cluster.Status.Image - } - - // Fallback to the information we have in the spec - if len(cluster.Spec.ImageName) > 0 { - return cluster.Spec.ImageName - } - - // TODO: check: does a scenario exists in which we do have an imageCatalog - // and no status.image? In that case this should probably error out, not - // returning the default image name. - return configuration.Current.PostgresImageName -} - // GetPostgresqlVersion gets the PostgreSQL image version detecting it from the // image name or from the ImageCatalogRef. 
// Example: @@ -421,13 +401,20 @@ func (cluster *Cluster) GetImageName() string { // ghcr.io/cloudnative-pg/postgresql:14.0 corresponds to version (14,0) // ghcr.io/cloudnative-pg/postgresql:13.2 corresponds to version (13,2) func (cluster *Cluster) GetPostgresqlVersion() (version.Data, error) { + if cluster.Status.Image != "" { + return version.FromTag(reference.New(cluster.Status.Image).Tag) + } + + if cluster.Spec.ImageName != "" { + return version.FromTag(reference.New(cluster.Spec.ImageName).Tag) + } + if cluster.Spec.ImageCatalogRef != nil { return version.FromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major)) } - image := cluster.GetImageName() - tag := reference.New(image).Tag - return version.FromTag(tag) + // Fallback for unit tests where a cluster is created without status or defaults + return version.FromTag(reference.New(configuration.Current.PostgresImageName).Tag) } // GetImagePullSecret get the name of the pull secret to use diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 55a0d7cfcb..c570c9b6b0 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -196,7 +196,6 @@ type ImageCatalogRef struct { // +kubebuilder:validation:XValidation:rule="self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'",message="Only image catalogs are supported" // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'postgresql.cnpg.io'",message="Only image catalogs are supported" corev1.TypedLocalObjectReference `json:",inline"` - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Major is immutable" // The major version of PostgreSQL we want to use from the ImageCatalog Major int `json:"major"` } @@ -587,7 +586,10 @@ const ( // PhaseUpgrade upgrade in process PhaseUpgrade = "Upgrading cluster" - // PhaseUpgradeDelayed is set when a cluster need to be upgraded + // PhaseMajorUpgrade major version upgrade in process + PhaseMajorUpgrade = "Upgrading Postgres major version" + + // PhaseUpgradeDelayed is set when a cluster needs to be upgraded, // but the operation is being delayed by the operator configuration PhaseUpgradeDelayed = "Cluster upgrade delayed" @@ -942,6 +944,11 @@ type ClusterStatus struct { // +optional Image string `json:"image,omitempty"` + // MajorVersionUpgradeFromImage contains the image that was + // running before the major version upgrade started. 
+ // +optional + MajorVersionUpgradeFromImage *string `json:"majorVersionUpgradeFromImage,omitempty"` + // PluginStatus is the status of the loaded plugins // +optional PluginStatus []PluginStatus `json:"pluginStatus,omitempty"` diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index b19b17b683..5943b91175 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -958,6 +958,11 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MajorVersionUpgradeFromImage != nil { + in, out := &in.MajorVersionUpgradeFromImage, &out.MajorVersionUpgradeFromImage + *out = new(string) + **out = **in + } if in.PluginStatus != nil { in, out := &in.PluginStatus, &out.PluginStatus *out = make([]PluginStatus, len(*in)) diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 38de43f033..611ca6b9e3 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3074,9 +3074,6 @@ spec: description: The major version of PostgreSQL we want to use from the ImageCatalog type: integer - x-kubernetes-validations: - - message: Major is immutable - rule: self == oldSelf name: description: Name is the name of resource being referenced type: string @@ -6180,6 +6177,11 @@ spec: description: ID of the latest generated node (used to avoid node name clashing) type: integer + majorVersionUpgradeFromImage: + description: |- + MajorVersionUpgradeFromImage contains the image that was + running before the major version upgrade started. + type: string managedRolesStatus: description: ManagedRolesStatus reports the state of the managed roles in the cluster diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md index 53d0ee747d..45ed0e5937 100644 --- a/contribute/e2e_testing_environment/README.md +++ b/contribute/e2e_testing_environment/README.md @@ -185,6 +185,7 @@ exported, it will select all medium test cases from the feature type provided. | `tablespaces` | | `publication-subscription` | | `declarative-databases` | +| `postgres-major-upgrade` | ex: ```shell diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 7375d2cd2f..2ef9d1cc85 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -55,6 +55,7 @@ nav: - connection_pooling.md - replica_cluster.md - kubernetes_upgrade.md + - postgres_upgrades.md - kubectl-plugin.md - failover.md - troubleshooting.md diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index afd71dee01..22a0a1d80d 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2265,6 +2265,14 @@ This field is reported when .spec.failoverDelay is populated or dur

Image contains the image name used by the pods

+majorVersionUpgradeFromImage
+string
+
+MajorVersionUpgradeFromImage contains the image that was
+running before the major version upgrade started.
+
pluginStatus
[]PluginStatus diff --git a/docs/src/e2e.md b/docs/src/e2e.md index be32a8a178..b2410fe456 100644 --- a/docs/src/e2e.md +++ b/docs/src/e2e.md @@ -133,3 +133,6 @@ and the following suite of E2E tests are performed on that cluster: * **Declarative databases** * Declarative creation of databases with default (retain) reclaim policy * Declarative creation of databases with delete reclaim policy + +* **Major version upgrade** + * Upgrade to the latest major version diff --git a/docs/src/index.md b/docs/src/index.md index 5225000f40..c1f8ef984a 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -118,6 +118,7 @@ Additionally, the community provides images for the [PostGIS extension](postgis. - Backups using volume snapshots (where supported by storage classes). - Full and Point-In-Time recovery from volume snapshots or object stores (via Barman Cloud plugin). - Backup from standby replicas to reduce primary workload impact. +- Offline in-place major upgrades of PostgreSQL - Offline and online import of PostgreSQL databases, including major upgrades: - *Offline Import*: Direct restore from existing databases. - *Online Import*: PostgreSQL native logical replication via the `Subscription` resource. diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index b261378490..b7eeb208dd 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -308,11 +308,8 @@ or a subsequent switchover of the cluster. ### Upgrade of the managed workload The operand can be upgraded using a declarative configuration approach as -part of changing the CR and, in particular, the `imageName` parameter. The -operator prevents major upgrades of PostgreSQL while making it possible to go -in both directions in terms of minor PostgreSQL releases within a major -version, enabling updates and rollbacks. - +part of changing the CR and, in particular, the `imageName` parameter. +This is normally initiated by security updates or Postgres minor version updates. In the presence of standby servers, the operator performs rolling updates starting from the replicas. It does this by dropping the existing pod and creating a new one with the new requested operand image that reuses the underlying storage. @@ -324,11 +321,24 @@ The setting to use depends on the business requirements, as the operation might generate some downtime for the applications. This downtime can range from a few seconds to minutes, based on the actual database workload. +### Offline In-Place Major Upgrades of PostgreSQL + +CloudNativePG supports declarative offline in-place major upgrades when a new +operand container image with a higher PostgreSQL major version is applied to a +cluster. The upgrade can be triggered by updating the image tag via the +`.spec.imageName` option or by using an image catalog to manage version +changes. During the upgrade, all cluster pods are shut down to ensure data +consistency. A new job is then created to validate the upgrade conditions, +execute `pg_upgrade`, and create new directories for `PGDATA`, WAL files, and +tablespaces if needed. Once the upgrade is complete, replicas are re-created. +Failed upgrades can be rolled back. + ### Display cluster availability status during upgrade At any time, convey the cluster's high availability status, for example, `Setting up primary`, `Creating a new replica`, `Cluster in healthy state`, -`Switchover in progress`, `Failing over`, and `Upgrading cluster`. 
+`Switchover in progress`, `Failing over`, `Upgrading cluster`, and `Upgrading +Postgres major version`. ## Level 3: Full lifecycle diff --git a/docs/src/postgres_upgrades.md b/docs/src/postgres_upgrades.md new file mode 100644 index 0000000000..dac7a53480 --- /dev/null +++ b/docs/src/postgres_upgrades.md @@ -0,0 +1,204 @@ +# PostgreSQL Upgrades + + +PostgreSQL upgrades fall into two categories: + +- [Minor version upgrades](#minor-version-upgrades) (e.g., from 17.0 to 17.1) +- [Major version upgrades](#major-version-upgrades) (e.g., from 16.x to 17.0) + +## Minor Version Upgrades + +PostgreSQL version numbers follow a `major.minor` format. For instance, in +version 17.1: + +- `17` is the major version +- `1` is the minor version + +Minor releases are fully compatible with earlier and later minor releases of +the same major version. They include bug fixes and security updates but do not +introduce changes to the internal storage format. +For example, PostgreSQL 17.1 is compatible with 17.0 and 17.4. + +### Upgrading a Minor Version in CloudNativePG + +To upgrade to a newer minor version, simply update the PostgreSQL container +image reference in your cluster definition, either directly or via image catalogs. +CloudNativePG will trigger a [rolling update of the cluster](rolling_update.md), +replacing each instance one by one, starting with the replicas. Once all +replicas have been updated, it will perform either a switchover or a restart of +the primary to complete the process. + +## Major Version Upgrades + +Major PostgreSQL releases introduce changes to the internal data storage +format, requiring a more structured upgrade process. + +CloudNativePG supports three methods for performing major upgrades: + +1. [Logical dump/restore](database_import.md) – Blue/green deployment, offline. +2. [Native logical replication](logical_replication.md#example-of-live-migration-and-major-postgres-upgrade-with-logical-replication) – Blue/green deployment, online. +3. Physical with `pg_upgrade` – In-place upgrade, offline (covered in the + ["Offline In-Place Major Upgrades" section](#offline-in-place-major-upgrades) below). + +Each method has trade-offs in terms of downtime, complexity, and data volume +handling. The best approach depends on your upgrade strategy and operational +constraints. + +!!! Important + We strongly recommend testing all methods in a controlled environment + before proceeding with a production upgrade. + +## Offline In-Place Major Upgrades + +CloudNativePG performs an **offline in-place major upgrade** when a new operand +container image with a higher PostgreSQL major version is declaratively +requested for a cluster. + +You can trigger the upgrade in one of two ways: + +- By updating the major version in the image tag via the `.spec.imageName` + option. +- Using an [image catalog](image_catalog.md) to manage version changes. + +For details on supported image tags, see +["Image Tag Requirements"](container_images.md#image-tag-requirements). + +!!! Warning + CloudNativePG is not responsible for PostgreSQL extensions. You must ensure + that extensions in the source PostgreSQL image are compatible with those in the + target image and that upgrade paths are supported. Thoroughly test the upgrade + process in advance to avoid unexpected issues. + The [extensions management feature](declarative_database_management.md#managing-extensions-in-a-database) + can help manage extension upgrades declaratively. + +### Upgrade Process + +1. Shuts down all cluster pods to ensure data consistency. 
+2. Records the previous PostgreSQL version in the cluster’s status under
+   `.status.majorVersionUpgradeFromImage`.
+3. Initiates a new upgrade job, which:
+   - Verifies that the binaries in the image and the data files align with a
+     major upgrade request.
+   - Creates new directories for `PGDATA` and, where applicable, WAL files
+     and tablespaces.
+   - Performs the upgrade using `pg_upgrade` with the `--link` option.
+   - Upon successful completion, replaces the original directories with their
+     upgraded counterparts.
+
+!!! Warning
+    During the upgrade process, the entire PostgreSQL cluster, including
+    replicas, is unavailable to applications. Ensure that your system can
+    tolerate this downtime before proceeding.
+
+!!! Warning
+    Performing an in-place upgrade is an exceptional operation that carries
+    inherent risks. It is strongly recommended to take a full backup of the
+    cluster before initiating the upgrade process.
+
+!!! Info
+    For detailed guidance on `pg_upgrade`, refer to the official
+    [PostgreSQL documentation](https://www.postgresql.org/docs/current/pgupgrade.html).
+
+### Post-Upgrade Actions
+
+If the upgrade is successful, CloudNativePG:
+
+- Destroys the PVCs of the replicas (if any).
+- Scales up the replicas as required.
+
+!!! Warning
+    Re-cloning replicas can be time-consuming, especially for very large
+    databases. Plan accordingly to accommodate potential delays. After
+    completing the upgrade, it is strongly recommended to take a full backup.
+    Existing backup data (namely base backups and WAL files) is only usable
+    with the previous PostgreSQL major version.
+
+!!! Warning
+    `pg_upgrade` doesn't transfer optimizer statistics. After the upgrade, you
+    may want to run `ANALYZE` on your databases to update them.
+
+If the upgrade fails, you must manually revert the major version change in the
+cluster's configuration and delete the upgrade job, as CloudNativePG cannot
+decide on its own whether to roll back.
+
+!!! Important
+    This process **protects your existing database from data loss**, as no
+    data is modified during the upgrade. If the upgrade fails, a rollback is
+    usually possible without having to perform a full recovery from a backup.
+    Ensure you monitor the process closely and take corrective action if
+    needed.
+
+### Example: Performing a Major Upgrade
+
+Consider the following PostgreSQL cluster running version 16:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  imageName: ghcr.io/cloudnative-pg/postgresql:16-minimal-bookworm
+  instances: 3
+  storage:
+    size: 1Gi
+```
+
+You can check the current PostgreSQL version using the following command:
+
+```sh
+kubectl cnpg psql cluster-example -- -qAt -c 'SELECT version()'
+```
+
+This will return output similar to:
+
+```console
+PostgreSQL 16.x ...
+```
+
+To upgrade the cluster to version 17, update the `imageName` field by changing
+the major version tag from `16` to `17`:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  imageName: ghcr.io/cloudnative-pg/postgresql:17-minimal-bookworm
+  instances: 3
+  storage:
+    size: 1Gi
+```
+
+### What Happens During the Upgrade
+
+1. Cluster shutdown – All cluster pods are terminated to ensure a consistent
+   upgrade.
+2. Upgrade job execution – A new job is created with the name of the primary
+   pod, appended with the suffix `-major-upgrade`. This job runs `pg_upgrade`
+   on the primary’s persistent volume group.
+3.
Post-upgrade steps: + - The PVC groups of the replicas (`cluster-example-2` and + `cluster-example-3`) are removed. + - The primary pod is restarted. + - Two new replicas (`cluster-example-4` and `cluster-example-5`) are + re-cloned from the upgraded primary. + +Once the upgrade is complete, you can verify the new major version by running +the same command: + +```sh +kubectl cnpg psql cluster-example -- -qAt -c 'SELECT version()' +``` + +This should now return output similar to: + +```console +PostgreSQL 17.x ... +``` + +You can now update the statistics by running `ANALYZE` on the `app` database: + +```sh +kubectl cnpg psql cluster-example -- app -c 'ANALYZE' +``` diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 69e24971ad..426cb56cf9 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -23,6 +23,15 @@ on the release branch in GitHub. ### Features: +- **Declarative Offline In-Place Major Upgrades of PostgreSQL**: Introduced + support for offline in-place major upgrades when a new operand container + image with a higher PostgreSQL major version is applied to a cluster. During + the upgrade, all cluster pods are shut down to ensure data consistency. A new + job is created to validate upgrade conditions, run `pg_upgrade`, and set up new + directories for `PGDATA`, WAL files, and tablespaces as needed. Once the + upgrade is complete, replicas are re-created. Failed upgrades can be rolled + back declaratively. (#6664) + - **Improved Startup and Readiness Probes for Replicas**: Enhanced support for Kubernetes startup and readiness probes in PostgreSQL instances, providing greater control over replicas based on the streaming lag. (#6623) @@ -31,9 +40,6 @@ on the release branch in GitHub. `extensions` and `schemas` stanzas in the Database resource to declaratively create, modify, and drop PostgreSQL extensions and schemas within a database. 
(#7062) -- **MAIN FEATURE #1**: short description -- **MAIN FEATURE #2**: short description - ### Enhancements: - Implemented the `cnpg.io/validation` annotation, allowing users to disable diff --git a/docs/src/samples/cluster-example.yaml b/docs/src/samples/cluster-example.yaml index fb331362f5..924f62bf25 100644 --- a/docs/src/samples/cluster-example.yaml +++ b/docs/src/samples/cluster-example.yaml @@ -4,6 +4,8 @@ metadata: name: cluster-example spec: instances: 3 - + imageName: ghcr.io/cloudnative-pg/postgresql:13 + storage: size: 1Gi + diff --git a/internal/cmd/manager/instance/cmd.go b/internal/cmd/manager/instance/cmd.go index 3a45f490ba..62ac1e068f 100644 --- a/internal/cmd/manager/instance/cmd.go +++ b/internal/cmd/manager/instance/cmd.go @@ -33,6 +33,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/restoresnapshot" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/run" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/status" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/upgrade" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) @@ -56,6 +57,7 @@ func NewCmd() *cobra.Command { cmd.AddCommand(pgbasebackup.NewCmd()) cmd.AddCommand(restore.NewCmd()) cmd.AddCommand(restoresnapshot.NewCmd()) + cmd.AddCommand(upgrade.NewCmd()) return cmd } diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go index 8b699cf442..d3f7487984 100644 --- a/internal/cmd/manager/instance/pgbasebackup/cmd.go +++ b/internal/cmd/manager/instance/pgbasebackup/cmd.go @@ -138,7 +138,7 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { if err != nil { contextLogger.Warning( "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", - "imageName", cluster.GetImageName(), + "imageName", cluster.Status.Image, "err", err) } else if pgVersion.Major() >= 12 { // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. diff --git a/internal/cmd/manager/instance/upgrade/cmd.go b/internal/cmd/manager/instance/upgrade/cmd.go new file mode 100644 index 0000000000..6275e01445 --- /dev/null +++ b/internal/cmd/manager/instance/upgrade/cmd.go @@ -0,0 +1,40 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package upgrade implements the "instance upgrade" subcommand of the operator +package upgrade + +import ( + "github.com/spf13/cobra" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/upgrade/execute" + "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/upgrade/prepare" +) + +// NewCmd creates the "instance upgrade" subcommand +func NewCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "upgrade", + } + + cmd.AddCommand(prepare.NewCmd()) + cmd.AddCommand(execute.NewCmd()) + + return cmd +} diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go new file mode 100644 index 0000000000..40c192a705 --- /dev/null +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -0,0 +1,434 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package execute implements the "instance upgrade execute" subcommand +package execute + +import ( + "context" + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "time" + + "github.com/blang/semver" + "github.com/cloudnative-pg/machinery/pkg/env" + "github.com/cloudnative-pg/machinery/pkg/execlog" + "github.com/cloudnative-pg/machinery/pkg/fileutils" + "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/spf13/cobra" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller" + "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" + "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" + "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" +) + +// NewCmd creates the cobra command +func NewCmd() *cobra.Command { + var pgData string + var podName string + var clusterName string + var namespace string + var pgUpgrade string + + cmd := &cobra.Command{ + Use: "execute [options]", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + oldBinDirFile := args[0] + ctx := cmd.Context() + + // The fields in the instance are needed to correctly + // download the secret containing the TLS + // certificates + instance := postgres.NewInstance(). + WithNamespace(namespace). + WithPodName(podName). 
+				WithClusterName(clusterName)
+
+			// Read the old bindir from the passed file
+			oldBinDirBytes, err := fileutils.ReadFile(oldBinDirFile)
+			if err != nil {
+				return fmt.Errorf("error while reading the old bindir: %w", err)
+			}
+
+			oldBinDir := strings.TrimSpace(string(oldBinDirBytes))
+			return upgradeSubCommand(ctx, instance, pgData, oldBinDir, pgUpgrade)
+		},
+		PostRunE: func(cmd *cobra.Command, _ []string) error {
+			if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil {
+				return err
+			}
+
+			return linkerd.TryInvokeShutdownEndpoint(cmd.Context())
+		},
+	}
+
+	cmd.Flags().StringVar(&pgData, "pg-data", os.Getenv("PGDATA"), "The PGDATA to be created")
+	cmd.Flags().StringVar(&podName, "pod-name", os.Getenv("POD_NAME"), "The name of this pod, to "+
+		"be checked against the cluster state")
+	cmd.Flags().StringVar(&namespace, "namespace", os.Getenv("NAMESPACE"), "The namespace of "+
+		"the cluster and of the Pod in k8s")
+	cmd.Flags().StringVar(&clusterName, "cluster-name", os.Getenv("CLUSTER_NAME"), "The name of "+
+		"the current cluster in k8s, used to download TLS certificates")
+	cmd.Flags().StringVar(&pgUpgrade, "pg-upgrade", env.GetOrDefault("PG_UPGRADE", "pg_upgrade"),
+		`The path of "pg_upgrade" executable. Defaults to "pg_upgrade".`)
+
+	return cmd
+}
+
+func upgradeSubCommand(
+	ctx context.Context,
+	instance *postgres.Instance,
+	pgData string,
+	oldBinDir string,
+	pgUpgrade string,
+) error {
+	contextLogger := log.FromContext(ctx)
+
+	client, err := management.NewControllerRuntimeClient()
+	if err != nil {
+		contextLogger.Error(err, "Error creating Kubernetes client")
+		return err
+	}
+
+	clusterObjectKey := ctrl.ObjectKey{Name: instance.GetClusterName(), Namespace: instance.GetNamespaceName()}
+	if err = management.WaitForGetClusterWithClient(ctx, client, clusterObjectKey); err != nil {
+		return err
+	}
+
+	// Create a fake reconciler just to download the secrets and
+	// the cluster definition
+	metricExporter := metricserver.NewExporter(instance)
+	reconciler := controller.NewInstanceReconciler(instance, client, metricExporter)
+
+	// Download the cluster definition from the API server
+	var cluster apiv1.Cluster
+	if err := reconciler.GetClient().Get(ctx, clusterObjectKey, &cluster); err != nil {
+		contextLogger.Error(err, "Error while getting cluster")
+		return err
+	}
+
+	// Since we're directly using the reconciler here, we cannot
+	// tell whether the secrets were correctly downloaded or not.
+	// If they were, the following "pg_upgrade" command will work; if
+	// they weren't, "pg_upgrade" will fail, complaining that the
+	// cryptographic material is not available.
+	reconciler.RefreshSecrets(ctx, &cluster)
+
+	if err := reconciler.ReconcileWalStorage(ctx); err != nil {
+		return fmt.Errorf("error while reconciling the WAL storage: %w", err)
+	}
+
+	if err := fileutils.EnsureDirectoryExists(postgres.GetSocketDir()); err != nil {
+		return fmt.Errorf("while creating socket directory: %w", err)
+	}
+
+	contextLogger.Info("Searching for failed upgrades")
+
+	var failedDirs []string
+	for _, dir := range []string{specs.PgDataPath, specs.PgWalVolumePgWalPath} {
+		matches, err := filepath.Glob(dir + "*.failed_*")
+		if err != nil {
+			return fmt.Errorf("error matching paths: %w", err)
+		}
+		failedDirs = append(failedDirs, matches...)
+ } + if len(failedDirs) > 0 { + return fmt.Errorf("found failed upgrade directories: %v", failedDirs) + } + + contextLogger.Info("Starting the upgrade process") + + newDataDir := fmt.Sprintf("%s-new", specs.PgDataPath) + var newWalDir *string + if cluster.ShouldCreateWalArchiveVolume() { + newWalDir = ptr.To(fmt.Sprintf("%s-new", specs.PgWalVolumePgWalPath)) + } + + contextLogger.Info("Ensuring the new data directory does not exist", "directory", newDataDir) + + if err := os.RemoveAll(newDataDir); err != nil { + return fmt.Errorf("failed to remove the directory: %w", err) + } + + if newWalDir != nil { + contextLogger.Info("Ensuring the new pg_wal directory does not exist", "directory", *newWalDir) + if err := os.RemoveAll(*newWalDir); err != nil { + return fmt.Errorf("failed to remove the directory: %w", err) + } + } + + contextLogger.Info("Creating data directory", "directory", newDataDir) + if err := runInitDB(newDataDir, newWalDir); err != nil { + return fmt.Errorf("error while creating the data directory: %w", err) + } + + contextLogger.Info("Preparing configuration files", "directory", newDataDir) + if err := prepareConfigurationFiles(ctx, cluster, newDataDir); err != nil { + return err + } + + contextLogger.Info("Checking if we have anything to update") + // Read pg_version from both the old and new data directories + oldVersion, err := utils.GetPgdataVersion(pgData) + if err != nil { + return fmt.Errorf("error while reading the old version: %w", err) + } + + newVersion, err := utils.GetPgdataVersion(newDataDir) + if err != nil { + return fmt.Errorf("error while reading the new version: %w", err) + } + + if oldVersion.Equals(newVersion) { + contextLogger.Info("Versions are the same, no need to upgrade") + if err := os.RemoveAll(newDataDir); err != nil { + return fmt.Errorf("failed to remove the directory: %w", err) + } + return nil + } + + // We need to make sure that the permissions are the right ones + // in some systems they may be messed up even if we fix them before + _ = fileutils.EnsurePgDataPerms(pgData) + _ = fileutils.EnsurePgDataPerms(newDataDir) + + contextLogger.Info("Running pg_upgrade") + + if err := runPgUpgrade(pgData, pgUpgrade, newDataDir, oldBinDir); err != nil { + // TODO: in case of failures we should dump the content of the pg_upgrade logs + return fmt.Errorf("error while running pg_upgrade: %w", err) + } + + err = moveDataInPlace(ctx, pgData, oldVersion, newDataDir, newWalDir) + if err != nil { + contextLogger.Error(err, + "Error while moving the data in place, saving the new data directory to avoid data loss") + + suffixTimestamp := fileutils.FormatFriendlyTimestamp(time.Now()) + + dirToBeSaved := []string{ + newDataDir, + pgData + ".old", + } + if newWalDir != nil { + dirToBeSaved = append(dirToBeSaved, + *newWalDir, + specs.PgWalVolumePgWalPath+".old", + ) + } + + for _, dir := range dirToBeSaved { + failedPgDataName := fmt.Sprintf("%s.failed_%s", dir, suffixTimestamp) + if errInner := moveDirIfExists(ctx, dir, failedPgDataName); errInner != nil { + contextLogger.Error(errInner, "Error while saving a directory after a failure", "dir", dir) + } + } + + return err + } + + contextLogger.Info("Upgrade completed successfully") + + return nil +} + +func runInitDB(destDir string, walDir *string) error { + // Invoke initdb to generate a data directory + options := []string{ + "--username", + "postgres", + "-D", + destDir, + } + + if walDir != nil { + options = append(options, "--waldir", *walDir) + } + + // Certain CSI drivers may add setgid permissions on newly 
created folders. + // A default umask is set to attempt to avoid this, by revoking group/other + // permission bits on the PGDATA + _ = compatibility.Umask(0o077) + + initdbCmd := exec.Command(constants.InitdbName, options...) // #nosec + if err := execlog.RunStreaming(initdbCmd, constants.InitdbName); err != nil { + return err + } + + return nil +} + +func prepareConfigurationFiles(ctx context.Context, cluster apiv1.Cluster, destDir string) error { + // Always read the custom and override configuration files created by the operator + _, err := configfile.EnsureIncludes(path.Join(destDir, "postgresql.conf"), + constants.PostgresqlCustomConfigurationFile, + constants.PostgresqlOverrideConfigurationFile, + ) + if err != nil { + return fmt.Errorf("appending inclusion directives to postgresql.conf file resulted in an error: %w", err) + } + + newInstance := postgres.Instance{PgData: destDir} + if _, err := newInstance.RefreshConfigurationFilesFromCluster(ctx, &cluster, false); err != nil { + return fmt.Errorf("error while creating the configuration files for new datadir %q: %w", destDir, err) + } + + if _, err := newInstance.RefreshPGIdent(ctx, nil); err != nil { + return fmt.Errorf("error while creating the pg_ident.conf file for new datadir %q: %w", destDir, err) + } + + // Create a stub for the configuration file + // to be filled during the real startup of this instance + err = fileutils.CreateEmptyFile(path.Join(destDir, constants.PostgresqlOverrideConfigurationFile)) + if err != nil { + return fmt.Errorf("creating the operator managed configuration file '%v' resulted in an error: %w", + constants.PostgresqlOverrideConfigurationFile, err) + } + + return nil +} + +func runPgUpgrade(oldDataDir string, pgUpgrade string, newDataDir string, oldBinDir string) error { + // Run the pg_upgrade command + cmd := exec.Command(pgUpgrade, + "--link", + "--username", "postgres", + "--old-bindir", oldBinDir, + "--old-datadir", oldDataDir, + "--new-datadir", newDataDir, + ) // #nosec + cmd.Dir = newDataDir + if err := execlog.RunStreaming(cmd, path.Base(pgUpgrade)); err != nil { + return fmt.Errorf("error while running %q: %w", cmd, err) + } + + return nil +} + +func moveDataInPlace( + ctx context.Context, + pgData string, + oldVersion semver.Version, + newDataDir string, + newWalDir *string, +) error { + contextLogger := log.FromContext(ctx) + + contextLogger.Info("Cleaning up the new data directory") + if err := os.RemoveAll(path.Join(newDataDir, "delete_old_cluster.sh")); err != nil { + return fmt.Errorf("error while removing the delete_old_cluster.sh script: %w", err) + } + + contextLogger.Info("Moving the old data directory") + if err := os.Rename(pgData, pgData+".old"); err != nil { + return fmt.Errorf("error while moving the old data directory: %w", err) + } + + if newWalDir != nil { + contextLogger.Info("Moving the old pg_wal directory") + if err := os.Rename(specs.PgWalVolumePgWalPath, specs.PgWalVolumePgWalPath+".old"); err != nil { + return fmt.Errorf("error while moving the old pg_wal directory: %w", err) + } + } + + contextLogger.Info("Moving the new data directory in place") + if err := os.Rename(newDataDir, pgData); err != nil { + return fmt.Errorf("error while moving the new data directory: %w", err) + } + + if newWalDir != nil { + contextLogger.Info("Moving the new pg_wal directory in place") + if err := os.Rename(*newWalDir, specs.PgWalVolumePgWalPath); err != nil { + return fmt.Errorf("error while moving the pg_wal directory content: %w", err) + } + if err := 
fileutils.RemoveFile(specs.PgWalPath); err != nil { + return fmt.Errorf("error while removing the symlink to pg_wal: %w", err) + } + if err := os.Symlink(specs.PgWalVolumePgWalPath, specs.PgWalPath); err != nil { + return fmt.Errorf("error while creating the symlink to pg_wal: %w", err) + } + } + + contextLogger.Info("Removing the old data directory and pg_wal directory") + if err := os.RemoveAll(pgData + ".old"); err != nil { + return fmt.Errorf("error while removing the old data directory: %w", err) + } + if err := os.RemoveAll(specs.PgWalVolumePgWalPath + ".old"); err != nil { + return fmt.Errorf("error while removing the old pg_wal directory: %w", err) + } + + contextLogger.Info("Cleaning up the previous version directory from tablespaces") + if err := removeMatchingPaths(ctx, + path.Join(pgData, "pg_tblspc", "*", fmt.Sprintf("PG_%v_*", oldVersion.Major))); err != nil { + return fmt.Errorf("error while removing the old tablespaces directories: %w", err) + } + + return nil +} + +func removeMatchingPaths(ctx context.Context, pattern string) error { + contextLogger := log.FromContext(ctx) + contextLogger.Info("Removing matching paths", "pattern", pattern) + + // Find all matching paths + matches, err := filepath.Glob(pattern) + if err != nil { + return fmt.Errorf("error matching paths: %w", err) + } + + // Iterate through the matches and remove each + for _, match := range matches { + contextLogger.Info("Removing path", "path", match) + err := os.RemoveAll(match) + if err != nil { + return fmt.Errorf("failed to remove %s: %w", match, err) + } + } + + return nil +} + +func moveDirIfExists(ctx context.Context, oldPath string, newPath string) error { + contextLogger := log.FromContext(ctx) + if _, errExists := os.Stat(oldPath); !os.IsNotExist(errExists) { + contextLogger.Info("Moving directory", "oldPath", oldPath, "newPath", newPath) + err := os.Rename(oldPath, newPath) + if err != nil { + return err + } + } + + return nil +} diff --git a/internal/cmd/manager/instance/upgrade/prepare/cmd.go b/internal/cmd/manager/instance/upgrade/prepare/cmd.go new file mode 100644 index 0000000000..b5a102a960 --- /dev/null +++ b/internal/cmd/manager/instance/upgrade/prepare/cmd.go @@ -0,0 +1,126 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+// Package prepare implements the "instance upgrade prepare" subcommand
+package prepare
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+
+	"github.com/cloudnative-pg/machinery/pkg/env"
+	"github.com/cloudnative-pg/machinery/pkg/fileutils"
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"github.com/cloudnative-pg/machinery/pkg/postgres/pgconfig"
+	"github.com/spf13/cobra"
+)
+
+// NewCmd creates the cobra command
+func NewCmd() *cobra.Command {
+	var pgConfig string
+
+	cmd := cobra.Command{
+		Use:  "prepare [target]",
+		Args: cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			contextLogger := log.FromContext(cmd.Context())
+			dest := args[0]
+
+			if err := copyPostgresInstallation(cmd.Context(), pgConfig, dest); err != nil {
+				contextLogger.Error(err, "Failed to copy the PostgreSQL installation")
+				return err
+			}
+
+			return nil
+		},
+	}
+
+	cmd.Flags().StringVar(&pgConfig, "pg-config", env.GetOrDefault("PG_CONFIG", "pg_config"),
+		`The path of "pg_config" executable. Defaults to "pg_config".`)
+
+	return &cmd
+}
+
+// copyPostgresInstallation replicates the PostgreSQL installation to the specified destination directory
+// for use by the pg_upgrade command as the old binary directory.
+//
+// Steps performed:
+// 1. Removes the existing destination directory if it exists.
+// 2. Retrieves the PostgreSQL binary, library, and shared directories using pg_config.
+// 3. Creates the corresponding directories in the destination path.
+// 4. Copies the contents of the PostgreSQL directories to the destination.
+// 5. Creates a bindir.txt file in the destination directory with the path to the binary directory.
+func copyPostgresInstallation(ctx context.Context, pgConfig string, dest string) error {
+	contextLogger := log.FromContext(ctx)
+
+	dest = path.Clean(dest)
+
+	contextLogger.Info("Copying the PostgreSQL installation to the destination", "destination", dest)
+
+	contextLogger.Info("Removing the destination directory", "directory", dest)
+	if err := os.RemoveAll(dest); err != nil {
+		return fmt.Errorf("failed to remove the directory: %w", err)
+	}
+
+	contextLogger.Info("Creating the destination directory", "directory", dest)
+	if err := os.MkdirAll(dest, 0o750); err != nil {
+		return fmt.Errorf("failed to create the directory: %w", err)
+	}
+
+	copyLocations := []pgconfig.ConfigurationParameter{pgconfig.BinDir, pgconfig.PkgLibDir, pgconfig.ShareDir}
+	for _, config := range copyLocations {
+		sourceDir, err := pgconfig.GetConfigurationParameter(pgConfig, config)
+		if err != nil {
+			return err
+		}
+		sourceDir = path.Clean(sourceDir)
+		destDir := path.Clean(path.Join(dest, sourceDir))
+
+		if config == pgconfig.BinDir {
+			destFile := path.Join(dest, "bindir.txt")
+			contextLogger.Info("Creating the bindir.txt file", "file", destFile)
+			if _, err := fileutils.WriteStringToFile(destFile, fmt.Sprintf("%s\n", destDir)); err != nil {
+				return fmt.Errorf("failed to write the %q file: %w", destFile, err)
+			}
+		}
+
+		contextLogger.Info("Creating the directory", "directory", destDir)
+		if err := os.MkdirAll(destDir, 0o750); err != nil {
+			return fmt.Errorf("failed to create the directory: %w", err)
+		}
+
+		contextLogger.Info("Copying the files", "source", sourceDir, "destination", destDir)
+
+		// We use "cp" instead of os.CopyFS because the latter doesn't
+		// support symbolic links as of Go 1.24, and we don't want to
+		// include any other dependencies in the project or reinvent
+		// the wheel by re-implementing the copy ourselves.
+ // + // This should be re-evaluated in the future and the + // requirement to have "cp" in the image should be removed. + if err := exec.Command("cp", "-a", sourceDir+"/.", destDir).Run(); err != nil { //nolint:gosec + return fmt.Errorf("failed to copy the files: %w", err) + } + } + + return nil +} diff --git a/internal/cmd/plugin/pgbench/pgbench.go b/internal/cmd/plugin/pgbench/pgbench.go index 6b5fbb81c0..a9ec47f5af 100644 --- a/internal/cmd/plugin/pgbench/pgbench.go +++ b/internal/cmd/plugin/pgbench/pgbench.go @@ -154,7 +154,7 @@ func (cmd *pgBenchRun) buildJob(cluster *apiv1.Cluster) *batchv1.Job { Containers: []corev1.Container{ { Name: "pgbench", - Image: cluster.GetImageName(), + Image: cluster.Status.Image, ImagePullPolicy: corev1.PullAlways, Env: cmd.buildEnvVariables(), Command: []string{pgBenchKeyWord}, diff --git a/internal/cmd/plugin/promote/promote.go b/internal/cmd/plugin/promote/promote.go index fde8a61dcf..4f181d71e4 100644 --- a/internal/cmd/plugin/promote/promote.go +++ b/internal/cmd/plugin/promote/promote.go @@ -67,7 +67,7 @@ func Promote(ctx context.Context, cli client.Client, } if err := status.PatchWithOptimisticLock(ctx, cli, &cluster, reconcileTargetPrimaryFunc, - status.SetClusterReadyConditionTX, + status.SetClusterReadyCondition, ); err != nil { return err } diff --git a/internal/cmd/plugin/restart/restart.go b/internal/cmd/plugin/restart/restart.go index da50c19a5e..639349417a 100644 --- a/internal/cmd/plugin/restart/restart.go +++ b/internal/cmd/plugin/restart/restart.go @@ -75,8 +75,8 @@ func instanceRestart(ctx context.Context, clusterName, node string) error { ctx, plugin.Client, &cluster, - status.SetPhaseTX(apiv1.PhaseInplacePrimaryRestart, "Requested by the user"), - status.SetClusterReadyConditionTX, + status.SetPhase(apiv1.PhaseInplacePrimaryRestart, "Requested by the user"), + status.SetClusterReadyCondition, ); err != nil { return fmt.Errorf("while requesting restart on primary POD for cluster %v: %w", clusterName, err) } diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 3c83061eba..4f8300d6e2 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -263,7 +263,7 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClien if primaryInstanceStatus != nil { summary.AddLine("System ID:", primaryInstanceStatus.SystemID) } - summary.AddLine("PostgreSQL Image:", cluster.GetImageName()) + summary.AddLine("PostgreSQL Image:", cluster.Status.Image) if cluster.IsReplica() { summary.AddLine("Designated primary:", primaryInstance) summary.AddLine("Source cluster: ", cluster.Spec.ReplicaCluster.Source) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index c5613d33ad..6427391f79 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -57,6 +57,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/hibernation" instanceReconciler "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance" + "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/majorupgrade" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/replicaclusterswitch" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" @@ -732,6 +733,20 @@ func (r *ClusterReconciler) reconcileResources( return res, err } + // In-place Postgres 
major version upgrades + if result, err := majorupgrade.Reconcile( + ctx, + r.Client, + cluster, + resources.instances.Items, + resources.pvcs.Items, + resources.jobs.Items, + ); err != nil { + return ctrl.Result{}, fmt.Errorf("cannot reconcile in-place major version upgrades: %w", err) + } else if result != nil { + return *result, err + } + // Reconcile Pods if res, err := r.reconcilePods(ctx, cluster, resources, instancesStatus); !res.IsZero() || err != nil { return res, err @@ -1099,6 +1114,19 @@ func (r *ClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manag Complete(r) } +// jobOwnerIndexFunc maps a job definition to its owning cluster and +// is used as an index function to speed up the lookup of jobs +// created by the operator. +func jobOwnerIndexFunc(rawObj client.Object) []string { + job := rawObj.(*batchv1.Job) + + if ownerName, ok := IsOwnedByCluster(job); ok { + return []string{ownerName} + } + + return nil +} + // createFieldIndexes creates the indexes needed by this controller func (r *ClusterReconciler) createFieldIndexes(ctx context.Context, mgr ctrl.Manager) error { // Create a new indexed field on Pods. This field will be used to easily @@ -1203,15 +1231,7 @@ func (r *ClusterReconciler) createFieldIndexes(ctx context.Context, mgr ctrl.Man return mgr.GetFieldIndexer().IndexField( ctx, &batchv1.Job{}, - jobOwnerKey, func(rawObj client.Object) []string { - job := rawObj.(*batchv1.Job) - - if ownerName, ok := IsOwnedByCluster(job); ok { - return []string{ownerName} - } - - return nil - }) + jobOwnerKey, jobOwnerIndexFunc) } // IsOwnedByCluster checks that an object is owned by a Cluster and returns diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index 63c16b85c3..d838adb756 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -23,7 +23,9 @@ import ( "context" "fmt" + "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -34,32 +36,89 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" ) -// reconcileImage sets the image inside the status, to be used by the following -// functions of the reconciler loop +// reconcileImage processes the image request, executes it, and stores +// the result in the .status.image field. If the user requested a +// major version upgrade, the current image is saved in the +// .status.majorVersionUpgradeFromImage field. This allows for +// reverting the upgrade if it doesn't complete successfully. func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.Cluster) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx) - oldCluster := cluster.DeepCopy() + image, err := r.getConfiguredImage(ctx, cluster) + if err != nil { + return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, err.Error()) + } + + currentDataImage := cluster.Status.Image + if cluster.Status.MajorVersionUpgradeFromImage != nil { + currentDataImage = *cluster.Status.MajorVersionUpgradeFromImage + } + + // Case 1: the cluster is being initialized and there is still no + // running image. In this case, we should simply apply the image selected by the user. 
+ if currentDataImage == "" { + return nil, status.PatchWithOptimisticLock( + ctx, + r.Client, + cluster, + status.SetImage(image), + status.SetMajorVersionUpgradeFromImage(nil), + ) + } + + // Case 2: there's a running image. The code checks if the user selected + // an image of the same major version or if a change in the major + // version has been requested. + var majorVersionUpgradeFromImage *string + currentVersion, err := version.FromTag(reference.New(currentDataImage).Tag) + if err != nil { + contextLogger.Error(err, "While parsing current major versions") + return nil, err + } + + requestedVersion, err := version.FromTag(reference.New(image).Tag) + if err != nil { + contextLogger.Error(err, "While parsing requested major versions") + return nil, err + } + + switch { + case currentVersion.Major() < requestedVersion.Major(): + // The current major version is older than the requested one + majorVersionUpgradeFromImage = ¤tDataImage + case currentVersion.Major() == requestedVersion.Major(): + // The major versions are the same, cancel the update + majorVersionUpgradeFromImage = nil + default: + contextLogger.Info( + "Cannot downgrade the PostgreSQL major version. Forcing the current image.", + "currentImage", currentDataImage, + "requestedImage", image) + image = currentDataImage + } + + return nil, status.PatchWithOptimisticLock( + ctx, + r.Client, + cluster, + status.SetImage(image), + status.SetMajorVersionUpgradeFromImage(majorVersionUpgradeFromImage), + ) +} + +func (r *ClusterReconciler) getConfiguredImage(ctx context.Context, cluster *apiv1.Cluster) (string, error) { + contextLogger := log.FromContext(ctx) // If ImageName is defined and different from the current image in the status, we update the status - if cluster.Spec.ImageName != "" && cluster.Status.Image != cluster.Spec.ImageName { - cluster.Status.Image = cluster.Spec.ImageName - if err := r.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster)); err != nil { - contextLogger.Error( - err, - "While patching cluster status to set the image name from the cluster Spec", - "imageName", cluster.Status.Image, - ) - return nil, err - } - return nil, nil + if cluster.Spec.ImageName != "" { + return cluster.Spec.ImageName, nil } - // If ImageName was defined, we rely on what the user requested if cluster.Spec.ImageCatalogRef == nil { - return nil, nil + return "", fmt.Errorf("ImageName is not defined and no catalog is referenced") } contextLogger = contextLogger.WithValues("catalogRef", cluster.Spec.ImageCatalogRef) @@ -74,15 +133,13 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C catalog = &apiv1.ImageCatalog{} default: contextLogger.Info("Unknown catalog kind") - return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, - "Invalid image catalog type") + return "", fmt.Errorf("invalid image catalog type") } apiGroup := cluster.Spec.ImageCatalogRef.APIGroup if apiGroup == nil || *apiGroup != apiv1.SchemeGroupVersion.Group { contextLogger.Info("Unknown catalog group") - return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, - "Invalid image catalog group") + return "", fmt.Errorf("invalid image catalog group") } // Get the referenced catalog @@ -92,10 +149,11 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C if apierrs.IsNotFound(err) { r.Recorder.Eventf(cluster, "Warning", "DiscoverImage", "Cannot get %v/%v", catalogKind, catalogName) - return &ctrl.Result{}, nil + contextLogger.Info("catalog not found", 
"catalogKind", catalogKind, "catalogName", catalogName) + return "", fmt.Errorf("catalog %s/%s not found", catalogKind, catalogName) } - return nil, err + return "", err } // Catalog found, we try to find the image for the major version @@ -111,25 +169,10 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C catalogName) contextLogger.Info("cannot find requested major version", "requestedMajorVersion", requestedMajorVersion) - return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, - "Selected major version is not available in the catalog") - } - - // If the image is different, we set it into the cluster status - if cluster.Status.Image != catalogImage { - cluster.Status.Image = catalogImage - patch := client.MergeFrom(oldCluster) - if err := r.Status().Patch(ctx, cluster, patch); err != nil { - patchBytes, _ := patch.Data(cluster) - contextLogger.Error( - err, - "While patching cluster status to set the image name from the catalog", - "patch", string(patchBytes)) - return nil, err - } + return "", fmt.Errorf("selected major version is not available in the catalog") } - return nil, nil + return catalogImage, nil } func (r *ClusterReconciler) getClustersForImageCatalogsToClustersMapper( diff --git a/internal/controller/cluster_image_test.go b/internal/controller/cluster_image_test.go new file mode 100644 index 0000000000..ec69797b25 --- /dev/null +++ b/internal/controller/cluster_image_test.go @@ -0,0 +1,214 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func newFakeReconcilerFor(cluster *apiv1.Cluster, catalog *apiv1.ImageCatalog) *ClusterReconciler { + fakeClient := fake.NewClientBuilder(). + WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithRuntimeObjects(cluster). + WithStatusSubresource(cluster). + Build() + + if catalog != nil { + _ = fakeClient.Create(context.Background(), catalog) + } + + return &ClusterReconciler{ + Client: fakeClient, + Recorder: record.NewFakeRecorder(10), + } +} + +var _ = Describe("Cluster image detection", func() { + It("gets the image from .spec.imageName", func(ctx SpecContext) { + // This is a simple situation, having a cluster with an + // explicit image. The image should be directly set into the + // status and the reconciliation loop can proceed. + // No major version upgrade have been requested. 
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cluster-example",
+				Namespace: "default",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageName: "postgres:15.2",
+			},
+		}
+		r := newFakeReconcilerFor(cluster, nil)
+
+		result, err := r.reconcileImage(ctx, cluster)
+		Expect(err).Error().ShouldNot(HaveOccurred())
+		Expect(result).To(BeNil())
+
+		Expect(cluster.Status.Image).To(Equal("postgres:15.2"))
+		Expect(cluster.Status.MajorVersionUpgradeFromImage).To(BeNil())
+	})
+
+	It("gets the image from an image catalog", func(ctx SpecContext) {
+		// This is slightly more complex, having an image catalog reference
+		// instead of an explicit image name. No major version upgrade has
+		// been requested, so the reconciliation loop can proceed correctly
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cluster-example",
+				Namespace: "default",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageCatalogRef: &apiv1.ImageCatalogRef{
+					TypedLocalObjectReference: corev1.TypedLocalObjectReference{
+						Name:     "catalog",
+						Kind:     "ImageCatalog",
+						APIGroup: &apiv1.SchemeGroupVersion.Group,
+					},
+					Major: 15,
+				},
+			},
+		}
+		catalog := &apiv1.ImageCatalog{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "catalog",
+				Namespace: "default",
+			},
+			Spec: apiv1.ImageCatalogSpec{
+				Images: []apiv1.CatalogImage{
+					{
+						Image: "postgres:15.2",
+						Major: 15,
+					},
+				},
+			},
+		}
+
+		r := newFakeReconcilerFor(cluster, catalog)
+		result, err := r.reconcileImage(ctx, cluster)
+		Expect(err).Error().ShouldNot(HaveOccurred())
+		Expect(result).To(BeNil())
+
+		Expect(cluster.Status.Image).To(Equal("postgres:15.2"))
+		Expect(cluster.Status.MajorVersionUpgradeFromImage).To(BeNil())
+	})
+
+	It("gets the name from the image catalog, but the catalog is incomplete", func(ctx SpecContext) {
+		// As a variant of the previous case, the catalog may be
+		// incomplete and have no image for the selected major. When
+		// this happens, the reconciliation loop should be stopped and
+		// the proper phase should be set.
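+		// The catalog below only publishes an image for major version 11,
+		// while the cluster requests major version 15, so no match can be found.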
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cluster-example",
+				Namespace: "default",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageCatalogRef: &apiv1.ImageCatalogRef{
+					TypedLocalObjectReference: corev1.TypedLocalObjectReference{
+						Name:     "catalog",
+						Kind:     "ImageCatalog",
+						APIGroup: &apiv1.SchemeGroupVersion.Group,
+					},
+					Major: 15,
+				},
+			},
+		}
+		catalog := &apiv1.ImageCatalog{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "catalog",
+				Namespace: "default",
+			},
+			Spec: apiv1.ImageCatalogSpec{
+				Images: []apiv1.CatalogImage{
+					{
+						Image: "postgres:11.2",
+						Major: 11,
+					},
+				},
+			},
+		}
+
+		r := newFakeReconcilerFor(cluster, catalog)
+		result, err := r.reconcileImage(ctx, cluster)
+		Expect(err).Error().ShouldNot(HaveOccurred())
+		Expect(result).ToNot(BeNil())
+
+		Expect(cluster.Status.Phase).To(Equal(apiv1.PhaseImageCatalogError))
+	})
+
+	It("skips major version downgrades", func(ctx SpecContext) {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cluster-example",
+				Namespace: "default",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageName: "postgres:15.2",
+			},
+			Status: apiv1.ClusterStatus{
+				Image: "postgres:16.2",
+			},
+		}
+
+		r := newFakeReconcilerFor(cluster, nil)
+
+		result, err := r.reconcileImage(ctx, cluster)
+		Expect(err).Error().ShouldNot(HaveOccurred())
+		Expect(result).To(BeNil())
+
+		Expect(cluster.Status.Image).To(Equal("postgres:16.2"))
+	})
+
+	It("processes major version upgrades", func(ctx SpecContext) {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "cluster-example",
+				Namespace: "default",
+			},
+			Spec: apiv1.ClusterSpec{
+				ImageName: "postgres:17.2",
+			},
+			Status: apiv1.ClusterStatus{
+				Image: "postgres:16.2",
+			},
+		}
+
+		r := newFakeReconcilerFor(cluster, nil)
+
+		result, err := r.reconcileImage(ctx, cluster)
+		Expect(err).Error().ShouldNot(HaveOccurred())
+		Expect(result).To(BeNil())
+
+		Expect(cluster.Status.Image).To(Equal("postgres:17.2"))
+		Expect(cluster.Status.MajorVersionUpgradeFromImage).ToNot(BeNil())
+		Expect(*cluster.Status.MajorVersionUpgradeFromImage).To(Equal("postgres:16.2"))
+	})
+})
diff --git a/internal/controller/cluster_scale.go b/internal/controller/cluster_scale.go
index 0fc21229e6..ae7889a96e 100644
--- a/internal/controller/cluster_scale.go
+++ b/internal/controller/cluster_scale.go
@@ -32,7 +32,7 @@ import (
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim"
-	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
 )
 
 // scaleDownCluster handles the scaling down operations of a PostgreSQL cluster.
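The hunk that follows drops the name-based lookup (specs.GetPossibleJobNames) in favour of selecting jobs by labels and by the owner field index. As a minimal sketch of the same selection pattern with controller-runtime: listInstanceJobs is a hypothetical helper written only for illustration, and it assumes the jobOwnerKey field index (registered with WithIndex, as shown later in suite_test.go) and the label constants from pkg/utils that appear elsewhere in this patch.

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)

// listInstanceJobs selects the jobs of a single instance by labels rather
// than by precomputed job names (hypothetical helper, for illustration only).
func listInstanceJobs(
	ctx context.Context,
	c client.Client,
	cluster *apiv1.Cluster,
	instanceName string,
) ([]batchv1.Job, error) {
	var jobList batchv1.JobList
	if err := c.List(
		ctx,
		&jobList,
		client.InNamespace(cluster.Namespace),
		// jobOwnerKey is assumed to be a field index on the owning cluster.
		client.MatchingFields{jobOwnerKey: cluster.Name},
		client.MatchingLabels{
			utils.InstanceNameLabelName: instanceName,
			utils.ClusterLabelName:      cluster.Name,
		},
		// Only jobs that carry a role label are instance jobs.
		client.HasLabels{utils.JobRoleLabelName},
	); err != nil {
		return nil, fmt.Errorf("while listing jobs of instance %s: %w", instanceName, err)
	}
	return jobList.Items, nil
}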
@@ -101,13 +101,25 @@ func (r *ClusterReconciler) ensureInstanceJobAreDeleted( ) error { contextLogger := log.FromContext(ctx) - for _, jobName := range specs.GetPossibleJobNames(instanceName) { - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: cluster.Namespace, - }, - } + var jobList batchv1.JobList + if err := r.List( + ctx, + &jobList, + client.InNamespace(cluster.Namespace), + client.MatchingFields{jobOwnerKey: cluster.Name}, + client.MatchingLabels{ + utils.InstanceNameLabelName: instanceName, + utils.ClusterLabelName: cluster.Name, + }, + client.HasLabels{ + utils.JobRoleLabelName, + }, + ); err != nil { + return fmt.Errorf("while looking for stale jobs of instance %s: %w", instanceName, err) + } + + for i := range jobList.Items { + job := &jobList.Items[i] // This job was working against the PVC of this Pod, // let's remove it diff --git a/internal/controller/cluster_scale_test.go b/internal/controller/cluster_scale_test.go index 2c9f24e513..7ee3ed3b07 100644 --- a/internal/controller/cluster_scale_test.go +++ b/internal/controller/cluster_scale_test.go @@ -33,7 +33,7 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" - "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -172,35 +172,58 @@ var _ = Describe("cluster scale pod and job deletion logic", func() { ) BeforeEach(func() { - fakeClientSet = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build() + fakeClientSet = fake. + NewClientBuilder(). + WithScheme(schemeBuilder.BuildWithAllKnownScheme()). + WithIndex(&batchv1.Job{}, jobOwnerKey, jobOwnerIndexFunc). 
+ Build() ctx, cancel = context.WithCancel(context.Background()) reconciler = &ClusterReconciler{ Client: fakeClientSet, } + instanceName = "test-instance" + cluster = &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", Namespace: "default", }, } - - instanceName = "test-instance" + cluster.TypeMeta = metav1.TypeMeta{ + Kind: apiv1.ClusterKind, + APIVersion: apiv1.SchemeGroupVersion.String(), + } }) AfterEach(func() { cancel() }) + It("creates the cluster", func(ctx SpecContext) { + err := fakeClientSet.Create(ctx, cluster) + Expect(err).ToNot(HaveOccurred()) + }) + It("should delete all the jobs", func(ctx SpecContext) { - for _, jobName := range specs.GetPossibleJobNames(instanceName) { + jobNames := []string{ + cluster.Name + "-initdb", + cluster.Name + "-pgbasebackup", + } + for _, jobName := range jobNames { job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, Namespace: cluster.Namespace, + Labels: map[string]string{ + utils.InstanceNameLabelName: instanceName, + utils.ClusterLabelName: cluster.Name, + utils.JobRoleLabelName: "test", + }, }, } + cluster.SetInheritedDataAndOwnership(&job.ObjectMeta) err := fakeClientSet.Create(ctx, job) Expect(err).NotTo(HaveOccurred()) } @@ -208,7 +231,7 @@ var _ = Describe("cluster scale pod and job deletion logic", func() { err := reconciler.ensureInstanceJobAreDeleted(ctx, cluster, instanceName) Expect(err).NotTo(HaveOccurred()) - for _, jobName := range specs.GetPossibleJobNames(instanceName) { + for _, jobName := range jobNames { var expectedJob batchv1.Job err = fakeClientSet.Get(context.Background(), types.NamespacedName{Name: jobName, Namespace: cluster.Namespace}, diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index 5703b18bcf..62d0aa56ca 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -739,8 +739,8 @@ func (r *ClusterReconciler) RegisterPhase(ctx context.Context, ctx, r.Client, cluster, - status.SetPhaseTX(phase, reason), - status.SetClusterReadyConditionTX, + status.SetPhase(phase, reason), + status.SetClusterReadyCondition, ) } diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go index b13c94f31d..c8be184023 100644 --- a/internal/controller/cluster_upgrade.go +++ b/internal/controller/cluster_upgrade.go @@ -495,7 +495,7 @@ func getProjectedVolumeConfigurationFromPod(pod corev1.Pod) *corev1.ProjectedVol } func checkPodImageIsOutdated(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { - targetImageName := cluster.GetImageName() + targetImageName := cluster.Status.Image pgCurrentImageName, err := specs.GetPostgresImageName(*pod) if err != nil { @@ -680,7 +680,7 @@ func (r *ClusterReconciler) upgradePod( ) error { log.FromContext(ctx).Info("Recreating instance pod", "pod", pod.Name, - "to", cluster.GetImageName(), + "to", cluster.Status.Image, "reason", reason, ) diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index 8737fb1a44..cda4e66f6b 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -55,6 +55,9 @@ var _ = Describe("Pod upgrade", Ordered, func() { Spec: apiv1.ClusterSpec{ ImageName: "postgres:13.11", }, + Status: apiv1.ClusterStatus{ + Image: "postgres:13.11", + }, } configuration.Current = configuration.NewConfiguration() }) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 8315875b88..d567ba4dc1 
100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -77,6 +77,7 @@ func buildTestEnvironment() *testingEnvironment {
 	k8sClient := fake.NewClientBuilder().WithScheme(scheme).
 		WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Backup{}, &apiv1.Pooler{},
 			&corev1.Service{}, &corev1.ConfigMap{}, &corev1.Secret{}).
+		WithIndex(&batchv1.Job{}, jobOwnerKey, jobOwnerIndexFunc).
 		Build()
 	Expect(err).ToNot(HaveOccurred())
 
diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go
index 78ee1b6be4..b8c8aebe48 100644
--- a/internal/management/controller/instance_controller.go
+++ b/internal/management/controller/instance_controller.go
@@ -338,8 +338,8 @@ func (r *InstanceReconciler) restartPrimaryInplaceIfRequested(
 			ctx,
 			r.client,
 			cluster,
-			clusterstatus.SetPhaseTX(apiv1.PhaseHealthy, "Primary instance restarted in-place"),
-			clusterstatus.SetClusterReadyConditionTX,
+			clusterstatus.SetPhase(apiv1.PhaseHealthy, "Primary instance restarted in-place"),
+			clusterstatus.SetClusterReadyCondition,
 		)
 	}
 	return false, nil
@@ -1074,8 +1074,8 @@ func (r *InstanceReconciler) processConfigReloadAndManageRestart(ctx context.Con
 			ctx,
 			r.client,
 			cluster,
-			clusterstatus.SetPhaseTX(phase, phaseReason),
-			clusterstatus.SetClusterReadyConditionTX,
+			clusterstatus.SetPhase(phase, phaseReason),
+			clusterstatus.SetClusterReadyCondition,
 		)
 	}
 
@@ -1098,8 +1098,8 @@ func (r *InstanceReconciler) triggerRestartForDecrease(ctx context.Context, clus
 			ctx,
 			r.client,
 			cluster,
-			clusterstatus.SetPhaseTX(phase, phaseReason),
-			clusterstatus.SetClusterReadyConditionTX,
+			clusterstatus.SetPhase(phase, phaseReason),
+			clusterstatus.SetClusterReadyCondition,
 		)
 	}
 
diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go
index 1671ce99ba..3de0101272 100644
--- a/internal/webhook/v1/cluster_webhook.go
+++ b/internal/webhook/v1/cluster_webhook.go
@@ -1231,22 +1231,26 @@ func (v *ClusterCustomValidator) validateImageChange(r, old *apiv1.Cluster) fiel
 	var result field.ErrorList
 	var newVersion, oldVersion version.Data
 	var err error
-	var newImagePath *field.Path
+	var fieldPath *field.Path
 
 	if r.Spec.ImageCatalogRef != nil {
-		newImagePath = field.NewPath("spec", "imageCatalogRef")
+		fieldPath = field.NewPath("spec", "imageCatalogRef", "major")
 	} else {
-		newImagePath = field.NewPath("spec", "imageName")
+		fieldPath = field.NewPath("spec", "imageName")
 	}
 
-	r.Status.Image = ""
-	newVersion, err = r.GetPostgresqlVersion()
+	newCluster := r.DeepCopy()
+	newCluster.Status.Image = ""
+	newVersion, err = newCluster.GetPostgresqlVersion()
 	if err != nil {
 		// The validation error will be already raised by the
 		// validateImageName function
 		return result
 	}
 
-	old.Status.Image = ""
+	old = old.DeepCopy()
+	if old.Status.MajorVersionUpgradeFromImage != nil {
+		old.Status.Image = *old.Status.MajorVersionUpgradeFromImage
+	}
 	oldVersion, err = old.GetPostgresqlVersion()
 	if err != nil {
 		// The validation error will be already raised by the
@@ -1254,18 +1258,27 @@ func (v *ClusterCustomValidator) validateImageChange(r, old *apiv1.Cluster) fiel
 		return result
 	}
 
-	status := version.IsUpgradePossible(oldVersion, newVersion)
-
-	if !status {
+	if oldVersion.Major() > newVersion.Major() {
 		result = append(
 			result,
 			field.Invalid(
-				newImagePath,
-				newVersion,
-				fmt.Sprintf("can't upgrade between majors %v and %v",
-					oldVersion, newVersion)))
+				fieldPath,
+				fmt.Sprintf("%v", newVersion.Major()),
+				fmt.Sprintf("can't downgrade from major %v to
%v", + oldVersion.Major(), newVersion.Major()))) } + // TODO: Upgrading to versions 14 and 15 would require carrying information around about the collation used. + // See https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=9637badd9. + // This is not implemented yet, and users should not upgrade to old versions anyway, so we are blocking it. + if oldVersion.Major() < newVersion.Major() && newVersion.Major() < 16 { + result = append( + result, + field.Invalid( + fieldPath, + fmt.Sprintf("%v", newVersion.Major()), + "major upgrades are only supported to version 16 or higher")) + } return result } diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index 6edc5a1694..ae22f9d074 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -1245,7 +1245,7 @@ var _ = Describe("validate image name change", func() { }) }) Context("using image catalog", func() { - It("complains on major upgrades", func() { + It("complains on major downgrades", func() { clusterOld := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ ImageCatalogRef: &apiv1.ImageCatalogRef{ @@ -1253,7 +1253,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 15, + Major: 16, }, }, } @@ -1264,7 +1264,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 16, + Major: 15, }, }, } @@ -1291,10 +1291,10 @@ var _ = Describe("validate image name change", func() { } Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) }) - It("complains on major upgrades", func() { + It("complains on major downgrades", func() { clusterOld := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ - ImageName: "postgres:16.1", + ImageName: "postgres:17.1", }, } clusterNew := &apiv1.Cluster{ @@ -1304,7 +1304,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 17, + Major: 16, }, }, } @@ -1369,7 +1369,7 @@ var _ = Describe("validate image name change", func() { } Expect(v.validateImageChange(clusterNew, clusterOld)).To(BeEmpty()) }) - It("complains on major upgrades", func() { + It("complains on major downgrades", func() { clusterOld := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ ImageCatalogRef: &apiv1.ImageCatalogRef{ @@ -1377,18 +1377,18 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 16, + Major: 17, }, }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ - ImageName: "postgres:17.1", + ImageName: "postgres:16.1", }, } Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) }) - It("complains going from imageCatalogRef to different major default imageName", func() { + It("complains going from imageCatalogRef to lower major default imageName", func() { clusterOld := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ ImageCatalogRef: &apiv1.ImageCatalogRef{ @@ -1396,7 +1396,7 @@ var _ = Describe("validate image name change", func() { Name: "test", Kind: "ImageCatalog", }, - Major: 16, + Major: 18, }, }, } diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index 2675271ab8..a2c6ad2ed7 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -506,7 +506,7 @@ func (info InitInfo) Bootstrap(ctx context.Context) error { // In case of import bootstrap, we restore the standard configuration file content if isImportBootstrap { - /// Write standard replication 
configuration + // Write standard replication configuration if _, err = configurePostgresOverrideConfFile(info.PgData, primaryConnInfo, ""); err != nil { return fmt.Errorf("while configuring Postgres for replication: %w", err) } diff --git a/pkg/management/postgres/join.go b/pkg/management/postgres/join.go index a341e54e7f..fda8bdcdcb 100644 --- a/pkg/management/postgres/join.go +++ b/pkg/management/postgres/join.go @@ -81,7 +81,7 @@ func (info InitInfo) Join(ctx context.Context, cluster *apiv1.Cluster) error { if err != nil { log.Warning( "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", - "imageName", cluster.GetImageName(), + "image", cluster.Status.Image, "err", err) } else if pgVersion.Major() >= 12 { // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. diff --git a/pkg/reconciler/majorupgrade/doc.go b/pkg/reconciler/majorupgrade/doc.go new file mode 100644 index 0000000000..f75d3082f2 --- /dev/null +++ b/pkg/reconciler/majorupgrade/doc.go @@ -0,0 +1,30 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package majorupgrade provides the logic for upgrading a PostgreSQL cluster +// to a new major version. +// +// The upgrade process consists of the following steps: +// +// 1. Delete all Pods in the cluster. +// 2. Create and initiate the major upgrade job. +// 3. Wait for the job to complete. +// 4. If the upgrade job completes successfully, start new Pods for the upgraded version. +// Otherwise, stop and wait for input by the user. +package majorupgrade diff --git a/pkg/reconciler/majorupgrade/job.go b/pkg/reconciler/majorupgrade/job.go new file mode 100644 index 0000000000..f72690ab37 --- /dev/null +++ b/pkg/reconciler/majorupgrade/job.go @@ -0,0 +1,87 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package majorupgrade + +import ( + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +const jobMajorUpgrade = "major-upgrade" + +// isMajorUpgradeJob tells if the passed Job definition corresponds to +// the job handling the major upgrade +func isMajorUpgradeJob(job *batchv1.Job) bool { + return job.GetLabels()[utils.JobRoleLabelName] == string(jobMajorUpgrade) +} + +// getTargetImageFromMajorUpgradeJob gets the image that is being used as +// target of the major upgrade process. +func getTargetImageFromMajorUpgradeJob(job *batchv1.Job) (string, bool) { + if !isMajorUpgradeJob(job) { + return "", false + } + + for _, container := range job.Spec.Template.Spec.Containers { + if container.Name == string(jobMajorUpgrade) { + return container.Image, true + } + } + + return "", false +} + +// createMajorUpgradeJobDefinition creates a job to upgrade the primary node to a new Postgres major version +func createMajorUpgradeJobDefinition(cluster *apiv1.Cluster, nodeSerial int) *batchv1.Job { + oldImage := *cluster.Status.MajorVersionUpgradeFromImage + + prepareCommand := []string{ + "/controller/manager", + "instance", + "upgrade", + "prepare", + "/controller/old", + } + oldVersionInitContainer := corev1.Container{ + Name: "prepare", + Image: oldImage, + ImagePullPolicy: cluster.Spec.ImagePullPolicy, + Command: prepareCommand, + VolumeMounts: specs.CreatePostgresVolumeMounts(*cluster), + Resources: cluster.Spec.Resources, + SecurityContext: specs.CreateContainerSecurityContext(cluster.GetSeccompProfile()), + } + + majorUpgradeCommand := []string{ + "/controller/manager", + "instance", + "upgrade", + "execute", + "/controller/old/bindir.txt", + } + job := specs.CreatePrimaryJob(*cluster, nodeSerial, jobMajorUpgrade, majorUpgradeCommand) + job.Spec.Template.Spec.InitContainers = append(job.Spec.Template.Spec.InitContainers, oldVersionInitContainer) + + return job +} diff --git a/pkg/reconciler/majorupgrade/job_test.go b/pkg/reconciler/majorupgrade/job_test.go new file mode 100644 index 0000000000..6f15aee5ee --- /dev/null +++ b/pkg/reconciler/majorupgrade/job_test.go @@ -0,0 +1,72 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package majorupgrade + +import ( + batchv1 "k8s.io/api/batch/v1" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Major upgrade Job generation", func() { + oldImageName := "postgres:16" + newImageName := "postgres:17" + + cluster := apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: newImageName, + Bootstrap: &apiv1.BootstrapConfiguration{ + InitDB: &apiv1.BootstrapInitDB{}, + }, + }, + Status: apiv1.ClusterStatus{ + Image: newImageName, + MajorVersionUpgradeFromImage: &oldImageName, + }, + } + + It("creates major upgrade jobs", func() { + majorUpgradeJob := createMajorUpgradeJobDefinition(&cluster, 1) + Expect(majorUpgradeJob).ToNot(BeNil()) + Expect(majorUpgradeJob.Spec.Template.Spec.Containers[0].Image).To(Equal(newImageName)) + }) + + It("is able to discover which target image was used", func() { + majorUpgradeJob := createMajorUpgradeJobDefinition(&cluster, 1) + Expect(majorUpgradeJob).ToNot(BeNil()) + + imgName, found := getTargetImageFromMajorUpgradeJob(majorUpgradeJob) + Expect(found).To(BeTrue()) + Expect(imgName).To(Equal(newImageName)) + }) + + DescribeTable( + "Tells major upgrade jobs apart from jobs of other types", + func(job *batchv1.Job, isMajorUpgrade bool) { + Expect(isMajorUpgradeJob(job)).To(Equal(isMajorUpgrade)) + }, + Entry("initdb jobs are not major upgrades", specs.CreatePrimaryJobViaInitdb(cluster, 1), false), + Entry("major-upgrade jobs are major upgrades", createMajorUpgradeJobDefinition(&cluster, 1), true), + ) +}) diff --git a/pkg/reconciler/majorupgrade/reconciler.go b/pkg/reconciler/majorupgrade/reconciler.go new file mode 100644 index 0000000000..ed150dc81b --- /dev/null +++ b/pkg/reconciler/majorupgrade/reconciler.go @@ -0,0 +1,286 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package majorupgrade + +import ( + "context" + "fmt" + "time" + + "github.com/cloudnative-pg/machinery/pkg/log" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" +) + +// ErrIncoherentMajorUpgradeJob is raised when the major upgrade job +// is missing the target image +var ErrIncoherentMajorUpgradeJob = fmt.Errorf("major upgrade job is missing the target image") + +// ErrNoPrimaryPVCFound is raised when the list of PVCs doesn't +// include any primary instance. +var ErrNoPrimaryPVCFound = fmt.Errorf("no primary PVC found") + +// Reconcile implements the major version upgrade logic. 
+func Reconcile( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + instances []corev1.Pod, + pvcs []corev1.PersistentVolumeClaim, + jobs []batchv1.Job, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + if majorUpgradeJob := getMajorUpdateJob(jobs); majorUpgradeJob != nil { + return majorVersionUpgradeHandleCompletion(ctx, c, cluster, majorUpgradeJob, pvcs) + } + + if cluster.Status.MajorVersionUpgradeFromImage == nil { + return nil, nil + } + + desiredVersion, err := cluster.GetPostgresqlVersion() + if err != nil { + contextLogger.Error(err, "Unable to retrieve the new PostgreSQL version") + return nil, err + } + + primaryNodeSerial, err := getPrimarySerial(pvcs) + if err != nil || primaryNodeSerial == 0 { + contextLogger.Error(err, "Unable to retrieve the primary node serial") + return nil, err + } + + contextLogger.Info("Reconciling in-place major version upgrades", + "primaryNodeSerial", primaryNodeSerial, "desiredVersion", desiredVersion.Major()) + + err = registerPhase(ctx, c, cluster, apiv1.PhaseMajorUpgrade, + fmt.Sprintf("Upgrading cluster to major version %v", desiredVersion.Major())) + if err != nil { + return nil, err + } + + if result, err := deleteAllPodsInMajorUpgradePreparation(ctx, c, instances, jobs); err != nil { + contextLogger.Error(err, "Unable to delete pods and jobs in preparation for major upgrade") + return nil, err + } else if result != nil { + return result, err + } + + if result, err := createMajorUpgradeJob(ctx, c, cluster, primaryNodeSerial); err != nil { + contextLogger.Error(err, "Unable to create major upgrade job") + return nil, err + } else if result != nil { + return result, err + } + + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil +} + +func getMajorUpdateJob(items []batchv1.Job) *batchv1.Job { + for _, job := range items { + if isMajorUpgradeJob(&job) { + return &job + } + } + + return nil +} + +func deleteAllPodsInMajorUpgradePreparation( + ctx context.Context, + c client.Client, + instances []corev1.Pod, + jobs []batchv1.Job, +) (*ctrl.Result, error) { + foundSomethingToDelete := false + + for _, pod := range instances { + if pod.GetDeletionTimestamp() != nil { + continue + } + + foundSomethingToDelete = true + if err := c.Delete(ctx, &pod); err != nil { + return nil, err + } + } + + for _, job := range jobs { + if job.GetDeletionTimestamp() != nil { + continue + } + + foundSomethingToDelete = true + if err := c.Delete(ctx, &job, &client.DeleteOptions{ + PropagationPolicy: ptr.To(metav1.DeletePropagationForeground), + }); err != nil { + return nil, err + } + } + + if foundSomethingToDelete { + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + return nil, nil +} + +func createMajorUpgradeJob( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + primaryNodeSerial int, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + job := createMajorUpgradeJobDefinition(cluster, primaryNodeSerial) + + if err := ctrl.SetControllerReference(cluster, job, c.Scheme()); err != nil { + contextLogger.Error(err, "Unable to set the owner reference for major upgrade job") + return nil, err + } + + utils.SetOperatorVersion(&job.ObjectMeta, versions.Version) + utils.InheritAnnotations(&job.ObjectMeta, cluster.Annotations, + cluster.GetFixedInheritedAnnotations(), configuration.Current) + utils.InheritAnnotations(&job.Spec.Template.ObjectMeta, cluster.Annotations, + cluster.GetFixedInheritedAnnotations(), configuration.Current) + utils.InheritLabels(&job.ObjectMeta, 
cluster.Labels, + cluster.GetFixedInheritedLabels(), configuration.Current) + utils.InheritLabels(&job.Spec.Template.ObjectMeta, cluster.Labels, + cluster.GetFixedInheritedLabels(), configuration.Current) + utils.SetInstanceRole(job.Spec.Template.ObjectMeta, specs.ClusterRoleLabelPrimary) + + contextLogger.Info("Creating new major upgrade Job", + "jobName", job.Name, + "primary", true) + + if err := c.Create(ctx, job); err != nil { + if errors.IsAlreadyExists(err) { + // This Job was already created, maybe the cache is stale. + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + return nil, err + } + + return nil, nil +} + +func majorVersionUpgradeHandleCompletion( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + job *batchv1.Job, + pvcs []corev1.PersistentVolumeClaim, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + if !utils.JobHasOneCompletion(*job) { + contextLogger.Info("Major upgrade job not completed.") + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + for _, pvc := range pvcs { + if pvc.GetDeletionTimestamp() != nil { + continue + } + + if specs.IsPrimary(pvc.ObjectMeta) { + continue + } + + if err := c.Delete(ctx, &pvc); err != nil { + // Ignore if NotFound, otherwise report the error + if !errors.IsNotFound(err) { + return nil, err + } + } + } + + jobImage, ok := getTargetImageFromMajorUpgradeJob(job) + if !ok { + return nil, ErrIncoherentMajorUpgradeJob + } + + if err := status.PatchWithOptimisticLock( + ctx, + c, + cluster, + status.SetMajorVersionUpgradeFromImage(&jobImage), + ); err != nil { + contextLogger.Error(err, "Unable to update cluster status after major upgrade completed.") + return nil, err + } + + if err := c.Delete(ctx, job, &client.DeleteOptions{ + PropagationPolicy: ptr.To(metav1.DeletePropagationForeground), + }); err != nil { + contextLogger.Error(err, "Unable to delete major upgrade job.") + return nil, err + } + + return &ctrl.Result{Requeue: true}, nil +} + +// registerPhase sets a phase into the cluster +func registerPhase( + ctx context.Context, + c client.Client, + cluster *apiv1.Cluster, + phase string, + reason string, +) error { + return status.PatchWithOptimisticLock( + ctx, + c, + cluster, + status.SetPhase(phase, reason), + status.SetClusterReadyCondition, + ) +} + +// getPrimarySerial tries to obtain the primary serial from a group of PVCs +func getPrimarySerial( + pvcs []corev1.PersistentVolumeClaim, +) (int, error) { + for _, pvc := range pvcs { + instanceRole, _ := utils.GetInstanceRole(pvc.ObjectMeta.Labels) + if instanceRole != specs.ClusterRoleLabelPrimary { + continue + } + + return specs.GetNodeSerial(pvc.ObjectMeta) + } + + return 0, ErrNoPrimaryPVCFound +} diff --git a/pkg/reconciler/majorupgrade/reconciler_test.go b/pkg/reconciler/majorupgrade/reconciler_test.go new file mode 100644 index 0000000000..ea7cdfb81c --- /dev/null +++ b/pkg/reconciler/majorupgrade/reconciler_test.go @@ -0,0 +1,229 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package majorupgrade
+
+import (
+	"fmt"
+
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/ptr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("Major upgrade job status reconciliation", func() {
+	It("waits until the job has completed", func(ctx SpecContext) {
+		job := buildRunningUpgradeJob()
+		cluster := &apiv1.Cluster{}
+		fakeClient := fake.NewClientBuilder().
+			WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+			WithRuntimeObjects(job, cluster).
+			WithStatusSubresource(cluster).
+			Build()
+
+		result, err := majorVersionUpgradeHandleCompletion(ctx, fakeClient, cluster, job, nil)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(result).ToNot(BeNil())
+
+		// the job has not been deleted
+		Expect(job.ObjectMeta.DeletionTimestamp).To(BeNil())
+	})
+
+	It("deletes the replica PVCs and makes the cluster use the new image", func(ctx SpecContext) {
+		job := buildCompletedUpgradeJob()
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster-example",
+			},
+		}
+		pvcs := []corev1.PersistentVolumeClaim{
+			buildPrimaryPVC(1),
+			buildReplicaPVC(2),
+			buildReplicaPVC(3),
+		}
+
+		objects := []runtime.Object{
+			job,
+			cluster,
+		}
+		for i := range pvcs {
+			objects = append(objects, &pvcs[i])
+		}
+		fakeClient := fake.NewClientBuilder().
+			WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+			WithRuntimeObjects(objects...).
+			WithStatusSubresource(cluster).
+ Build() + + result, err := majorVersionUpgradeHandleCompletion(ctx, fakeClient, cluster, job, pvcs) + Expect(err).ToNot(HaveOccurred()) + Expect(result).ToNot(BeNil()) + Expect(*result).To(Equal(ctrl.Result{Requeue: true})) + + // the replica PVCs have been deleted + for i := range pvcs { + if !specs.IsPrimary(pvcs[i].ObjectMeta) { + var pvc corev1.PersistentVolumeClaim + err := fakeClient.Get(ctx, client.ObjectKeyFromObject(&pvcs[i]), &pvc) + Expect(err).To(MatchError(errors.IsNotFound, "is not found")) + } + } + + // the upgrade has been marked as done + Expect(cluster.Status.MajorVersionUpgradeFromImage).ToNot(BeNil()) + Expect(*cluster.Status.MajorVersionUpgradeFromImage).To(Equal("postgres:16")) + + // the job has been deleted + var tempJob batchv1.Job + err = fakeClient.Get(ctx, client.ObjectKeyFromObject(job), &tempJob) + Expect(err).To(MatchError(errors.IsNotFound, "is not found")) + }) +}) + +var _ = Describe("Major upgrade job decoding", func() { + It("is able to find the target image", func() { + job := buildCompletedUpgradeJob() + imageName, ok := getTargetImageFromMajorUpgradeJob(job) + Expect(ok).To(BeTrue()) + Expect(imageName).To(Equal("postgres:16")) + }) +}) + +var _ = Describe("PVC metadata decoding", func() { + It("is able to find the serial number of the primary server", func() { + pvcs := []corev1.PersistentVolumeClaim{ + buildReplicaPVC(1), + buildPrimaryPVC(2), + } + + Expect(getPrimarySerial(pvcs)).To(Equal(2)) + }) + + It("raises an error if no primary PVC is found", func() { + pvcs := []corev1.PersistentVolumeClaim{ + buildReplicaPVC(1), + buildReplicaPVC(2), + } + + Expect(getPrimarySerial(pvcs)).Error().To(BeEquivalentTo(ErrNoPrimaryPVCFound)) + }) + + It("raises an error if the primary PVC has an invalid serial", func() { + pvcs := []corev1.PersistentVolumeClaim{ + buildReplicaPVC(1), + buildInvalidPrimaryPVC(2), + } + + Expect(getPrimarySerial(pvcs)).Error().To(HaveOccurred()) + }) +}) + +func buildPrimaryPVC(serial int) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cluster-example-%d", serial), + Labels: map[string]string{ + utils.ClusterRoleLabelName: specs.ClusterRoleLabelPrimary, + }, + Annotations: map[string]string{ + utils.ClusterSerialAnnotationName: fmt.Sprintf("%v", serial), + }, + }, + } +} + +func buildInvalidPrimaryPVC(serial int) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cluster-example-%d", serial), + Labels: map[string]string{ + utils.ClusterRoleLabelName: specs.ClusterRoleLabelPrimary, + }, + Annotations: map[string]string{ + utils.ClusterSerialAnnotationName: fmt.Sprintf("%v - this is a test", serial), + }, + }, + } +} + +func buildReplicaPVC(serial int) corev1.PersistentVolumeClaim { + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cluster-example-%d", serial), + Labels: map[string]string{ + utils.ClusterRoleLabelName: specs.ClusterRoleLabelReplica, + }, + Annotations: map[string]string{ + utils.ClusterSerialAnnotationName: fmt.Sprintf("%v", serial), + }, + }, + } +} + +func buildCompletedUpgradeJob() *batchv1.Job { + return &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example-major-upgrade", + Labels: map[string]string{ + utils.JobRoleLabelName: jobMajorUpgrade, + }, + }, + Spec: batchv1.JobSpec{ + Completions: ptr.To[int32](1), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: 
[]corev1.Container{ + { + Name: jobMajorUpgrade, + Image: "postgres:16", + }, + }, + }, + }, + }, + Status: batchv1.JobStatus{ + Succeeded: 1, + }, + } +} + +func buildRunningUpgradeJob() *batchv1.Job { + return &batchv1.Job{ + Spec: batchv1.JobSpec{ + Completions: ptr.To[int32](1), + }, + Status: batchv1.JobStatus{ + Succeeded: 0, + }, + } +} diff --git a/pkg/reconciler/majorupgrade/suite_test.go b/pkg/reconciler/majorupgrade/suite_test.go new file mode 100644 index 0000000000..a4f8479de6 --- /dev/null +++ b/pkg/reconciler/majorupgrade/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package majorupgrade + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestMajorUpgrade(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Major upgrade reconciler") +} diff --git a/pkg/resources/status/patch.go b/pkg/resources/status/patch.go index 51f8847e20..5bc7e59483 100644 --- a/pkg/resources/status/patch.go +++ b/pkg/resources/status/patch.go @@ -39,7 +39,7 @@ func PatchWithOptimisticLock( ctx context.Context, c client.Client, cluster *apiv1.Cluster, - txs ...func(cluster *apiv1.Cluster), + txs ...Transaction, ) error { if cluster == nil { return nil diff --git a/pkg/resources/status/transactions.go b/pkg/resources/status/transactions.go index 590352b807..4c9f678f28 100644 --- a/pkg/resources/status/transactions.go +++ b/pkg/resources/status/transactions.go @@ -26,9 +26,12 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" ) -// SetClusterReadyConditionTX updates the cluster's readiness condition +// Transaction is a function that modifies a cluster +type Transaction func(cluster *apiv1.Cluster) + +// SetClusterReadyCondition updates the cluster's readiness condition // according to the cluster phase -func SetClusterReadyConditionTX(cluster *apiv1.Cluster) { +func SetClusterReadyCondition(cluster *apiv1.Cluster) { if cluster.Status.Conditions == nil { cluster.Status.Conditions = []metav1.Condition{} } @@ -52,10 +55,25 @@ func SetClusterReadyConditionTX(cluster *apiv1.Cluster) { meta.SetStatusCondition(&cluster.Status.Conditions, condition) } -// SetPhaseTX is a transaction that sets the cluster phase and reason -func SetPhaseTX(phase string, reason string) func(cluster *apiv1.Cluster) { +// SetPhase is a transaction that sets the cluster phase and reason +func SetPhase(phase string, reason string) Transaction { return func(cluster *apiv1.Cluster) { cluster.Status.Phase = phase cluster.Status.PhaseReason = reason } } + +// SetImage is a transaction that sets the cluster image +func SetImage(image string) Transaction { + return func(cluster *apiv1.Cluster) { + cluster.Status.Image = image + } +} + +// SetMajorVersionUpgradeFromImage is a transaction that sets the cluster as upgrading to a newer major version +// starting from the provided image +func 
SetMajorVersionUpgradeFromImage(image *string) Transaction { + return func(cluster *apiv1.Cluster) { + cluster.Status.MajorVersionUpgradeFromImage = image + } +} diff --git a/pkg/specs/containers.go b/pkg/specs/containers.go index ed18f04ad6..27ebfe3f65 100644 --- a/pkg/specs/containers.go +++ b/pkg/specs/containers.go @@ -41,7 +41,7 @@ func createBootstrapContainer(cluster apiv1.Cluster) corev1.Container { "bootstrap", "/controller/manager", }, - VolumeMounts: createPostgresVolumeMounts(cluster), + VolumeMounts: CreatePostgresVolumeMounts(cluster), Resources: cluster.Spec.Resources, SecurityContext: CreateContainerSecurityContext(cluster.GetSeccompProfile()), } diff --git a/pkg/specs/jobs.go b/pkg/specs/jobs.go index b651d5cfa1..12ae77f200 100644 --- a/pkg/specs/jobs.go +++ b/pkg/specs/jobs.go @@ -89,7 +89,7 @@ func CreatePrimaryJobViaInitdb(cluster apiv1.Cluster, nodeSerial int) *batchv1.J initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) if cluster.Spec.Bootstrap.InitDB.Import != nil { - return createPrimaryJob(cluster, nodeSerial, jobRoleImport, initCommand) + return CreatePrimaryJob(cluster, nodeSerial, jobRoleImport, initCommand) } if cluster.ShouldInitDBRunPostInitApplicationSQLRefs() { @@ -107,7 +107,7 @@ func CreatePrimaryJobViaInitdb(cluster apiv1.Cluster, nodeSerial int) *batchv1.J "--post-init-sql-refs-folder", postInitSQLRefsFolder.toString()) } - return createPrimaryJob(cluster, nodeSerial, jobRoleInitDB, initCommand) + return CreatePrimaryJob(cluster, nodeSerial, jobRoleInitDB, initCommand) } func buildInitDBFlags(cluster apiv1.Cluster) (initCommand []string) { @@ -196,7 +196,7 @@ func CreatePrimaryJobViaRestoreSnapshot( initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) - job := createPrimaryJob(cluster, nodeSerial, jobRoleSnapshotRecovery, initCommand) + job := CreatePrimaryJob(cluster, nodeSerial, jobRoleSnapshotRecovery, initCommand) addBarmanEndpointCAToJobFromCluster(cluster, backup, job) @@ -213,7 +213,7 @@ func CreatePrimaryJobViaRecovery(cluster apiv1.Cluster, nodeSerial int, backup * initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) - job := createPrimaryJob(cluster, nodeSerial, jobRoleFullRecovery, initCommand) + job := CreatePrimaryJob(cluster, nodeSerial, jobRoleFullRecovery, initCommand) addBarmanEndpointCAToJobFromCluster(cluster, backup, job) @@ -254,7 +254,7 @@ func CreatePrimaryJobViaPgBaseBackup(cluster apiv1.Cluster, nodeSerial int) *bat initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) - return createPrimaryJob(cluster, nodeSerial, jobRolePGBaseBackup, initCommand) + return CreatePrimaryJob(cluster, nodeSerial, jobRolePGBaseBackup, initCommand) } // JoinReplicaInstance create a new PostgreSQL node, copying the contents from another Pod @@ -268,7 +268,7 @@ func JoinReplicaInstance(cluster apiv1.Cluster, nodeSerial int) *batchv1.Job { initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) - return createPrimaryJob(cluster, nodeSerial, jobRoleJoin, initCommand) + return CreatePrimaryJob(cluster, nodeSerial, jobRoleJoin, initCommand) } // RestoreReplicaInstance creates a new PostgreSQL replica starting from a volume snapshot backup @@ -282,7 +282,7 @@ func RestoreReplicaInstance(cluster apiv1.Cluster, nodeSerial int) *batchv1.Job initCommand = append(initCommand, buildCommonInitJobFlags(cluster)...) 
-	job := createPrimaryJob(cluster, nodeSerial, jobRoleSnapshotRecovery, initCommand)
+	job := CreatePrimaryJob(cluster, nodeSerial, jobRoleSnapshotRecovery, initCommand)
 
 	return job
 }
@@ -308,25 +308,14 @@ const (
 	jobRoleSnapshotRecovery jobRole = "snapshot-recovery"
 )
 
-var jobRoleList = []jobRole{jobRoleImport, jobRoleInitDB, jobRolePGBaseBackup, jobRoleFullRecovery, jobRoleJoin}
-
 // getJobName returns a string indicating the job name
 func (role jobRole) getJobName(instanceName string) string {
 	return fmt.Sprintf("%s-%s", instanceName, role)
 }
 
-// GetPossibleJobNames get all the possible job names for a given instance
-func GetPossibleJobNames(instanceName string) []string {
-	res := make([]string, len(jobRoleList))
-	for idx, role := range jobRoleList {
-		res[idx] = role.getJobName(instanceName)
-	}
-	return res
-}
-
-// createPrimaryJob create a job that executes the provided command.
+// CreatePrimaryJob creates a job that executes the provided command.
 // The role should describe the purpose of the executed job
-func createPrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initCommand []string) *batchv1.Job {
+func CreatePrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initCommand []string) *batchv1.Job {
 	instanceName := GetInstanceName(cluster.Name, nodeSerial)
 	jobName := role.getJobName(instanceName)
 
@@ -339,6 +328,7 @@ func createPrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initC
 			Labels: map[string]string{
 				utils.InstanceNameLabelName: instanceName,
 				utils.ClusterLabelName:      cluster.Name,
+				utils.JobRoleLabelName:      string(role),
 			},
 		},
 		Spec: batchv1.JobSpec{
@@ -359,12 +349,12 @@ func createPrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initC
 			Containers: []corev1.Container{
 				{
 					Name:            string(role),
-					Image:           cluster.GetImageName(),
+					Image:           cluster.Status.Image,
 					ImagePullPolicy: cluster.Spec.ImagePullPolicy,
 					Env:             envConfig.EnvVars,
 					EnvFrom:         envConfig.EnvFrom,
 					Command:         initCommand,
-					VolumeMounts:    createPostgresVolumeMounts(cluster),
+					VolumeMounts:    CreatePostgresVolumeMounts(cluster),
 					Resources:       cluster.Spec.Resources,
 					SecurityContext: CreateContainerSecurityContext(cluster.GetSeccompProfile()),
 				},
diff --git a/pkg/specs/pg_pods_test.go b/pkg/specs/pg_pods_test.go
index b41a3833ea..982be12afc 100644
--- a/pkg/specs/pg_pods_test.go
+++ b/pkg/specs/pg_pods_test.go
@@ -37,6 +37,9 @@ var _ = Describe("Extract the used image name", func() {
 			Name:      "clusterName",
 			Namespace: "default",
 		},
+		Status: apiv1.ClusterStatus{
+			Image: configuration.Current.PostgresImageName,
+		},
 	}
 	pod, err := NewInstance(context.TODO(), cluster, 1, true)
 	Expect(err).ToNot(HaveOccurred())
diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go
index 0146862804..85a4b67337 100644
--- a/pkg/specs/pods.go
+++ b/pkg/specs/pods.go
@@ -211,11 +211,11 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable
 	containers := []corev1.Container{
 		{
 			Name:            PostgresContainerName,
-			Image:           cluster.GetImageName(),
+			Image:           cluster.Status.Image,
 			ImagePullPolicy: cluster.Spec.ImagePullPolicy,
 			Env:             envConfig.EnvVars,
 			EnvFrom:         envConfig.EnvFrom,
-			VolumeMounts:    createPostgresVolumeMounts(cluster),
+			VolumeMounts:    CreatePostgresVolumeMounts(cluster),
 			// This is the default startup probe, and can be overridden
 			// by the user configuration in cluster.spec.probes.startup
 			StartupProbe: &corev1.Probe{
diff --git a/pkg/specs/pods_test.go b/pkg/specs/pods_test.go
index dcb69dc12e..ac2eba31d2 100644
--- a/pkg/specs/pods_test.go
+++ b/pkg/specs/pods_test.go
@@ -941,6
+941,9 @@ var _ = Describe("NewInstance", func() { utils.PodPatchAnnotationName: `[{"op": "replace", "path": "/spec/containers/0/image", "value": "new-image:latest"}]`, // nolint: lll }, }, + Status: v1.ClusterStatus{ + Image: "test", + }, } pod, err := NewInstance(ctx, cluster, 1, true) diff --git a/pkg/specs/volumes.go b/pkg/specs/volumes.go index 186040e268..4ac7dbddb9 100644 --- a/pkg/specs/volumes.go +++ b/pkg/specs/volumes.go @@ -222,7 +222,9 @@ func createVolumesAndVolumeMountsForSQLRefs( return volumes, volumeMounts } -func createPostgresVolumeMounts(cluster apiv1.Cluster) []corev1.VolumeMount { +// CreatePostgresVolumeMounts creates the volume mounts that are used +// by PostgreSQL Pods +func CreatePostgresVolumeMounts(cluster apiv1.Cluster) []corev1.VolumeMount { volumeMounts := []corev1.VolumeMount{ { Name: "pgdata", diff --git a/pkg/specs/volumes_test.go b/pkg/specs/volumes_test.go index ce2b59dda1..6370da9368 100644 --- a/pkg/specs/volumes_test.go +++ b/pkg/specs/volumes_test.go @@ -298,7 +298,7 @@ var _ = Describe("test createVolumesAndVolumeMountsForSQLRefs", func() { var _ = DescribeTable("test creation of volume mounts", func(cluster apiv1.Cluster, mounts []corev1.VolumeMount) { - mts := createPostgresVolumeMounts(cluster) + mts := CreatePostgresVolumeMounts(cluster) Expect(mts).NotTo(BeEmpty()) for _, mt := range mounts { Expect(mts).To(ContainElement(mt)) diff --git a/tests/e2e/cluster_major_upgrade_test.go b/tests/e2e/cluster_major_upgrade_test.go new file mode 100644 index 0000000000..2386e40ad1 --- /dev/null +++ b/tests/e2e/cluster_major_upgrade_test.go @@ -0,0 +1,356 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/cloudnative-pg/machinery/pkg/image/reference" + "github.com/cloudnative-pg/machinery/pkg/postgres/version" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "sigs.k8s.io/controller-runtime/pkg/client" + + v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/environment" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/storage" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade), func() { + const ( + level = tests.Medium + namespacePrefix = "cluster-major-upgrade" + postgisEntry = "postgis" + postgresqlEntry = "postgresql" + postgresqlMinimalEntry = "postgresql-minimal" + ) + + var namespace string + + type scenario struct { + startingCluster *v1.Cluster + startingMajor int + targetImage string + targetMajor int + } + scenarios := map[string]*scenario{} + + generateBaseCluster := func(namespace string, storageClass string) *v1.Cluster { + return &v1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg-major-upgrade", + Namespace: namespace, + }, + Spec: v1.ClusterSpec{ + Instances: 3, + StorageConfiguration: v1.StorageConfiguration{ + StorageClass: &storageClass, + Size: "1Gi", + }, + WalStorage: &v1.StorageConfiguration{ + StorageClass: &storageClass, + Size: "1Gi", + }, + PostgresConfiguration: v1.PostgresConfiguration{ + Parameters: map[string]string{ + "log_checkpoints": "on", + "log_lock_waits": "on", + "log_min_duration_statement": "1000", + "log_statement": "ddl", + "log_temp_files": "1024", + "log_autovacuum_min_duration": "1000", + "log_replication_commands": "on", + }, + }, + }, + } + } + + generatePostgreSQLCluster := func(namespace string, storageClass string, majorVersion int) *v1.Cluster { + cluster := generateBaseCluster(namespace, storageClass) + cluster.Spec.ImageName = "ghcr.io/cloudnative-pg/postgresql:" + strconv.Itoa(majorVersion) + cluster.Spec.Bootstrap = &v1.BootstrapConfiguration{ + InitDB: &v1.BootstrapInitDB{ + PostInitSQL: []string{ + "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;", + "CREATE EXTENSION IF NOT EXISTS pg_trgm;", + }, + }, + } + cluster.Spec.PostgresConfiguration.Parameters["pg_stat_statements.track"] = "top" + return cluster + } + generatePostgreSQLMinimalCluster := func(namespace string, storageClass string, majorVersion int) *v1.Cluster { + cluster := generatePostgreSQLCluster(namespace, storageClass, majorVersion) + cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%d-minimal-bookworm", majorVersion) + return cluster + } + + generatePostGISCluster := func(namespace string, storageClass string, majorVersion int) *v1.Cluster { + cluster := generateBaseCluster(namespace, storageClass) + cluster.Spec.ImageName = "ghcr.io/cloudnative-pg/postgis:" + strconv.Itoa(majorVersion) + cluster.Spec.Bootstrap = &v1.BootstrapConfiguration{ + InitDB: &v1.BootstrapInitDB{ + PostInitApplicationSQL: []string{ + "CREATE EXTENSION postgis", + "CREATE EXTENSION postgis_raster", + "CREATE EXTENSION postgis_sfcgal", + "CREATE EXTENSION fuzzystrmatch", + "CREATE EXTENSION address_standardizer", + "CREATE EXTENSION address_standardizer_data_us", + "CREATE EXTENSION postgis_tiger_geocoder", + "CREATE EXTENSION postgis_topology", + "CREATE TABLE geometries (name varchar, geom geometry)", + "INSERT INTO geometries VALUES" + + " ('Point', 'POINT(0 0)')," + + " ('Linestring', 'LINESTRING(0 0, 1 1, 2 1, 2 2)')," + + " ('Polygon', 'POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))')," + + " ('PolygonWithHole', 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1))')," + + " ('Collection', 'GEOMETRYCOLLECTION(POINT(2 0),POLYGON((0 0, 1 0, 1 1, 0 1, 0 0)))');", + }, + }, + } + return cluster + } + + determineVersionsForTesting := func() (uint64, uint64) { + currentImage := os.Getenv("POSTGRES_IMG") + Expect(currentImage).ToNot(BeEmpty()) + + currentVersion, err := version.FromTag(reference.New(currentImage).Tag) + 
Expect(err).NotTo(HaveOccurred()) + currentMajor := currentVersion.Major() + + targetVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(err).ToNot(HaveOccurred()) + targetMajor := targetVersion.Major() + + // If same version, choose a previous one for testing + if currentMajor == targetMajor { + currentMajor = targetMajor - (uint64(rand.Int() % 4)) - 1 + GinkgoWriter.Printf("Using %v as the current major version instead.\n", currentMajor) + } + + return currentMajor, targetMajor + } + + buildScenarios := func( + namespace string, storageClass string, currentMajor, targetMajor uint64, + ) map[string]*scenario { + return map[string]*scenario{ + postgisEntry: { + startingCluster: generatePostGISCluster(namespace, storageClass, int(currentMajor)), + startingMajor: int(currentMajor), + targetImage: fmt.Sprintf("ghcr.io/cloudnative-pg/postgis:%v", targetMajor), + targetMajor: int(targetMajor), + }, + postgresqlEntry: { + startingCluster: generatePostgreSQLCluster(namespace, storageClass, int(currentMajor)), + startingMajor: int(currentMajor), + targetImage: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%v", targetMajor), + targetMajor: int(targetMajor), + }, + postgresqlMinimalEntry: { + startingCluster: generatePostgreSQLMinimalCluster(namespace, storageClass, int(currentMajor)), + startingMajor: int(currentMajor), + targetImage: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%v-minimal-bookworm", targetMajor), + targetMajor: int(targetMajor), + }, + } + } + + verifyPodsChanged := func( + ctx context.Context, client client.Client, cluster *v1.Cluster, oldPodsUUIDs []types.UID, + ) { + Eventually(func(g Gomega) { + podList, err := clusterutils.ListPods(ctx, client, cluster.Name, cluster.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(podList.Items).To(HaveLen(len(oldPodsUUIDs))) + for _, pod := range podList.Items { + g.Expect(oldPodsUUIDs).NotTo(ContainElement(pod.UID)) + } + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + + verifyPVCsChanged := func( + ctx context.Context, client client.Client, cluster *v1.Cluster, oldPVCsUUIDs []types.UID, + ) { + Eventually(func(g Gomega) { + pvcList, err := storage.GetPVCList(ctx, client, cluster.Namespace) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pvcList.Items).To(HaveLen(len(oldPVCsUUIDs))) + for _, pvc := range pvcList.Items { + if pvc.Labels[utils.ClusterInstanceRoleLabelName] == specs.ClusterRoleLabelReplica { + g.Expect(oldPVCsUUIDs).NotTo(ContainElement(pvc.UID)) + } else { + g.Expect(oldPVCsUUIDs).To(ContainElement(pvc.UID)) + } + } + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + + verifyPostgresVersion := func( + env *environment.TestingEnvironment, primary *corev1.Pod, oldStdOut string, targetMajor int, + ) { + Eventually(func(g Gomega) { + stdOut, stdErr, err := exec.EventuallyExecQueryInInstancePod(env.Ctx, env.Client, env.Interface, + env.RestClientConfig, + exec.PodLocator{Namespace: primary.GetNamespace(), PodName: primary.GetName()}, postgres.AppDBName, + "SELECT version();", 60, objects.PollingTime) + g.Expect(err).ToNot(HaveOccurred(), "failed to execute version query") + g.Expect(stdErr).To(BeEmpty(), "unexpected stderr output when checking version") + g.Expect(stdOut).ToNot(Equal(oldStdOut), "postgres version did not change") + g.Expect(stdOut).To(ContainSubstring(strconv.Itoa(targetMajor)), + fmt.Sprintf("version string doesn't contain expected major version %d: %s", targetMajor, stdOut)) + 
}).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + + verifyCleanupAfterUpgrade := func(ctx context.Context, client client.Client, primary *corev1.Pod) { + shouldHaveBeenDeleted := []string{ + "/var/lib/postgresql/data/pgdata/pg_upgrade_output.d", + "/var/lib/postgresql/data/pgdata-new", + "/var/lib/postgresql/data/pgwal-new", + } + timeout := time.Second * 20 + for _, path := range shouldHaveBeenDeleted { + _, stdErr, err := exec.CommandInInstancePod(ctx, client, env.Interface, env.RestClientConfig, + exec.PodLocator{Namespace: primary.GetNamespace(), PodName: primary.GetName()}, &timeout, + "stat", path) + Expect(err).To(HaveOccurred(), "path: %s", path) + Expect(stdErr).To(ContainSubstring("No such file or directory"), "path: %s", path) + } + } + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + + currentMajor, targetMajor := determineVersionsForTesting() + var err error + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + + storageClass := os.Getenv("E2E_DEFAULT_STORAGE_CLASS") + Expect(storageClass).ToNot(BeEmpty()) + + // We cannot use generated entries in the DescribeTable, so we use the scenario key as a constant, but + // define the actual content here. + // See https://onsi.github.io/ginkgo/#mental-model-table-specs-are-just-syntactic-sugar + scenarios = buildScenarios(namespace, storageClass, currentMajor, targetMajor) + }) + + DescribeTable("can upgrade a Cluster to a newer major version", func(scenarioName string) { + By("Creating the starting cluster") + scenario := scenarios[scenarioName] + cluster := scenario.startingCluster + err := env.Client.Create(env.Ctx, cluster) + Expect(err).NotTo(HaveOccurred()) + AssertClusterIsReady(cluster.Namespace, cluster.Name, testTimeouts[timeouts.ClusterIsReady], + env) + + By("Collecting the pods UUIDs") + podList, err := clusterutils.ListPods(env.Ctx, env.Client, cluster.Name, cluster.Namespace) + Expect(err).ToNot(HaveOccurred()) + oldPodsUUIDs := make([]types.UID, len(podList.Items)) + for i, pod := range podList.Items { + oldPodsUUIDs[i] = pod.UID + } + + By("Collecting the PVCs UUIDs") + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, cluster.Namespace) + Expect(err).ToNot(HaveOccurred()) + oldPVCsUUIDs := make([]types.UID, len(pvcList.Items)) + for i, pvc := range pvcList.Items { + oldPVCsUUIDs[i] = pvc.UID + } + + By("Checking the starting version of the cluster") + primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, cluster.Namespace, cluster.Name) + Expect(err).ToNot(HaveOccurred()) + + oldStdOut, stdErr, err := exec.EventuallyExecQueryInInstancePod(env.Ctx, env.Client, env.Interface, + env.RestClientConfig, + exec.PodLocator{Namespace: primary.GetNamespace(), PodName: primary.GetName()}, postgres.AppDBName, + "SELECT version();", 60, objects.PollingTime) + Expect(err).ToNot(HaveOccurred()) + Expect(stdErr).To(BeEmpty()) + Expect(oldStdOut).To(ContainSubstring(strconv.Itoa(scenario.startingMajor))) + + By("Updating the major") + cluster, err = clusterutils.Get(env.Ctx, env.Client, cluster.Namespace, cluster.Name) + Expect(err).ToNot(HaveOccurred()) + cluster.Spec.ImageName = scenario.targetImage + // We wrap this in an Eventually to avoid possible failures if the cluster changes + Eventually(func() error { + return env.Client.Update(env.Ctx, cluster) + }).WithTimeout(1 * time.Minute).WithPolling(10 * 
time.Second).Should(Succeed()) + + By("Waiting for the cluster to be in the major upgrade phase") + Eventually(func(g Gomega) { + cluster, err = clusterutils.Get(env.Ctx, env.Client, cluster.Namespace, cluster.Name) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.Phase).To(Equal(v1.PhaseMajorUpgrade)) + }).WithTimeout(1 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + AssertClusterIsReady(cluster.Namespace, cluster.Name, testTimeouts[timeouts.ClusterIsReady], env) + + // The upgrade destroys all the original pods and creates new ones. We want to make sure that we have + // the same amount of pods as before, but with different UUIDs. + By("Verifying the pods UUIDs have changed") + verifyPodsChanged(env.Ctx, env.Client, cluster, oldPodsUUIDs) + + // The upgrade destroys all the original PVCs and creates new ones, except for the ones associated to the + // primary. We want to make sure that we have the same amount of PVCs as before, but with different UUIDs, + // which should be the same instead for the primary PVCs. + By("Verifying the replicas' PVCs have changed") + verifyPVCsChanged(env.Ctx, env.Client, cluster, oldPVCsUUIDs) + + // Check that the version has been updated + By("Verifying the cluster is running the target version") + verifyPostgresVersion(env, primary, oldStdOut, scenario.targetMajor) + + // Expect temporary files to be deleted + By("Checking no leftovers exist from the upgrade") + verifyCleanupAfterUpgrade(env.Ctx, env.Client, primary) + }, + Entry("PostGIS", postgisEntry), + Entry("PostgreSQL", postgresqlEntry), + Entry("PostgreSQL minimal", postgresqlMinimalEntry), + ) +}) diff --git a/tests/e2e/cluster_microservice_test.go b/tests/e2e/cluster_microservice_test.go index 198f3278ea..dbea8c78f4 100644 --- a/tests/e2e/cluster_microservice_test.go +++ b/tests/e2e/cluster_microservice_test.go @@ -24,13 +24,10 @@ import ( "os" "strings" - "github.com/cloudnative-pg/machinery/pkg/image/reference" - "github.com/cloudnative-pg/machinery/pkg/postgres/version" batchv1 "k8s.io/api/batch/v1" "k8s.io/apimachinery/pkg/types" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/exec" @@ -174,7 +171,7 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin Expect(postgresImage).ShouldNot(BeEmpty(), "POSTGRES_IMG env should not be empty") // this test case is only applicable if we are not already on the latest major - if shouldSkip(postgresImage) { + if postgres.IsLatestMajor(postgresImage) { Skip("Already running on the latest major. 
This test is not applicable for PostgreSQL " + postgresImage) } @@ -194,20 +191,6 @@ var _ = Describe("Imports with Microservice Approach", Label(tests.LabelImportin }) }) -// shouldSkip skip this test if the current POSTGRES_IMG is already the latest major -func shouldSkip(postgresImage string) bool { - // Get the current tag - currentImageReference := reference.New(postgresImage) - currentImageVersion, err := version.FromTag(currentImageReference.Tag) - Expect(err).ToNot(HaveOccurred()) - // Get the default tag - defaultImageReference := reference.New(versions.DefaultImageName) - defaultImageVersion, err := version.FromTag(defaultImageReference.Tag) - Expect(err).ToNot(HaveOccurred()) - - return currentImageVersion.Major() >= defaultImageVersion.Major() -} - // assertCreateTableWithDataOnSourceCluster will create on the source Cluster, as postgres superUser: // 1. a new user `micro` // 2. a new table with 2 records owned by `micro` in the `app` database diff --git a/tests/labels.go b/tests/labels.go index 50f7698db6..15d5461f6e 100644 --- a/tests/labels.go +++ b/tests/labels.go @@ -97,4 +97,7 @@ const ( // LabelUpgrade is a label for upgrade tests LabelUpgrade = "upgrade" + + // LabelPostgresMajorUpgrade is a label for Cluster major version upgrade tests + LabelPostgresMajorUpgrade = "postgres-major-upgrade" ) diff --git a/tests/utils/postgres/postgres.go b/tests/utils/postgres/postgres.go index 54f0327bf6..3c081fccc2 100644 --- a/tests/utils/postgres/postgres.go +++ b/tests/utils/postgres/postgres.go @@ -134,3 +134,21 @@ func BumpPostgresImageMajorVersion(postgresImage string) (string, error) { return imageReference.GetNormalizedName(), nil } + +// IsLatestMajor returns true if the given postgresImage is using latest Postgres major version +func IsLatestMajor(postgresImage string) bool { + // Get the current tag + currentImageReference := reference.New(postgresImage) + currentImageVersion, err := version.FromTag(currentImageReference.Tag) + if err != nil { + return false + } + // Get the default tag + defaultImageReference := reference.New(versions.DefaultImageName) + defaultImageVersion, err := version.FromTag(defaultImageReference.Tag) + if err != nil { + return false + } + + return currentImageVersion.Major() >= defaultImageVersion.Major() +} From bd7bf7c7119b73d047f1c1798b1fcbcd44922865 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 28 Mar 2025 14:11:51 +0100 Subject: [PATCH 492/836] chore: add FOSSA badge to the README.md (#7249) Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 9b9b956b2d..0fb9e7a64a 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9933/badge)][openssf] [![Documentation][documentation-badge]][documentation] [![Stack Overflow](https://img.shields.io/badge/stackoverflow-cloudnative--pg-blue?logo=stackoverflow&logoColor=%23F48024&link=https%3A%2F%2Fstackoverflow.com%2Fquestions%2Ftagged%2Fcloudnative-pg)][stackoverflow] +[![FOSSA Status][fossa-badge]][fossa] # Welcome to the CloudNativePG Project! @@ -174,3 +175,5 @@ of Canada, and used with their permission. 
[license]: https://github.com/cloudnative-pg/cloudnative-pg?tab=Apache-2.0-1-ov-file#readme [openssf]: https://www.bestpractices.dev/projects/9933 [documentation-badge]: https://img.shields.io/badge/Documentation-white?logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAGN0lEQVR4nJRXXWwcVxU%2B8%2F%2BzP%2BPZtR2v7dqy07jUJUALNaiK6lZyUVVKWgGKaIv8QCMekBAVQlQICcEzVZFQVYFKQhASEBHlISJPCRJEshTFChgrIYHEiYMh69jetffHM7Mzc%2B9Bs7vjnTs7yZpZWbt37s%2F5zne%2Bc861CD0eXRkbHc3NfjeffvxNAGEAgULD2756v35%2B3qe1Nc4fnQVEXlA2LnOcXlCF8S%2B6vvVgq%2FL3M65X3e51PvfQCU4WJgZe%2B8GQ8fS7AKgjBB8KEHwjDXZSjkf0CREAaXM2eI9c65siqWxWl360Xl74ANHz%2Fy8AitxnTBfmz%2BhyYS4wGhwObQCIHSA0AigOMBzvOsXzd4pnjyL6NMmWEH8hi2b28Og3%2FqRJA0ewfQy0v1vGO2NovwPo%2FEU%2FwVgSU1PI%2BSu79v3lJAB8HM%2BTI%2FO%2FUUXzM4xHIe0xI4DdRqOAwnF%2F38ePPyzaDIDh%2FMxcWh462m08aojuGY97C0nrAEHg9BlF0fmeAPr0J15vbaKsp0BZQzEDEAlP9B209UIIVXUta%2FQEQHwxgxFjTc%2BRskAwrgVWmHtg22vMPJwLDqGUNJIAMHVAkGu3WdpZz6NAkgSXpINSycluV28er1a3rJ4M3F2%2F9AtCvXKycRrTQttrjINjxxxIL9jevxdaDHU%2FTBr6pL5ruzuLZubgUQBOY2hPij3GBUe7tBCMBRE2KrXVSz0BBI%2FtPVgtV%2F%2FxkZ5WSjI%2F%2BFIXC3sHJwgT4yFqrZFFTSlVrp3sGYLwcfxSmXCbS00j2Ms4K7qkOsFx6qdTuiHtG4AimfmM8NyvOvR2G48qXtZ2fsfrN7%2BqpcRyUp0glKiimDm4TwAcHBp%2B9WeA4ki0GMWNR9OVF8BZvn7xtI%2FF09H8jzLEgz6yLwCDuelnFXHkTZZOytCOEdqDOtGwsm%2BNj00fXt%2B6%2Bj4vcA7bwNrZwENmXwAKuZnvsNRThs5ozMPfPiHyoDF7xiduHcXb70A8dRFheHjiySQATBZk0nl9MHPkBEWUoEtYjyrPFNwGzfdlD37Zdu98KCv%2BMmD2BYpUCvcST39e0%2BS1Wr249FAAg7mPzWrS5NstEbE0xrsiA6QN1PfRFLnhr%2BspxVJTlY8Mw1DqNXeyCQFREEXz9cHB0QOev73QaNhOF4B%2B45PHFHFgDhJTqjuubJFqX1KQco7NTTuW8kq95k2G4eLEGzM7lfItnjNeTKcOfV%2FT8hOuV77A9IK0XjgMpCO0ZiuV3L%2F6njCFAOmucGB3OII5XgCXEJTDdZLElVbu3Vz0fWexvL30k0B6ggBACOmIUBAEUKX0dDTvW7RCYcdZPq6n%2FSsQnUO2RuyBRgQ9Rc5mMvJ6CNIj1nXfd9qWAsCkaZzJAk1L8UjVqY737dSjfCGrPHWqXL32Q0mB%2F2BXnke00WaEYv2aTzAbnuV5pcWkDGAAGJmhSafh6hjr%2BW2SVYHrP7bb%2BOdPW%2FUgflGlTM2gaK%2Ft7tp6%2BN6yixdN89DcIwGktIFPABfNbwoQqQWEUnDJzg1g0jDeK5p7Kp7nensXFI7uyAr%2FLyM7fYLnpa6LYScE8vDnot5hrKlslm%2BfE3nVxJgO4o3KcYu%2FF8XM8yFQ27n%2F65Te%2FzKl3Jhpjj6TCIDneRD5%2FItxr1vdkALw7p1qfeWPpjHxMtsXaPxu6FLc%2BrnbSB1r7fcrlr36nqwMzQfnplJDryQCGOh%2FbLjhcM%2FEvQ4Pdund9xRV5m1LfTXaF%2BK9gsLGB9nsgddcz8thM%2FarPzYM8%2FFazf9sMFaU%2Fi%2FwvNANwEhPvUGR8ozn7d%2BiDKXixtKpbHp81nV9E7puRy31ixKUbOe%2Fv3Ud891ghhDrL5Z975eaOvV%2BCNRp0Gfz%2BcJjDABdTwlpdfKbId0t5XYAcHz5D5ZVtWUp9%2Flog2L7PgVJqZx0HOE5Cqghemv1%2Bt%2FeGBmZ%2BdB2yNN72UEpnzXG32YADA186i3bIpPxMhuKrFK%2Fd77JUnbkKbYvRJlC8DzKSZK76Lq1he2dKy%2BZuSfesSz5a2xHDbLJ%2BJaqdv5H4EUY%2BzbG2m9HgN7mg81bfw4W1uu7AjvHaqDhqF%2FZ3Fq5XFy%2FcESSDsx5fvZ7wLEsNfXk%2BjlVHfpSCOB%2FAQAA%2F%2F8zd8orZc2N9AAAAABJRU5ErkJggg%3D%3D +[fossa-badge]: https://app.fossa.com/api/projects/git%2Bgithub.com%2Fcloudnative-pg%2Fcloudnative-pg.svg?type=small +[fossa]: https://app.fossa.com/projects/git%2Bgithub.com%2Fcloudnative-pg%2Fcloudnative-pg?ref=badge_small From a6c913a62d101ce162f0b4cb49e1541e78f8f991 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 28 Mar 2025 16:10:03 +0100 Subject: [PATCH 493/836] docs: release notes for 1.26.0-rc1 (#7128) Closes #7123 Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 4 + docs/src/appendixes/object_stores.md | 6 ++ docs/src/backup.md | 15 ++-- docs/src/backup_barmanobjectstore.md | 6 ++ docs/src/backup_volumesnapshot.md | 4 + docs/src/container_images.md | 57 ++++++------- docs/src/installation_upgrade.md | 15 +++- docs/src/preview_version.md | 6 +- docs/src/recovery.md | 6 ++ docs/src/release_notes.md | 3 +- 
docs/src/release_notes/v1.26.md | 123 +++++++++++++++++++-------- docs/src/replica_cluster.md | 8 ++ docs/src/wal_archiving.md | 6 ++ 13 files changed, 179 insertions(+), 80 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 268856eaa1..512c2ba1f8 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -222,6 +222,8 @@ LDAPBindAsAuth LDAPBindSearchAuth LDAPConfig LDAPScheme +LF +LLC LPV LSN LTS @@ -521,6 +523,7 @@ YXBw YY YYYY Zalando +Zstandard abd accessKeyId accessModes @@ -1195,6 +1198,7 @@ robfig roleRef rollingupdatestatus rollout +rollouts rpo rto runonserver diff --git a/docs/src/appendixes/object_stores.md b/docs/src/appendixes/object_stores.md index 017011bf1a..5975e7ddd7 100644 --- a/docs/src/appendixes/object_stores.md +++ b/docs/src/appendixes/object_stores.md @@ -1,6 +1,12 @@ # Appendix A - Common object stores for backups +!!! Warning + With the deprecation of native Barman Cloud support in CloudNativePG in + favor of the Barman Cloud Plugin, this page—and the backup and recovery + documentation—may undergo changes before the official release of version + 1.26.0. + You can store the [backup](../backup.md) files in any service that is supported by the Barman Cloud infrastructure. That is: diff --git a/docs/src/backup.md b/docs/src/backup.md index 0f01434a0e..7a1196c7e9 100644 --- a/docs/src/backup.md +++ b/docs/src/backup.md @@ -1,6 +1,12 @@ # Backup +!!! Warning + With the deprecation of native Barman Cloud support in CloudNativePG in + favor of the Barman Cloud Plugin, this page—and the backup and recovery + documentation—may undergo changes before the official release of version + 1.26.0. + PostgreSQL natively provides first class backup and recovery capabilities based on file system level (physical) copy. These have been successfully used for more than 15 years in mission critical production databases, helping @@ -28,7 +34,9 @@ The WAL archive can only be stored on object stores at the moment. On the other hand, CloudNativePG supports two ways to store physical base backups: - on [object stores](backup_barmanobjectstore.md), as tarballs - optionally - compressed + compressed: + - Using the Barman Cloud plugin + - Natively via `.spec.backup.barmanObjectStore` (*deprecated, to be removed in CloudNativePG 1.28*) - on [Kubernetes Volume Snapshots](backup_volumesnapshot.md), if supported by the underlying storage class @@ -42,11 +50,6 @@ On the other hand, CloudNativePG supports two ways to store physical base backup the supported [Container Storage Interface (CSI) drivers](https://kubernetes-csi.github.io/docs/drivers.html) that provide snapshotting capabilities. -!!! Info - Starting with version 1.25, CloudNativePG includes experimental support for - backup and recovery using plugins, such as the - [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud). - ## WAL archive The WAL archive in PostgreSQL is at the heart of **continuous backup**, and it diff --git a/docs/src/backup_barmanobjectstore.md b/docs/src/backup_barmanobjectstore.md index 5583736a69..8c062f64bf 100644 --- a/docs/src/backup_barmanobjectstore.md +++ b/docs/src/backup_barmanobjectstore.md @@ -1,6 +1,12 @@ # Backup on object stores +!!! Warning + With the deprecation of native Barman Cloud support in CloudNativePG in + favor of the Barman Cloud Plugin, this page—and the backup and recovery + documentation—may undergo changes before the official release of version + 1.26.0. 
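+For orientation, here is a minimal sketch of a cluster using the native (now
+deprecated) `barmanObjectStore` stanza that this page documents; the bucket
+path and the `aws-creds` secret names are placeholders:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  backup:
+    # Deprecated in favor of the Barman Cloud Plugin, to be removed in 1.28
+    barmanObjectStore:
+      destinationPath: "s3://BUCKET_NAME/path/to/folder"
+      s3Credentials:
+        accessKeyId:
+          name: aws-creds
+          key: ACCESS_KEY_ID
+        secretAccessKey:
+          name: aws-creds
+          key: ACCESS_SECRET_KEY
+```
+
+Migrating to the Barman Cloud Plugin replaces this stanza with the plugin's
+own configuration, described in the plugin repository.
+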
+ CloudNativePG natively supports **online/hot backup** of PostgreSQL clusters through continuous physical backup and WAL archiving on an object store. This means that the database is always up (no downtime required) diff --git a/docs/src/backup_volumesnapshot.md b/docs/src/backup_volumesnapshot.md index 08da92baf6..814867cd40 100644 --- a/docs/src/backup_volumesnapshot.md +++ b/docs/src/backup_volumesnapshot.md @@ -58,6 +58,8 @@ volumes of a given storage class, and managed as `VolumeSnapshot` and ## How to configure Volume Snapshot backups + + CloudNativePG allows you to configure a given Postgres cluster for Volume Snapshot backups through the `backup.volumeSnapshot` stanza. @@ -333,6 +335,8 @@ spec: ## Example of Volume Snapshot Backup + + The following example shows how to configure volume snapshot base backups on an EKS cluster on AWS using the `ebs-sc` storage class and the `csi-aws-vsc` volume snapshot class. diff --git a/docs/src/container_images.md b/docs/src/container_images.md index 248cf1e02e..208c5d2a7a 100644 --- a/docs/src/container_images.md +++ b/docs/src/container_images.md @@ -1,43 +1,40 @@ # Container Image Requirements -The CloudNativePG operator for Kubernetes is designed to -work with any compatible container image of PostgreSQL that complies -with the following requirements: - -- PostgreSQL executables that must be in the path: - - `initdb` - - `postgres` - - `pg_ctl` - - `pg_controldata` - - `pg_basebackup` -- Barman Cloud executables that must be in the path: - - `barman-cloud-backup` - - `barman-cloud-backup-delete` - - `barman-cloud-backup-list` - - `barman-cloud-check-wal-archive` - - `barman-cloud-restore` - - `barman-cloud-wal-archive` - - `barman-cloud-wal-restore` -- PGAudit extension installed (optional - only if PGAudit is required - in the deployed clusters) -- Appropriate locale settings -- `du` (optional, for `kubectl cnpg status`) +The CloudNativePG operator for Kubernetes is designed to work with any +compatible PostgreSQL container image that meets the following requirements: + +- PostgreSQL executables must be available in the system path: + - `initdb` + - `postgres` + - `pg_ctl` + - `pg_controldata` + - `pg_basebackup` +- Proper locale settings configured + +Optional Components: + +- [PGAudit](https://www.pgaudit.org/) extension (only required if audit logging + is needed) +- `du` (used for `kubectl cnpg status`) !!! Important - Only [PostgreSQL versions supported by the PGDG](https://postgresql.org/) are allowed. + Only [PostgreSQL versions officially supported by PGDG](https://postgresql.org/) are allowed. + +!!! Info + Barman Cloud executables are no longer required in CloudNativePG. The + recommended approach is to use the dedicated [Barman Cloud Plugin](https://github.com/cloudnative-pg/plugin-barman-cloud). -No entry point and/or command is required in the image definition, as -CloudNativePG overrides it with its instance manager. +No entry point or command is required in the image definition. CloudNativePG +automatically overrides it with its instance manager. !!! Warning - Application Container Images will be used by CloudNativePG - in a **Primary with multiple/optional Hot Standby Servers Architecture** - only. + CloudNativePG only supports **Primary with multiple/optional Hot Standby + Servers architecture** for PostgreSQL application container images. 
-The CloudNativePG community provides and supports +The CloudNativePG community provides and maintains [public PostgreSQL container images](https://github.com/cloudnative-pg/postgres-containers) -that work with CloudNativePG, and publishes them on +that are fully compatible with CloudNativePG. These images are published on [ghcr.io](https://ghcr.io/cloudnative-pg/postgresql). ## Image Tag Requirements diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 62a4f719a5..083b99c080 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -253,12 +253,21 @@ When versions are not directly upgradable, the old version needs to be removed before installing the new one. This won't affect user data but only the operator itself. -### Upgrading to 1.25 from a previous minor version + + + +### Upgrading to 1.25 from a previous minor version !!! Warning Every time you are upgrading to a higher minor release, make sure you diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index adaf7032a3..273f5b60af 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -36,12 +36,10 @@ are not backwards compatible and could be removed entirely. There are currently no preview versions available. - diff --git a/docs/src/recovery.md b/docs/src/recovery.md index a78f3e240f..9f915aab31 100644 --- a/docs/src/recovery.md +++ b/docs/src/recovery.md @@ -1,6 +1,12 @@ # Recovery +!!! Warning + With the deprecation of native Barman Cloud support in CloudNativePG in + favor of the Barman Cloud Plugin, this page—and the backup and recovery + documentation—may undergo changes before the official release of version + 1.26.0. + In PostgreSQL terminology, recovery is the process of starting a PostgreSQL instance using an existing backup. The PostgreSQL recovery mechanism is very solid and rich. It also supports point-in-time recovery (PITR), which allows diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md index 5077b9e604..24fc667355 100644 --- a/docs/src/release_notes.md +++ b/docs/src/release_notes.md @@ -3,7 +3,8 @@ History of user-visible changes for CloudNativePG, classified for each minor release. - + +- [CloudNativePG 1.26 - Release Candidate](release_notes/v1.26.md) - [CloudNativePG 1.25](release_notes/v1.25.md) - [CloudNativePG 1.24](release_notes/v1.24.md) diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 426cb56cf9..b9ce2daf28 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -4,22 +4,41 @@ History of user-visible changes in the 1.26 minor release of CloudNativePG. For a complete list of changes, please refer to the -[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.26 +[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.26) on the release branch in GitHub. ## Version 1.26.0-rc1 -**Release date:** Mon DD, 20YY - -### Important changes: - -- The `hibernate on/off` commands in the `cnpg` plugin for `kubectl` now serve - as shortcuts for the declarative hibernation procedure. The previously - available imperative implementation has been removed in favor of the - declarative approach. Additionally, the `hibernate status` command has been - removed in favor of the standard `status` command. 
- **Do not upgrade to version 1.26 of both the plugin and the operator unless - you are prepared to migrate to the declarative method.** +**Release date:** Mar 28, 2025 + +### Important Changes + +- **CloudNativePG is now officially a CNCF project**: CloudNativePG has been + accepted into the Cloud Native Computing Foundation (CNCF), marking a + significant milestone in its evolution. As part of this transition, the project + is now governed under **CloudNativePG, a Series of LF Projects, LLC**, ensuring + long-term sustainability and community-driven innovation. (#7203) + +- **Deprecation of Native Barman Cloud Support**: Native support for Barman + Cloud backups and recovery is now deprecated and will be fully removed in + CloudNativePG 1.28.0. Users must begin migrating their existing clusters to the + new [Barman Cloud Plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) + to ensure a smooth transition. (#6876) + +- **End of Support for Barman 3.4 and Earlier**: CloudNativePG no longer + supports Barman versions 3.4 and earlier, including the capability detection + framework. Users running older operand versions (from before April 2023) must + update their operand before upgrading the operator to avoid compatibility + issues. (#7220) + +- **Hibernation Command Changes**: The `hibernate on` and `hibernate off` + commands in the `cnpg` plugin for `kubectl` now serve as shortcuts for + declarative hibernation. The previous imperative approach has been removed in + favor of this method. Additionally, the `hibernate status` command has been + removed, as its functionality is now covered by the standard `status` + command. **Warning:** Do not upgrade to version 1.26 of both the plugin and + the operator unless you are prepared to migrate to the declarative + hibernation method. (#7155) ### Features: @@ -40,46 +59,78 @@ on the release branch in GitHub. `extensions` and `schemas` stanzas in the Database resource to declaratively create, modify, and drop PostgreSQL extensions and schemas within a database. (#7062) -### Enhancements: - -- Implemented the `cnpg.io/validation` annotation, allowing users to disable - the validation webhook on CloudNativePG-managed resources. Use with caution, - as this can permit unrestricted changes. (#7196) +### Enhancements - Introduced the `STANDBY_TCP_USER_TIMEOUT` operator configuration setting, - which, if specified, sets the `tcp_user_timeout` parameter on all standby - instances managed by the operator. + allowing users to specify the `tcp_user_timeout` parameter on all standby + instances managed by the operator. (#7036) + +- Added the `pg_extensions` metric, providing information about installed + PostgreSQL extensions and their latest available versions. (#7195) + +- Introduced the `DRAIN_TAINTS` operator configuration option, enabling users + to customize which node taints indicate a node is being drained. This + replaces the previous fixed behavior of only recognizing + `node.kubernetes.io/unschedulable` as a drain signal. - Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator, allowing users to specify the domain suffix for fully qualified domain names (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to - `cluster.local`. (#6989) + `cluster.local`. (#6989) + +- Added support for LZ4, XZ, and Zstandard compression methods when archiving + WAL files via Barman Cloud (*deprecated*). 
(#7151) + +- Implemented the `cnpg.io/validation` annotation, enabling users to disable + the validation webhook on CloudNativePG-managed resources. Use with caution, + as this allows unrestricted changes. (#7196) + +- Added support for patching PostgreSQL instance pods using the + `cnpg.io/podPatch` annotation with a JSON Patch. This may introduce + discrepancies between the operator’s expectations and Kubernetes behavior, so + it should be used with caution. (#6323) + +- Added support for collecting `pg_stat_wal` metrics in PostgreSQL 18. (#7005) + +- CloudNativePG Interface (CNPG-I): + + - A plugin can now trigger instance rollouts by implementing the `EVALUATE` + verb, ensuring that plugin-induced changes are properly reconciled. (#7126) -- feat: support customizable pod patches via annotations (#6323) +### Fixes -- `cnpg` plugin updates: - - ... +- Resolved a race condition that caused the operator to perform two switchovers + when updating the PostgreSQL configuration. (#6991) -### Security: +- Corrected the `PodMonitor` configuration by adjusting the `matchLabels` scope + for the targeted pooler and cluster pods. Previously, the `matchLabels` were + too broad, inadvertently inheriting labels from the cluster and leading to data + collection from unintended targets. (#7063) -- Add ... -- Improve ... +- Added a webhook warning for clusters with a missing unit (e.g., MB, GB) in + the `shared_buffers` configuration. This will become an error in future + releases. Users should update their configurations to include explicit units + (e.g., `512MB` instead of `512`). (#7160) -### Fixes: +- Treated timeout errors during volume snapshot creation as retryable to + prevent unnecessary backup failures. (#7010) -- Enhance ... -- Disable ... -- Gracefully handle ... -- Wait ... -- Fix ... -- Address ... - `cnpg` plugin: - - ... - - ... + - Ensured that the primary Pod is recreated during an imperative restart when + `primaryUpdateMethod` is set to `restart`, aligning its definition with the + replicas. (#7122) + +- CloudNativePG Interface (CNPG-I): + - Implemented automatic reloading of TLS certificates for plugins when they + change. (#7029) + - Ensured the operator properly closes the plugin connection when + performing a backup using the plugin. (#7095, #7096) + - Fixed an issue that prevented WALs from being archived on a former + primary node when using a plugin. (#6964) ### Supported versions -- Kubernetes 1.31, 1.30, and 1.29 +- Kubernetes 1.32, 1.31, and 1.30 - PostgreSQL 17, 16, 15, 14, and 13 - PostgreSQL 17.X is the default image - PostgreSQL 13 support ends on November 12, 2025 diff --git a/docs/src/replica_cluster.md b/docs/src/replica_cluster.md index 37654f72af..9c1262f72f 100644 --- a/docs/src/replica_cluster.md +++ b/docs/src/replica_cluster.md @@ -1,6 +1,12 @@ # Replica clusters +!!! Warning + With the deprecation of native Barman Cloud support in CloudNativePG in + favor of the Barman Cloud Plugin, this page—and the backup and recovery + documentation—may undergo changes before the official release of version + 1.26.0. + A replica cluster is a CloudNativePG `Cluster` resource designed to replicate data from another PostgreSQL instance, ideally also managed by CloudNativePG. @@ -99,6 +105,8 @@ recovery. There are three main options: When configuring the external cluster, you have the following options: + + - **`barmanObjectStore` section**: - Enables use of the WAL archive, with CloudNativePG automatically setting the `restore_command` in the designated primary instance. 
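+As a sketch of how these options fit together (cluster names, bucket path, and
+secret names are placeholders), a replica cluster that fetches WAL files from
+the source's object store might be declared as:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-replica
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  bootstrap:
+    recovery:
+      source: cluster-origin
+  replica:
+    enabled: true
+    source: cluster-origin
+  externalClusters:
+    - name: cluster-origin
+      # Enables the WAL archive; CloudNativePG sets restore_command
+      # on the designated primary accordingly
+      barmanObjectStore:
+        destinationPath: "s3://BUCKET_NAME/"
+        s3Credentials:
+          accessKeyId:
+            name: aws-creds
+            key: ACCESS_KEY_ID
+          secretAccessKey:
+            name: aws-creds
+            key: ACCESS_SECRET_KEY
+```
+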
diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md index d9e34485bf..06872e6829 100644 --- a/docs/src/wal_archiving.md +++ b/docs/src/wal_archiving.md @@ -1,6 +1,12 @@ # WAL archiving +!!! Warning + With the deprecation of native Barman Cloud support in CloudNativePG in + favor of the Barman Cloud Plugin, this page—and the backup and recovery + documentation—may undergo changes before the official release of version + 1.26.0. + WAL archiving is the process that feeds a [WAL archive](backup.md#wal-archive) in CloudNativePG. From 1f1b38cb546236cb1b34ca80ca6d602cf70bb5fd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 28 Mar 2025 16:51:56 +0100 Subject: [PATCH 494/836] Version tag to 1.26.0-rc1 (#7253) Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- docs/src/installation_upgrade.md | 6 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.26.0-rc1.yaml | 18012 +++++++++++++++++++++++++++++ 4 files changed, 18033 insertions(+), 21 deletions(-) create mode 100644 releases/cnpg-1.26.0-rc1.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 083b99c080..5d00b272a3 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -8,12 +8,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.1.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc1.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.1.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc1.yaml ``` You can verify that with: @@ -74,7 +74,7 @@ specific minor release, you can just run: ```sh curl -sSfL \ - https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.25/manifests/operator-manifest.yaml | \ + https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.26/manifests/operator-manifest.yaml | \ kubectl apply --server-side -f - ``` diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 9c37c4ceb9..90e2edc290 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -31,11 +31,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.25.1 release of the plugin, for an Intel based +For example, let's install the 1.26.0-rc1 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.1/kubectl-cnpg_1.25.1_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc1/kubectl-cnpg_1.26.0-rc1_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -46,17 +46,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.25.1) ... -Setting up cnpg (1.25.1) ... +Unpacking cnpg (1.26.0-rc1) ... +Setting up cnpg (1.26.0-rc1) ... 
``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.25.1 release for an +As in the example for `.rpm` packages, let's install the 1.26.0-rc1 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.1/kubectl-cnpg_1.25.1_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc1/kubectl-cnpg_1.26.0-rc1_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -70,7 +70,7 @@ Dependencies resolved. Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.25.1 @commandline 20 M + cnpg x86_64 1.26.0-rc1 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.25.1 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.25.1 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.25.1 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc1 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc1 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc1 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.25.1 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.25.1 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.25.1 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc1 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc1 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc1 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.1","build":{"Version":"1.25.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc1","build":{"Version":"1.26.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.25.1","build":{"Version":"1.25.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc1","build":{"Version":"1.26.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 3c3b08c26d..15ef5a12d4 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.25.1" + Version = "1.26.0-rc1" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.4" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.25.1" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc1" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.25.1" + buildVersion = "1.26.0-rc1" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.26.0-rc1.yaml b/releases/cnpg-1.26.0-rc1.yaml new file mode 100644 index 0000000000..9c091246ed --- /dev/null +++ b/releases/cnpg-1.26.0-rc1.yaml @@ -0,0 +1,18012 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. 
If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set
+                            when type is PG_TABLESPACE
+                          type: string
+                        type:
+                          description: Type is the role of the snapshot in the cluster,
+                            such as PG_DATA, PG_WAL and PG_TABLESPACE
+                          type: string
+                      required:
+                      - name
+                      - type
+                      type: object
+                    type: array
+                type: object
+              startedAt:
+                description: When the backup was started
+                format: date-time
+                type: string
+              stoppedAt:
+                description: When the backup was terminated
+                format: date-time
+                type: string
+              tablespaceMapFile:
+                description: Tablespace map file content as returned by Postgres in
+                  case of online (hot) backups
+                format: byte
+                type: string
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.17.2
+  name: clusterimagecatalogs.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: ClusterImageCatalog
+    listKind: ClusterImageCatalogList
+    plural: clusterimagecatalogs
+    singular: clusterimagecatalog
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: ClusterImageCatalog is the Schema for the clusterimagecatalogs
+          API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: |-
+              Specification of the desired behavior of the ClusterImageCatalog.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              images:
+                description: List of CatalogImages available in the catalog
+                items:
+                  description: CatalogImage defines the image and major version
+                  properties:
+                    image:
+                      description: The image reference
+                      type: string
+                    major:
+                      description: The PostgreSQL major version of the image. Must
+                        be unique within the catalog.
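+                      # For example, a catalog entry pairing an image with its major
+                      # version (the image reference below is illustrative):
+                      #   - image: ghcr.io/cloudnative-pg/postgresql:16.2
+                      #     major: 16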
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                    type: object
+                  podAntiAffinityType:
+                    description: |-
+                      PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+                      considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+                      "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+                      added if all the existing nodes don't match the required pod anti-affinity rule.
+                      More info:
+                      https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+                    type: string
+                  tolerations:
+                    description: |-
+                      Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+                      on tainted nodes.
+                      More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+                    items:
+                      description: |-
+                        The pod this Toleration is attached to tolerates any taint that matches
+                        the triple <key,value,effect> using the matching operator <operator>.
+                      properties:
+                        effect:
+                          description: |-
+                            Effect indicates the taint effect to match. Empty means match all taint effects.
+                            When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                          type: string
+                        key:
+                          description: |-
+                            Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                            If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                          type: string
+                        operator:
+                          description: |-
+                            Operator represents a key's relationship to the value.
+                            Valid operators are Exists and Equal. Defaults to Equal.
+                            Exists is equivalent to wildcard for value, so that a pod can
+                            tolerate all taints of a particular category.
+                          type: string
+                        tolerationSeconds:
+                          description: |-
+                            TolerationSeconds represents the period of time the toleration (which must be
+                            of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                            it is not set, which means tolerate the taint forever (do not evict). Zero and
+                            negative values will be treated as 0 (evict immediately) by the system.
+                          format: int64
+                          type: integer
+                        value:
+                          description: |-
+                            Value is the taint value the toleration matches to.
+                            If the operator is Exists, the value should be empty, otherwise just a regular string.
+                          type: string
+                      type: object
+                    type: array
+                  topologyKey:
+                    description: |-
+                      TopologyKey to use for anti-affinity configuration. See the Kubernetes
+                      documentation for more information.
+                    type: string
+                type: object
+              backup:
+                description: The configuration to be used for backups
+                properties:
+                  barmanObjectStore:
+                    description: The configuration for the barman-cloud tool suite
+                    properties:
+                      azureCredentials:
+                        description: The credentials to use to upload data to Azure
+                          Blob Storage
+                        properties:
+                          connectionString:
+                            description: The connection string to be used
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          inheritFromAzureAD:
+                            description: Use the Azure AD based authentication without
+                              explicitly providing the keys.
+                            type: boolean
+                          storageAccount:
+                            description: The storage account where to upload data
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
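+                                # A minimal sketch of these Azure credentials, assuming a
+                                # secret named `azure-creds` (placeholder) with the listed keys:
+                                #   azureCredentials:
+                                #     storageAccount:
+                                #       name: azure-creds
+                                #       key: ACCOUNT_NAME
+                                #     storageKey:
+                                #       name: azure-creds
+                                #       key: ACCOUNT_KEY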
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          storageKey:
+                            description: |-
+                              The storage account key to be used in conjunction
+                              with the storage account name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          storageSasToken:
+                            description: |-
+                              A shared-access-signature to be used in conjunction with
+                              the storage account name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                        type: object
+                      data:
+                        description: |-
+                          The configuration to be used to backup the data files.
+                          When not defined, base backup files will be stored uncompressed and may
+                          be unencrypted in the object store, according to the bucket default
+                          policy.
+                        properties:
+                          additionalCommandArgs:
+                            description: |-
+                              AdditionalCommandArgs represents additional arguments that can be appended
+                              to the 'barman-cloud-backup' command-line invocation. These arguments
+                              provide flexibility to customize the backup process further according to
+                              specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+                              behavior during execution.
+                            items:
+                              type: string
+                            type: array
+                          compression:
+                            description: |-
+                              Compress a backup file (a tar file per tablespace) while streaming it
+                              to the object store. Available options are empty string (no
+                              compression, default), `gzip`, `bzip2`, and `snappy`.
+                            enum:
+                            - bzip2
+                            - gzip
+                            - snappy
+                            type: string
+                          encryption:
+                            description: |-
+                              Whether to force the encryption of files (if the bucket is
+                              not already configured for that).
+                              Allowed options are empty string (use the bucket policy, default),
+                              `AES256` and `aws:kms`
+                            enum:
+                            - AES256
+                            - aws:kms
+                            type: string
+                          immediateCheckpoint:
+                            description: |-
+                              Control whether the I/O workload for the backup initial checkpoint will
+                              be limited, according to the `checkpoint_completion_target` setting on
+                              the PostgreSQL server. If set to true, an immediate checkpoint will be
+                              used, meaning PostgreSQL will complete the checkpoint as soon as
+                              possible. `false` by default.
+                            type: boolean
+                          jobs:
+                            description: |-
+                              The number of parallel jobs to be used to upload the backup, defaults
+                              to 2
+                            format: int32
+                            minimum: 1
+                            type: integer
+                        type: object
+                      destinationPath:
+                        description: |-
+                          The path where to store the backup (e.g. s3://bucket/path/to/folder)
+                          this path, with different destination folders, will be used for WALs
+                          and for data
+                        minLength: 1
+                        type: string
+                      endpointCA:
+                        description: |-
+                          EndpointCA stores the CA bundle of the barman endpoint.
+                          Useful when using self-signed certificates to avoid
+                          errors with certificate issuer and barman-cloud-wal-archive
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
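+                            # Typically paired with `endpointURL` below when the object
+                            # store uses a self-signed certificate; values are placeholders:
+                            #   endpointURL: https://object-store.internal:9000
+                            #   endpointCA:
+                            #     name: store-ca
+                            #     key: ca.crt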
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                      endpointURL:
+                        description: |-
+                          Endpoint to be used to upload data to the cloud,
+                          overriding the automatic endpoint discovery
+                        type: string
+                      googleCredentials:
+                        description: The credentials to use to upload data to Google
+                          Cloud Storage
+                        properties:
+                          applicationCredentials:
+                            description: The secret containing the Google Cloud Storage
+                              JSON file with the credentials
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          gkeEnvironment:
+                            description: |-
+                              If set to true, will presume that it's running inside a GKE environment,
+                              defaults to false.
+                            type: boolean
+                        type: object
+                      historyTags:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          HistoryTags is a list of key value pairs that will be passed to the
+                          Barman --history-tags option.
+                        type: object
+                      s3Credentials:
+                        description: The credentials to use to upload data to S3
+                        properties:
+                          accessKeyId:
+                            description: The reference to the access key id
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          inheritFromIAMRole:
+                            description: Use the role based authentication without
+                              explicitly providing the keys.
+                            type: boolean
+                          region:
+                            description: The reference to the secret containing the
+                              region name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          secretAccessKey:
+                            description: The reference to the secret access key
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          sessionToken:
+                            description: The reference to the session token
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                        type: object
+                      serverName:
+                        description: |-
+                          The server name on S3, the cluster name is used if this
+                          parameter is omitted
+                        type: string
+                      tags:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Tags is a list of key value pairs that will be passed to the
+                          Barman --tags option.
+                        type: object
+                      wal:
+                        description: |-
+                          The configuration for the backup of the WAL stream.
+                          When not defined, WAL files will be stored uncompressed and may be
+                          unencrypted in the object store, according to the bucket default policy.
+                        properties:
+                          archiveAdditionalCommandArgs:
+                            description: |-
+                              Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+                              command-line invocation. These arguments provide flexibility to customize
+                              the WAL archive process further, according to specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+                              behavior during execution.
+                            items:
+                              type: string
+                            type: array
+                          compression:
+                            description: |-
+                              Compress a WAL file before sending it to the object store.
Available
+                              options are empty string (no compression, default), `gzip`, `bzip2`,
+                              `lz4`, `snappy`, `xz`, and `zstd`.
+                            enum:
+                            - bzip2
+                            - gzip
+                            - lz4
+                            - snappy
+                            - xz
+                            - zstd
+                            type: string
+                          encryption:
+                            description: |-
+                              Whether to force the encryption of files (if the bucket is
+                              not already configured for that).
+                              Allowed options are empty string (use the bucket policy, default),
+                              `AES256` and `aws:kms`
+                            enum:
+                            - AES256
+                            - aws:kms
+                            type: string
+                          maxParallel:
+                            description: |-
+                              Number of WAL files to be either archived in parallel (when the
+                              PostgreSQL instance is archiving to a backup object store) or
+                              restored in parallel (when a PostgreSQL standby is fetching WAL
+                              files from a recovery object store). If not specified, WAL files
+                              will be processed one at a time. It accepts a positive integer as a
+                              value - with 1 being the minimum accepted value.
+                            minimum: 1
+                            type: integer
+                          restoreAdditionalCommandArgs:
+                            description: |-
+                              Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                              command-line invocation. These arguments provide flexibility to customize
+                              the WAL restore process further, according to specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                              behavior during execution.
+                            items:
+                              type: string
+                            type: array
+                        type: object
+                    required:
+                    - destinationPath
+                    type: object
+                  retentionPolicy:
+                    description: |-
+                      RetentionPolicy is the retention policy to be used for backups
+                      and WALs (e.g. '60d'). The retention policy is expressed in the form
+                      of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+                      days, weeks, months.
+                      It's currently only applicable when using the BarmanObjectStore method.
+                    pattern: ^[1-9][0-9]*[dwm]$
+                    type: string
+                  target:
+                    default: prefer-standby
+                    description: |-
+                      The policy to decide which instance should perform backups. Available
+                      options are empty string, which will default to `prefer-standby` policy,
+                      `primary` to have backups run always on primary instances, `prefer-standby`
+                      to have backups run preferably on the most updated standby, if available.
+                    enum:
+                    - primary
+                    - prefer-standby
+                    type: string
+                  volumeSnapshot:
+                    description: VolumeSnapshot provides the configuration for the
+                      execution of volume snapshot backups.
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: Annotations are key-value pairs that will be added
+                          to the .metadata.annotations of snapshot resources.
+                        type: object
+                      className:
+                        description: |-
+                          ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+                          It is the default class for the other types if no specific class is present
+                        type: string
+                      labels:
+                        additionalProperties:
+                          type: string
+                        description: Labels are key-value pairs that will be added
+                          to the .metadata.labels of snapshot resources.
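+                        # Putting the backup policy fields above together (values are
+                        # illustrative, not defaults):
+                        #   retentionPolicy: "30d"   # matches ^[1-9][0-9]*[dwm]$
+                        #   target: prefer-standby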
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
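+                        # A hedged sketch of the locale fields above (values are
+                        # illustrative; `icu` needs PostgreSQL 15+, `builtin` 17+):
+                        #   initdb:
+                        #     localeProvider: icu
+                        #     icuLocale: en-US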
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. 
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
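# A sketch of an `initdb` bootstrap using the post-init hooks described
# above. The Secret name is hypothetical and must contain the referenced key.
#
#   spec:
#     bootstrap:
#       initdb:
#         database: app
#         owner: app
#         dataChecksums: true
#         postInitApplicationSQL:
#           - CREATE EXTENSION IF NOT EXISTS pg_stat_statements
#         postInitApplicationSQLRefs:
#           secretRefs:
#             - name: bootstrap-sql
#               key: init.sql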
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
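# A sketch of a point-in-time recovery bootstrap using the `recoveryTarget`
# fields above; the external cluster name and timestamp are hypothetical.
#
#   spec:
#     bootstrap:
#       recovery:
#         source: origin-cluster
#         recoveryTarget:
#           targetTime: "2024-09-25T08:52:53Z"   # RFC3339
#           exclusive: false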
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
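# A sketch of user-provided certificates as described above. All Secret
# names are hypothetical; each CA secret must contain `ca.crt` (and `ca.key`
# when the corresponding TLS secret is not provided).
#
#   spec:
#     certificates:
#       serverCASecret: server-ca
#       serverTLSSecret: server-tls
#       clientCASecret: client-ca
#       replicationTLSSecret: replication-tls
#       serverAltDNSNames:
#         - cluster-example.example.com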
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
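# A sketch of passing environment variables to the instance pods through the
# `env` and `envFrom` fields above; the ConfigMap name is hypothetical.
#
#   spec:
#     env:
#       - name: TZ
#         value: Europe/Rome
#     envFrom:
#       - configMapRef:
#           name: extra-env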
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
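# A sketch of an ephemeral volume template using the PersistentVolumeClaim
# fields above; the StorageClass name is hypothetical.
#
#   spec:
#     ephemeralVolumeSource:
#       volumeClaimTemplate:
#         spec:
#           accessModes: ["ReadWriteOnce"]
#           storageClassName: fast-ssd
#           resources:
#             requests:
#               storage: 1Gi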
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
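# A sketch of the `ephemeralVolumesSizeLimit` fields above, capping the
# shared memory and temporary data volumes; the sizes are arbitrary examples.
#
#   spec:
#     ephemeralVolumesSizeLimit:
#       shm: 256Mi
#       temporaryData: 1Gi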
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2`, and `snappy`.
+ enum:
+ - bzip2
+ - gzip
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ defaults to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ providing explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3; the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ archiveAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL archive process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2`,
+ `lz4`, `snappy`, `xz`, and `zstd`.
+ enum:
+ - bzip2
+ - gzip
+ - lz4
+ - snappy
+ - xz
+ - zstd
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
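# A sketch of an external cluster definition combining the connection and
# object store fields above; names, host and bucket path are hypothetical.
#
#   spec:
#     externalClusters:
#       - name: origin-cluster
#         connectionParameters:
#           host: origin.example.com
#           user: streaming_replica
#           dbname: postgres
#         password:
#           name: origin-credentials
#           key: password
#         barmanObjectStore:
#           destinationPath: s3://backups/origin-cluster
#           s3Credentials:
#             inheritFromIAMRole: true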
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
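# A sketch of the cluster-wide fields above (metadata inheritance, size,
# probe timeout and log level); the label value is hypothetical.
#
#   spec:
#     instances: 3
#     livenessProbeTimeout: 30
#     logLevel: info
#     inheritedMetadata:
#       labels:
#         team: database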
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
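# A sketch of a declaratively managed role using the fields above; the role
# and Secret names are hypothetical (`pg_read_all_data` is a PostgreSQL
# predefined role).
#
#   spec:
#     managed:
#       roles:
#         - name: app_reader
#           ensure: present
#           login: true
#           connectionLimit: 10
#           inRoles:
#             - pg_read_all_data
#           passwordSecret:
#             name: app-reader-password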
It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName.
+
+ This field may hold a maximum of two entries (dual-stack families, in
+ either order). These families must correspond to the values of the
+ clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ governed by the ipFamilyPolicy field.
+ items:
+ description: |-
+ IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+ to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ipFamilyPolicy:
+ description: |-
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+ this Service. If there is no value provided, then this field will be set
+ to SingleStack. Services can be "SingleStack" (a single IP family),
+ "PreferDualStack" (two IP families on dual-stack configured clusters or
+ a single IP family on single-stack clusters), or "RequireDualStack"
+ (two IP families on dual-stack configured clusters, otherwise fail). The
+ ipFamilies and clusterIPs fields depend on the value of this field. This
+ field will be wiped when updating a service to type ExternalName.
+ type: string
+ loadBalancerClass:
+ description: |-
+ loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ balancer implementation is used, today this is typically done through the cloud provider integration,
+ but should apply for any default implementation. If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information
+ on service's port.
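+ # Illustrative sketch, not part of the generated schema: a
+ # `managed.services.additional` entry combining the fields above; the
+ # service name and the LoadBalancer settings are placeholders.
+ #
+ #   managed:
+ #     services:
+ #       additional:
+ #       - selectorType: rw
+ #         serviceTemplate:
+ #           metadata:
+ #             name: cluster-example-rw-lb
+ #           spec:
+ #             type: LoadBalancer
+ #             externalTrafficPolicy: Local
+ #             loadBalancerSourceRanges:
+ #             - 10.0.0.0/8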
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
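+ # Illustrative sketch, not part of the generated schema: one
+ # `podMonitorMetricRelabelings` rule per the RelabelConfig fields above;
+ # the label names are examples only. The rule joins two source labels
+ # with "/" and writes the match into a new label.
+ #
+ #   monitoring:
+ #     enablePodMonitor: true
+ #     podMonitorMetricRelabelings:
+ #     - sourceLabels: [namespace, pod]
+ #       separator: "/"
+ #       regex: "(.+)"
+ #       action: replace
+ #       replacement: "$1"
+ #       targetLabel: scrape_target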
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + default: required + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. 
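+ # Illustrative sketch, not part of the generated schema: an LDAP
+ # bind+search configuration per the `postgresql.ldap` fields above; the
+ # host, DNs, and secret reference are placeholders.
+ #
+ #   postgresql:
+ #     ldap:
+ #       server: ldap.example.com
+ #       port: 636
+ #       scheme: ldaps
+ #       bindSearchAuth:
+ #         baseDN: ou=people,dc=example,dc=com
+ #         bindDN: cn=pgbind,dc=example,dc=com
+ #         bindPassword:
+ #           name: ldap-bind-password
+ #           key: password
+ #         searchAttribute: uid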
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
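+ # Illustrative sketch, not part of the generated schema: quorum-based
+ # synchronous replication per the `postgresql.synchronous` fields above.
+ # With method `any` and number 1, a transaction commit waits for an
+ # acknowledgement from at least one synchronous standby.
+ #
+ #   postgresql:
+ #     synchronous:
+ #       method: any
+ #       number: 1
+ #       dataDurability: required
+ #       maxStandbyNamesFromCluster: 2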
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
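+ # Illustrative sketch, not part of the generated schema: a `probes`
+ # configuration where readiness uses the `streaming` strategy with a lag
+ # limit, per the fields above; the values shown are placeholders.
+ #
+ #   probes:
+ #     readiness:
+ #       type: streaming
+ #       maximumLag: 16Mi
+ #       periodSeconds: 10
+ #       failureThreshold: 3
+ #     startup:
+ #       type: pg_isready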
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
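+ # Illustrative sketch, not part of the generated schema: projecting a
+ # ClusterTrustBundle by signer name under the `/projected` base folder,
+ # per the fields above; the signer name and path are placeholders, and
+ # the feature is alpha, as noted.
+ #
+ #   projectedVolumeTemplate:
+ #     sources:
+ #     - clusterTrustBundle:
+ #         signerName: example.com/internal-ca
+ #         path: ca-bundle.pem
+ #         optional: true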
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
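+ # Illustrative sketch, not part of the generated schema: a bound service
+ # account token projection; the audience and path are placeholders
+ # (expirationSeconds and path are described just below).
+ #
+ #   projectedVolumeTemplate:
+ #     sources:
+ #     - serviceAccountToken:
+ #         audience: vault.example.com
+ #         expirationSeconds: 3600
+ #         path: vault-token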
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. 
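+ # Illustrative sketch, not part of the generated schema: HA replication
+ # slots with the default `_cnpg_` prefix, per the fields above
+ # (synchronizeReplicas and updateInterval follow below).
+ #
+ #   replicationSlots:
+ #     highAvailability:
+ #       enabled: true
+ #       slotPrefix: _cnpg_
+ #     updateInterval: 30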
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ superuserSecret:
+ description: |-
+ The secret containing the superuser password. If not defined, a new
+ secret will be created with a randomly generated password
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ switchoverDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a primary PostgreSQL instance
+ to gracefully shutdown during a switchover.
+ Default value is 3600 seconds (1 hour).
+ format: int32
+ type: integer
+ tablespaces:
+ description: The tablespaces configuration
+ items:
+ description: |-
+ TablespaceConfiguration is the configuration of a tablespace, and includes
+ the storage specification for the tablespace
+ properties:
+ name:
+ description: The name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ properties:
+ name:
+ type: string
+ type: object
+ storage:
+ description: The storage configuration for the tablespace
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent
+ Volume Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to
+ the PersistentVolume backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ temporary:
+ default: false
+ description: |-
+ When set to true, the tablespace will be added as a `temp_tablespaces`
+ entry in PostgreSQL, and will be available to automatically house temp
+ database objects, or other temporary files. Please refer to PostgreSQL
+ documentation for more information on the `temp_tablespaces` GUC.
+ type: boolean
+ required:
+ - name
+ - storage
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching
+ pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
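+ # A minimal sketch of the constraint family described above (illustrative
+ # values; `cnpg.io/cluster: cluster-example` is assumed here as the pod
+ # label to match, and the zone key is the standard topology label):
+ #
+ #   topologySpreadConstraints:
+ #     - maxSkew: 1
+ #       topologyKey: topology.kubernetes.io/zone
+ #       whenUnsatisfiable: DoNotSchedule
+ #       labelSelector:
+ #         matchLabels:
+ #           cnpg.io/cluster: cluster-example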
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ required:
+ - instances
+ type: object
+ x-kubernetes-validations:
+ - message: imageName and imageCatalogRef are mutually exclusive
+ rule: '!(has(self.imageCatalogRef) && has(self.imageName))'
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ availableArchitectures:
+ description: AvailableArchitectures reports the available architectures
+ of a cluster
+ items:
+ description: AvailableArchitecture represents the state of a cluster's
+ architecture
+ properties:
+ goArch:
+ description: GoArch is the name of the executable architecture
+ type: string
+ hash:
+ description: Hash is the hash of the executable
+ type: string
+ required:
+ - goArch
+ - hash
+ type: object
+ type: array
+ azurePVCUpdateEnabled:
+ description: AzurePVCUpdateEnabled shows if the PVC online upgrade
+ is enabled for this cluster
+ type: boolean
+ certificates:
+ description: The configuration for the CA and related certificates,
+ initialized with defaults.
+ properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string
+ expirations:
+ additionalProperties:
+ type: string
+ description: Expiration dates for all certificates.
+ type: object
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must also provide `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+ description: The commit hash of the operator that is currently running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the binary of the operator
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
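+ # The fields in this status block are observed values, not user
+ # configuration; a hedged example of inspecting one of them with kubectl
+ # (the cluster name `cluster-example` is an assumption):
+ #
+ #   kubectl get cluster cluster-example \
+ #     -o jsonpath='{.status.firstRecoverabilityPoint}'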
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + majorVersionUpgradeFromImage: + description: |- + MajorVersionUpgradeFromImage contains the image that was + running before the major version upgrade started. + type: string + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name 
is the name of the plugin
+ type: string
+ operatorCapabilities:
+ description: |-
+ OperatorCapabilities are the list of capabilities of the
+ plugin regarding the reconciler
+ items:
+ type: string
+ type: array
+ restoreJobHookCapabilities:
+ description: |-
+ RestoreJobHookCapabilities are the list of capabilities of the
+ plugin regarding the RestoreJobHook management
+ items:
+ type: string
+ type: array
+ status:
+ description: Status contains the status reported by the plugin
+ through the SetStatusInCluster interface
+ type: string
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance, this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted.
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
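+# Example (illustrative sketch, not part of the generated schema): a hypothetical
+# Database manifest exercising the spec fields defined above. The cluster,
+# database, owner, and schema names are assumptions for illustration only; note
+# that icuLocale requires localeProvider to be set to icu (PostgreSQL 15+).
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: Database
+#   metadata:
+#     name: app-db
+#   spec:
+#     cluster:
+#       name: cluster-example
+#     name: app
+#     owner: app
+#     encoding: UTF8
+#     localeProvider: icu
+#     icuLocale: en-US
+#     extensions:
+#     - name: pgcrypto
+#       ensure: present
+#     schemas:
+#     - name: app_schema
+#       owner: app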
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              applied:
+                description: Applied is true if the database was reconciled correctly
+                type: boolean
+              extensions:
+                description: Extensions is the status of the managed extensions
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+              message:
+                description: Message is the reconciliation output message
+                type: string
+              observedGeneration:
+                description: |-
+                  A sequence number representing the latest
+                  desired state that was synchronized
+                format: int64
+                type: integer
+              schemas:
+                description: Schemas is the status of the managed schemas
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.17.2
+  name: imagecatalogs.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: ImageCatalog
+    listKind: ImageCatalogList
+    plural: imagecatalogs
+    singular: imagecatalog
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: ImageCatalog is the Schema for the imagecatalogs API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: |-
+              Specification of the desired behavior of the ImageCatalog.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              images:
+                description: List of CatalogImages available in the catalog
+                items:
+                  description: CatalogImage defines the image and major version
+                  properties:
+                    image:
+                      description: The image reference
+                      type: string
+                    major:
+                      description: The PostgreSQL major version of the image. Must
+                        be unique within the catalog.
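+# Example (illustrative sketch, not part of the generated schema): an
+# ImageCatalog pairing each PostgreSQL major version with a container image.
+# The catalog name and image references are assumptions; major versions must
+# be unique, as enforced by the validation rule below.
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: ImageCatalog
+#   metadata:
+#     name: postgresql-catalog
+#   spec:
+#     images:
+#     - major: 16
+#       image: ghcr.io/cloudnative-pg/postgresql:16.2
+#     - major: 17
+#       image: ghcr.io/cloudnative-pg/postgresql:17.0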
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
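+# Example (illustrative sketch, not part of the generated schema): one entry of
+# the shape accepted by podMonitorMetricRelabelings / podMonitorRelabelings.
+# The regex is an assumption; this sketch drops Go GC metrics before ingestion.
+#
+#   podMonitorMetricRelabelings:
+#   - action: drop
+#     sourceLabels:
+#     - __name__
+#     regex: go_gc_.*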
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
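+# Example (illustrative sketch, not part of the generated schema): a minimal
+# Pooler running PgBouncer in transaction mode in front of a cluster. Names and
+# parameter values are assumptions; since neither authQuery nor authQuerySecret
+# is set, the automatic CNPG Cluster integration described above applies.
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: Pooler
+#   metadata:
+#     name: pooler-rw
+#   spec:
+#     cluster:
+#       name: cluster-example
+#     instances: 2
+#     pgbouncer:
+#       poolMode: transaction
+#       parameters:
+#         max_client_conn: "500"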
+ type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. 
If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. 
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. 
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
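+ # Illustrative, non-normative sketch combining the env and $(VAR_NAME)
+ # expansion rules documented above: POD_NAME is resolved through the
+ # downward API (fieldRef) and expanded in args, while $$ escapes the
+ # expansion; the container name and image are assumed placeholders:
+ #   - name: sidecar
+ #     image: registry.example.com/sidecar:1.0
+ #     env:
+ #       - name: POD_NAME
+ #         valueFrom:
+ #           fieldRef:
+ #             fieldPath: metadata.name
+ #     args:
+ #       - --instance=$(POD_NAME)
+ #       - --literal=$$(NOT_EXPANDED)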
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
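+ # Illustrative, non-normative sketch of the lifecycle handlers documented
+ # above; the commands are assumed examples, and preStop runs while the
+ # pod's termination grace period is already counting down:
+ #   lifecycle:
+ #     postStart:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "echo started > /tmp/ready"]
+ #     preStop:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "sleep 5"]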
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
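+ # Illustrative, non-normative sketch of the livenessProbe fields documented
+ # above; with these assumed values the container is restarted after three
+ # consecutive failed HTTP checks, i.e. roughly 30 seconds of failure:
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     periodSeconds: 10
+ #     failureThreshold: 3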
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
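+ # Illustrative, non-normative sketch of the resizePolicy entries documented
+ # above, assuming in-place pod resize is available in the target cluster:
+ #   resizePolicy:
+ #     - resourceName: cpu
+ #       restartPolicy: NotRequired
+ #     - resourceName: memory
+ #       restartPolicy: RestartContainer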
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
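+ # Illustrative, non-normative sketch of the resources block documented
+ # above; the quantities are assumed examples, and requests may not exceed
+ # limits:
+ #   resources:
+ #     requests:
+ #       cpu: 100m
+ #       memory: 256Mi
+ #     limits:
+ #       cpu: "1"
+ #       memory: 512Mi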
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies a connection to
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
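A sketch of how the termination-message fields above combine; the path shown is the documented default, stated explicitly:

  terminationMessagePath: /dev/termination-log    # the default path
  terminationMessagePolicy: FallbackToLogsOnError
  # With this policy, if the container exits with an error without writing
  # the message file, the last chunk of its log (at most 2048 bytes or
  # 80 lines) becomes the container status message.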
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
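For example, a container might mount one whole volume read-write and a single file from a second volume via subPath; the volume names below are hypothetical:

  volumeMounts:
  - name: data                    # must match a volume declared in the pod
    mountPath: /var/lib/data
  - name: config
    mountPath: /etc/app/app.conf
    subPath: app.conf             # mount a single file out of the volume
    readOnly: true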
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
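As a sketch of the DNS fields just described (the resolver address is from the documentation range): with dnsPolicy "None", the pod's resolver configuration is built entirely from dnsConfig.

  dnsPolicy: "None"
  dnsConfig:
    nameservers:
    - 192.0.2.10          # hypothetical resolver
    searches:
    - svc.cluster.local
    options:
    - name: ndots
      value: "2"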
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
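A small sketch of the expansion rules described above, combining fieldRef with $(VAR_NAME) references; the variable names are arbitrary:

  env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  - name: GREETING
    value: "hello from $(POD_NAME)"   # expanded at container start
  - name: LITERAL
    value: "$$(POD_NAME)"             # $$ escapes: yields the literal "$(POD_NAME)"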
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
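For instance, the precedence rules above could be exercised like this (ConfigMap and Secret names are hypothetical):

  envFrom:
  - prefix: APP_
    configMapRef:
      name: app-defaults    # keys become APP_<key>
  - secretRef:
      name: app-secrets     # a later source wins on duplicate keys
  env:
  - name: APP_MODE
    value: debug            # explicit env entries win over envFrom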
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
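A minimal sketch of the two hooks together (the sleep action may additionally require the PodLifecycleSleepAction feature gate, depending on cluster version):

  lifecycle:
    postStart:
      exec:
        command: ["/bin/sh", "-c", "echo started >> /tmp/lifecycle.log"]
    preStop:
      sleep:
        seconds: 10   # let endpoints drain before the container stops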
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
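Ports can be named and then referenced by name from probes, as in this hypothetical sketch:

  ports:
  - name: metrics           # an IANA_SVC_NAME: lowercase, at most 15 characters
    containerPort: 9187     # assumed metrics port
    protocol: TCP
  readinessProbe:
    httpGet:
      path: /metrics
      port: metrics         # by name rather than by number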
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
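The quantity pattern above admits plain integers, decimal SI suffixes ("500m", "1G"), and binary suffixes ("1Gi"); a representative sketch:

  resources:
    requests:
      cpu: 500m        # half a CPU
      memory: 1Gi      # 2^30 bytes
    limits:
      cpu: "1"
      memory: 1Gi      # requests may not exceed limits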
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
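Taken together, the fields above support a restrictive container profile such as the following sketch (the UID is hypothetical):

  securityContext:
    runAsNonRoot: true              # kubelet refuses to start the container as UID 0
    runAsUser: 26
    allowPrivilegeEscalation: false
    capabilities:
      drop: ["ALL"]
    seccompProfile:
      type: RuntimeDefault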
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
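An ephemeral container is added through the pod's ephemeralcontainers subresource, typically via kubectl debug; the resulting entry might look like this sketch (image and target container name are hypothetical):

  # e.g. kubectl debug -it mypod --image=busybox:1.36 --target=postgres
  ephemeralContainers:
  - name: debugger
    image: busybox:1.36
    targetContainerName: postgres  # share that container's namespaces (PID, IPC, ...)
    stdin: true
    tty: true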
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+ mitigating container breakout vulnerabilities while still allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max of
+ that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
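+ # An illustrative sketch of an env entry using resourceFieldRef as described
+ # above; the variable name MEM_LIMIT_MB is hypothetical:
+ #   env:
+ #     - name: MEM_LIMIT_MB
+ #       valueFrom:
+ #         resourceFieldRef:
+ #           resource: limits.memory
+ #           divisor: 1Mi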
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
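+ # An illustrative sketch of envFrom as described above; the ConfigMap name
+ # "common-env" and the APP_ prefix are hypothetical:
+ #   envFrom:
+ #     - prefix: APP_
+ #       configMapRef:
+ #         name: common-env
+ #         optional: true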
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
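+ # An illustrative sketch of a postStart lifecycle hook as described above;
+ # the command is hypothetical:
+ #   lifecycle:
+ #     postStart:
+ #       exec:
+ #         command: ["/bin/sh", "-c", "echo started > /tmp/started"]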
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
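+ # An illustrative sketch of a preStop hook using the sleep handler described
+ # above, giving load balancers a few seconds to drain before termination:
+ #   lifecycle:
+ #     preStop:
+ #       sleep:
+ #         seconds: 5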
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
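+ # An illustrative sketch of a livenessProbe combining the fields above; the
+ # path and port are hypothetical. The container is restarted after
+ # failureThreshold consecutive failures (here 3 * 10s = 30s of failure):
+ #   livenessProbe:
+ #     httpGet:
+ #       path: /healthz
+ #       port: 8080
+ #     initialDelaySeconds: 10
+ #     periodSeconds: 10
+ #     failureThreshold: 3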
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
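+ # An illustrative sketch of a readinessProbe using tcpSocket as described
+ # above; port 5432 is hypothetical:
+ #   readinessProbe:
+ #     tcpSocket:
+ #       port: 5432
+ #     periodSeconds: 10
+ #     timeoutSeconds: 1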
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
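+ # An illustrative sketch of a startupProbe using exec as described above; the
+ # command is hypothetical. The total startup budget is
+ # failureThreshold * periodSeconds, i.e. 30 * 10s = 300s here:
+ #   startupProbe:
+ #     exec:
+ #       command: ["pg_isready", "-q"]
+ #     periodSeconds: 10
+ #     failureThreshold: 30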
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicates how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
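+ # An illustrative sketch of a volumeMounts entry using subPath as described
+ # below; the volume name "config" and the paths are hypothetical:
+ #   volumeMounts:
+ #     - name: config
+ #       mountPath: /etc/app/app.conf
+ #       subPath: app.conf
+ #       readOnly: true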
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+ If the OS field is set to linux, the following fields must be unset:
+ - securityContext.windowsOptions
+
+ If the OS field is set to windows, the following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.appArmorProfile
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.securityContext.supplementalGroupsPolicy
+ - spec.containers[*].securityContext.appArmorProfile
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+ Additional values may be defined in the future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Defaults to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by the specified scheduler. + If not specified, the pod will be dispatched by the default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated with a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate.
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied.
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Defaults to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Defaults to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be a non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys is equal to or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5 (MinDomains), so "global minimum" is treated as 0. + In this situation, a new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3 (3 - 0) if the new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each <key, value> as a "bucket", and try to put a balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field.
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specifies whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespace. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids is Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) with non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects.
Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether to support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether to support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal.
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specifies whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to back up
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: If the first backup has to start immediately
+ after creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: If this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup.
This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: Information about the last time a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: The next time a backup will run
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.2
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+ The name of the database where the subscription will be installed in
+ the "subscriber" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ externalClusterName:
+ description: The name of the external cluster with the publication
+ ("publisher")
+ type: string
+ name:
+ description: The name of the subscription inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Subscription parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE SUBSCRIPTION` command
+ type: object
+ publicationDBName:
+ description: |-
+ The name of the database containing the publication on the external
+ cluster. Defaults to the one in the external cluster definition.
+ type: string
+ publicationName:
+ description: |-
+ The name of the publication inside the PostgreSQL database in the
+ "publisher"
+ type: string
+ subscriptionReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this subscription
+ enum:
+ - delete
+ - retain
+ type: string
+ required:
+ - cluster
+ - dbname
+ - externalClusterName
+ - name
+ - publicationName
+ type: object
+ status:
+ description: SubscriptionStatus defines the observed state of Subscription
+ properties:
+ applied:
+ description: Applied is true if the subscription was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
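+        -- The *_diff_bytes columns report, in bytes, how far each standby's sent/write/flush/replay position trails pg_current_wal_lsn() on the primary.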
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc1 + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - 
postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From e5874bc675a14708006970055010e3b51a455e7e Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 28 Mar 2025 17:38:36 +0100 Subject: [PATCH 495/836] fix(security): switch to pinDigests to follow OSSF recommendations (#7258) Following the OSSF recommendations, we should pin all the dependencies in our workflows: https://github.com/ossf/scorecard/blob/026dc41355a4e40c7b64e7413b726c0bce326356/docs/checks.md#pinned-dependencies Closes #7257 Signed-off-by: Jonathan Gonzalez V.
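For illustration, once pinDigests is enabled Renovate rewrites every action
reference from a mutable tag to an immutable full-length commit SHA, keeping
the tag as a trailing comment (a minimal sketch; the digest shown is the
actions/checkout pin applied later in this series):

    # before: mutable tag reference
    - uses: actions/checkout@v4
    # after: pinned to an immutable commit SHA
    - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4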
--- .github/renovate.json5 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index e707b55b1d..31a0e631af 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -411,13 +411,13 @@ 'patch', ], groupName: 'all non-major github action', - pinDigests: false, + pinDigests: true, }, { matchDepTypes: [ 'action', ], - pinDigests: false, + pinDigests: true, }, { groupName: 'kubernetes CSI', From 78b8f6f3a0bb0e644ca7072df810c572fe3042d3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 09:17:45 +0200 Subject: [PATCH 496/836] chore(deps): pin dependencies (main) (#7259) --- .github/workflows/backport.yml | 12 +- .github/workflows/chatops.yml | 4 +- .github/workflows/close-inactive-issues.yml | 2 +- .github/workflows/codeql-analysis.yml | 10 +- .github/workflows/continuous-delivery.yml | 158 +++++++++--------- .github/workflows/continuous-integration.yml | 100 +++++------ .github/workflows/k8s-versions-check.yml | 12 +- .../latest-postgres-version-check.yml | 8 +- .github/workflows/pr_verify_linked_issue.yml | 2 +- .github/workflows/refresh-licenses.yml | 6 +- .github/workflows/registry-clean.yml | 4 +- .github/workflows/release-pr.yml | 4 +- .github/workflows/release-publish.yml | 50 +++--- .github/workflows/release-tag.yml | 4 +- .github/workflows/snyk.yml | 10 +- .github/workflows/spellcheck.yml | 6 +- .github/workflows/sync-api.yml | 2 +- 17 files changed, 197 insertions(+), 197 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 666ae4db8b..3ea607af9c 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Label the pull request - uses: actions-ecosystem/action-add-labels@v1 + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1 if: ${{ !contains(github.event.pull_request.labels.*.name, 'do not backport') }} with: github_token: ${{ secrets.REPO_GHA_PAT }} @@ -36,7 +36,7 @@ jobs: release-1.25 - name: Create comment - uses: peter-evans/create-or-update-comment@v4 + uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 with: token: ${{ secrets.GITHUB_TOKEN }} issue-number: ${{ github.event.pull_request.number }} @@ -49,7 +49,7 @@ jobs: reactions: heart - name: Remove redundant labels - uses: actions-ecosystem/action-remove-labels@v1 + uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1 if: ${{ contains(github.event.pull_request.labels.*.name, 'do not backport') }} with: github_token: ${{ secrets.REPO_GHA_PAT }} @@ -82,14 +82,14 @@ jobs: - name: Checkout code if: contains( github.event.pull_request.labels.*.name, matrix.branch ) - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: fetch-depth: 0 ref: ${{ matrix.branch }} token: ${{ secrets.REPO_GHA_PAT }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -144,7 +144,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: create ticket - uses: dacbd/create-issue-action@v2 + uses: dacbd/create-issue-action@cdb57ab6ff8862aa09fee2be6ba77a59581921c2 # v2 with: token: ${{ secrets.GITHUB_TOKEN }} title: Backport failure for pull request ${{ env.PR }} diff --git a/.github/workflows/chatops.yml 
b/.github/workflows/chatops.yml index eac136d990..4e16d70498 100644 --- a/.github/workflows/chatops.yml +++ b/.github/workflows/chatops.yml @@ -18,11 +18,11 @@ jobs: steps: - name: Check User Permission id: checkUser - uses: actions-cool/check-user-permission@v2 + uses: actions-cool/check-user-permission@7b90a27f92f3961b368376107661682c441f6103 # v2 with: require: 'write' - name: Add "ok to merge :ok_hand:" label to PR - uses: actions-ecosystem/action-add-labels@v1.1.3 + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1.1.3 if: steps.checkUser.outputs.require-result == 'true' with: github_token: ${{ secrets.REPO_GHA_PAT }} diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml index f24654a23f..ce0a2b4a1d 100644 --- a/.github/workflows/close-inactive-issues.yml +++ b/.github/workflows/close-inactive-issues.yml @@ -12,7 +12,7 @@ jobs: issues: write #pull-requests: write steps: - - uses: actions/stale@v9 + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9 with: days-before-issue-stale: 60 days-before-issue-close: 14 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 7e2134c70c..c26474661c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -43,7 +43,7 @@ jobs: should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }} steps: - id: skip_check - uses: fkirc/skip-duplicate-actions@v5.3.1 + uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1 with: concurrent_skipping: 'same_content' skip_after_successful_duplicate: 'true' @@ -64,17 +64,17 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3 with: languages: "go" build-mode: manual @@ -91,6 +91,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 9f6d940f9d..d8b46eff6d 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -73,7 +73,7 @@ jobs: branch: [release-1.22, release-1.24, release-1.25] steps: - name: Invoke workflow with inputs - uses: benc-uk/workflow-dispatch@v1 + uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 with: workflow: continuous-delivery ref: ${{ matrix.branch }} @@ -97,7 +97,7 @@ jobs: steps: - name: Check for Command id: command - uses: xt0rted/slash-command-action@v2 + uses: xt0rted/slash-command-action@bf51f8f5f4ea3d58abc7eca58f77104182b23e88 # v2 continue-on-error: false with: command: test @@ -147,11 +147,11 @@ jobs: echo "LOG_LEVEL=${LOG_LEVEL}" >> $GITHUB_ENV - name: Resolve Git reference - uses: xt0rted/pull-request-comment-branch@v3 + uses: xt0rted/pull-request-comment-branch@e8b8daa837e8ea7331c0003c9c316a64c6d8b0b1 # v3 id: refs - name: Create comment - uses: peter-evans/create-or-update-comment@v4 + uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 with: token: ${{ secrets.GITHUB_TOKEN }} repository: ${{ github.repository }} @@ -266,14 +266,14 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} # To identify the commit we need the history and all the tags. 
fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -337,7 +337,7 @@ jobs: echo PWD=$(pwd) >> $GITHUB_ENV - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 with: distribution: goreleaser version: v2 @@ -348,22 +348,22 @@ jobs: VERSION: ${{ env.VERSION }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 - name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/bake-action@v6 + uses: docker/bake-action@4ba453fbc2db7735392b93edf935aaf9b1e8f747 # v6 id: bake-push env: environment: "testing" @@ -379,7 +379,7 @@ jobs: - name: Install cosign if: env.SIGN_IMAGES == 'true' - uses: sigstore/cosign-installer@v3 + uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. @@ -411,7 +411,7 @@ jobs: make generate-manifest - name: Upload the operator manifest as artifact in workflow - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: operator-manifest.yaml path: dist/operator-manifest.yaml @@ -425,7 +425,7 @@ jobs: # NOTE: we only fire this in TEST DEPTH = 4, as that is the level of the # upgrade test name: Build binary for upgrade test - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 if: | always() && !cancelled() && needs.evaluate_options.outputs.test_level == '4' @@ -447,7 +447,7 @@ jobs: # NOTE: we only fire this in TEST DEPTH = 4, as that is the level of the # upgrade test name: Build and push image for upgrade test - uses: docker/build-push-action@v6 + uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6 if: | always() && !cancelled() && needs.evaluate_options.outputs.test_level == '4' @@ -477,7 +477,7 @@ jobs: steps: - name: Checkout artifact - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: repository: cloudnative-pg/artifacts token: ${{ secrets.REPO_GHA_PAT }} @@ -500,7 +500,7 @@ jobs: rm -fr manifests/operator-manifest.yaml - name: Prepare the operator manifest - uses: actions/download-artifact@v4 + uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 with: name: operator-manifest.yaml path: manifests @@ -519,7 +519,7 @@ jobs: git commit -m "${COMMIT_MESSAGE}" - name: Push changes - uses: ad-m/github-push-action@v0.8.0 + uses: ad-m/github-push-action@d91a481090679876dfc4178fef17f286781251df # v0.8.0 with: github_token: ${{ secrets.REPO_GHA_PAT }} repository: cloudnative-pg/artifacts @@ -560,7 +560,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -636,26 +636,26 @@ jobs: echo "-----------------------------------------------------" - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Install Kind - uses: helm/kind-action@v1.12.0 + uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 with: install_only: true version: ${{ env.KIND_VERSION }} @@ -713,7 +713,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -739,7 +739,7 @@ jobs: # Archive logs for failed test cases if there are any name: Archive Kind logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: kind-logs-${{ matrix.id }} path: kind-logs/ @@ -747,7 +747,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -758,7 +758,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -790,12 +790,12 @@ jobs: steps: - name: Azure Login - uses: azure/login@v2.2.0 + uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: Create AKS shared resources - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 id: setup with: timeout_minutes: 10 @@ -861,18 +861,18 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Prepare the environment - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_seconds: 300 max_attempts: 3 @@ -881,7 +881,7 @@ jobs: sudo apt-get install -y gettext-base - name: Install ginkgo - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 1 max_attempts: 3 @@ -890,24 +890,24 @@ jobs: - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ 
env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Azure Login - uses: azure/login@v2.2.0 + uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: Install kubectl - uses: azure/setup-kubectl@v4 + uses: azure/setup-kubectl@3e0aec4d80787158d308d7b364cb1b702e7feb7f # v4 with: version: v${{ env.K8S_VERSION }} - name: Create AKS cluster - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 10 max_attempts: 3 @@ -953,7 +953,7 @@ jobs: # use rook to get the small PV we use in the tests. # It can still take a while to deploy rook. name: Set up Rook - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 27 max_attempts: 1 @@ -1031,7 +1031,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -1056,7 +1056,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -1066,7 +1066,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -1137,13 +1137,13 @@ jobs: - name: Azure Login if: always() - uses: azure/login@v2.2.0 + uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: Teardown AKS shared resources if: always() - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 5 max_attempts: 3 @@ -1206,26 +1206,26 @@ jobs: echo "CLUSTER_NAME=${{ env.E2E_SUFFIX }}-test-${{ github.run_number }}-$( echo ${{ matrix.id }} | tr -d '_.-' )" >> $GITHUB_ENV - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Prepare the environment - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_seconds: 300 max_attempts: 3 @@ -1234,7 +1234,7 @@ jobs: sudo apt-get install -y gettext-base - name: Install ginkgo - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 1 max_attempts: 3 @@ -1242,14 +1242,14 @@ jobs: go install github.com/onsi/ginkgo/v2/ginkgo - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v4 + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4 with: 
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-region: ${{ env.AWS_REGION }} - name: Install eksctl - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_minutes: 1 max_attempts: 3 @@ -1301,7 +1301,7 @@ jobs: kubectl get storageclass - name: Setup Velero - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 env: VELERO_VERSION: "v1.15.2" VELERO_AWS_PLUGIN_VERSION: "v1.11.1" @@ -1400,7 +1400,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -1425,7 +1425,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -1435,7 +1435,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -1589,26 +1589,26 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Prepare the environment - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_seconds: 300 max_attempts: 3 @@ -1617,7 +1617,7 @@ jobs: sudo apt-get install -y gettext-base - name: Install ginkgo - uses: nick-fields/retry@v3 + uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 with: timeout_seconds: 120 max_attempts: 3 @@ -1635,12 +1635,12 @@ jobs: - name: Authenticate to Google Cloud id: 'auth' - uses: google-github-actions/auth@v2 + uses: google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f # v2 with: credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}' - name: Set up Cloud SDK and kubectl - uses: google-github-actions/setup-gcloud@v2 + uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a # v2 with: project_id: ${{ secrets.GCP_PROJECT_ID }} install_components: 'kubectl,gke-gcloud-auth-plugin' @@ -1732,7 +1732,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -1757,7 +1757,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -1767,7 +1767,7 @@ jobs: - name: 
Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -1899,28 +1899,28 @@ jobs: echo "CLUSTER_NAME=${{ env.E2E_SUFFIX }}-ocp-${{ github.run_number}}-$( echo ${{ matrix.k8s_version }} | tr -d '.' )" >> $GITHUB_ENV - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -1936,7 +1936,7 @@ jobs: make olm-catalog - name: Install OC Installer and client - uses: redhat-actions/openshift-tools-installer@v1 + uses: redhat-actions/openshift-tools-installer@144527c7d98999f2652264c048c7a9bd103f8a82 # v1 with: source: "mirror" openshift-install: ${{ matrix.k8s_version }} @@ -2026,7 +2026,7 @@ jobs: - name: Archive test artifacts if: (always() && !cancelled()) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: testartifacts-${{ env.MATRIX }} path: testartifacts-${{ env.MATRIX }}/ @@ -2051,7 +2051,7 @@ jobs: - name: Archive e2e failure contexts if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: test-failure-contexts-${{ matrix.id }} path: | @@ -2061,7 +2061,7 @@ jobs: - name: Archive e2e logs if: failure() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: cluster-logs-${{ matrix.id }} path: | @@ -2112,7 +2112,7 @@ jobs: run: mkdir test-artifacts - name: Download all artifacts to the directory - uses: actions/download-artifact@v4 + uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 with: path: test-artifacts pattern: testartifacts-* @@ -2130,13 +2130,13 @@ jobs: - name: Compute the E2E test summary id: generate-summary - uses: cloudnative-pg/ciclops@v1.3.1 + uses: cloudnative-pg/ciclops@f5a7b357a09f09052ec0358ac49e020f151f1653 # v1.3.1 with: artifact_directory: test-artifacts/data - name: If there is an overflow summary, archive it if: steps.generate-summary.outputs.Overflow - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: ${{ steps.generate-summary.outputs.Overflow }} path: ${{ steps.generate-summary.outputs.Overflow }} @@ -2145,7 +2145,7 @@ jobs: - name: Send the Ciclops view over Slack # Send the Ciclops thermometer on every scheduled run on `main`. 
# or when there are systematic failures in release branches - uses: rtCamp/action-slack-notify@v2 + uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990 # v2 if: | github.repository_owner == env.REPOSITORY_OWNER && ( @@ -2202,7 +2202,7 @@ jobs: - name: Label the PR as "ok to merge :ok_hand:" if: | env.OK_LABEL == '' - uses: actions-ecosystem/action-add-labels@v1.1.3 + uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf # v1.1.3 with: github_token: ${{ secrets.REPO_GHA_PAT }} number: ${{ github.event.issue.number }} @@ -2231,7 +2231,7 @@ jobs: - name: Remove "ok to merge :ok_hand:" label from PR if: | env.OK_LABEL != '' - uses: actions-ecosystem/action-remove-labels@v1.3.0 + uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 with: github_token: ${{ secrets.REPO_GHA_PAT }} number: ${{ github.event.issue.number }} diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 9eef899069..26bab4fd20 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -57,7 +57,7 @@ jobs: steps: - name: Invoke workflow with inputs - uses: benc-uk/workflow-dispatch@v1 + uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 with: workflow: continuous-integration ref: ${{ matrix.branch }} @@ -73,7 +73,7 @@ jobs: should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }} steps: - id: skip_check - uses: fkirc/skip-duplicate-actions@v5.3.1 + uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1 with: concurrent_skipping: 'same_content' skip_after_successful_duplicate: 'true' @@ -97,10 +97,10 @@ jobs: renovate-changed: ${{ steps.filter.outputs.renovate-changed }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Check for changes - uses: dorny/paths-filter@v3.0.2 + uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 id: filter # Remember to add new folders in the operator-changed filter if needed with: @@ -148,10 +148,10 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: # Disable setup-go caching. 
Cache is better handled by the golangci-lint action cache: false @@ -159,7 +159,7 @@ jobs: check-latest: true - name: Run golangci-lint - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # v6 with: version: ${{ env.GOLANGCI_LINT_VERSION }} @@ -178,7 +178,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Validate Renovate JSON run: npx --yes --package renovate -- renovate-config-validator @@ -197,7 +197,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Run govulncheck - uses: golang/govulncheck-action@v1 + uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # v1 with: go-version-input: ${{ env.GOLANG_VERSION }} check-latest: true @@ -216,10 +216,10 @@ jobs: SHELLCHECK_OPTS: -a -S style steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Run ShellCheck - uses: ludeeus/action-shellcheck@2.0.0 + uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 generate-unit-tests-jobs: name: Generate jobs for unit tests @@ -239,7 +239,7 @@ jobs: latest_k8s_version: ${{ steps.get-k8s-versions.outputs.latest_k8s_version }} steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Get k8s versions for unit test id: get-k8s-versions @@ -279,10 +279,10 @@ jobs: k8s-version: ${{ fromJSON(needs.generate-unit-tests-jobs.outputs.k8sMatrix) }} steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -318,10 +318,10 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -353,10 +353,10 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -420,13 +420,13 @@ jobs: push: ${{ env.PUSH }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -485,7 +485,7 @@ jobs: echo PWD=$(pwd) >> $GITHUB_ENV - name: Run GoReleaser to build kubectl plugin - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 if: | github.event_name == 'schedule' || ( @@ -505,7 +505,7 @@ jobs: # Send Slack notification if the kubectl plugin build fails. 
# To avoid message overflow, we only report runs scheduled on main or release branches - name: Slack Notification - uses: rtCamp/action-slack-notify@v2 + uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990 # v2 if: | failure() && github.repository_owner == env.REPOSITORY_OWNER && @@ -525,7 +525,7 @@ jobs: SLACK_MESSAGE: Building kubernetes plugin failed! - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 with: distribution: goreleaser version: v2 @@ -537,23 +537,23 @@ jobs: RACE: "true" - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} cache-image: false - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 - name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/bake-action@v6 + uses: docker/bake-action@4ba453fbc2db7735392b93edf935aaf9b1e8f747 # v6 id: bake-push env: environment: "testing" @@ -579,7 +579,7 @@ jobs: echo "CATALOG_IMG=${UBI}-catalog" >> $GITHUB_ENV - name: Dockle scan distroless image - uses: erzz/dockle-action@v1 + uses: erzz/dockle-action@69369bc745ee29813f730231a821bcd4f71cd290 # v1 if: env.PUSH == 'true' with: image: ${{ env.CONTROLLER_IMG }} @@ -588,7 +588,7 @@ jobs: accept-keywords: key - name: Dockle scan UBI image - uses: erzz/dockle-action@v1 + uses: erzz/dockle-action@69369bc745ee29813f730231a821bcd4f71cd290 # v1 if: env.PUSH == 'true' env: DOCKLE_IGNORES: CIS-DI-0009 @@ -611,7 +611,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork @@ -623,7 +623,7 @@ jobs: if: | env.SIGN_IMAGES == 'true' && env.PUSH == 'true' - uses: sigstore/cosign-installer@v3 + uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. 
@@ -653,28 +653,28 @@ jobs: needs.buildx.outputs.push == 'true' steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: fetch-depth: 0 ref: ${{ needs.buildx.outputs.commit }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} cache-image: false - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 - name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -689,7 +689,7 @@ jobs: make olm-catalog - name: Archive the bundle manifests - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: bundle path: | @@ -709,10 +709,10 @@ jobs: needs.olm-bundle.result == 'success' steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -722,7 +722,7 @@ jobs: make operator-sdk preflight - name: Loging to container registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -737,7 +737,7 @@ jobs: --docker-config $HOME/.docker/config.json - name: Archive the preflight results - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: preflight_results path: | @@ -766,32 +766,32 @@ jobs: github.repository_owner == 'cloudnative-pg' steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Setting up KinD cluster - uses: helm/kind-action@v1.12.0 + uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 with: wait: "600s" version: ${{ env.KIND_VERSION }} - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} cache-image: false - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 - name: Login into docker registry - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -824,20 +824,20 @@ jobs: OPP_RELEASE_INDEX_NAME: "catalog_tmp" steps: - name: Checkout community-operators - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: repository: k8s-operatorhub/community-operators persist-credentials: 
false - name: Login into docker registry - uses: redhat-actions/podman-login@v1 + uses: redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603 # v1 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} password: ${{ env.REGISTRY_PASSWORD }} - name: Download the bundle - uses: actions/download-artifact@v4 + uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 with: name: bundle diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index 43da7705b6..47b5c0438d 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -37,7 +37,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - # There is no command to get EKS k8s versions, we have to parse the documentation name: Get updated EKS versions @@ -49,7 +49,7 @@ jobs: if: github.event.inputs.limit == null || github.event.inputs.limit == 'eks' - name: Azure Login - uses: azure/login@v2.2.0 + uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks' @@ -64,19 +64,19 @@ jobs: if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks' - name: 'Auth GKE' - uses: 'google-github-actions/auth@v2' + uses: 'google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f' # v2 with: credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}' if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' - name: Set up Cloud SDK for GKE - uses: google-github-actions/setup-gcloud@v2 + uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a # v2 with: project_id: ${{ secrets.GCP_PROJECT_ID }} if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' - name: Install YQ - uses: frenck/action-setup-yq@v1 + uses: frenck/action-setup-yq@c4b5be8b4a215c536a41d436757d9feb92836d4f # v1 if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' - name: Get updated GKE versions @@ -123,7 +123,7 @@ jobs: if: github.event.inputs.limit == null || github.event.inputs.limit == 'ocp' - name: Create Pull Request if versions have been updated - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 with: token: ${{ secrets.REPO_GHA_PAT }} title: "feat: Public Cloud K8S versions update" diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index 395bf9aaa4..bc9f639252 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -17,10 +17,10 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Set up Python 3.9 - uses: actions/setup-python@v5 + uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5 with: python-version: 3.9 @@ -67,7 +67,7 @@ jobs: - name: Create PR to update PostgreSQL version if: env.LATEST_POSTGRES_VERSION_IMAGE != env.CURRENT_POSTGRES_VERSION_IMAGE - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 env: GITHUB_TOKEN: ${{ secrets.REPO_GHA_PAT }} with: @@ -80,7 +80,7 @@ jobs: - name: Create Pull Request if 
postgresql versions have been updated if: env.LATEST_POSTGRES_VERSION_IMAGE == env.CURRENT_POSTGRES_VERSION_IMAGE - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 env: GITHUB_TOKEN: ${{ secrets.REPO_GHA_PAT }} with: diff --git a/.github/workflows/pr_verify_linked_issue.yml b/.github/workflows/pr_verify_linked_issue.yml index 6cc10ece1c..fb724912dd 100644 --- a/.github/workflows/pr_verify_linked_issue.yml +++ b/.github/workflows/pr_verify_linked_issue.yml @@ -22,7 +22,7 @@ jobs: if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-issue') }} steps: - name: Verify Linked Issue - uses: hattan/verify-linked-issue-action@v1.1.5 + uses: hattan/verify-linked-issue-action@2d8e2e47a462cc7b07ba5e6cab6f9d57bd36672e # v1.1.5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index 4194df5fcc..8979611ec4 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -15,10 +15,10 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -28,7 +28,7 @@ jobs: make licenses - name: Create Pull Request if licenses have been updated - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 with: token: ${{ secrets.REPO_GHA_PAT }} title: "chore: refresh licenses directory" diff --git a/.github/workflows/registry-clean.yml b/.github/workflows/registry-clean.yml index 4b9d931e44..028f95b3b3 100644 --- a/.github/workflows/registry-clean.yml +++ b/.github/workflows/registry-clean.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Delete '-testing' operator images in ${{ env.IMAGE_NAME }} - uses: snok/container-retention-policy@v3.0.0 + uses: snok/container-retention-policy@4f22ef80902ad409ed55a99dc5133cc1250a0d03 # v3.0.0 with: image-names: ${{ env.IMAGE_NAME }} cut-off: 5d @@ -26,7 +26,7 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} - name: Delete '-testing' operand images - uses: snok/container-retention-policy@v3.0.0 + uses: snok/container-retention-policy@4f22ef80902ad409ed55a99dc5133cc1250a0d03 # v3.0.0 if: ${{ github.repository_owner == 'cloudnative-pg' }} with: image-names: ${{ env.CONTAINER_IMAGE_NAMES }} diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index 29fbc22605..10c53d1171 100644 --- a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Get tag run: | @@ -28,7 +28,7 @@ jobs: - name: Pull Request id: open-pr - uses: repo-sync/pull-request@v2.12 + uses: repo-sync/pull-request@572331753c3787dee4a6c0b6719c889af9646b81 # v2.12 with: destination_branch: ${{ env.DEST }} github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 2222cadc9d..7d994a5a1d 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -27,7 +27,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 @@ -56,7 +56,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Get tag run: | @@ -75,7 +75,7 @@ jobs: /src/docs/src/${{ env.FILE }} - name: Release - uses: softprops/action-gh-release@v2 + uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2 with: body_path: release_notes.md draft: false @@ -100,13 +100,13 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -136,7 +136,7 @@ jobs: - name: Import GPG key id: import_gpg - uses: crazy-max/ghaction-import-gpg@v6 + uses: crazy-max/ghaction-import-gpg@cb9bde2e2525e640591a934b1fd28eef1dcaf5e5 # v6 with: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} passphrase: ${{ secrets.GPG_PASSPHRASE }} @@ -150,7 +150,7 @@ jobs: echo "$GPG_PRIVATE_KEY" > gpg_signing_key.asc - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 with: distribution: goreleaser version: v2 @@ -167,7 +167,7 @@ jobs: if: | needs.check-version.outputs.is_latest == 'true' && needs.check-version.outputs.is_stable == 'true' - uses: rajatjindal/krew-release-bot@v0.0.47 + uses: rajatjindal/krew-release-bot@3d9faef30a82761d610544f62afddca00993eef9 # v0.0.47 with: krew_template_file: dist/krew/cnpg.yaml - @@ -179,22 +179,22 @@ jobs: echo "PLATFORMS=${platforms}" >> $GITHUB_ENV - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 - name: Login to ghcr.io - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/bake-action@v6 + uses: docker/bake-action@4ba453fbc2db7735392b93edf935aaf9b1e8f747 # v6 id: bake-push env: environment: "production" @@ -210,7 +210,7 @@ jobs: targets: "default" - name: Install cosign - uses: sigstore/cosign-installer@v3 + uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. 
@@ -234,26 +234,26 @@ jobs: needs.check-version.outputs.is_stable == 'true' steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: fetch-depth: 0 - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: platforms: ${{ needs.release-binaries.outputs.platforms }} - name: Install Go - uses: actions/setup-go@v5 + uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 - name: Login to ghcr.io - uses: docker/login-action@v3 + uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} @@ -276,7 +276,7 @@ jobs: make olm-catalog - name: Archive the bundle manifests - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: bundle path: | @@ -297,14 +297,14 @@ jobs: VERSION: ${{ needs.release-binaries.outputs.version }} steps: - name: Checkout community-operators - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: repository: k8s-operatorhub/community-operators fetch-depth: 0 persist-credentials: false - name: Download the bundle - uses: actions/download-artifact@v4 + uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 with: name: bundle @@ -315,7 +315,7 @@ jobs: rm -fr cloudnative-pg-catalog.yaml bundle.Dockerfile *.zip bundle/ - name: Create Remote Pull Request - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 with: token: ${{ secrets.REPO_GHA_PAT }} commit-message: "operator cloudnative-pg (${{ env.VERSION }})" @@ -374,7 +374,7 @@ jobs: steps: - name: Checkout artifact - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 with: repository: cloudnative-pg/artifacts token: ${{ secrets.REPO_GHA_PAT }} @@ -387,7 +387,7 @@ jobs: git config user.name "${{ needs.release-binaries.outputs.author_name }}" - name: Download the bundle - uses: actions/download-artifact@v4 + uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 with: name: bundle - @@ -409,7 +409,7 @@ jobs: git commit -sm "${COMMIT_MESSAGE}" - name: Push commit - uses: ad-m/github-push-action@v0.8.0 + uses: ad-m/github-push-action@d91a481090679876dfc4178fef17f286781251df # v0.8.0 with: github_token: ${{ secrets.REPO_GHA_PAT }} repository: cloudnative-pg/artifacts diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml index 3708028ec6..a22254936a 100644 --- a/.github/workflows/release-tag.yml +++ b/.github/workflows/release-tag.yml @@ -17,11 +17,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Create tag if: github.event.pull_request.merged == true && startsWith(github.head_ref, 'release/v') - uses: christophebedard/tag-version-commit@v1.7.0 + uses: christophebedard/tag-version-commit@57ffb155fc61c8ab098fcfa273469b532c1d4ce7 # v1.7.0 with: token: ${{ secrets.REPO_GHA_PAT }} version_regex: '^Version tag to ([0-9]+\.[0-9]+\.[0-9]+(?:-[a-z][0-9a-z]*)?)' diff --git 
a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 9fdb83fe0b..d12761a60a 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -14,10 +14,10 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Static Code Analysis - uses: snyk/actions/golang@0.4.0 + uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # 0.4.0 continue-on-error: true env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} @@ -26,12 +26,12 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3 with: sarif_file: snyk-static.sarif - name: Vulnerability scan - uses: snyk/actions/golang@0.4.0 + uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # 0.4.0 continue-on-error: true env: SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} @@ -39,6 +39,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3 with: sarif_file: snyk-test.sarif diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 27e4d031f8..75a19a5596 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -11,10 +11,10 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: woke - uses: get-woke/woke-action@v0 + uses: get-woke/woke-action@b2ec032c4a2c912142b38a6a453ad62017813ed0 # v0 with: # Cause the check to fail on any broke rules fail-on-error: true @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Spellcheck uses: rojopolis/spellcheck-github-actions@0.47.0 diff --git a/.github/workflows/sync-api.yml b/.github/workflows/sync-api.yml index 8160eb257e..c592679497 100644 --- a/.github/workflows/sync-api.yml +++ b/.github/workflows/sync-api.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Invoke repository dispatch - uses: peter-evans/repository-dispatch@v3 + uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3 with: token: ${{ secrets.REPO_GHA_PAT }} repository: cloudnative-pg/api From 2e017908c40a4c98f8522903b0ab684a369cfb08 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 1 Apr 2025 09:33:48 +0200 Subject: [PATCH 497/836] feat(security): add OSSF Scorecard action (#7251) Adding the OSSF Scorecard action to add the report and badge Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/ossf_scorecard.yml | 79 ++++++++++++++++++++++++++++ README.md | 3 ++ 2 files changed, 82 insertions(+) create mode 100644 .github/workflows/ossf_scorecard.yml diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml new file mode 100644 index 0000000000..6b385da77d --- /dev/null +++ b/.github/workflows/ossf_scorecard.yml @@ -0,0 +1,79 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. 
+ +name: Scorecard supply-chain security +on: + workflow_dispatch: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '17 9 * * 1' + push: + branches: [ "main" ] + +# Declare default permissions as read only. +permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + # `publish_results: true` only works when run from the default branch. conditional can be removed if disabled. + if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request' + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore + # file_mode: git + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard (optional). 
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: results.sarif diff --git a/README.md b/README.md index 0fb9e7a64a..00d090b9c9 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ [![Latest Release](https://img.shields.io/github/v/release/cloudnative-pg/cloudnative-pg.svg)][latest-release] [![GitHub License](https://img.shields.io/github/license/cloudnative-pg/cloudnative-pg)][license] [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9933/badge)][openssf] +[![OpenSSF Scorecard Badge][openssf-scorecard-badge]][openssf-socrecard-view] [![Documentation][documentation-badge]][documentation] [![Stack Overflow](https://img.shields.io/badge/stackoverflow-cloudnative--pg-blue?logo=stackoverflow&logoColor=%23F48024&link=https%3A%2F%2Fstackoverflow.com%2Fquestions%2Ftagged%2Fcloudnative-pg)][stackoverflow] [![FOSSA Status][fossa-badge]][fossa] @@ -174,6 +175,8 @@ of Canada, and used with their permission. [documentation]: https://cloudnative-pg.io/documentation/current/ [license]: https://github.com/cloudnative-pg/cloudnative-pg?tab=Apache-2.0-1-ov-file#readme [openssf]: https://www.bestpractices.dev/projects/9933 +[openssf-scorecard-badge]: https://api.scorecard.dev/projects/github.com/cloudnative-pg/cloudnative-pg/badge +[openssf-socrecard-view]: https://scorecard.dev/viewer/?uri=github.com/cloudnative-pg/cloudnative-pg [documentation-badge]: https://img.shields.io/badge/Documentation-white?logo=data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAGN0lEQVR4nJRXXWwcVxU%2B8%2F%2BzP%2BPZtR2v7dqy07jUJUALNaiK6lZyUVVKWgGKaIv8QCMekBAVQlQICcEzVZFQVYFKQhASEBHlISJPCRJEshTFChgrIYHEiYMh69jetffHM7Mzc%2B9Bs7vjnTs7yZpZWbt37s%2F5zne%2Bc861CD0eXRkbHc3NfjeffvxNAGEAgULD2756v35%2B3qe1Nc4fnQVEXlA2LnOcXlCF8S%2B6vvVgq%2FL3M65X3e51PvfQCU4WJgZe%2B8GQ8fS7AKgjBB8KEHwjDXZSjkf0CREAaXM2eI9c65siqWxWl360Xl74ANHz%2Fy8AitxnTBfmz%2BhyYS4wGhwObQCIHSA0AigOMBzvOsXzd4pnjyL6NMmWEH8hi2b28Og3%2FqRJA0ewfQy0v1vGO2NovwPo%2FEU%2FwVgSU1PI%2BSu79v3lJAB8HM%2BTI%2FO%2FUUXzM4xHIe0xI4DdRqOAwnF%2F38ePPyzaDIDh%2FMxcWh462m08aojuGY97C0nrAEHg9BlF0fmeAPr0J15vbaKsp0BZQzEDEAlP9B209UIIVXUta%2FQEQHwxgxFjTc%2BRskAwrgVWmHtg22vMPJwLDqGUNJIAMHVAkGu3WdpZz6NAkgSXpINSycluV28er1a3rJ4M3F2%2F9AtCvXKycRrTQttrjINjxxxIL9jevxdaDHU%2FTBr6pL5ruzuLZubgUQBOY2hPij3GBUe7tBCMBRE2KrXVSz0BBI%2FtPVgtV%2F%2FxkZ5WSjI%2F%2BFIXC3sHJwgT4yFqrZFFTSlVrp3sGYLwcfxSmXCbS00j2Ms4K7qkOsFx6qdTuiHtG4AimfmM8NyvOvR2G48qXtZ2fsfrN7%2BqpcRyUp0glKiimDm4TwAcHBp%2B9WeA4ki0GMWNR9OVF8BZvn7xtI%2FF09H8jzLEgz6yLwCDuelnFXHkTZZOytCOEdqDOtGwsm%2BNj00fXt%2B6%2Bj4vcA7bwNrZwENmXwAKuZnvsNRThs5ozMPfPiHyoDF7xiduHcXb70A8dRFheHjiySQATBZk0nl9MHPkBEWUoEtYjyrPFNwGzfdlD37Zdu98KCv%2BMmD2BYpUCvcST39e0%2BS1Wr249FAAg7mPzWrS5NstEbE0xrsiA6QN1PfRFLnhr%2BspxVJTlY8Mw1DqNXeyCQFREEXz9cHB0QOev73QaNhOF4B%2B45PHFHFgDhJTqjuubJFqX1KQco7NTTuW8kq95k2G4eLEGzM7lfItnjNeTKcOfV%2FT8hOuV77A9IK0XjgMpCO0ZiuV3L%2F6njCFAOmucGB3OII5XgCXEJTDdZLElVbu3Vz0fWexvL30k0B6ggBACOmIUBAEUKX0dDTvW7RCYcdZPq6n%2FSsQnUO2RuyBRgQ9Rc5mMvJ6CNIj1nXfd9qWAsCkaZzJAk1L8UjVqY737dSjfCGrPHWqXL32Q0mB%2F2BXnke00WaEYv2aTzAbnuV5pcWkDGAAGJmhSafh6hjr%2BW2SVYHrP7bb%2BOdPW%2FUgflGlTM2gaK%2Ft7tp6%2BN6yixdN89DcIwGktIFPABfNbwoQqQWEUnDJzg1g0jDeK5p7Kp7nensXFI7uyAr%2FLyM7fYLnpa6LYScE8vDnot5hrKlslm%2BfE3nVxJgO4o3KcYu%2FF8XM8yFQ27n%2F65Te%2FzKl3Jhpjj6TCIDneRD5%2FItxr1vdkALw7p1qfeWPpjHxMtsXaPxu6FLc%2BrnbSB1r7fcrlr36nqwMzQfnplJDryQCGOh%2FbLjhcM%2FEvQ4Pdund9xRV5m1LfTXaF%2BK9gsLGB9nsgddcz8thM%2FarPzYM8%2FFazf9sMFaU%2Fi
%2FwvNANwEhPvUGR8ozn7d%2BiDKXixtKpbHp81nV9E7puRy31ixKUbOe%2Fv3Ud891ghhDrL5Z975eaOvV%2BCNRp0Gfz%2BcJjDABdTwlpdfKbId0t5XYAcHz5D5ZVtWUp9%2Flog2L7PgVJqZx0HOE5Cqghemv1%2Bt%2FeGBmZ%2BdB2yNN72UEpnzXG32YADA186i3bIpPxMhuKrFK%2Fd77JUnbkKbYvRJlC8DzKSZK76Lq1he2dKy%2BZuSfesSz5a2xHDbLJ%2BJaqdv5H4EUY%2BzbG2m9HgN7mg81bfw4W1uu7AjvHaqDhqF%2FZ3Fq5XFy%2FcESSDsx5fvZ7wLEsNfXk%2BjlVHfpSCOB%2FAQAA%2F%2F8zd8orZc2N9AAAAABJRU5ErkJggg%3D%3D [fossa-badge]: https://app.fossa.com/api/projects/git%2Bgithub.com%2Fcloudnative-pg%2Fcloudnative-pg.svg?type=small [fossa]: https://app.fossa.com/projects/git%2Bgithub.com%2Fcloudnative-pg%2Fcloudnative-pg?ref=badge_small From f3b6e7a7af564c62c3cbae2632b3c689d15ed267 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Wed, 2 Apr 2025 10:42:35 +0200 Subject: [PATCH 498/836] test(major_upgrade): refetch cluster while executing image update (#7267) Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- tests/e2e/cluster_major_upgrade_test.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tests/e2e/cluster_major_upgrade_test.go b/tests/e2e/cluster_major_upgrade_test.go index 2386e40ad1..86704741ef 100644 --- a/tests/e2e/cluster_major_upgrade_test.go +++ b/tests/e2e/cluster_major_upgrade_test.go @@ -313,13 +313,19 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade Expect(oldStdOut).To(ContainSubstring(strconv.Itoa(scenario.startingMajor))) By("Updating the major") - cluster, err = clusterutils.Get(env.Ctx, env.Client, cluster.Namespace, cluster.Name) - Expect(err).ToNot(HaveOccurred()) - cluster.Spec.ImageName = scenario.targetImage - // We wrap this in an Eventually to avoid possible failures if the cluster changes Eventually(func() error { + cluster, err = clusterutils.Get(env.Ctx, env.Client, cluster.Namespace, cluster.Name) + if err != nil { + return err + } + cluster.Spec.ImageName = scenario.targetImage return env.Client.Update(env.Ctx, cluster) - }).WithTimeout(1 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }).WithTimeout(1*time.Minute).WithPolling(10*time.Second).Should( + Succeed(), + "Failed to update cluster image from %s to %s", + cluster.Spec.ImageName, + scenario.targetImage, + ) By("Waiting for the cluster to be in the major upgrade phase") Eventually(func(g Gomega) { From e5ef2fdaccd2c8fbdbf319f1224a0f90bbcfed30 Mon Sep 17 00:00:00 2001 From: smiyc <36233521+smiyc@users.noreply.github.com> Date: Wed, 2 Apr 2025 15:25:18 +0200 Subject: [PATCH 499/836] adding 1.26.0-rc1 as a version to the bug template (#7280) Fixes #7277 Signed-off-by: Daniel Chambre --- .github/ISSUE_TEMPLATE/bug.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 354cf7cb17..e200839b06 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -48,6 +48,7 @@ body: label: Version description: What is the version of CloudNativePG you are running? options: + - "1.26.0-rc1" - "1.25 (latest patch)" - "1.24 (latest patch)" - "trunk (main)" From d7138e7ce7e15fd8baed26175318055480c7148f Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 9 Apr 2025 09:59:39 +0200 Subject: [PATCH 500/836] fix: add missing pinDigest into renovate for spellcheck group (#7311) Closes #7257 Signed-off-by: Jonathan Gonzalez V. 
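
Note for reviewers: digest pinning replaces a mutable release tag with an
immutable commit SHA. A minimal sketch of the effect on a workflow step,
using the spellcheck action as an example (the SHA and version shown are
the ones the follow-up spellcheck update in this series proposes; the
step itself is illustrative):

    # Before: a mutable release tag that upstream can re-point.
    - name: Spellcheck
      uses: rojopolis/spellcheck-github-actions@0.47.0

    # After: an immutable commit SHA; the release is kept as a comment
    # so humans and Renovate can still tell which version it maps to.
    - name: Spellcheck
      uses: rojopolis/spellcheck-github-actions@23dc186319866e1de224f94fe1d31b72797aeec7 # 0.48.0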
---
 .github/renovate.json5 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/renovate.json5 b/.github/renovate.json5
index 31a0e631af..16987dc737 100644
--- a/.github/renovate.json5
+++ b/.github/renovate.json5
@@ -449,7 +449,7 @@
     {
       groupName: 'spellcheck',
       separateMajorMinor: false,
-      pinDigests: false,
+      pinDigests: true,
       matchPackageNames: [
         'jonasbn/github-action-spellcheck{/,}**',
         'rojopolis/spellcheck-github-actions{/,}**',

From d7ee079b0dba3c32d29b6ee055629210b43459ea Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 9 Apr 2025 10:41:53 +0200
Subject: [PATCH 501/836] chore(deps): update spellcheck to v0.48.0 (main)
 (#7305)

---
 .github/workflows/spellcheck.yml | 2 +-
 Makefile                         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml
index 75a19a5596..8693cf2439 100644
--- a/.github/workflows/spellcheck.yml
+++ b/.github/workflows/spellcheck.yml
@@ -28,4 +28,4 @@ jobs:
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

     - name: Spellcheck
-      uses: rojopolis/spellcheck-github-actions@0.47.0
+      uses: rojopolis/spellcheck-github-actions@23dc186319866e1de224f94fe1d31b72797aeec7 # 0.48.0
diff --git a/Makefile b/Makefile
index 133599d1e1..e572a80da5 100644
--- a/Makefile
+++ b/Makefile
@@ -48,7 +48,7 @@ KUSTOMIZE_VERSION ?= v5.6.0
 CONTROLLER_TOOLS_VERSION ?= v0.17.2
 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca
 GORELEASER_VERSION ?= v2.8.1
-SPELLCHECK_VERSION ?= 0.47.0
+SPELLCHECK_VERSION ?= 0.48.0
 WOKE_VERSION ?= 0.19.0
 OPERATOR_SDK_VERSION ?= v1.39.2
 OPM_VERSION ?= v1.51.0

From 79ed22eac46d9a82ee7cf3e4e281af05a8f7ac90 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 9 Apr 2025 11:07:42 +0200
Subject: [PATCH 502/836] chore(deps): pin github/codeql-action action to
 45775bd (main) (#7307)

---
 .github/workflows/ossf_scorecard.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml
index 6b385da77d..07d15cfd8d 100644
--- a/.github/workflows/ossf_scorecard.yml
+++ b/.github/workflows/ossf_scorecard.yml
@@ -74,6 +74,6 @@ jobs:
       # Upload the results to GitHub's code scanning dashboard (optional).
       # Commenting out will disable upload of results to your repo's Code Scanning dashboard
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@v3
+        uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3
         with:
           sarif_file: results.sarif

From 9ba69c6d2ab9023bdafb06b7b6cbadd344534d81 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 9 Apr 2025 11:08:23 +0200
Subject: [PATCH 503/836] chore(deps): update github/codeql-action digest to
 45775bd (main) (#7309)

---
 .github/workflows/codeql-analysis.yml        | 4 ++--
 .github/workflows/continuous-integration.yml | 2 +-
 .github/workflows/snyk.yml                   | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index c26474661c..32265c9841 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -74,7 +74,7 @@ jobs:

     # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
-     uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
+     uses: github/codeql-action/init@45775bd8235c68ba998cffa5171334d58593da47 # v3
      with:
        languages: "go"
        build-mode: manual
@@ -91,6 +91,6 @@
          make

    - name: Perform CodeQL Analysis
-     uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
+     uses: github/codeql-action/analyze@45775bd8235c68ba998cffa5171334d58593da47 # v3
      with:
        category: "/language:go"
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 26bab4fd20..6e9e15dbdb 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -611,7 +611,7 @@ jobs:
         args: --severity-threshold=high --file=Dockerfile

      - name: Upload result to GitHub Code Scanning
-       uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
+       uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3
        if: |
          !github.event.repository.fork &&
          !github.event.pull_request.head.repo.fork
diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml
index d12761a60a..ac03598042 100644
--- a/.github/workflows/snyk.yml
+++ b/.github/workflows/snyk.yml
@@ -26,7 +26,7 @@ jobs:
          args: --sarif-file-output=snyk-static.sarif

      - name: Upload result to GitHub Code Scanning
-       uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
+       uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3
        with:
          sarif_file: snyk-static.sarif

@@ -39,6 +39,6 @@
          args: --sarif-file-output=snyk-test.sarif

      - name: Upload result to GitHub Code Scanning
-       uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3
+       uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3
        with:
          sarif_file: snyk-test.sarif

From 9d9caf27d3bbccb3e0b76405a42a30c5f8f7630e Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 9 Apr 2025 11:24:22 +0200
Subject: [PATCH 504/836] chore(deps): update crazy-max/ghaction-import-gpg
 digest to e89d409 (main) (#7270)

---
 .github/workflows/release-publish.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml
index 7d994a5a1d..dd06f48f2d 100644
--- a/.github/workflows/release-publish.yml
+++ b/.github/workflows/release-publish.yml
@@ -136,7 +136,7 @@ jobs:

      - name: Import GPG key
        id: import_gpg
-       uses: crazy-max/ghaction-import-gpg@cb9bde2e2525e640591a934b1fd28eef1dcaf5e5 # v6
+       uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6
        with:
          gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.GPG_PASSPHRASE }}

From a24dba3053d2b4749594ba87c8698b301e763572 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Wed, 9 Apr 2025 12:20:52 +0200
Subject: [PATCH 505/836] feat: remove Azure CSI driver resize workaround
 (#7297)

This commit removes `ENABLE_AZURE_PVC_UPDATES` configuration and the
related status field. The configuration is no longer needed for the
operator to resize Azure volumes correctly.

This change has been incorporated in the Azure CSI driver since version
1.11.0.

Closes #7296

Signed-off-by: Armando Ruocco
Signed-off-by: Marco Nenciarini
Co-authored-by: Marco Nenciarini
---
 api/v1/cluster_types.go                    |   4 -
 .../bases/postgresql.cnpg.io_clusters.yaml |   4 -
 docs/src/cloudnative-pg.v1.md              |   7 --
 docs/src/operator_conf.md                  |   1 -
 docs/src/storage.md                        | 104 ------------------
 internal/configuration/configuration.go    |   3 -
 internal/controller/cluster_create.go      |  13 ---
 internal/controller/cluster_upgrade.go     |  16 ---
 8 files changed, 152 deletions(-)

diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index c570c9b6b0..3b7fd86cca 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -936,10 +936,6 @@ type ClusterStatus struct {
 	// +optional
 	OnlineUpdateEnabled bool `json:"onlineUpdateEnabled,omitempty"`

-	// AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster
-	// +optional
-	AzurePVCUpdateEnabled bool `json:"azurePVCUpdateEnabled,omitempty"`
-
 	// Image contains the image name used by the pods
 	// +optional
 	Image string `json:"image,omitempty"`
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 611ca6b9e3..943ff26301 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -5927,10 +5927,6 @@ spec:
               - hash
               type: object
             type: array
-          azurePVCUpdateEnabled:
-            description: AzurePVCUpdateEnabled shows if the PVC online upgrade
-              is enabled for this cluster
-            type: boolean
           certificates:
             description: The configuration for the CA and related certificates,
               initialized with defaults.
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 22a0a1d80d..5fd53c54e1 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -2251,13 +2251,6 @@ This field is reported when .spec.failoverDelay is populated or dur

    <p>OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster</p>
 </td>
 </tr>
-<tr><td><code>azurePVCUpdateEnabled</code><br/>
-<i>bool</i>
-</td>
-<td>
-   <p>AzurePVCUpdateEnabled shows if the PVC online upgrade is enabled for this cluster</p>
-</td>
-</tr>
 <tr><td><code>image</code><br/>
string diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md index 92ea33a47c..6be43321ab 100644 --- a/docs/src/operator_conf.md +++ b/docs/src/operator_conf.md @@ -39,7 +39,6 @@ Name | Description `CERTIFICATE_DURATION` | Determines the lifetime of the generated certificates in days. Default is 90. `CLUSTERS_ROLLOUT_DELAY` | The duration (in seconds) to wait between the roll-outs of different clusters during an operator upgrade. This setting controls the timing of upgrades across clusters, spreading them out to reduce system impact. The default value is `0` which means no delay between PostgreSQL cluster upgrades. `CREATE_ANY_SERVICE` | When set to `true`, will create `-any` service for the cluster. Default is `false` -`ENABLE_AZURE_PVC_UPDATES` | Enables to delete Postgres pod if its PVC is stuck in Resizing condition. This feature is mainly for the Azure environment (default `false`) `ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES` | When set to `true`, enables in-place updates of the instance manager after an update of the operator, avoiding rolling updates of the cluster (default `false`) `EXPIRING_CHECK_THRESHOLD` | Determines the threshold, in days, for identifying a certificate as expiring. Default is 7. `INCLUDE_PLUGINS` | A comma-separated list of plugins to be always included in the Cluster's reconciliation. diff --git a/docs/src/storage.md b/docs/src/storage.md index 1a25c09e3f..cacf8644ac 100644 --- a/docs/src/storage.md +++ b/docs/src/storage.md @@ -259,110 +259,6 @@ doesn't support that, you must delete the pod to trigger the resize. The best way to proceed is to delete one pod at a time, starting from replicas and waiting for each pod to be back up. -### Expanding PVC volumes on AKS - -Currently, [Azure can resize the PVC's volume without restarting the pod only on specific regions](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi#resize-a-persistent-volume-without-downtime). -CloudNativePG has overcome this limitation through the -`ENABLE_AZURE_PVC_UPDATES` environment variable in the -[operator configuration](operator_conf.md#available-options). -When set to `true`, CloudNativePG triggers a rolling update of the -Postgres cluster. - -Alternatively, you can use the following workaround to manually resize the -volume in AKS. - -#### Workaround for volume expansion on AKS - -You can manually resize a PVC on AKS. As an example, suppose you have a cluster -with three replicas: - -``` -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -cluster-example-1 1/1 Running 0 2m37s -cluster-example-2 1/1 Running 0 2m22s -cluster-example-3 1/1 Running 0 2m10s -``` - -An Azure disk can be expanded only while in "unattached" state, as described in the -[Kubernetes documentation](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/known-issues/sizegrow.md). -This means that, to resize a disk used by a PostgreSQL cluster, you need to -perform a manual rollout, first cordoning the node that hosts the pod using the -PVC bound to the disk. This prevents the operator from re-creating the pod and -immediately reattaching it to its PVC before the background disk resizing is -complete. - -First, edit the cluster definition, applying the new size. In this example, the -new size is `2Gi`. - -``` -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -metadata: - name: cluster-example -spec: - instances: 3 - - storage: - storageClass: default - size: 2Gi -``` - -Assuming the `cluster-example-1` pod is the cluster's primary, you can proceed -with the replicas first. 
For example, start with cordoning the Kubernetes node -that hosts the `cluster-example-3` pod: - -``` -kubectl cordon -``` - -Then delete the `cluster-example-3` pod: - -``` -$ kubectl delete pod/cluster-example-3 -``` - -Run the following command: - -``` -kubectl get pvc -w -o=jsonpath='{.status.conditions[].message}' cluster-example-3 -``` - -Wait until you see the following output: - -``` -Waiting for user to (re-)start a Pod to finish file system resize of volume on node. -``` - -Then, you can uncordon the node: - -``` -kubectl uncordon -``` - -Wait for the pod to be re-created correctly and get in a "Running and Ready" state: - -``` -kubectl get pods -w cluster-example-3 -cluster-example-3 0/1 Init:0/1 0 12m -cluster-example-3 1/1 Running 0 12m -``` - -Verify the PVC expansion by running the following command, which returns `2Gi` -as configured: - -``` -kubectl get pvc cluster-example-3 -o=jsonpath='{.status.capacity.storage}' -``` - -You can repeat these steps for the remaining pods. - -!!! Important - Leave the resizing of the disk associated with the primary instance as the - last disk, after promoting through a switchover a new resized pod, using - `kubectl cnpg promote`. For example, use `kubectl cnpg promote cluster-example 3` - to promote `cluster-example-3` to primary. - ### Re-creating storage If the storage class doesn't support volume expansion, you can still regenerate diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go index 28192b57df..c88905f847 100644 --- a/internal/configuration/configuration.go +++ b/internal/configuration/configuration.go @@ -124,9 +124,6 @@ type Data struct { // replacing the executable in a pod without restarting EnableInstanceManagerInplaceUpdates bool `json:"enableInstanceManagerInplaceUpdates" env:"ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES"` //nolint - // EnableAzurePVCUpdates enables the live update of PVC in Azure environment - EnableAzurePVCUpdates bool `json:"enableAzurePVCUpdates" env:"ENABLE_AZURE_PVC_UPDATES"` - // This is the lifetime of the generated certificates CertificateDuration int `json:"certificateDuration" env:"CERTIFICATE_DURATION"` diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 7d4fd93279..be7db3268a 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1345,19 +1345,6 @@ func (r *ClusterReconciler) ensureInstancesAreCreated( ) return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop } - - if configuration.Current.EnableAzurePVCUpdates { - for _, resizingPVC := range cluster.Status.ResizingPVC { - // if the pvc is in resizing state we requeue and wait - if resizingPVC == instancePVC.Name { - contextLogger.Info( - "PVC is in resizing status, retrying in 5 seconds", - "instance", instanceToCreate.Name, - ) - return ctrl.Result{RequeueAfter: 5 * time.Second}, ErrNextLoop - } - } - } } // If this cluster has been restarted, mark the Pod with the latest restart time diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go index c8be184023..98b15a45fe 100644 --- a/internal/controller/cluster_upgrade.go +++ b/internal/controller/cluster_upgrade.go @@ -357,7 +357,6 @@ func isPodNeedingRollout( checkers := map[string]rolloutChecker{ "pod has missing PVCs": checkHasMissingPVCs, - "pod has PVC requiring resizing": checkHasResizingPVC, "pod projected volume is outdated": checkProjectedVolumeIsOutdated, "pod image is outdated": checkPodImageIsOutdated, "cluster 
has different restart annotation": checkClusterHasDifferentRestartAnnotation, @@ -407,21 +406,6 @@ func hasValidPodSpec(pod *corev1.Pod) bool { return err == nil } -func checkHasResizingPVC(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { - if configuration.Current.EnableAzurePVCUpdates { - for _, pvcName := range cluster.Status.ResizingPVC { - // This code works on the assumption that the PVC begins with the name of the pod using it. - if persistentvolumeclaim.BelongToInstance(cluster, pod.Name, pvcName) { - return rollout{ - required: true, - reason: fmt.Sprintf("rebooting pod to complete the resizing of the following PVC: '%s'", pvcName), - }, nil - } - } - } - return rollout{}, nil -} - func checkPodNeedsUpdatedTopology(_ context.Context, pod *corev1.Pod, cluster *apiv1.Cluster) (rollout, error) { if reflect.DeepEqual(cluster.Spec.TopologySpreadConstraints, pod.Spec.TopologySpreadConstraints) { return rollout{}, nil From ff122bad5ab015419a9e7559aba092e75913735e Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 9 Apr 2025 14:34:25 +0200 Subject: [PATCH 506/836] docs: migrate to the CNCF Slack workspace (#7304) After joining the CNCF Sandbox, CloudNativePG Slack channels have been migrated to the CNCF Workspace: - #cloudnativepg-users: for general matters about CloudNativePG - #cloudnativepg-dev: for development matters around CloudNativePG - #cloudnativepg-www: for website, documentation, and auxiliary resources Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- CONTRIBUTING.md | 6 +++--- README.md | 3 ++- contribute/README.md | 5 +++-- docs/src/supported_releases.md | 20 ++++++++++++-------- 4 files changed, 20 insertions(+), 14 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index acb70e94e2..cb39b99447 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -10,8 +10,8 @@ our project that we need help with, including: * Bugs in our Github actions * Promotion of PostgreSQL on Kubernetes with our operator -First, though, it is important that you read the [code of -conduct](CODE_OF_CONDUCT.md). +First, though, it is important that you read the +[code of conduct](CODE_OF_CONDUCT.md). The guidelines below are a starting point. We don't want to limit your creativity, passion, and initiative. If you think there's a better way, please @@ -29,7 +29,7 @@ We welcome many types of contributions including: * [Documentation](docs/README.md) * Issue Triage * Answering questions on Slack or Github Discussions -* Web design +* [Website](https://github.com/cloudnative-pg/cloudnative-pg.github.io) * Communications / Social Media / Blog Posts * Events participation * Release management diff --git a/README.md b/README.md index 00d090b9c9..1cf028331f 100644 --- a/README.md +++ b/README.md @@ -92,7 +92,8 @@ CloudNativePG can be extended via the [CNPG-I plugin interface](https://github.c ## Communications - [Github Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions) -- [Slack Channel](https://join.slack.com/t/cloudnativepg/shared_invite/zt-30a6l6bp3-u1lNAmh~N02Cfiv2utKTFg) +- [Slack](https://cloud-native.slack.com/archives/C08MAUJ7NPM) + (join the [CNCF Slack Workspace](https://communityinviter.com/apps/cloud-native/cncf)). 
- [Twitter](https://twitter.com/CloudNativePg) - [Mastodon](https://mastodon.social/@CloudNativePG) - [Bluesky](https://bsky.app/profile/cloudnativepg.bsky.social) diff --git a/contribute/README.md b/contribute/README.md index 8e88260aab..2f66de58bb 100644 --- a/contribute/README.md +++ b/contribute/README.md @@ -9,8 +9,9 @@ a good set of docs that guide you through the development process. Having said t we know that everything can always be improved, so if you think our documentation is not enough, let us know or provide a pull request based on your experience. -Feel free to ask in the ["dev" chat](https://cloudnativepg.slack.com/archives/C03D68KGG65) -if you have questions or are seeking guidance. +If you have any questions or need guidance, feel free to reach out in the +[#cloudnativepg-dev](https://cloud-native.slack.com/archives/C08MW1HKF40) channel +on the [CNCF Slack workspace](https://communityinviter.com/apps/cloud-native/cncf). ## About our development workflow diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index d7f9af333c..8be8ff34d5 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -183,10 +183,14 @@ Only the last patch release of each branch is supported. We offer two types of support: Technical support -: Technical assistance is offered on a best-effort basis for supported - releases only. You can request support from the community on the - [CloudNativePG Slack](https://cloudnativepg.slack.com/) (in the `#general` channel), - or using [GitHub Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions). +: Technical assistance is offered on a best-effort basis and is limited to + supported releases only. For help, you can reach out to the community via the + [#cloudnativepg-users](https://cloud-native.slack.com/archives/C08MAUJ7NPM) + channel on the CNCF Slack workspace (if you're not yet a member, you can + [join the workspace](https://communityinviter.com/apps/cloud-native/cncf)). + Alternatively, you can post your questions in + the [GitHub Discussions](https://github.com/cloudnative-pg/cloudnative-pg/discussions) + section of the CloudNativePG repository. Security and bug fixes : We backport important bug fixes — including security fixes - to all @@ -194,7 +198,7 @@ Security and bug fixes *"Does this backport improve `CloudNativePG`, bearing in mind that we really value stability for already-released versions?"* -If you're looking for professional support, see the -[Support page in the website](https://cloudnative-pg.io/support/). -The vendors listed there might provide service level agreements that included -extended support timeframes. +If you’re looking for professional support, please refer to the +[Support page on our website](https://cloudnative-pg.io/support/). +The vendors listed there may offer service level agreements (SLA), including +extended support periods and additional services. From 1ddd97cf7c1b89ecaa19dff9ffcb8dafcbc9c969 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 9 Apr 2025 16:00:37 +0200 Subject: [PATCH 507/836] feat(security): pin digest and group updates (#7327) Closes #7257 Signed-off-by: Jonathan Gonzalez V. 
--- .github/renovate.json5 | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 16987dc737..d2eb5117ab 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -2,6 +2,9 @@ $schema: 'https://docs.renovatebot.com/renovate-schema.json', extends: [ 'config:recommended', + 'customManagers:dockerfileVersions', + 'docker:pinDigests', + 'helpers:pinGitHubActionDigests', ], rebaseWhen: 'never', prConcurrentLimit: 5, @@ -403,21 +406,17 @@ ], }, { - matchDepTypes: [ - 'action', + matchDatasources: [ + 'github-tags' ], matchUpdateTypes: [ + 'digest', + 'pinDigest', 'minor', - 'patch', + 'patch' ], groupName: 'all non-major github action', - pinDigests: true, - }, - { - matchDepTypes: [ - 'action', - ], - pinDigests: true, + pinDigests: true }, { groupName: 'kubernetes CSI', @@ -446,15 +445,6 @@ 'quay.io/operator-framework{/,}**', ], }, - { - groupName: 'spellcheck', - separateMajorMinor: false, - pinDigests: true, - matchPackageNames: [ - 'jonasbn/github-action-spellcheck{/,}**', - 'rojopolis/spellcheck-github-actions{/,}**', - ], - }, { groupName: 'cnpg', matchPackageNames: [ From 4f0b73f1b37db582637485e8e9609737dd014b34 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 17:23:30 +0200 Subject: [PATCH 508/836] chore(deps): update all non-major github action (main) (#7308) This PR contains the following updates: https://github.com/actions/upload-artifact `v4.6.1` -> `v4.6.2` https://github.com/azure/login `v2.2.0` -> `v2.3.0` https://github.com/goreleaser/goreleaser-action `90a3faa` -> `9c156ee` https://github.com/rtCamp/action-slack-notify `c337377` -> `e31e87e` --- .github/workflows/continuous-delivery.yml | 12 ++++++------ .github/workflows/continuous-integration.yml | 6 +++--- .github/workflows/k8s-versions-check.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/release-publish.yml | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index d8b46eff6d..2e477351f4 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -337,7 +337,7 @@ jobs: echo PWD=$(pwd) >> $GITHUB_ENV - name: Run GoReleaser - uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 + uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 with: distribution: goreleaser version: v2 @@ -425,7 +425,7 @@ jobs: # NOTE: we only fire this in TEST DEPTH = 4, as that is the level of the # upgrade test name: Build binary for upgrade test - uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 + uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 if: | always() && !cancelled() && needs.evaluate_options.outputs.test_level == '4' @@ -790,7 +790,7 @@ jobs: steps: - name: Azure Login - uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - @@ -897,7 +897,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Azure Login - uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - @@ -1137,7 +1137,7 @@ jobs: - name: Azure Login if: 
always() - uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - @@ -2145,7 +2145,7 @@ jobs: - name: Send the Ciclops view over Slack # Send the Ciclops thermometer on every scheduled run on `main`. # or when there are systematic failures in release branches - uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990 # v2 + uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2 if: | github.repository_owner == env.REPOSITORY_OWNER && ( diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 6e9e15dbdb..a5936961f9 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -485,7 +485,7 @@ jobs: echo PWD=$(pwd) >> $GITHUB_ENV - name: Run GoReleaser to build kubectl plugin - uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 + uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 if: | github.event_name == 'schedule' || ( @@ -505,7 +505,7 @@ jobs: # Send Slack notification if the kubectl plugin build fails. # To avoid message overflow, we only report runs scheduled on main or release branches - name: Slack Notification - uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990 # v2 + uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2 if: | failure() && github.repository_owner == env.REPOSITORY_OWNER && @@ -525,7 +525,7 @@ jobs: SLACK_MESSAGE: Building kubernetes plugin failed! - name: Run GoReleaser - uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 + uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 with: distribution: goreleaser version: v2 diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index 47b5c0438d..d3e2e6b68c 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -49,7 +49,7 @@ jobs: if: github.event.inputs.limit == null || github.event.inputs.limit == 'eks' - name: Azure Login - uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 + uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks' diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 07d15cfd8d..b820e2097c 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -65,7 +65,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index dd06f48f2d..974016b255 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -150,7 +150,7 @@ jobs: echo "$GPG_PRIVATE_KEY" > gpg_signing_key.asc - name: Run GoReleaser - uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v6 + uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 with: distribution: goreleaser version: v2 From 7030f2ead8e7ff2a3e32373b39f0efb3645b89c3 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 9 Apr 2025 18:05:58 +0200 Subject: [PATCH 509/836] refactor(cnpgi): remove code duplication and code dependencies (#7242) This commit refactors the cnpg-i implementation by removing duplicate code and minimizing dependencies between modules. Signed-off-by: Armando Ruocco --- api/v1/cluster_funcs.go | 3 +- internal/cmd/manager/walrestore/cmd.go | 14 +---- internal/cnpi/plugin/client/client.go | 5 ++ internal/cnpi/plugin/client/contracts.go | 26 +++++++++- internal/cnpi/plugin/client/create.go | 52 +++++++++++++++++++ internal/cnpi/plugin/client/restore_job.go | 10 ++-- internal/cnpi/plugin/connection/unix.go | 4 +- internal/cnpi/plugin/operatorclient/client.go | 8 +-- .../cnpi/plugin/operatorclient/client_test.go | 6 +-- internal/controller/backup_controller.go | 2 +- internal/controller/cluster_controller.go | 4 +- internal/controller/cluster_plugins.go | 3 +- internal/controller/cluster_upgrade_test.go | 12 ++--- internal/controller/plugins.go | 15 +----- pkg/management/postgres/archiver/archiver.go | 26 +++------- pkg/management/postgres/restore.go | 19 ++----- pkg/management/postgres/webserver/local.go | 4 +- .../postgres/webserver/plugin_backup.go | 34 +++++------- pkg/specs/pods.go | 5 +- pkg/utils/{ => context}/context.go | 2 +- pkg/utils/context/doc.go | 21 ++++++++ 21 files changed, 164 insertions(+), 111 deletions(-) create mode 100644 internal/cnpi/plugin/client/create.go rename pkg/utils/{ => context}/context.go (98%) create mode 100644 pkg/utils/context/doc.go diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 0f36bcdb1f..c2f27f7ba9 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -42,6 +42,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/system" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) @@ -391,7 +392,7 @@ func (secretResourceVersion *SecretsResourceVersion) SetExternalClusterSecretVer // SetInContext records the cluster in the given context func (cluster *Cluster) SetInContext(ctx context.Context) context.Context { - return context.WithValue(ctx, utils.ContextKeyCluster, cluster) + return context.WithValue(ctx, contextutils.ContextKeyCluster, cluster) } // GetPostgresqlVersion gets the PostgreSQL image version detecting it from the diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index 127cc90350..c520fe4482 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -38,7 +38,6 @@ import ( apiv1 
"github.com/cloudnative-pg/cloudnative-pg/api/v1" pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" @@ -256,26 +255,15 @@ func restoreWALViaPlugins( contextLogger := log.FromContext(ctx) plugins := repository.New() - availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) - if err != nil { - contextLogger.Error(err, "Error while loading local plugins") - } defer plugins.Close() - availablePluginNamesSet := stringset.From(availablePluginNames) - enabledPluginNames := apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) enabledPluginNames = append( enabledPluginNames, apiv1.GetExternalClustersEnabledPluginNames(cluster.Spec.ExternalClusters)..., ) enabledPluginNamesSet := stringset.From(enabledPluginNames) - - client, err := pluginClient.WithPlugins( - ctx, - plugins, - availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()..., - ) + client, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) if err != nil { contextLogger.Error(err, "Error while loading required plugins") return false, err diff --git a/internal/cnpi/plugin/client/client.go b/internal/cnpi/plugin/client/client.go index bc14576e4a..59f3e4cbd3 100644 --- a/internal/cnpi/plugin/client/client.go +++ b/internal/cnpi/plugin/client/client.go @@ -55,6 +55,11 @@ func (data *data) MetadataList() []connection.Metadata { return result } +func (data *data) HasPlugin(pluginName string) bool { + _, err := data.getPlugin(pluginName) + return err == nil +} + func (data *data) Close(ctx context.Context) { contextLogger := log.FromContext(ctx) for i := range data.plugins { diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go index 0f23953018..6af9e7419e 100644 --- a/internal/cnpi/plugin/client/contracts.go +++ b/internal/cnpi/plugin/client/contracts.go @@ -27,9 +27,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) // Client describes a set of behaviour needed to properly handle all the plugin client expected features @@ -43,6 +43,26 @@ type Client interface { RestoreJobHooksCapabilities } +// SetPluginClientInContext records the plugin client in the given context +func SetPluginClientInContext(ctx context.Context, client Client) context.Context { + return context.WithValue(ctx, contextutils.PluginClientKey, client) +} + +// GetPluginClientFromContext gets the current plugin client from the context +func GetPluginClientFromContext(ctx context.Context) Client { + v := ctx.Value(contextutils.PluginClientKey) + if v == nil { + return nil + } + + cli, ok := v.(Client) + if !ok { + return nil + } + + return cli +} + // Connection describes a set of behaviour needed to properly handle the plugin connections type Connection interface { // Close closes the connection to every loaded plugin @@ -50,6 +70,8 @@ type Connection 
interface { // MetadataList exposes the metadata of the loaded plugins MetadataList() []connection.Metadata + + HasPlugin(pluginName string) bool } // ClusterCapabilities describes a set of behaviour needed to implement the Cluster capabilities @@ -154,5 +176,5 @@ type BackupCapabilities interface { // RestoreJobHooksCapabilities describes a set of behaviour needed to run the Restore type RestoreJobHooksCapabilities interface { - Restore(ctx context.Context, cluster *apiv1.Cluster) (*restore.RestoreResponse, error) + Restore(ctx context.Context, cluster gvkEnsurer) (*restore.RestoreResponse, error) } diff --git a/internal/cnpi/plugin/client/create.go b/internal/cnpi/plugin/client/create.go new file mode 100644 index 0000000000..a17c3a1cdb --- /dev/null +++ b/internal/cnpi/plugin/client/create.go @@ -0,0 +1,52 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" +) + +// NewClient creates a new CNPI client +func NewClient(ctx context.Context, enabledPlugin *stringset.Data) (Client, error) { + contextLogger := log.FromContext(ctx) + plugins := repository.New() + + // TODO: make the socketDir a parameter + availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) + if err != nil { + contextLogger.Error(err, "Error while loading local plugins") + plugins.Close() + return nil, err + } + + availablePluginNamesSet := stringset.From(availablePluginNames) + availableAndEnabled := stringset.From(availablePluginNamesSet.Intersect(enabledPlugin).ToList()) + return WithPlugins( + ctx, + plugins, + availableAndEnabled.ToList()..., + ) +} diff --git a/internal/cnpi/plugin/client/restore_job.go b/internal/cnpi/plugin/client/restore_job.go index c6bc843b7e..86fc0751ae 100644 --- a/internal/cnpi/plugin/client/restore_job.go +++ b/internal/cnpi/plugin/client/restore_job.go @@ -26,16 +26,20 @@ import ( "slices" restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" - - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) // ErrNoPluginSupportsRestoreJobHooksCapability is raised when no plugin supports the restore job hooks capability var ErrNoPluginSupportsRestoreJobHooksCapability = errors.New("no plugin supports the restore job hooks capability") +type gvkEnsurer interface { + EnsureGVKIsPresent() + client.Object +} + func (data *data) Restore( ctx context.Context, - cluster *apiv1.Cluster, + cluster gvkEnsurer, ) (*restore.RestoreResponse, error) { cluster.EnsureGVKIsPresent() diff --git a/internal/cnpi/plugin/connection/unix.go b/internal/cnpi/plugin/connection/unix.go index
866e62b371..fa986f5396 100644 --- a/internal/cnpi/plugin/connection/unix.go +++ b/internal/cnpi/plugin/connection/unix.go @@ -30,7 +30,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) // ProtocolUnix is for plugins that are reachable over a @@ -45,7 +45,7 @@ func (p ProtocolUnix) Dial(ctx context.Context) (Handler, error) { contextLogger.Debug("Connecting to plugin via local socket", "path", dialPath) timeoutValue := defaultTimeout - value, ok := ctx.Value(utils.GRPCTimeoutKey).(time.Duration) + value, ok := ctx.Value(contextutils.GRPCTimeoutKey).(time.Duration) if ok { contextLogger.Debug("Using custom timeout value", "timeout", value) timeoutValue = value diff --git a/internal/cnpi/plugin/operatorclient/client.go b/internal/cnpi/plugin/operatorclient/client.go index 709c616ab4..38dec3a931 100644 --- a/internal/cnpi/plugin/operatorclient/client.go +++ b/internal/cnpi/plugin/operatorclient/client.go @@ -29,7 +29,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) type extendedClient struct { @@ -50,14 +50,14 @@ func (e *extendedClient) invokePlugin( ) (client.Object, error) { contextLogger := log.FromContext(ctx).WithName("invokePlugin") - cluster, ok := ctx.Value(utils.ContextKeyCluster).(client.Object) + cluster, ok := ctx.Value(contextutils.ContextKeyCluster).(client.Object) if !ok || cluster == nil { contextLogger.Trace("skipping invokePlugin, cannot find the cluster inside the context") return obj, nil } - pluginClient, ok := ctx.Value(utils.PluginClientKey).(cnpgiClient.Client) - if !ok || pluginClient == nil { + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) + if pluginClient == nil { contextLogger.Trace("skipping invokePlugin, cannot find the plugin client inside the context") return obj, nil } diff --git a/internal/cnpi/plugin/operatorclient/client_test.go b/internal/cnpi/plugin/operatorclient/client_test.go index d1d5a48466..f8b52a1803 100644 --- a/internal/cnpi/plugin/operatorclient/client_test.go +++ b/internal/cnpi/plugin/operatorclient/client_test.go @@ -29,7 +29,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin" pluginclient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -78,8 +78,8 @@ var _ = Describe("extendedClient", func() { It("invokePlugin", func(ctx SpecContext) { fakeCrd := &fakeClusterCRD{} - newCtx := context.WithValue(ctx, utils.ContextKeyCluster, fakeCrd) - newCtx = context.WithValue(newCtx, utils.PluginClientKey, pluginClient) + newCtx := context.WithValue(ctx, contextutils.ContextKeyCluster, fakeCrd) + newCtx = context.WithValue(newCtx, contextutils.PluginClientKey, pluginClient) By("ensuring it works the first invocation", func() { obj, err := c.invokePlugin(newCtx, plugin.OperationVerbCreate, &corev1.Pod{}) diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 50866066ae..1f5948a3d5 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -168,7 +168,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr pluginClient.Close(ctx) }() - ctx = setPluginClientInContext(ctx, pluginClient) + ctx = cnpgiClient.SetPluginClientInContext(ctx, pluginClient) // Plugin pre-hooks if hookResult := preReconcilePluginHooks(ctx, &cluster, &backup); hookResult.StopReconciliation { diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 6427391f79..f483e67e55 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -214,7 +214,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct pluginClient.Close(ctx) }() - ctx = setPluginClientInContext(ctx, pluginClient) + ctx = cnpgiClient.SetPluginClientInContext(ctx, pluginClient) // Run the inner reconcile loop. Translate any ErrNextLoop to an errorless return result, err := r.reconcile(ctx, cluster) @@ -526,7 +526,7 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste return hookResult.Result, hookResult.Err } - return setStatusPluginHook(ctx, r.Client, getPluginClientFromContext(ctx), cluster) + return setStatusPluginHook(ctx, r.Client, cnpgiClient.GetPluginClientFromContext(ctx), cluster) } func (r *ClusterReconciler) ensureNoFailoverOnFullDisk( diff --git a/internal/controller/cluster_plugins.go b/internal/controller/cluster_plugins.go index 468cd738d4..56e9b435e0 100644 --- a/internal/controller/cluster_plugins.go +++ b/internal/controller/cluster_plugins.go @@ -27,13 +27,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + cnpgiclient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" ) // updatePluginsStatus ensures that we load the plugins that are required to reconcile // this cluster func (r *ClusterReconciler) updatePluginsStatus(ctx context.Context, cluster *apiv1.Cluster) error { // Load the plugins - pluginClient := getPluginClientFromContext(ctx) + pluginClient := cnpgiclient.GetPluginClientFromContext(ctx) // Get the status of the plugins and store it inside the status section oldCluster := cluster.DeepCopy() diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index cda4e66f6b..87808f8164 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -857,10 +857,10 @@ var _ = Describe("checkPodSpec with plugins", Ordered, func() { podModifiedByPlugins.Spec.Containers[0].Image = "postgres:19.0" - pluginClient := fakePluginClientRollout{ + pluginCli := fakePluginClientRollout{ returnedPod: podModifiedByPlugins, } - 
ctx := context.WithValue(context.TODO(), utils.PluginClientKey, pluginClient) + ctx := pluginClient.SetPluginClientInContext(context.TODO(), pluginCli) rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) Expect(err).ToNot(HaveOccurred()) @@ -882,10 +882,10 @@ var _ = Describe("checkPodSpec with plugins", Ordered, func() { }, } - pluginClient := fakePluginClientRollout{ + pluginCli := fakePluginClientRollout{ returnedPod: podModifiedByPlugins, } - ctx := context.WithValue(context.TODO(), utils.PluginClientKey, pluginClient) + ctx := pluginClient.SetPluginClientInContext(context.TODO(), pluginCli) rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) Expect(err).ToNot(HaveOccurred()) @@ -906,10 +906,10 @@ var _ = Describe("checkPodSpec with plugins", Ordered, func() { Value: "new_value", }) - pluginClient := fakePluginClientRollout{ + pluginCli := fakePluginClientRollout{ returnedPod: podModifiedByPlugins, } - ctx := context.WithValue(context.TODO(), utils.PluginClientKey, pluginClient) + ctx := pluginClient.SetPluginClientInContext(context.TODO(), pluginCli) rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) Expect(err).ToNot(HaveOccurred()) diff --git a/internal/controller/plugins.go b/internal/controller/plugins.go index a63d6302d8..72cc1c22f0 100644 --- a/internal/controller/plugins.go +++ b/internal/controller/plugins.go @@ -30,7 +30,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // preReconcilePluginHooks ensures we call the pre-reconcile plugin hooks @@ -39,7 +38,7 @@ func preReconcilePluginHooks( cluster *apiv1.Cluster, object client.Object, ) cnpgiClient.ReconcilerHookResult { - pluginClient := getPluginClientFromContext(ctx) + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) return pluginClient.PreReconcile(ctx, cluster, object) } @@ -49,7 +48,7 @@ func postReconcilePluginHooks( cluster *apiv1.Cluster, object client.Object, ) cnpgiClient.ReconcilerHookResult { - pluginClient := getPluginClientFromContext(ctx) + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) return pluginClient.PostReconcile(ctx, cluster, object) } @@ -86,13 +85,3 @@ func setStatusPluginHook( return ctrl.Result{RequeueAfter: 5 * time.Second}, cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)) } - -// setPluginClientInContext records the plugin client in the given context -func setPluginClientInContext(ctx context.Context, client cnpgiClient.Client) context.Context { - return context.WithValue(ctx, utils.PluginClientKey, client) -} - -// getPluginClientFromContext gets the current plugin client from the context -func getPluginClientFromContext(ctx context.Context) cnpgiClient.Client { - return ctx.Value(utils.PluginClientKey).(cnpgiClient.Client) -} diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go index 61dda4fb8d..3d34379585 100644 --- a/pkg/management/postgres/archiver/archiver.go +++ b/pkg/management/postgres/archiver/archiver.go @@ -37,7 +37,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/management/cache" 
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/client/local" @@ -274,33 +273,22 @@ func archiveWALViaPlugins( } plugins := repository.New() - availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) - if err != nil { - contextLogger.Error(err, "Error while loading local plugins") - } defer plugins.Close() - availablePluginNamesSet := stringset.From(availablePluginNames) - enabledPluginNamesSet := stringset.From( - apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)) - availableAndEnabled := stringset.From(availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()) - - enabledArchiverPluginName := cluster.GetEnabledWALArchivePluginName() - if enabledArchiverPluginName != "" && !availableAndEnabled.Has(enabledArchiverPluginName) { - return fmt.Errorf("wal archive plugin is not available: %s", enabledArchiverPluginName) - } + enabledPluginNamesSet := stringset.From(apiv1.GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins)) - client, err := pluginClient.WithPlugins( - ctx, - plugins, - availableAndEnabled.ToList()..., - ) + client, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) if err != nil { contextLogger.Error(err, "Error while loading required plugins") return err } defer client.Close(ctx) + enabledArchiverPluginName := cluster.GetEnabledWALArchivePluginName() + if enabledArchiverPluginName != "" && !client.HasPlugin(enabledArchiverPluginName) { + return fmt.Errorf("wal archive plugin is not available: %s", enabledArchiverPluginName) + } + return client.ArchiveWAL(ctx, cluster, walName) } diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index ee0c63acb5..37e2afceb6 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -54,13 +54,13 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/system" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) var ( @@ -1066,23 +1066,14 @@ func restoreViaPlugin( contextLogger := log.FromContext(ctx) // TODO: timeout should be configurable by the user - ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 100*time.Minute) + ctx = context.WithValue(ctx, contextutils.GRPCTimeoutKey, 100*time.Minute) plugins := repository.New() - availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) - if err != nil { - contextLogger.Error(err, "Error while loading local plugins") - } defer plugins.Close() - availablePluginNamesSet := stringset.From(availablePluginNames) - contextLogger.Info("available plugins", "plugins", availablePluginNamesSet) - - pClient, err := pluginClient.WithPlugins( - ctx, - plugins, - plugin.Name, - ) + pluginEnabledSet := stringset.New() + pluginEnabledSet.Put(plugin.Name) + pClient, err := 
pluginClient.NewClient(ctx, pluginEnabledSet) if err != nil { contextLogger.Error(err, "Error while loading required plugins") return nil, err diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go index e2a71f9f02..e62186aa86 100644 --- a/pkg/management/postgres/webserver/local.go +++ b/pkg/management/postgres/webserver/local.go @@ -39,7 +39,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) type localWebserverEndpoints struct { @@ -239,7 +239,7 @@ func (ws *localWebserverEndpoints) startPluginBackup( backup *apiv1.Backup, ) { // TODO: timeout should be configurable by the user - ctx = context.WithValue(ctx, utils.GRPCTimeoutKey, 100*time.Minute) + ctx = context.WithValue(ctx, contextutils.GRPCTimeoutKey, 100*time.Minute) NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder).Start(ctx) } diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index e8a34ba05c..98e0415fda 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -36,7 +36,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" - "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" @@ -80,30 +79,13 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { "backupNamespace", b.Backup.Name) plugins := repository.New() - availablePlugins, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) - if err != nil { - contextLogger.Error(err, "Error while discovering plugins") - } defer plugins.Close() - availablePluginNamesSet := stringset.From(availablePlugins) - - enabledPluginNamesSet := stringset.From( - apiv1.GetPluginConfigurationEnabledPluginNames(b.Cluster.Spec.Plugins)) - availableAndEnabled := stringset.From(availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()) - - if !availableAndEnabled.Has(b.Backup.Spec.PluginConfiguration.Name) { - b.markBackupAsFailed( - ctx, - fmt.Errorf("requested plugin is not available: %s", b.Backup.Spec.PluginConfiguration.Name), - ) - return - } - - cli, err := pluginClient.WithPlugins( + enabledPluginNamesSet := stringset.New() + enabledPluginNamesSet.Put(b.Backup.Spec.PluginConfiguration.Name) + cli, err := pluginClient.NewClient( ctx, - plugins, - availableAndEnabled.ToList()..., + enabledPluginNamesSet, ) if err != nil { b.markBackupAsFailed(ctx, err) @@ -111,6 +93,14 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { } defer cli.Close(ctx) + if !cli.HasPlugin(b.Backup.Spec.PluginConfiguration.Name) { + b.markBackupAsFailed( + ctx, + fmt.Errorf("requested plugin is not available: %s", b.Backup.Spec.PluginConfiguration.Name), + ) + return + } + // record the backup beginning contextLogger.Info("Plugin backup started") b.Recorder.Event(b.Backup, "Normal", "Starting", "Backup 
started") diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index 85a4b67337..08defc95f8 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -474,8 +474,8 @@ func NewInstance( } }() - pluginClient, ok := ctx.Value(utils.PluginClientKey).(cnpgiClient.Client) - if !ok || pluginClient == nil { + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) + if pluginClient == nil { contextLogger.Trace("skipping NewInstance, cannot find the plugin client inside the context") return pod, nil } @@ -487,6 +487,7 @@ func NewInstance( return nil, fmt.Errorf("while invoking the lifecycle instance evaluation hook: %w", err) } + var ok bool pod, ok = podClientObject.(*corev1.Pod) if !ok { return nil, fmt.Errorf("while casting the clientObject to the pod type") diff --git a/pkg/utils/context.go b/pkg/utils/context/context.go similarity index 98% rename from pkg/utils/context.go rename to pkg/utils/context/context.go index 128cb28340..a1915a0b12 100644 --- a/pkg/utils/context.go +++ b/pkg/utils/context/context.go @@ -17,7 +17,7 @@ limitations under the License. SPDX-License-Identifier: Apache-2.0 */ -package utils +package context // contextKey a type used to assign values inside the context type contextKey string diff --git a/pkg/utils/context/doc.go b/pkg/utils/context/doc.go new file mode 100644 index 0000000000..b8a7757ef7 --- /dev/null +++ b/pkg/utils/context/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package context contains utility functions to work with context.Context +package context From 5461c961ce4b1962dc62a79724d2e8e3dd1d50de Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 9 Apr 2025 18:23:59 +0200 Subject: [PATCH 510/836] chore(cluster): add `GetCurrentDataImage` method (#7256) Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Co-authored-by: Leonardo Cecchi --- internal/controller/cluster_image.go | 19 ++++++++++++++----- internal/controller/cluster_image_test.go | 20 ++++++++++++++++++++ 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index d838adb756..1df58d3b14 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -52,10 +52,7 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, err.Error()) } - currentDataImage := cluster.Status.Image - if cluster.Status.MajorVersionUpgradeFromImage != nil { - currentDataImage = *cluster.Status.MajorVersionUpgradeFromImage - } + currentDataImage := getCurrentPgDataImage(&cluster.Status) // Case 1: the cluster is being initialized and there is still no // running image. In this case, we should simply apply the image selected by the user. 
@@ -72,7 +69,6 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C // Case 2: there's a running image. The code checks if the user selected // an image of the same major version or if a change in the major // version has been requested. - var majorVersionUpgradeFromImage *string currentVersion, err := version.FromTag(reference.New(currentDataImage).Tag) if err != nil { contextLogger.Error(err, "While parsing current major versions") @@ -85,6 +81,7 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C return nil, err } + var majorVersionUpgradeFromImage *string switch { case currentVersion.Major() < requestedVersion.Major(): // The current major version is older than the requested one @@ -109,6 +106,18 @@ func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.C ) } +// getCurrentPgDataImage returns the Postgres image that was last able to run the cluster +// PGDATA correctly. +// This is important in the context of a major upgrade because it contains the +// image with the "old" major version even when there are no Pods available. +func getCurrentPgDataImage(status *apiv1.ClusterStatus) string { + if status.MajorVersionUpgradeFromImage != nil { + return *status.MajorVersionUpgradeFromImage + } + + return status.Image +} + func (r *ClusterReconciler) getConfiguredImage(ctx context.Context, cluster *apiv1.Cluster) (string, error) { contextLogger := log.FromContext(ctx) diff --git a/internal/controller/cluster_image_test.go b/internal/controller/cluster_image_test.go index ec69797b25..e76c58e9a8 100644 --- a/internal/controller/cluster_image_test.go +++ b/internal/controller/cluster_image_test.go @@ -25,6 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -212,3 +213,22 @@ var _ = Describe("Cluster image detection", func() { Expect(*cluster.Status.MajorVersionUpgradeFromImage).To(Equal("postgres:16.2")) }) }) + +var _ = Describe("Major version tracking with getCurrentPgDataImage", func() { + It("returns the current major version if no major version update has been requested", func() { + status := &apiv1.ClusterStatus{ + Image: "postgres:15.2", + } + + Expect(getCurrentPgDataImage(status)).To(Equal("postgres:15.2")) + }) + + It("returns the old major version if a major version update has been requested", func() { + status := &apiv1.ClusterStatus{ + Image: "postgres:15.2", + MajorVersionUpgradeFromImage: ptr.To("postgres:14.3"), + } + + Expect(getCurrentPgDataImage(status)).To(Equal("postgres:14.3")) + }) +}) From 54ed280cfc8e57964c3fbf8c5b782fae712e42bd Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 9 Apr 2025 18:59:05 +0200 Subject: [PATCH 511/836] fix(security): ignore pgbouncer path for the digest updater (#7334) Renovate fails when looking into the container images inside a Go file. In this case, it failed while looking into pkg/specs/pgbouncer/deployments.go. Closes #7257 Signed-off-by: Jonathan Gonzalez V.
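With this entry in place, the ignorePaths list reads roughly as follows (a sketch of the surrounding configuration; only the pgbouncer line is new):

ignorePaths: [
  'contribute/**',
  'licenses/**',
  'pkg/versions/**',
  'pkg/specs/pgbouncer/',
],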
--- .github/renovate.json5 | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index d2eb5117ab..601d4502f0 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -20,6 +20,7 @@ 'contribute/**', 'licenses/**', 'pkg/versions/**', + 'pkg/specs/pgbouncer/', ], postUpdateOptions: [ 'gomodTidy', From a4c7dee38ee62be9ee78285daac4ccfef03bfdfc Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 9 Apr 2025 19:02:57 +0200 Subject: [PATCH 512/836] fix(security): add imagePullPolicy to operator deployment (#7250) We should always set an imagePullPolicy to make sure the operator image is always pulled, avoiding the use of potentially dangerous local images. Signed-off-by: Jonathan Gonzalez V. --- config/manager/manager.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index bc8b23dc20..4ac078c461 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -43,6 +43,7 @@ spec: - --secret-name=$(OPERATOR_DEPLOYMENT_NAME)-config - --webhook-port=9443 image: controller:latest + imagePullPolicy: Always name: manager ports: - containerPort: 8080 From 3bc7061ba1fde7229ca0ceca9ea646693c67e869 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 10 Apr 2025 13:46:53 +0200 Subject: [PATCH 513/836] fix(deps): update all non-major go dependencies (main) (#7333) This PR contains the following updates: https://github.com/goreleaser/goreleaser `v2.8.1` -> `v2.8.2` https://github.com/onsi/ginkgo `v2.23.3` -> `v2.23.4` https://github.com/onsi/gomega `v1.36.3` -> `v1.37.0` https://github.com/prometheus/client_golang `v1.21.1` -> `v1.22.0` golang.org/x/term `v0.30.0` -> `v0.31.0` https://github.com/grpc/grpc-go `v1.71.0` -> `v1.71.1` --- Makefile | 2 +- go.mod | 18 +++++++++--------- go.sum | 40 ++++++++++++++++++++++------------------ 3 files changed, 32 insertions(+), 28 deletions(-) diff --git a/Makefile b/Makefile index e572a80da5..cb808111fe 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.17.2 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca -GORELEASER_VERSION ?= v2.8.1 +GORELEASER_VERSION ?= v2.8.2 SPELLCHECK_VERSION ?= 0.48.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 diff --git a/go.mod b/go.mod index 3a7aff37b6..2996122f47 100644 --- a/go.mod +++ b/go.mod @@ -23,10 +23,10 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.23.3 - github.com/onsi/gomega v1.36.3 + github.com/onsi/ginkgo/v2 v2.23.4 + github.com/onsi/gomega v1.37.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 - github.com/prometheus/client_golang v1.21.1 + github.com/prometheus/client_golang v1.22.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 github.com/spf13/cobra v1.9.1 @@ -35,8 +35,8 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/term v0.30.0 - google.golang.org/grpc v1.71.0 + golang.org/x/term v0.31.0 + google.golang.org/grpc v1.71.1 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 @@ -69,7 +69,7 @@ require ( github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0
// indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect @@ -78,7 +78,6 @@ require ( github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect @@ -98,14 +97,15 @@ require ( github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect golang.org/x/crypto v0.36.0 // indirect golang.org/x/net v0.37.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect + golang.org/x/sys v0.32.0 // indirect golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.30.0 // indirect + golang.org/x/tools v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/protobuf v1.36.5 // indirect diff --git a/go.sum b/go.sum index b37382fcc2..70d8eff2f0 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -104,8 +104,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -143,10 +143,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= -github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= -github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= +github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= +github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -154,10 +154,12 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 h1:DP+PUNVOc+Bkft8a4QunLzaZ0RspWuD3tBbcPHr2PeE= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M= -github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= -github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= @@ -209,6 +211,8 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.6.0 
h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -240,10 +244,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= @@ -254,8 +258,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -264,8 +268,8 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= +google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v1.36.5 
h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 852fdf67e9798d2ac4c3f08a1bbb9ac2a2790a08 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 10 Apr 2025 14:02:30 +0200 Subject: [PATCH 514/836] fix(instance): improve error handling for `RefreshSecrets` (#7260) Enhance error handling for the `RefreshSecrets` function across multiple components. Errors are now properly propagated and logged, ensuring that issues during secret refresh operations are surfaced and handled appropriately. Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- internal/cmd/manager/instance/join/cmd.go | 14 +--- .../manager/instance/upgrade/execute/cmd.go | 10 +-- .../controller/instance_controller.go | 75 ++++++++----------- 3 files changed, 41 insertions(+), 58 deletions(-) diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go index e30700b170..bb3784eed7 100644 --- a/internal/cmd/manager/instance/join/cmd.go +++ b/internal/cmd/manager/instance/join/cmd.go @@ -123,16 +123,10 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg return err } - // Since we're directly using the reconciler here, we cannot - // tell if the secrets were correctly downloaded or not. - // If they were the following "pg_basebackup" command will work, if - // they don't "pg_basebackup" with fail, complaining that the - // cryptographic material is not available. - // So it doesn't make a real difference. - // - // Besides this, we should improve this situation to have - // a real error handling. - reconciler.RefreshSecrets(ctx, &cluster) + if _, err := reconciler.RefreshSecrets(ctx, &cluster); err != nil { + contextLogger.Error(err, "Error while refreshing secrets") + return err + } // Run "pg_basebackup" to download the data directory from the primary if err := info.Join(ctx, &cluster); err != nil { diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index 40c192a705..b1c4260ba8 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -107,6 +107,7 @@ func NewCmd() *cobra.Command { return cmd } +// nolint:gocognit func upgradeSubCommand( ctx context.Context, instance *postgres.Instance, @@ -139,12 +140,9 @@ func upgradeSubCommand( return err } - // Since we're directly using the reconciler here, we cannot - // tell if the secrets were correctly downloaded or not. - // If they were the following "pg_upgrade" command will work, if - // they don't "pg_upgrade" with fail, complaining that the - // cryptographic material is not available. 
- reconciler.RefreshSecrets(ctx, &cluster) + if _, err := reconciler.RefreshSecrets(ctx, &cluster); err != nil { + return fmt.Errorf("error while downloading secrets: %w", err) + } if err := reconciler.ReconcileWalStorage(ctx); err != nil { return fmt.Errorf("error while reconciling the WAL storage: %w", err) diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index b8c8aebe48..ccf0ae3f14 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -159,7 +159,10 @@ func (r *InstanceReconciler) Reconcile( // Reconcile secrets and cryptographic material // This doesn't need the PG connection, but it needs to reload it in case of changes - reloadNeeded := r.RefreshSecrets(ctx, cluster) + reloadNeeded, err := r.RefreshSecrets(ctx, cluster) + if err != nil { + return reconcile.Result{}, fmt.Errorf("while refreshing secrets: %w", err) + } reloadConfigNeeded, err := r.refreshConfigurationFiles(ctx, cluster) if err != nil { @@ -899,65 +902,53 @@ func (r *InstanceReconciler) reconcileMonitoringQueries( // It returns a boolean flag telling if something changed. Usually // the invoker will check that flag and reload the PostgreSQL // instance it is up. -// -// This function manages its own errors by logging them, so the -// user cannot easily tell if the operation has been done completely. -// The rationale behind this is: -// -// 1. when invoked at the startup of the instance manager, PostgreSQL -// is not up. If this raise an error, then PostgreSQL won't -// be able to start correctly (TLS certs are missing, i.e.), -// making no difference between returning an error or not -// -// 2. when invoked inside the reconciliation loop, if the operation -// raise an error, it's pointless to retry. The only way to recover -// from such an error is wait for the CNPG operator to refresh the -// resource version of the secrets to be used, and in that case a -// reconciliation loop will be started again. 
func (r *InstanceReconciler) RefreshSecrets( ctx context.Context, cluster *apiv1.Cluster, -) bool { +) (bool, error) { + type executor func(context.Context, *apiv1.Cluster) (bool, error) + contextLogger := log.FromContext(ctx) - changed := false + var changed bool + + secretRefresher := func(cb executor) error { + localChanged, err := cb(ctx, cluster) + if err == nil { + changed = changed || localChanged + return nil + } + + if !apierrors.IsNotFound(err) { + return err + } - serverSecretChanged, err := r.refreshServerCertificateFiles(ctx, cluster) - if err == nil { - changed = changed || serverSecretChanged - } else if !apierrors.IsNotFound(err) { + return nil + } + + if err := secretRefresher(r.refreshServerCertificateFiles); err != nil { contextLogger.Error(err, "Error while getting server secret") + return changed, err } - replicationSecretChanged, err := r.refreshReplicationUserCertificate(ctx, cluster) - if err == nil { - changed = changed || replicationSecretChanged - } else if !apierrors.IsNotFound(err) { + if err := secretRefresher(r.refreshReplicationUserCertificate); err != nil { contextLogger.Error(err, "Error while getting streaming replication secret") + return changed, err } - - clientCaSecretChanged, err := r.refreshClientCA(ctx, cluster) - if err == nil { - changed = changed || clientCaSecretChanged - } else if !apierrors.IsNotFound(err) { + if err := secretRefresher(r.refreshClientCA); err != nil { contextLogger.Error(err, "Error while getting cluster CA Client secret") + return changed, err } - - serverCaSecretChanged, err := r.refreshServerCA(ctx, cluster) - if err == nil { - changed = changed || serverCaSecretChanged - } else if !apierrors.IsNotFound(err) { + if err := secretRefresher(r.refreshServerCA); err != nil { contextLogger.Error(err, "Error while getting cluster CA Server secret") + return changed, err } - - barmanEndpointCaSecretChanged, err := r.refreshBarmanEndpointCA(ctx, cluster) - if err == nil { - changed = changed || barmanEndpointCaSecretChanged - } else if !apierrors.IsNotFound(err) { + if err := secretRefresher(r.refreshBarmanEndpointCA); err != nil { contextLogger.Error(err, "Error while getting barman endpoint CA secret") + return changed, err } - return changed + return changed, nil } // reconcileInstance sets PostgreSQL instance parameters to current values From 9d3c5133974cb1c22475f3cd32c1453df4e6f3ef Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 14 Apr 2025 11:56:35 +0200 Subject: [PATCH 515/836] fix(upgrades): consider previous controldata when creating the new datadir (#7274) Closes #7273 #7276 --------- Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- .../manager/instance/upgrade/execute/cmd.go | 101 ++++++++++++++++-- pkg/management/postgres/instance.go | 2 +- pkg/utils/parser.go | 8 +- tests/e2e/cluster_major_upgrade_test.go | 8 ++ 4 files changed, 111 insertions(+), 8 deletions(-) diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index b1c4260ba8..2e3263039a 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -27,6 +27,7 @@ import ( "os/exec" "path" "path/filepath" + "strconv" "strings" "time" @@ -36,6 +37,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" "github.com/cloudnative-pg/machinery/pkg/log" + 
"github.com/cloudnative-pg/machinery/pkg/postgres/version" "github.com/spf13/cobra" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,9 +50,10 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" + postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) // NewCmd creates the cobra command @@ -187,8 +190,19 @@ func upgradeSubCommand( } } + // Extract controldata information from the old data directory + controlData, err := getControlData(oldBinDir, pgData) + if err != nil { + return fmt.Errorf("error while getting old data directory control data: %w", err) + } + + targetVersion, err := cluster.GetPostgresqlVersion() + if err != nil { + return fmt.Errorf("error while getting the target version from the cluster object: %w", err) + } + contextLogger.Info("Creating data directory", "directory", newDataDir) - if err := runInitDB(newDataDir, newWalDir); err != nil { + if err := runInitDB(newDataDir, newWalDir, controlData, targetVersion); err != nil { return fmt.Errorf("error while creating the data directory: %w", err) } @@ -199,12 +213,12 @@ func upgradeSubCommand( contextLogger.Info("Checking if we have anything to update") // Read pg_version from both the old and new data directories - oldVersion, err := utils.GetPgdataVersion(pgData) + oldVersion, err := postgresutils.GetPgdataVersion(pgData) if err != nil { return fmt.Errorf("error while reading the old version: %w", err) } - newVersion, err := utils.GetPgdataVersion(newDataDir) + newVersion, err := postgresutils.GetPgdataVersion(newDataDir) if err != nil { return fmt.Errorf("error while reading the new version: %w", err) } @@ -262,7 +276,19 @@ func upgradeSubCommand( return nil } -func runInitDB(destDir string, walDir *string) error { +func getControlData(binDir, pgData string) (map[string]string, error) { + pgControlDataCmd := exec.Command(path.Join(binDir, "pg_controldata")) // #nosec + pgControlDataCmd.Env = append(os.Environ(), "PGDATA="+pgData) + + out, err := pgControlDataCmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("while executing pg_controldata: %w", err) + } + + return utils.ParsePgControldataOutput(string(out)), nil +} + +func runInitDB(destDir string, walDir *string, pgControlData map[string]string, targetVersion version.Data) error { // Invoke initdb to generate a data directory options := []string{ "--username", @@ -275,6 +301,17 @@ func runInitDB(destDir string, walDir *string) error { options = append(options, "--waldir", *walDir) } + // Extract the WAL segment size from the pg_controldata output + options, err := tryAddWalSegmentSize(pgControlData, options) + if err != nil { + return err + } + + options, err = tryAddDataChecksums(pgControlData, targetVersion, options) + if err != nil { + return err + } + // Certain CSI drivers may add setgid permissions on newly created folders. 
// A default umask is set to attempt to avoid this, by revoking group/other // permission bits on the PGDATA @@ -288,6 +325,46 @@ return nil } +// TODO: refactor; this should be a method of pgControlData +func tryAddDataChecksums( + pgControlData map[string]string, + targetVersion version.Data, + options []string, +) ([]string, error) { + dataPageChecksumVersion, ok := pgControlData[utils.PgControlDataDataPageChecksumVersion] + if !ok { + return nil, fmt.Errorf("no '%s' section into pg_controldata output", utils.PgControlDataDataPageChecksumVersion) + } + + if dataPageChecksumVersion != "1" { + // In postgres 18 we will have to set "--no-data-checksums" if checksums are disabled (they are enabled by default) + if targetVersion.Major() >= 18 { + return append(options, "--no-data-checksums"), nil + } + return options, nil + } + + return append(options, "--data-checksums"), nil +} + +// TODO: refactor; this should be a method of pgControlData +func tryAddWalSegmentSize(pgControlData map[string]string, options []string) ([]string, error) { + walSegmentSizeString, ok := pgControlData[utils.PgControlDataBytesPerWALSegment] + if !ok { + return nil, fmt.Errorf("no '%s' section into pg_controldata output", utils.PgControlDataBytesPerWALSegment) + } + + walSegmentSize, err := strconv.Atoi(walSegmentSizeString) + if err != nil { + return nil, fmt.Errorf( + "wrong '%s' pg_controldata value (not an integer): '%s' %w", + utils.PgControlDataBytesPerWALSegment, walSegmentSizeString, err) + } + + param := "--wal-segsize=" + strconv.Itoa(walSegmentSize/(1024*1024)) + return append(options, param), nil +} + func prepareConfigurationFiles(ctx context.Context, cluster apiv1.Cluster, destDir string) error { // Always read the custom and override configuration files created by the operator _, err := configfile.EnsureIncludes(path.Join(destDir, "postgresql.conf"), @@ -298,8 +375,20 @@ func prepareConfigurationFiles(ctx context.Context, cluster apiv1.Cluster, destD return fmt.Errorf("appending inclusion directives to postgresql.conf file resulted in an error: %w", err) } + // Set `max_slot_wal_keep_size` to the default value because any other value is not supported by pg_upgrade + tmpCluster := cluster.DeepCopy() + tmpCluster.Spec.PostgresConfiguration.Parameters["max_slot_wal_keep_size"] = "-1" + + pgVersion, err := postgresutils.GetPgdataVersion(destDir) + if err != nil { + return fmt.Errorf("error while reading the new data directory version: %w", err) + } + if pgVersion.Major >= 18 { + tmpCluster.Spec.PostgresConfiguration.Parameters["idle_replication_slot_timeout"] = "0" + } + newInstance := postgres.Instance{PgData: destDir} - if _, err := newInstance.RefreshConfigurationFilesFromCluster(ctx, &cluster, false); err != nil { + if _, err := newInstance.RefreshConfigurationFilesFromCluster(ctx, tmpCluster, false); err != nil { return fmt.Errorf("error while creating the configuration files for new datadir %q: %w", destDir, err) } diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 9076de82d6..0987c49848 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -271,7 +271,7 @@ func (instance *Instance) CheckHasDiskSpaceForWAL(ctx context.Context) (bool, er } pgControlData := utils.ParsePgControldataOutput(pgControlDataString) - walSegmentSizeString, ok := pgControlData["Bytes per WAL segment"] + walSegmentSizeString, ok := pgControlData[utils.PgControlDataBytesPerWALSegment] if !ok {
return false, fmt.Errorf("no 'Bytes per WAL segment' section into pg_controldata output") } diff --git a/pkg/utils/parser.go b/pkg/utils/parser.go index a889c2cbc9..f039ebcb57 100644 --- a/pkg/utils/parser.go +++ b/pkg/utils/parser.go @@ -57,6 +57,12 @@ const ( // PgControlDataDatabaseClusterStateKey is the status // of the latest primary that run on this data directory. PgControlDataDatabaseClusterStateKey pgControlDataKey = "Database cluster state" + + // PgControlDataDataPageChecksumVersion reports whether the checksums are enabled in the cluster + PgControlDataDataPageChecksumVersion pgControlDataKey = "Data page checksum version" + + // PgControlDataBytesPerWALSegment reports the size of the WAL segments + PgControlDataBytesPerWALSegment pgControlDataKey = "Bytes per WAL segment" ) // PgDataState represents the "Database cluster state" field of pg_controldata @@ -82,7 +88,7 @@ func (state PgDataState) IsShutdown(ctx context.Context) bool { } // ParsePgControldataOutput parses a pg_controldata output into a map of key-value pairs -func ParsePgControldataOutput(data string) map[string]string { +func ParsePgControldataOutput(data string) map[pgControlDataKey]string { pairs := make(map[string]string) lines := strings.Split(data, "\n") for _, line := range lines { diff --git a/tests/e2e/cluster_major_upgrade_test.go b/tests/e2e/cluster_major_upgrade_test.go index 86704741ef..d79eb47458 100644 --- a/tests/e2e/cluster_major_upgrade_test.go +++ b/tests/e2e/cluster_major_upgrade_test.go @@ -32,6 +32,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -78,6 +79,12 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade }, Spec: v1.ClusterSpec{ Instances: 3, + Bootstrap: &v1.BootstrapConfiguration{ + InitDB: &v1.BootstrapInitDB{ + DataChecksums: ptr.To(true), + WalSegmentSize: 32, + }, + }, StorageConfiguration: v1.StorageConfiguration{ StorageClass: &storageClass, Size: "1Gi", @@ -95,6 +102,7 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade "log_temp_files": "1024", "log_autovacuum_min_duration": "1000", "log_replication_commands": "on", + "max_slot_wal_keep_size": "1GB", }, }, }, From 76900cd2f2046448df1faf8099ad0faf15b132d6 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 14 Apr 2025 15:49:56 +0200 Subject: [PATCH 516/836] refactor(instance,secrets): extract sub-reconciler for instance secrets management (#7265) This patch introduces a dedicated subreconciler to manage PostgreSQL cryptographic material. 
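For illustration, here is a minimal sketch of how a caller consumes the extracted API (the wrapper refreshCryptoMaterial and its package are hypothetical; NewReconciler, RefreshSecrets, and the instance certificate accessors are the ones introduced by this patch):

    package example // hypothetical caller, not part of this patch

    import (
        "context"

        ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

        apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
        "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
        instancecertificate "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/certificate"
    )

    // refreshCryptoMaterial refreshes the on-disk cryptographic material of
    // an instance and reports whether PostgreSQL needs to be reloaded.
    // *postgres.Instance satisfies the reconciler's handler contract through
    // the GetServerCertificate/SetServerCertificate methods added by this patch.
    func refreshCryptoMaterial(
        ctx context.Context,
        cli ctrlclient.Client,
        instance *postgres.Instance,
        cluster *apiv1.Cluster,
    ) (bool, error) {
        return instancecertificate.NewReconciler(cli, instance).RefreshSecrets(ctx, cluster)
    }
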
The refactored logic is now reused across multiple components: * the instance manager reconciliation loop * the "join" bootstrap job * the PostgreSQL major upgrade process Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini Co-authored-by: Leonardo Cecchi --- internal/cmd/manager/instance/join/cmd.go | 12 +- .../manager/instance/upgrade/execute/cmd.go | 15 +- .../controller/instance_controller.go | 182 +-------- .../management/controller/instance_startup.go | 129 ------ internal/management/controller/manager.go | 4 + pkg/management/postgres/instance.go | 23 ++ .../webserver/metricserver/metrics.go | 2 +- pkg/management/postgres/webserver/remote.go | 2 +- pkg/reconciler/instance/certificate/doc.go | 21 + .../instance/certificate/reconciler.go | 375 ++++++++++++++++++ .../instance/certificate/reconciler_test.go | 178 +++++++++ .../instance/certificate/suite_test.go | 32 ++ 12 files changed, 647 insertions(+), 328 deletions(-) create mode 100644 pkg/reconciler/instance/certificate/doc.go create mode 100644 pkg/reconciler/instance/certificate/reconciler.go create mode 100644 pkg/reconciler/instance/certificate/reconciler_test.go create mode 100644 pkg/reconciler/instance/certificate/suite_test.go diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go index bb3784eed7..a82779e4d4 100644 --- a/internal/cmd/manager/instance/join/cmd.go +++ b/internal/cmd/manager/instance/join/cmd.go @@ -29,12 +29,11 @@ import ( ctrl "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller" "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" + instancecertificate "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/certificate" ) // NewCmd creates the new "join" command @@ -108,14 +107,9 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg return err } - // Create a fake reconciler just to download the secrets and - // the cluster definition - metricExporter := metricserver.NewExporter(instance) - reconciler := controller.NewInstanceReconciler(instance, client, metricExporter) - // Download the cluster definition from the API server var cluster apiv1.Cluster - if err := reconciler.GetClient().Get(ctx, + if err := client.Get(ctx, ctrl.ObjectKey{Namespace: instance.GetNamespaceName(), Name: instance.GetClusterName()}, &cluster, ); err != nil { @@ -123,7 +117,7 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg return err } - if _, err := reconciler.RefreshSecrets(ctx, &cluster); err != nil { + if _, err := instancecertificate.NewReconciler(client, instance).RefreshSecrets(ctx, &cluster); err != nil { contextLogger.Error(err, "Error while refreshing secrets") return err } diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index 2e3263039a..1748ce8225 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -52,6 +52,7 @@ import ( 
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" + instancecertificate "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/certificate" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -131,22 +132,22 @@ func upgradeSubCommand( return err } - // Create a fake reconciler just to download the secrets and - // the cluster definition - metricExporter := metricserver.NewExporter(instance) - reconciler := controller.NewInstanceReconciler(instance, client, metricExporter) - // Download the cluster definition from the API server var cluster apiv1.Cluster - if err := reconciler.GetClient().Get(ctx, clusterObjectKey, &cluster); err != nil { + if err := client.Get(ctx, clusterObjectKey, &cluster); err != nil { contextLogger.Error(err, "Error while getting cluster") return err } - if _, err := reconciler.RefreshSecrets(ctx, &cluster); err != nil { + if _, err := instancecertificate.NewReconciler(client, instance).RefreshSecrets(ctx, &cluster); err != nil { return fmt.Errorf("error while downloading secrets: %w", err) } + // Create a fake reconciler just to download the secrets and + // the cluster definition + metricExporter := metricserver.NewExporter(instance) + reconciler := controller.NewInstanceReconciler(instance, client, metricExporter) + if err := reconciler.ReconcileWalStorage(ctx); err != nil { return fmt.Errorf("error while reconciling the WAL storage: %w", err) } diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index ccf0ae3f14..e74f1b15ff 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -21,7 +21,6 @@ package controller import ( "context" - "crypto/tls" "database/sql" "errors" "fmt" @@ -49,7 +48,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/roles" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/reconciler" "github.com/cloudnative-pg/cloudnative-pg/internal/management/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" postgresManagement "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" @@ -159,7 +157,7 @@ func (r *InstanceReconciler) Reconcile( // Reconcile secrets and cryptographic material // This doesn't need the PG connection, but it needs to reload it in case of changes - reloadNeeded, err := r.RefreshSecrets(ctx, cluster) + reloadNeeded, err := r.certificateReconciler.RefreshSecrets(ctx, cluster) if err != nil { return reconcile.Result{}, fmt.Errorf("while refreshing secrets: %w", err) } @@ -895,62 +893,6 @@ func (r *InstanceReconciler) reconcileMonitoringQueries( r.metricsServerExporter.SetCustomQueries(queriesCollector) } -// RefreshSecrets is called when the PostgreSQL secrets are changed -// and will refresh the contents of the file inside the Pod, without -// reloading the actual PostgreSQL instance. -// -// It returns a boolean flag telling if something changed. Usually -// the invoker will check that flag and reload the PostgreSQL -// instance it is up. 
-func (r *InstanceReconciler) RefreshSecrets( - ctx context.Context, - cluster *apiv1.Cluster, -) (bool, error) { - type executor func(context.Context, *apiv1.Cluster) (bool, error) - - contextLogger := log.FromContext(ctx) - - var changed bool - - secretRefresher := func(cb executor) error { - localChanged, err := cb(ctx, cluster) - if err == nil { - changed = changed || localChanged - return nil - } - - if !apierrors.IsNotFound(err) { - return err - } - - return nil - } - - if err := secretRefresher(r.refreshServerCertificateFiles); err != nil { - contextLogger.Error(err, "Error while getting server secret") - return changed, err - } - - if err := secretRefresher(r.refreshReplicationUserCertificate); err != nil { - contextLogger.Error(err, "Error while getting streaming replication secret") - return changed, err - } - if err := secretRefresher(r.refreshClientCA); err != nil { - contextLogger.Error(err, "Error while getting cluster CA Client secret") - return changed, err - } - if err := secretRefresher(r.refreshServerCA); err != nil { - contextLogger.Error(err, "Error while getting cluster CA Server secret") - return changed, err - } - if err := secretRefresher(r.refreshBarmanEndpointCA); err != nil { - contextLogger.Error(err, "Error while getting barman endpoint CA secret") - return changed, err - } - - return changed, nil -} - // reconcileInstance sets PostgreSQL instance parameters to current values func (r *InstanceReconciler) reconcileInstance(cluster *apiv1.Cluster) { detectRequiresDesignatedPrimaryTransition := func() bool { @@ -1094,128 +1036,6 @@ func (r *InstanceReconciler) triggerRestartForDecrease(ctx context.Context, clus ) } -// refreshCertificateFilesFromSecret receive a secret and rewrite the file -// corresponding to the server certificate -func (r *InstanceReconciler) refreshInstanceCertificateFromSecret( - secret *corev1.Secret, -) error { - certData, ok := secret.Data[corev1.TLSCertKey] - if !ok { - return fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey) - } - - keyData, ok := secret.Data[corev1.TLSPrivateKeyKey] - if !ok { - return fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey) - } - - certificate, err := tls.X509KeyPair(certData, keyData) - if err != nil { - return fmt.Errorf("failed decoding Secret: %w", err) - } - - r.instance.ServerCertificate = &certificate - - return err -} - -// refreshCertificateFilesFromSecret receive a secret and rewrite the file -// corresponding to the server certificate -func (r *InstanceReconciler) refreshCertificateFilesFromSecret( - ctx context.Context, - secret *corev1.Secret, - certificateLocation string, - privateKeyLocation string, -) (bool, error) { - contextLogger := log.FromContext(ctx) - - certificate, ok := secret.Data[corev1.TLSCertKey] - if !ok { - return false, fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey) - } - - privateKey, ok := secret.Data[corev1.TLSPrivateKeyKey] - if !ok { - return false, fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey) - } - - certificateIsChanged, err := fileutils.WriteFileAtomic(certificateLocation, certificate, 0o600) - if err != nil { - return false, fmt.Errorf("while writing server certificate: %w", err) - } - - if certificateIsChanged { - contextLogger.Info("Refreshed configuration file", - "filename", certificateLocation, - "secret", secret.Name) - } - - privateKeyIsChanged, err := fileutils.WriteFileAtomic(privateKeyLocation, privateKey, 0o600) - if err != nil { - return false, fmt.Errorf("while writing server private key: %w", 
err) - } - - if privateKeyIsChanged { - contextLogger.Info("Refreshed configuration file", - "filename", privateKeyLocation, - "secret", secret.Name) - } - - return certificateIsChanged || privateKeyIsChanged, nil -} - -// refreshCAFromSecret receive a secret and rewrite the ca.crt file to the provided location -func (r *InstanceReconciler) refreshCAFromSecret( - ctx context.Context, - secret *corev1.Secret, - destLocation string, -) (bool, error) { - caCertificate, ok := secret.Data[certs.CACertKey] - if !ok { - return false, fmt.Errorf("missing %s entry in Secret", certs.CACertKey) - } - - changed, err := fileutils.WriteFileAtomic(destLocation, caCertificate, 0o600) - if err != nil { - return false, fmt.Errorf("while writing server certificate: %w", err) - } - - if changed { - log.FromContext(ctx).Info("Refreshed configuration file", - "filename", destLocation, - "secret", secret.Name) - } - - return changed, nil -} - -// refreshFileFromSecret receive a secret and rewrite the file corresponding to the key to the provided location -func (r *InstanceReconciler) refreshFileFromSecret( - ctx context.Context, - secret *corev1.Secret, - key, destLocation string, -) (bool, error) { - contextLogger := log.FromContext(ctx) - data, ok := secret.Data[key] - if !ok { - return false, fmt.Errorf("missing %s entry in Secret", key) - } - - changed, err := fileutils.WriteFileAtomic(destLocation, data, 0o600) - if err != nil { - return false, fmt.Errorf("while writing file: %w", err) - } - - if changed { - contextLogger.Info("Refreshed configuration file", - "filename", destLocation, - "secret", secret.Name, - "key", key) - } - - return changed, nil -} - // Reconciler primary logic. DB needed. func (r *InstanceReconciler) reconcilePrimary(ctx context.Context, cluster *apiv1.Cluster) error { contextLogger := log.FromContext(ctx) diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index c8e06fc7bd..687b5ec7c3 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -29,144 +29,15 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" - postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) -// refreshServerCertificateFiles gets the latest server certificates files from the -// secrets, and may set the instance certificate if it was missing our outdated. 
-// Returns true if configuration has been changed or the instance has been updated -func (r *InstanceReconciler) refreshServerCertificateFiles(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { - contextLogger := log.FromContext(ctx) - - var secret corev1.Secret - - err := retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, - func() error { - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ServerTLSSecret}, - &secret) - if err != nil { - contextLogger.Info("Error accessing server TLS Certificate. Retrying with exponential backoff.", - "secret", cluster.Status.Certificates.ServerTLSSecret) - return err - } - return nil - }) - if err != nil { - return false, err - } - - changed, err := r.refreshCertificateFilesFromSecret( - ctx, - &secret, - postgresSpec.ServerCertificateLocation, - postgresSpec.ServerKeyLocation) - if err != nil { - return changed, err - } - - if r.instance.ServerCertificate == nil || changed { - return changed, r.refreshInstanceCertificateFromSecret(&secret) - } - - return changed, nil -} - -// refreshReplicationUserCertificate gets the latest replication certificates from the -// secrets. Returns true if configuration has been changed -func (r *InstanceReconciler) refreshReplicationUserCertificate( - ctx context.Context, - cluster *apiv1.Cluster, -) (bool, error) { - var secret corev1.Secret - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ReplicationTLSSecret}, - &secret) - if err != nil { - return false, err - } - - return r.refreshCertificateFilesFromSecret( - ctx, - &secret, - postgresSpec.StreamingReplicaCertificateLocation, - postgresSpec.StreamingReplicaKeyLocation) -} - -// refreshClientCA gets the latest client CA certificates from the secrets. -// It returns true if configuration has been changed -func (r *InstanceReconciler) refreshClientCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { - var secret corev1.Secret - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ClientCASecret}, - &secret) - if err != nil { - return false, err - } - - return r.refreshCAFromSecret(ctx, &secret, postgresSpec.ClientCACertificateLocation) -} - -// refreshServerCA gets the latest server CA certificates from the secrets. -// It returns true if configuration has been changed -func (r *InstanceReconciler) refreshServerCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { - var secret corev1.Secret - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: cluster.Status.Certificates.ServerCASecret}, - &secret) - if err != nil { - return false, err - } - - return r.refreshCAFromSecret(ctx, &secret, postgresSpec.ServerCACertificateLocation) -} - -// refreshBarmanEndpointCA gets the latest barman endpoint CA certificates from the secrets. 
-// It returns true if configuration has been changed -func (r *InstanceReconciler) refreshBarmanEndpointCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { - endpointCAs := map[string]*apiv1.SecretKeySelector{} - if cluster.Spec.Backup.IsBarmanEndpointCASet() { - endpointCAs[postgresSpec.BarmanBackupEndpointCACertificateLocation] = cluster.Spec.Backup.BarmanObjectStore.EndpointCA - } - if replicaBarmanCA := cluster.GetBarmanEndpointCAForReplicaCluster(); replicaBarmanCA != nil { - endpointCAs[postgresSpec.BarmanRestoreEndpointCACertificateLocation] = replicaBarmanCA - } - if len(endpointCAs) == 0 { - return false, nil - } - - var changed bool - for target, secretKeySelector := range endpointCAs { - var secret corev1.Secret - err := r.GetClient().Get( - ctx, - client.ObjectKey{Namespace: r.instance.GetNamespaceName(), Name: secretKeySelector.Name}, - &secret) - if err != nil { - return false, err - } - c, err := r.refreshFileFromSecret(ctx, &secret, secretKeySelector.Key, target) - changed = changed || c - if err != nil { - return changed, err - } - } - return changed, nil -} - // verifyPgDataCoherenceForPrimary will abort the execution if the current server is a primary // one from the PGDATA viewpoint, but is not classified as the target nor the // current primary diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go index f73ae7c8ad..dba3414df8 100644 --- a/internal/management/controller/manager.go +++ b/internal/management/controller/manager.go @@ -34,6 +34,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/concurrency" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" + instancecertificate "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/certificate" ) // InstanceReconciler reconciles the status of the Cluster resource with @@ -49,6 +50,8 @@ type InstanceReconciler struct { systemInitialization *concurrency.Executed firstReconcileDone atomic.Bool metricsServerExporter *metricserver.Exporter + + certificateReconciler *instancecertificate.Reconciler } // NewInstanceReconciler creates a new instance reconciler @@ -64,6 +67,7 @@ func NewInstanceReconciler( extensionStatus: make(map[string]bool), systemInitialization: concurrency.NewExecuted(), metricsServerExporter: metricsExporter, + certificateReconciler: instancecertificate.NewReconciler(client, instance), } } diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 0987c49848..4116862840 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -34,6 +34,7 @@ import ( "runtime" "strconv" "strings" + "sync" "time" "github.com/blang/semver" @@ -213,10 +214,32 @@ type Instance struct { // MetricsPortTLS enables TLS on the port used to publish metrics over HTTP/HTTPS MetricsPortTLS bool + serverCertificateHandler serverCertificateHandler +} + +type serverCertificateHandler struct { + operationInProgress sync.Mutex + // ServerCertificate is the certificate we use to serve https connections ServerCertificate *tls.Certificate } +// GetServerCertificate returns the server certificate for the instance +func (instance *Instance) GetServerCertificate() *tls.Certificate { + instance.serverCertificateHandler.operationInProgress.Lock() + defer instance.serverCertificateHandler.operationInProgress.Unlock() + + return instance.serverCertificateHandler.ServerCertificate +} + +// 
SetServerCertificate sets the server certificate for the instance +func (instance *Instance) SetServerCertificate(cert *tls.Certificate) { + instance.serverCertificateHandler.operationInProgress.Lock() + defer instance.serverCertificateHandler.operationInProgress.Unlock() + + instance.serverCertificateHandler.ServerCertificate = cert +} + // SetPostgreSQLAutoConfWritable allows or deny writes to the // `postgresql.auto.conf` file in PGDATA func (instance *Instance) SetPostgreSQLAutoConfWritable(writeable bool) error { diff --git a/pkg/management/postgres/webserver/metricserver/metrics.go b/pkg/management/postgres/webserver/metricserver/metrics.go index 9e9ce07623..454ed7f4ec 100644 --- a/pkg/management/postgres/webserver/metricserver/metrics.go +++ b/pkg/management/postgres/webserver/metricserver/metrics.go @@ -66,7 +66,7 @@ func New(serverInstance *postgres.Instance, exporter *Exporter) (*MetricsServer, server.TLSConfig = &tls.Config{ MinVersion: tls.VersionTLS13, GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { - return serverInstance.ServerCertificate, nil + return serverInstance.GetServerCertificate(), nil }, } } diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index 57c814d5f6..d9c6d545dd 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -122,7 +122,7 @@ func NewRemoteWebServer( server.TLSConfig = &tls.Config{ MinVersion: tls.VersionTLS13, GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { - return instance.ServerCertificate, nil + return instance.GetServerCertificate(), nil }, } } diff --git a/pkg/reconciler/instance/certificate/doc.go b/pkg/reconciler/instance/certificate/doc.go new file mode 100644 index 0000000000..e495312140 --- /dev/null +++ b/pkg/reconciler/instance/certificate/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package certificate contains the reconciler for the PostgreSQL instance manager secrets +package certificate diff --git a/pkg/reconciler/instance/certificate/reconciler.go b/pkg/reconciler/instance/certificate/reconciler.go new file mode 100644 index 0000000000..1a194f3f3e --- /dev/null +++ b/pkg/reconciler/instance/certificate/reconciler.go @@ -0,0 +1,375 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package certificate + +import ( + "context" + "crypto/tls" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/fileutils" + "github.com/cloudnative-pg/machinery/pkg/log" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" +) + +// Reconciler is a certificate reconciler +type Reconciler struct { + cli client.Client + serverCertificateHandler serverCertificateHandler +} + +// ErrNoServerCertificateHandler is raised when a new server +// certificate has been detected by the instance reconciler +// but no handler has been set. +var ErrNoServerCertificateHandler = fmt.Errorf("no server certificate handler") + +// NewReconciler creates a new certificate reconciler +func NewReconciler(cli client.Client, serverHandler serverCertificateHandler) *Reconciler { + return &Reconciler{ + cli: cli, + serverCertificateHandler: serverHandler, + } +} + +type serverCertificateHandler interface { + SetServerCertificate(certificate *tls.Certificate) + GetServerCertificate() *tls.Certificate +} + +// RefreshSecrets is called when the PostgreSQL secrets are changed +// and will refresh the contents of the files inside the Pod, without +// reloading the actual PostgreSQL instance. +// +// It returns a boolean flag telling if something changed. Usually +// the invoker will check that flag and reload the PostgreSQL +// instance if it is up. +func (r *Reconciler) RefreshSecrets( + ctx context.Context, + cluster *apiv1.Cluster, +) (bool, error) { + type executor func(context.Context, *apiv1.Cluster) (bool, error) + + contextLogger := log.FromContext(ctx) + + var changed bool + + secretRefresher := func(cb executor) error { + localChanged, err := cb(ctx, cluster) + if err == nil { + changed = changed || localChanged + return nil + } + + if !apierrors.IsNotFound(err) { + return err + } + + return nil + } + + if err := secretRefresher(r.refreshServerCertificateFiles); err != nil { + contextLogger.Error(err, "Error while getting server secret") + return changed, err + } + if err := secretRefresher(r.refreshReplicationUserCertificate); err != nil { + contextLogger.Error(err, "Error while getting streaming replication secret") + return changed, err + } + if err := secretRefresher(r.refreshClientCA); err != nil { + contextLogger.Error(err, "Error while getting cluster CA Client secret") + return changed, err + } + if err := secretRefresher(r.refreshServerCA); err != nil { + contextLogger.Error(err, "Error while getting cluster CA Server secret") + return changed, err + } + if err := secretRefresher(r.refreshBarmanEndpointCA); err != nil { + contextLogger.Error(err, "Error while getting barman endpoint CA secret") + return changed, err + } + + return changed, nil +} + +// refreshServerCertificateFiles updates the latest server certificate files +// from the secrets and updates the instance certificate if it is missing or +// outdated. +// It returns true if the configuration has been changed or the instance +// certificate has been updated.
+func (r *Reconciler) refreshServerCertificateFiles(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { + contextLogger := log.FromContext(ctx) + + var secret corev1.Secret + + err := retry.OnError(retry.DefaultBackoff, func(error) bool { return true }, + func() error { + err := r.cli.Get( + ctx, + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Status.Certificates.ServerTLSSecret}, + &secret) + if err != nil { + contextLogger.Info("Error accessing server TLS Certificate. Retrying with exponential backoff.", + "secret", cluster.Status.Certificates.ServerTLSSecret) + return err + } + return nil + }) + if err != nil { + return false, err + } + + changed, err := r.refreshCertificateFilesFromSecret( + ctx, + &secret, + postgresSpec.ServerCertificateLocation, + postgresSpec.ServerKeyLocation) + if err != nil { + return changed, err + } + + if r.serverCertificateHandler.GetServerCertificate() == nil || changed { + return changed, r.refreshInstanceCertificateFromSecret(&secret) + } + + return changed, nil +} + +// refreshReplicationUserCertificate updates the latest replication user certificates +// from the secrets and updates the corresponding files. +// It returns true if the configuration has been changed. +func (r *Reconciler) refreshReplicationUserCertificate( + ctx context.Context, + cluster *apiv1.Cluster, +) (bool, error) { + var secret corev1.Secret + err := r.cli.Get( + ctx, + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Status.Certificates.ReplicationTLSSecret}, + &secret) + if err != nil { + return false, err + } + + return r.refreshCertificateFilesFromSecret( + ctx, + &secret, + postgresSpec.StreamingReplicaCertificateLocation, + postgresSpec.StreamingReplicaKeyLocation) +} + +// refreshClientCA updates the latest client CA certificates from the secrets. +// It returns true if the configuration has been changed. +func (r *Reconciler) refreshClientCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { + var secret corev1.Secret + err := r.cli.Get( + ctx, + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Status.Certificates.ClientCASecret}, + &secret) + if err != nil { + return false, err + } + + return r.refreshCAFromSecret(ctx, &secret, postgresSpec.ClientCACertificateLocation) +} + +// refreshServerCA gets the latest server CA certificates from the secrets. +// It returns true if the configuration has been changed. +func (r *Reconciler) refreshServerCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { + var secret corev1.Secret + err := r.cli.Get( + ctx, + client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Status.Certificates.ServerCASecret}, + &secret) + if err != nil { + return false, err + } + + return r.refreshCAFromSecret(ctx, &secret, postgresSpec.ServerCACertificateLocation) +} + +// refreshBarmanEndpointCA updates the barman endpoint CA certificates from the secrets. +// It returns true if the configuration has been changed. +// +// Important: this function is deprecated and will be replaced by the corresponding feature +// in the plugin-barman-cloud project +func (r *Reconciler) refreshBarmanEndpointCA(ctx context.Context, cluster *apiv1.Cluster) (bool, error) { + // refreshFileFromSecret receives a secret and rewrites the file corresponding to + // the key to the provided location. Implemented as an inner function to discourage + // reuse.
+ refreshFileFromSecret := func( + secret *corev1.Secret, + key, destLocation string, + ) (bool, error) { + contextLogger := log.FromContext(ctx) + data, ok := secret.Data[key] + if !ok { + return false, fmt.Errorf("missing %s entry in Secret", key) + } + + changed, err := fileutils.WriteFileAtomic(destLocation, data, 0o600) + if err != nil { + return false, fmt.Errorf("while writing file: %w", err) + } + + if changed { + contextLogger.Info("Refreshed configuration file", + "filename", destLocation, + "secret", secret.Name, + "key", key) + } + + return changed, nil + } + + endpointCAs := map[string]*apiv1.SecretKeySelector{} + if cluster.Spec.Backup.IsBarmanEndpointCASet() { + endpointCAs[postgresSpec.BarmanBackupEndpointCACertificateLocation] = cluster.Spec.Backup.BarmanObjectStore.EndpointCA + } + if replicaBarmanCA := cluster.GetBarmanEndpointCAForReplicaCluster(); replicaBarmanCA != nil { + endpointCAs[postgresSpec.BarmanRestoreEndpointCACertificateLocation] = replicaBarmanCA + } + if len(endpointCAs) == 0 { + return false, nil + } + + var changed bool + for target, secretKeySelector := range endpointCAs { + var secret corev1.Secret + err := r.cli.Get( + ctx, + client.ObjectKey{Namespace: cluster.Namespace, Name: secretKeySelector.Name}, + &secret) + if err != nil { + return false, err + } + c, err := refreshFileFromSecret(&secret, secretKeySelector.Key, target) + changed = changed || c + if err != nil { + return changed, err + } + } + return changed, nil +} + +// refreshInstanceCertificateFromSecret receives a TLS secret, parses it, and +// communicates the certificate change event back to the handler. +func (r *Reconciler) refreshInstanceCertificateFromSecret( + secret *corev1.Secret, +) error { + if r.serverCertificateHandler == nil { + return ErrNoServerCertificateHandler + } + + certData, ok := secret.Data[corev1.TLSCertKey] + if !ok { + return fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey) + } + + keyData, ok := secret.Data[corev1.TLSPrivateKeyKey] + if !ok { + return fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey) + } + + certificate, err := tls.X509KeyPair(certData, keyData) + if err != nil { + return fmt.Errorf("failed decoding Secret: %w", err) + } + + r.serverCertificateHandler.SetServerCertificate(&certificate) + + return err +} + +// refreshCertificateFilesFromSecret receives a secret and rewrites the files +// corresponding to the server certificate and key.
+func (r *Reconciler) refreshCertificateFilesFromSecret( + ctx context.Context, + secret *corev1.Secret, + certificateLocation string, + privateKeyLocation string, +) (bool, error) { + contextLogger := log.FromContext(ctx) + + certificate, ok := secret.Data[corev1.TLSCertKey] + if !ok { + return false, fmt.Errorf("missing %s field in Secret", corev1.TLSCertKey) + } + + privateKey, ok := secret.Data[corev1.TLSPrivateKeyKey] + if !ok { + return false, fmt.Errorf("missing %s field in Secret", corev1.TLSPrivateKeyKey) + } + + certificateIsChanged, err := fileutils.WriteFileAtomic(certificateLocation, certificate, 0o600) + if err != nil { + return false, fmt.Errorf("while writing server certificate: %w", err) + } + + if certificateIsChanged { + contextLogger.Info("Refreshed configuration file", + "filename", certificateLocation, + "secret", secret.Name) + } + + privateKeyIsChanged, err := fileutils.WriteFileAtomic(privateKeyLocation, privateKey, 0o600) + if err != nil { + return false, fmt.Errorf("while writing server private key: %w", err) + } + + if privateKeyIsChanged { + contextLogger.Info("Refreshed configuration file", + "filename", privateKeyLocation, + "secret", secret.Name) + } + + return certificateIsChanged || privateKeyIsChanged, nil +} + +// refreshCAFromSecret receives a secret and rewrites the `ca.crt` file to the provided location. +func (r *Reconciler) refreshCAFromSecret( + ctx context.Context, + secret *corev1.Secret, + destLocation string, +) (bool, error) { + caCertificate, ok := secret.Data[certs.CACertKey] + if !ok { + return false, fmt.Errorf("missing %s entry in Secret", certs.CACertKey) + } + + changed, err := fileutils.WriteFileAtomic(destLocation, caCertificate, 0o600) + if err != nil { + return false, fmt.Errorf("while writing the CA certificate: %w", err) + } + + if changed { + log.FromContext(ctx).Info("Refreshed configuration file", + "filename", destLocation, + "secret", secret.Name) + } + + return changed, nil +} diff --git a/pkg/reconciler/instance/certificate/reconciler_test.go b/pkg/reconciler/instance/certificate/reconciler_test.go new file mode 100644 index 0000000000..9214ad11bb --- /dev/null +++ b/pkg/reconciler/instance/certificate/reconciler_test.go @@ -0,0 +1,178 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package certificate + +import ( + "crypto/tls" + "path" + + "github.com/cloudnative-pg/machinery/pkg/fileutils" + corev1 "k8s.io/api/core/v1" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + + . "github.com/onsi/ginkgo/v2" + .
"github.com/onsi/gomega" +) + +type fakeServerCertificateHandler struct { + certificate *tls.Certificate +} + +func (f *fakeServerCertificateHandler) SetServerCertificate(certificate *tls.Certificate) { + f.certificate = certificate +} + +func (f *fakeServerCertificateHandler) GetServerCertificate() *tls.Certificate { + return f.certificate +} + +var _ = Describe("refresh certificate files from a secret", func() { + publicKeyContent := []byte("public_key") + privateKeyContent := []byte("private_key") + fakeReconciler := Reconciler{} + fakeSecret := corev1.Secret{ + Data: map[string][]byte{ + corev1.TLSCertKey: publicKeyContent, + corev1.TLSPrivateKeyKey: privateKeyContent, + }, + } + + It("writing the required files into a directory", func(ctx SpecContext) { + tempDir := GinkgoT().TempDir() + certificateLocation := path.Join(tempDir, "tls.crt") + privateKeyLocation := path.Join(tempDir, "tls.key") + + By("having code create new files", func() { + status, err := fakeReconciler.refreshCertificateFilesFromSecret( + ctx, &fakeSecret, certificateLocation, privateKeyLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + + writtenPublicKey, err := fileutils.ReadFile(certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(writtenPublicKey).To(Equal(publicKeyContent)) + + writtenPrivateKey, err := fileutils.ReadFile(privateKeyLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(writtenPrivateKey).To(Equal(privateKeyContent)) + }) + + By("writing again the same data, and verifying that the certificate refresh is not triggered", func() { + status, err := fakeReconciler.refreshCertificateFilesFromSecret( + ctx, &fakeSecret, certificateLocation, privateKeyLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeFalse()) + }) + + By("changing the file contents, and verifying that the certificate refresh is triggered", func() { + newPublicKeyContent := []byte("changed public key") + newPrivateKeyContent := []byte("changed private key") + + changedSecret := fakeSecret.DeepCopy() + changedSecret.Data[corev1.TLSCertKey] = newPublicKeyContent + changedSecret.Data[corev1.TLSPrivateKeyKey] = newPrivateKeyContent + + status, err := fakeReconciler.refreshCertificateFilesFromSecret( + ctx, changedSecret, certificateLocation, privateKeyLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + }) + }) +}) + +var _ = Describe("refresh CA from a secret", func() { + publicKeyContent := []byte("public_key") + fakeReconciler := Reconciler{} + fakeSecret := corev1.Secret{ + Data: map[string][]byte{ + certs.CACertKey: publicKeyContent, + }, + } + + It("writing the required files into a directory", func(ctx SpecContext) { + tempDir := GinkgoT().TempDir() + certificateLocation := path.Join(tempDir, "ca.crt") + + By("having code create new files", func() { + status, err := fakeReconciler.refreshCAFromSecret( + ctx, &fakeSecret, certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + + writtenPublicKey, err := fileutils.ReadFile(certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(writtenPublicKey).To(Equal(publicKeyContent)) + }) + + By("writing again the same data, and verifying that the certificate refresh is not triggered", func() { + status, err := fakeReconciler.refreshCAFromSecret( + ctx, &fakeSecret, certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeFalse()) + }) + + By("changing the file contents, and verifying that the certificate refresh is triggered", func() { + 
newPublicKeyContent := []byte("changed public key") + + changedSecret := fakeSecret.DeepCopy() + changedSecret.Data[certs.CACertKey] = newPublicKeyContent + + status, err := fakeReconciler.refreshCAFromSecret( + ctx, changedSecret, certificateLocation) + Expect(err).ToNot(HaveOccurred()) + Expect(status).To(BeTrue()) + }) + }) +}) + +var _ = Describe("server certificate refresh handler", func() { + It("refresh the server certificate", func() { + var secret *corev1.Secret + + By("creating a new root CA", func() { + root, err := certs.CreateRootCA("common-name", "organization-unit") + Expect(err).ToNot(HaveOccurred()) + + pair, err := root.CreateAndSignPair("host", certs.CertTypeServer, nil) + Expect(err).ToNot(HaveOccurred()) + + secret = pair.GenerateCertificateSecret("default", "pair") + }) + + By("triggering the certificate refresh when no handler is set", func() { + fakeReconciler := Reconciler{} + err := fakeReconciler.refreshInstanceCertificateFromSecret(secret) + Expect(err).Error().Should(Equal(ErrNoServerCertificateHandler)) + }) + + By("triggering the certificate refresh when a handler is set", func() { + fakeReconciler := Reconciler{ + serverCertificateHandler: &fakeServerCertificateHandler{}, + } + + err := fakeReconciler.refreshInstanceCertificateFromSecret(secret) + Expect(err).ShouldNot(HaveOccurred()) + + cert := fakeReconciler.serverCertificateHandler.GetServerCertificate() + Expect(cert).ToNot(BeNil()) + }) + }) +}) diff --git a/pkg/reconciler/instance/certificate/suite_test.go b/pkg/reconciler/instance/certificate/suite_test.go new file mode 100644 index 0000000000..4337e69e53 --- /dev/null +++ b/pkg/reconciler/instance/certificate/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package certificate + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestCertificate(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Certificate Reconciler") +} From 790ad412d7272efda5ee78f2d4aa86f2e9951b41 Mon Sep 17 00:00:00 2001 From: Peggie Date: Tue, 15 Apr 2025 09:09:55 +0200 Subject: [PATCH 517/836] chore: refresh licenses directory (#7345) Refresh the licenses directory Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. 
Co-authored-by: license-updater --- .../github.com/klauspost/compress/LICENSE | 304 ------------------ .../compress/internal/snapref/LICENSE | 27 -- .../compress/zstd/internal/xxhash/LICENSE.txt | 22 -- 3 files changed, 353 deletions(-) delete mode 100644 licenses/go-licenses/github.com/klauspost/compress/LICENSE delete mode 100644 licenses/go-licenses/github.com/klauspost/compress/internal/snapref/LICENSE delete mode 100644 licenses/go-licenses/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt diff --git a/licenses/go-licenses/github.com/klauspost/compress/LICENSE b/licenses/go-licenses/github.com/klauspost/compress/LICENSE deleted file mode 100644 index 87d5574777..0000000000 --- a/licenses/go-licenses/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,304 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/go-licenses/github.com/klauspost/compress/internal/snapref/LICENSE b/licenses/go-licenses/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10f4c..0000000000 --- a/licenses/go-licenses/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/go-licenses/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/licenses/go-licenses/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/licenses/go-licenses/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. From 3e05e40c6000915b3a6870f6e91127b3839bd2bd Mon Sep 17 00:00:00 2001 From: Vitalii Tverdokhlib Date: Tue, 15 Apr 2025 12:38:32 +0300 Subject: [PATCH 518/836] fix(docs): typo in benchmarking.md (#7343) Signed-off-by: Vitalii Tverdokhlib --- docs/src/benchmarking.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/benchmarking.md b/docs/src/benchmarking.md index 189545c938..291d84689b 100644 --- a/docs/src/benchmarking.md +++ b/docs/src/benchmarking.md @@ -171,7 +171,7 @@ It will: 1. Create a fio deployment composed by a single Pod, which will run fio on the PVC, create graphs after completing the benchmark and start serving the generated files with a webserver. We use the - [`fio-tools`](https://github.com/wallnerryan/fio-tools`) image for that. + [`fio-tools`](https://github.com/wallnerryan/fio-tools) image for that. The Pod created by the deployment will be ready when it starts serving the results. You can forward the port of the pod created by the deployment From 4996e73f5ec5a3a585f104b485912869edb443ad Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 15 Apr 2025 11:53:48 +0200 Subject: [PATCH 519/836] refactor(instance,storage): extract sub-reconciler for instance storage (#7266) This patch introduces a dedicated subreconciler to manage PostgreSQL separate WAL storage. The refactored logic is now reused across multiple components: * the PostgreSQL startup process * the PostgreSQL major upgrade process Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini Co-authored-by: Leonardo Cecchi --- internal/cmd/manager/instance/run/cmd.go | 3 +- .../manager/instance/upgrade/execute/cmd.go | 10 +- .../management/controller/instance_startup.go | 42 ------- internal/webhook/v1/cluster_webhook.go | 2 +- pkg/reconciler/instance/storage/doc.go | 21 ++++ pkg/reconciler/instance/storage/reconciler.go | 92 ++++++++++++++ .../instance/storage/reconciler_test.go | 112 ++++++++++++++++++ pkg/reconciler/instance/storage/suite_test.go | 32 +++++ 8 files changed, 262 insertions(+), 52 deletions(-) create mode 100644 pkg/reconciler/instance/storage/doc.go create mode 100644 pkg/reconciler/instance/storage/reconciler.go create mode 100644 pkg/reconciler/instance/storage/reconciler_test.go create mode 100644 pkg/reconciler/instance/storage/suite_test.go diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index 37a95d7c2e..b1e33a08b3 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -56,6 +56,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" pg "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + instancestorage "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/storage" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" ) @@ -267,7 +268,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { postgresStartConditions = append(postgresStartConditions, jsonPipe.GetExecutedCondition()) exitedConditions = append(exitedConditions, jsonPipe.GetExitedCondition()) - if err := 
reconciler.ReconcileWalStorage(ctx); err != nil { + if err := instancestorage.ReconcileWalDirectory(ctx); err != nil { return err } diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index 1748ce8225..9945052264 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -43,7 +43,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller" "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" @@ -51,8 +50,8 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" instancecertificate "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/certificate" + instancestorage "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/instance/storage" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -143,12 +142,7 @@ func upgradeSubCommand( return fmt.Errorf("error while downloading secrets: %w", err) } - // Create a fake reconciler just to download the secrets and - // the cluster definition - metricExporter := metricserver.NewExporter(instance) - reconciler := controller.NewInstanceReconciler(instance, client, metricExporter) - - if err := reconciler.ReconcileWalStorage(ctx); err != nil { + if err := instancestorage.ReconcileWalDirectory(ctx); err != nil { return fmt.Errorf("error while reconciling the WAL storage: %w", err) } diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index 687b5ec7c3..cd45a89373 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -22,7 +22,6 @@ package controller import ( "context" "fmt" - "io/fs" "os" "path/filepath" @@ -147,47 +146,6 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context } } -// ReconcileWalStorage moves the files from PGDATA/pg_wal to the volume attached, if exists, and -// creates a symlink for it -func (r *InstanceReconciler) ReconcileWalStorage(ctx context.Context) error { - contextLogger := log.FromContext(ctx) - - if pgWalExists, err := fileutils.FileExists(specs.PgWalVolumePath); err != nil { - return err - } else if !pgWalExists { - return nil - } - - pgWalDirInfo, err := os.Lstat(specs.PgWalPath) - if err != nil { - return err - } - // The pgWalDir it's already a symlink meaning that there's nothing to do - mode := pgWalDirInfo.Mode() & fs.ModeSymlink - if !pgWalDirInfo.IsDir() && mode != 0 { - return nil - } - - // We discarded every possibility that this has been done, let's move the current file to their - // new location - contextLogger.Info("Moving data", "from", specs.PgWalPath, "to", specs.PgWalVolumePgWalPath) - if err := fileutils.MoveDirectoryContent(specs.PgWalPath, specs.PgWalVolumePgWalPath); err != nil { - contextLogger.Error(err, "Moving data", "from", specs.PgWalPath, "to", - specs.PgWalVolumePgWalPath) - return 
err - } - - contextLogger.Debug("Deleting old path", "path", specs.PgWalPath) - if err := fileutils.RemoveFile(specs.PgWalPath); err != nil { - contextLogger.Error(err, "Deleting old path", "path", specs.PgWalPath) - return err - } - - // We moved all the files now we should create the proper symlink - contextLogger.Debug("Creating symlink", "from", specs.PgWalPath, "to", specs.PgWalVolumePgWalPath) - return os.Symlink(specs.PgWalVolumePgWalPath, specs.PgWalPath) -} - // ReconcileTablespaces ensures the mount points created for the tablespaces // are there, and creates a subdirectory in each of them, which will therefore // be owned by the `postgres` user (rather than `root` as the mount point), diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 3de0101272..b5292fa860 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -1544,7 +1544,7 @@ func (v *ClusterCustomValidator) validateWalStorageChange(r, old *apiv1.Cluster) field.Invalid( field.NewPath("spec", "walStorage"), r.Spec.WalStorage, - "walStorage cannot be disabled once the cluster is created"), + "walStorage cannot be disabled once configured"), } } diff --git a/pkg/reconciler/instance/storage/doc.go b/pkg/reconciler/instance/storage/doc.go new file mode 100644 index 0000000000..276a132db7 --- /dev/null +++ b/pkg/reconciler/instance/storage/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package storage contains the Instance storage reconcilers +package storage diff --git a/pkg/reconciler/instance/storage/reconciler.go b/pkg/reconciler/instance/storage/reconciler.go new file mode 100644 index 0000000000..b385c80fcd --- /dev/null +++ b/pkg/reconciler/instance/storage/reconciler.go @@ -0,0 +1,92 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package storage + +import ( + "context" + "io/fs" + "os" + + "github.com/cloudnative-pg/machinery/pkg/fileutils" + "github.com/cloudnative-pg/machinery/pkg/log" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" +) + +type walDirectoryReconcilerOptions struct { + // pgWalDirectory is the directory where PostgreSQL will look for WALs. 
+ // This is usually $PGDATA/pg_wal, and will be a symbolic link pointing + // to the separate WAL storage if configured. + pgWalDirectory string + + // walVolumeDirectory is the directory where the WAL volume is mounted. + walVolumeDirectory string + + // walVolumeWalDirectory is the directory where the WALs should be stored. + // This is usually inside of walVolumeDirectory + walVolumeWalDirectory string +} + +// ReconcileWalDirectory ensures that the `pg_wal` directory is moved to the attached volume (if present) +// and creates a symbolic link pointing to the new location. +func ReconcileWalDirectory(ctx context.Context) error { + return internalReconcileWalDirectory(ctx, walDirectoryReconcilerOptions{ + pgWalDirectory: specs.PgWalPath, + walVolumeDirectory: specs.PgWalVolumePath, + walVolumeWalDirectory: specs.PgWalVolumePgWalPath, + }) +} + +// internalReconcileWalDirectory is only meant to be used internally by unit tests +func internalReconcileWalDirectory(ctx context.Context, opts walDirectoryReconcilerOptions) error { + contextLogger := log.FromContext(ctx) + + // Important: for now walStorage cannot be disabled once configured + if pgWalExists, err := fileutils.FileExists(opts.walVolumeDirectory); err != nil { + return err + } else if !pgWalExists { + return nil + } + + // Check if `pg_wal` is already a symbolic link; if so, no further action is needed. + pgWalDirInfo, err := os.Lstat(opts.pgWalDirectory) + if err != nil { + return err + } + if pgWalDirInfo.Mode().Type() == fs.ModeSymlink { + return nil + } + + contextLogger.Info("Moving data", "from", opts.pgWalDirectory, "to", opts.walVolumeWalDirectory) + if err := fileutils.MoveDirectoryContent(opts.pgWalDirectory, opts.walVolumeWalDirectory); err != nil { + contextLogger.Error(err, "Moving data", "from", opts.pgWalDirectory, "to", + opts.walVolumeWalDirectory) + return err + } + + contextLogger.Debug("Deleting old path", "path", opts.pgWalDirectory) + if err := fileutils.RemoveFile(opts.pgWalDirectory); err != nil { + contextLogger.Error(err, "Deleting old path", "path", opts.pgWalDirectory) + return err + } + + contextLogger.Debug("Creating symlink", "from", opts.pgWalDirectory, "to", opts.walVolumeWalDirectory) + return os.Symlink(opts.walVolumeWalDirectory, opts.pgWalDirectory) +} diff --git a/pkg/reconciler/instance/storage/reconciler_test.go b/pkg/reconciler/instance/storage/reconciler_test.go new file mode 100644 index 0000000000..41e17f143a --- /dev/null +++ b/pkg/reconciler/instance/storage/reconciler_test.go @@ -0,0 +1,112 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package storage + +import ( + "io/fs" + "os" + "path" + + "github.com/cloudnative-pg/machinery/pkg/fileutils" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("WAL Storage reconciler", func() { + var pgDataDir string + var separateWALVolumeDir string + var separateWALVolumeWALDir string + var opts walDirectoryReconcilerOptions + + BeforeEach(func() { + tempDir := GinkgoT().TempDir() + pgDataDir = path.Join(tempDir, "pg_data") + separateWALVolumeDir = path.Join(tempDir, "separate_wal") + separateWALVolumeWALDir = path.Join(tempDir, "separate_wal", "pg_wal") + + opts = walDirectoryReconcilerOptions{ + pgWalDirectory: path.Join(pgDataDir, "pg_wal"), + walVolumeDirectory: separateWALVolumeDir, + walVolumeWalDirectory: separateWALVolumeWALDir, + } + }) + + It("will not error out if a separate WAL storage doesn't exist", func(ctx SpecContext) { + err := internalReconcileWalDirectory(ctx, opts) + Expect(err).ToNot(HaveOccurred()) + }) + + It("won't change anything if pg_wal is already a symlink", func(ctx SpecContext) { + err := fileutils.EnsureDirectoryExists(pgDataDir) + Expect(err).ToNot(HaveOccurred()) + + err = fileutils.EnsureDirectoryExists(separateWALVolumeDir) + Expect(err).ToNot(HaveOccurred()) + + err = os.Symlink(separateWALVolumeDir, opts.pgWalDirectory) + Expect(err).ToNot(HaveOccurred()) + + err = internalReconcileWalDirectory(ctx, opts) + Expect(err).ToNot(HaveOccurred()) + }) + + It("moves the existing WALs to the target volume", func(ctx SpecContext) { + wal1 := path.Join(opts.pgWalDirectory, "000000010000000100000001") + wal2 := path.Join(opts.pgWalDirectory, "000000010000000100000002") + wal3 := path.Join(opts.pgWalDirectory, "000000010000000100000003") + + By("creating a pg_wal directory and a separate WAL volume directory", func() { + err := fileutils.EnsureDirectoryExists(opts.pgWalDirectory) + Expect(err).ToNot(HaveOccurred()) + + err = fileutils.EnsureDirectoryExists(separateWALVolumeDir) + Expect(err).ToNot(HaveOccurred()) + }) + + By("creating a few WALs file in pg_wal", func() { + _, err := fileutils.WriteStringToFile(wal1, "wal content") + Expect(err).ToNot(HaveOccurred()) + + _, err = fileutils.WriteStringToFile(wal2, "wal content") + Expect(err).ToNot(HaveOccurred()) + + _, err = fileutils.WriteStringToFile(wal3, "wal content") + Expect(err).ToNot(HaveOccurred()) + }) + + By("reconciling the WALs to the target volume", func() { + err := internalReconcileWalDirectory(ctx, opts) + Expect(err).ToNot(HaveOccurred()) + }) + + By("checking if pg_wal is a symlink", func() { + pgWalDirInfo, err := os.Lstat(opts.pgWalDirectory) + Expect(err).ToNot(HaveOccurred()) + Expect(pgWalDirInfo.Mode().Type()).To(Equal(fs.ModeSymlink)) + }) + + By("checking the WAL files are in the target volume", func() { + Expect(fileutils.FileExists(wal1)).To(BeTrue()) + Expect(fileutils.FileExists(wal2)).To(BeTrue()) + Expect(fileutils.FileExists(wal3)).To(BeTrue()) + }) + }) +}) diff --git a/pkg/reconciler/instance/storage/suite_test.go b/pkg/reconciler/instance/storage/suite_test.go new file mode 100644 index 0000000000..764c55395f --- /dev/null +++ b/pkg/reconciler/instance/storage/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package storage + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestStorage(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Storage Reconciler") +} From 507b290095d3b9614ca80287c5c05cea90d27377 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Tue, 15 Apr 2025 13:21:25 +0200 Subject: [PATCH 520/836] fix: avoid returning non-zero and error in reconcilers (#7342) Closes #7061 Signed-off-by: Marco Nenciarini --- internal/controller/backup_controller.go | 17 ++++++-- internal/controller/cluster_controller.go | 40 +++++++++++++------ internal/controller/cluster_create.go | 4 +- internal/controller/plugins.go | 5 ++- .../controller/instance_controller.go | 2 +- pkg/reconciler/hibernation/reconciler.go | 9 +++-- 6 files changed, 54 insertions(+), 23 deletions(-) diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 1f5948a3d5..d228094922 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -241,7 +241,10 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr contextLogger.Info("Couldn't find target pod, will retry in 30 seconds", "target", cluster.Status.TargetPrimary) backup.Status.Phase = apiv1.BackupPhasePending - return ctrl.Result{RequeueAfter: 30 * time.Second}, r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)) + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } if err != nil { tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("while getting pod: %w", err)) @@ -256,7 +259,10 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr backup.Status.Phase = apiv1.BackupPhasePending r.Recorder.Eventf(&backup, "Warning", "BackupPending", "Backup target pod not ready: %s", cluster.Status.TargetPrimary) - return ctrl.Result{RequeueAfter: 30 * time.Second}, r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)) + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } contextLogger.Info("Starting backup", @@ -390,8 +396,11 @@ func (r *BackupReconciler) reconcileSnapshotBackup( ) origBackup := backup.DeepCopy() backup.Status.Phase = apiv1.BackupPhasePending - err := r.Patch(ctx, backup, client.MergeFrom(origBackup)) - return &ctrl.Result{RequeueAfter: 30 * time.Second}, err + if err := r.Patch(ctx, backup, client.MergeFrom(origBackup)); err != nil { + return nil, err + } + + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil } if err != nil { tryFlagBackupAsFailed(ctx, r.Client, backup, fmt.Errorf("while getting pod: %w", err)) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index f483e67e55..08d235e3cd 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -333,10 +333,12 
@@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste if cluster.ShouldPromoteFromReplicaCluster() { if !(cluster.Status.Phase == apiv1.PhaseReplicaClusterPromotion || cluster.Status.Phase == apiv1.PhaseUnrecoverable) { - return ctrl.Result{RequeueAfter: 1 * time.Second}, r.RegisterPhase(ctx, + if err := r.RegisterPhase(ctx, cluster, apiv1.PhaseReplicaClusterPromotion, - "Replica cluster promotion in progress") + "Replica cluster promotion in progress"); err != nil { + return ctrl.Result{}, err + } } return ctrl.Result{RequeueAfter: 1 * time.Second}, nil } @@ -417,15 +419,17 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste contextLogger.Warning( "Failed to extract instance status from ready instances. Attempting to requeue...", ) - registerPhaseErr := r.RegisterPhase( + if err := r.RegisterPhase( ctx, cluster, "Instance Status Extraction Error: HTTP communication issue", "Communication issue detected: The operator was unable to receive the status from all the ready instances. "+ "This may be due to network restrictions such as NetworkPolicy and/or any other network plugin setting. "+ "Please verify your network configuration.", - ) - return ctrl.Result{RequeueAfter: 10 * time.Second}, registerPhaseErr + ); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } if res, err := r.ensureNoFailoverOnFullDisk(ctx, cluster, instancesStatus); err != nil || !res.IsZero() { @@ -553,13 +557,15 @@ func (r *ClusterReconciler) ensureNoFailoverOnFullDisk( reason := "Insufficient disk space detected in one or more pods is preventing PostgreSQL from running." + "Please verify your storage settings. Further information inside .status.instancesReportedState" - registerPhaseErr := r.RegisterPhase( + if err := r.RegisterPhase( ctx, cluster, "Not enough disk space", reason, - ) - return ctrl.Result{RequeueAfter: 10 * time.Second}, registerPhaseErr + ); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } func (r *ClusterReconciler) handleSwitchover( @@ -753,8 +759,11 @@ func (r *ClusterReconciler) reconcileResources( } if len(resources.instances.Items) > 0 && resources.noInstanceIsAlive() { - return ctrl.Result{RequeueAfter: 1 * time.Second}, r.RegisterPhase(ctx, cluster, apiv1.PhaseUnrecoverable, - "No pods are active, the cluster needs manual intervention ") + if err := r.RegisterPhase(ctx, cluster, apiv1.PhaseUnrecoverable, + "No pods are active, the cluster needs manual intervention "); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 1 * time.Second}, nil } // If we still need more instances, we need to wait before setting healthy status @@ -840,8 +849,15 @@ func (r *ClusterReconciler) processUnschedulableInstances( } if podRollout := isPodNeedingRollout(ctx, pod, cluster); podRollout.required { - return &ctrl.Result{RequeueAfter: 1 * time.Second}, - r.upgradePod(ctx, cluster, pod, fmt.Sprintf("recreating unschedulable pod: %s", podRollout.reason)) + if err := r.upgradePod( + ctx, + cluster, + pod, + fmt.Sprintf("recreating unschedulable pod: %s", podRollout.reason), + ); err != nil { + return nil, err + } + return &ctrl.Result{RequeueAfter: 1 * time.Second}, nil } if !cluster.IsNodeMaintenanceWindowInProgress() || cluster.IsReusePVCEnabled() { diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index be7db3268a..1df2691714 100644 --- a/internal/controller/cluster_create.go 
+++ b/internal/controller/cluster_create.go @@ -1116,7 +1116,7 @@ func (r *ClusterReconciler) createPrimaryInstance( recoverySnapshot, nodeSerial, ); err != nil { - return ctrl.Result{RequeueAfter: time.Minute}, err + return ctrl.Result{}, fmt.Errorf("cannot create primary instance PVCs: %w", err) } // We are bootstrapping a cluster and in need to create the first node @@ -1295,7 +1295,7 @@ func (r *ClusterReconciler) joinReplicaInstance( storageSource, nodeSerial, ); err != nil { - return ctrl.Result{RequeueAfter: time.Minute}, err + return ctrl.Result{}, fmt.Errorf("cannot create replica instance PVCs: %w", err) } return ctrl.Result{RequeueAfter: 30 * time.Second}, ErrNextLoop diff --git a/internal/controller/plugins.go b/internal/controller/plugins.go index 72cc1c22f0..979fed5739 100644 --- a/internal/controller/plugins.go +++ b/internal/controller/plugins.go @@ -83,5 +83,8 @@ func setStatusPluginHook( "after", cluster.Status.PluginStatus, ) - return ctrl.Result{RequeueAfter: 5 * time.Second}, cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)) + if err := cli.Status().Patch(ctx, cluster, client.MergeFrom(origCluster)); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index e74f1b15ff..e659f1d778 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -211,7 +211,7 @@ func (r *InstanceReconciler) Reconcile( "tokenContent", tokenError.TokenContent(), ) // We should be waiting for WAL recovery to reach the LSN in the token - return reconcile.Result{RequeueAfter: 10 * time.Second}, err + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } } diff --git a/pkg/reconciler/hibernation/reconciler.go b/pkg/reconciler/hibernation/reconciler.go index e24176efe0..0f2cbe43d8 100644 --- a/pkg/reconciler/hibernation/reconciler.go +++ b/pkg/reconciler/hibernation/reconciler.go @@ -82,9 +82,12 @@ func reconcileDeletePods( podToBeDeleted = &instances[0] } - // The Pod list is sorted and the primary instance + // The Pod list is sorted, and the primary instance // will always be the first one, if present contextLogger.Info("Deleting Pod as requested by the hibernation procedure", "podName", podToBeDeleted.Name) - deletionResult := c.Delete(ctx, podToBeDeleted) - return &ctrl.Result{RequeueAfter: 5 * time.Second}, deletionResult + if err := c.Delete(ctx, podToBeDeleted); err != nil { + return nil, err + } + + return &ctrl.Result{RequeueAfter: 5 * time.Second}, nil } From df28e8652299ca26dc2604120e8978763fb11bce Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 15 Apr 2025 15:12:24 +0200 Subject: [PATCH 521/836] chore: avoid using a magic constant for the imageCatalog indexed field (#7364) Signed-off-by: Leonardo Cecchi --- internal/controller/cluster_image.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index 1df58d3b14..db955e65d2 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -194,7 +194,7 @@ func (r *ClusterReconciler) getClustersForImageCatalogsToClustersMapper( } listOps := &client.ListOptions{ - FieldSelector: fields.OneTermEqualSelector(".spec.imageCatalog.name", object.GetName()), + FieldSelector: fields.OneTermEqualSelector(imageCatalogKey, object.GetName()), Namespace: 
object.GetNamespace(), } @@ -241,7 +241,7 @@ func (r *ClusterReconciler) getClustersForClusterImageCatalogsToClustersMapper( } listOps := &client.ListOptions{ - FieldSelector: fields.OneTermEqualSelector(".spec.imageCatalog.name", object.GetName()), + FieldSelector: fields.OneTermEqualSelector(imageCatalogKey, object.GetName()), } err = r.List(ctx, &clusters, listOps) From aba2d509844e7063d3e7831a8c5c418a7efc3e04 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 15 Apr 2025 17:35:50 +0200 Subject: [PATCH 522/836] chore(deps): update module sigs.k8s.io/controller-tools to v0.17.3 (main) (#7332) --- Makefile | 2 +- config/crd/bases/postgresql.cnpg.io_backups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusters.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_databases.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_poolers.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_publications.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_subscriptions.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index cb808111fe..3bda6139f7 100644 --- a/Makefile +++ b/Makefile @@ -45,7 +45,7 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.6.0 -CONTROLLER_TOOLS_VERSION ?= v0.17.2 +CONTROLLER_TOOLS_VERSION ?= v0.17.3 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca GORELEASER_VERSION ?= v2.8.2 SPELLCHECK_VERSION ?= 0.48.0 diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index aecdbd49eb..c0923c1e5c 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: backups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml index 83d116861f..959811fa2e 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: clusterimagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 943ff26301..93aec9fc60 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: clusters.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index 
04841bc36a..bc3f23d8e1 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: databases.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml index a28ad6132e..9c1b49f9ce 100644 --- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: imagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index bb2c120f1e..f6a927772c 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: poolers.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_publications.yaml b/config/crd/bases/postgresql.cnpg.io_publications.yaml index bbeb13ee9e..a6ae1ed765 100644 --- a/config/crd/bases/postgresql.cnpg.io_publications.yaml +++ b/config/crd/bases/postgresql.cnpg.io_publications.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: publications.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml index 6c2406a879..92abc21fc9 100644 --- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: scheduledbackups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml index e93bf37d10..8213b82dd5 100644 --- a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml +++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.17.3 name: subscriptions.postgresql.cnpg.io spec: group: postgresql.cnpg.io From c7432d793fb5b1e1b44280ee24fa12e37080ae89 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Wed, 16 Apr 2025 08:40:26 +0100 Subject: [PATCH 523/836] ci: add digest to operator manifest for testing (#7348) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Given that we produce multiple images with the same tags for 
testing, the upgrade tests can fail now that ImagePullPolicy is set to Always. Fix the digest in the manifest to properly specify the image that is used. Closes #7347 Signed-off-by: Francesco Canovai Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- .github/workflows/continuous-delivery.yml | 12 ++++++++++++ Makefile | 9 ++++++++- hack/e2e/run-e2e-kind.sh | 2 ++ hack/e2e/run-e2e.sh | 4 ++++ 4 files changed, 26 insertions(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 2e477351f4..a082bbdd72 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -259,6 +259,8 @@ jobs: author_name: ${{ steps.build-meta.outputs.author_name }} author_email: ${{ steps.build-meta.outputs.author_email }} controller_img: ${{ env.CONTROLLER_IMG }} + controller_img_digest: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['containerimage.digest'] }} + controller_img_prime_digest: ${{ steps.build-prime.outputs.digest }} controller_img_ubi: ${{ env.CONTROLLER_IMG_UBI }} index_img: ${{ env.INDEX_IMG }} bundle_img: ${{ env.BUNDLE_IMG }} @@ -407,6 +409,7 @@ jobs: id: generate-manifest env: CONTROLLER_IMG: ${{ env.CONTROLLER_IMG }} + CONTROLLER_IMG_DIGEST: ${{ fromJSON(steps.bake-push.outputs.metadata)['distroless']['containerimage.digest'] }} run: | make generate-manifest - @@ -448,6 +451,7 @@ jobs: # upgrade test name: Build and push image for upgrade test uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6 + id: build-prime if: | always() && !cancelled() && needs.evaluate_options.outputs.test_level == '4' @@ -607,6 +611,8 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} + CONTROLLER_IMG_DIGEST: ${{ needs.buildx.outputs.controller_img_digest }} + CONTROLLER_IMG_PRIME_DIGEST: ${{ needs.buildx.outputs.controller_img_prime_digest }} E2E_DEFAULT_STORAGE_CLASS: standard E2E_CSI_STORAGE_CLASS: csi-hostpath-sc E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-hostpath-snapclass @@ -853,6 +859,8 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} + CONTROLLER_IMG_DIGEST: ${{ needs.buildx.outputs.controller_img_digest }} + CONTROLLER_IMG_PRIME_DIGEST: ${{ needs.buildx.outputs.controller_img_prime_digest }} E2E_DEFAULT_STORAGE_CLASS: rook-ceph-block E2E_CSI_STORAGE_CLASS: rook-ceph-block E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: csi-rbdplugin-snapclass @@ -1191,6 +1199,8 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} + CONTROLLER_IMG_DIGEST: ${{ needs.buildx.outputs.controller_img_digest }} + CONTROLLER_IMG_PRIME_DIGEST: ${{ needs.buildx.outputs.controller_img_prime_digest }} E2E_DEFAULT_STORAGE_CLASS: gp3 E2E_CSI_STORAGE_CLASS: gp3 E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: ebs-csi-snapclass @@ -1579,6 +1589,8 @@ jobs: DEBUG: "true" BUILD_IMAGE: "false" CONTROLLER_IMG: ${{ needs.generate-jobs.outputs.controller_img }} + CONTROLLER_IMG_DIGEST: ${{ needs.buildx.outputs.controller_img_digest }} + CONTROLLER_IMG_PRIME_DIGEST: ${{ needs.buildx.outputs.controller_img_prime_digest }} E2E_DEFAULT_STORAGE_CLASS: standard-rwo E2E_CSI_STORAGE_CLASS: standard-rwo E2E_DEFAULT_VOLUMESNAPSHOT_CLASS: pd-csi-snapclass diff --git a/Makefile b/Makefile index 3bda6139f7..5942e74070 100644 --- a/Makefile +++ b/Makefile @@ -32,6 +32,13 @@ CATALOG_IMG ?= ${CONTROLLER_IMG}-catalog BUNDLE_IMG ?= ${CONTROLLER_IMG}-bundle INDEX_IMG ?= 
${CONTROLLER_IMG}-index
+# Define CONTROLLER_IMG_WITH_DIGEST by appending CONTROLLER_IMG_DIGEST to CONTROLLER_IMG with '@' if CONTROLLER_IMG_DIGEST is set
+ifneq ($(CONTROLLER_IMG_DIGEST),)
+CONTROLLER_IMG_WITH_DIGEST := $(CONTROLLER_IMG)@$(CONTROLLER_IMG_DIGEST)
+else
+CONTROLLER_IMG_WITH_DIGEST := $(CONTROLLER_IMG)
+endif
+
 COMMIT := $(shell git rev-parse --short HEAD || echo unknown)
 DATE := $(shell git log -1 --pretty=format:'%ad' --date short)
 VERSION := $(shell git describe --tags --match 'v*' | sed -e 's/^v//; s/-g[0-9a-f]\+$$//; s/-\([0-9]\+\)$$/-dev\1/')
@@ -215,7 +222,7 @@ generate-manifest: manifests kustomize ## Generate manifest used for deployment.
 	cd $$CONFIG_TMP_DIR/default ;\
 	$(KUSTOMIZE) edit add patch --path manager_image_pull_secret.yaml ;\
 	cd $$CONFIG_TMP_DIR/manager ;\
-	$(KUSTOMIZE) edit set image controller="${CONTROLLER_IMG}" ;\
+	$(KUSTOMIZE) edit set image controller="${CONTROLLER_IMG_WITH_DIGEST}" ;\
 	$(KUSTOMIZE) edit add patch --path env_override.yaml ;\
 	$(KUSTOMIZE) edit add configmap controller-manager-env \
 		--from-literal="POSTGRES_IMAGE_NAME=${POSTGRES_IMAGE_NAME}" \
diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh
index 43c9ab91cc..11d91182e8 100755
--- a/hack/e2e/run-e2e-kind.sh
+++ b/hack/e2e/run-e2e-kind.sh
@@ -45,6 +45,8 @@ export E2E_PRE_ROLLING_UPDATE_IMG=${E2E_PRE_ROLLING_UPDATE_IMG:-${POSTGRES_IMG%.
 export E2E_DEFAULT_STORAGE_CLASS=${E2E_DEFAULT_STORAGE_CLASS:-standard}
 export E2E_CSI_STORAGE_CLASS=${E2E_CSI_STORAGE_CLASS:-csi-hostpath-sc}
 export E2E_DEFAULT_VOLUMESNAPSHOT_CLASS=${E2E_DEFAULT_VOLUMESNAPSHOT_CLASS:-csi-hostpath-snapclass}
+export CONTROLLER_IMG_DIGEST=${CONTROLLER_IMG_DIGEST:-""}
+export CONTROLLER_IMG_PRIME_DIGEST=${CONTROLLER_IMG_PRIME_DIGEST:-""}
 export DOCKER_REGISTRY_MIRROR=${DOCKER_REGISTRY_MIRROR:-}
 export TEST_CLOUD_VENDOR="local"
diff --git a/hack/e2e/run-e2e.sh b/hack/e2e/run-e2e.sh
index b4e38497ff..7ddeb5b3d9 100755
--- a/hack/e2e/run-e2e.sh
+++ b/hack/e2e/run-e2e.sh
@@ -28,6 +28,8 @@ fi
 ROOT_DIR=$(realpath "$(dirname "$0")/../../")
 CONTROLLER_IMG=${CONTROLLER_IMG:-$("${ROOT_DIR}/hack/setup-cluster.sh" print-image)}
+CONTROLLER_IMG_DIGEST=${CONTROLLER_IMG_DIGEST:-""}
+CONTROLLER_IMG_PRIME_DIGEST=${CONTROLLER_IMG_PRIME_DIGEST:-""}
 TEST_UPGRADE_TO_V1=${TEST_UPGRADE_TO_V1:-true}
 POSTGRES_IMG=${POSTGRES_IMG:-$(grep 'DefaultImageName.*=' "${ROOT_DIR}/pkg/versions/versions.go" | cut -f 2 -d \")}
 # variable need export otherwise be invisible in e2e test case
@@ -87,6 +89,7 @@ if [[ "${TEST_UPGRADE_TO_V1}" != "false" ]] && [[ "${TEST_CLOUD_VENDOR}" != "ocp
 # - built and pushed to nodes or the local registry (by setup-cluster.sh)
 # - built by the `buildx` step in continuous delivery and pushed to the test registry
 make CONTROLLER_IMG="${CONTROLLER_IMG}" POSTGRES_IMG="${POSTGRES_IMG}" \
+	CONTROLLER_IMG_DIGEST="${CONTROLLER_IMG_DIGEST}" \
 	OPERATOR_MANIFEST_PATH="${ROOT_DIR}/tests/e2e/fixtures/upgrade/current-manifest.yaml" \
 	generate-manifest
 # In order to test the case of upgrading from the current operator
@@ -101,6 +104,7 @@ if [[ "${TEST_UPGRADE_TO_V1}" != "false" ]] && [[ "${TEST_CLOUD_VENDOR}" != "ocp
 # added to the tag by convention, which assumes the image is in place.
 # This manifest is used to upgrade into in the upgrade_test E2E.
make CONTROLLER_IMG="${CONTROLLER_IMG}-prime" POSTGRES_IMG="${POSTGRES_IMG}" \ + CONTROLLER_IMG_DIGEST="${CONTROLLER_IMG_PRIME_DIGEST}" \ OPERATOR_MANIFEST_PATH="${ROOT_DIR}/tests/e2e/fixtures/upgrade/current-manifest-prime.yaml" \ generate-manifest From 0fa2258e9b89c9055e3b482e2a56d147369452e6 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 16 Apr 2025 11:46:11 +0200 Subject: [PATCH 524/836] feat(cnpgi,snapshot): support WAL recovery via plugins during snapshot restore (#7284) This patch enables CNPG-i plugins to serve as a source of WALs during recovery from volume snapshots. Previously, plugins were required to provide both WALs and PGDATA, which prevented their use in snapshot-based recovery scenarios. With this change, plugins can now be used solely for WAL retrieval during recovery. Closes #7275 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- internal/cmd/manager/instance/restore/cmd.go | 10 +- .../manager/instance/restoresnapshot/cmd.go | 125 +++++++++++++----- .../instance/restoresnapshot/restore.go | 74 +++++++++++ pkg/management/postgres/restore.go | 72 +++++----- 4 files changed, 210 insertions(+), 71 deletions(-) create mode 100644 internal/cmd/manager/instance/restoresnapshot/restore.go diff --git a/internal/cmd/manager/instance/restore/cmd.go b/internal/cmd/manager/instance/restore/cmd.go index 7d8bc50926..2cdea6e1c8 100644 --- a/internal/cmd/manager/instance/restore/cmd.go +++ b/internal/cmd/manager/instance/restore/cmd.go @@ -44,10 +44,12 @@ import ( // NewCmd creates the "restore" subcommand func NewCmd() *cobra.Command { - var clusterName string - var namespace string - var pgData string - var pgWal string + var ( + clusterName string + namespace string + pgData string + pgWal string + ) cmd := &cobra.Command{ Use: "restore [flags]", diff --git a/internal/cmd/manager/instance/restoresnapshot/cmd.go b/internal/cmd/manager/instance/restoresnapshot/cmd.go index 57a40825ea..72ea1d1910 100644 --- a/internal/cmd/manager/instance/restoresnapshot/cmd.go +++ b/internal/cmd/manager/instance/restoresnapshot/cmd.go @@ -17,22 +17,30 @@ limitations under the License. 
SPDX-License-Identifier: Apache-2.0 */ -// Package restoresnapshot implements the "instance restoresnapshot" subcommand of the operator package restoresnapshot import ( "context" "encoding/base64" + "errors" "os" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/spf13/cobra" - ctrl "sigs.k8s.io/controller-runtime/pkg/client" - + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" ) // NewCmd creates the "restoresnapshot" subcommand @@ -50,21 +58,47 @@ func NewCmd() *cobra.Command { cmd := &cobra.Command{ Use: "restoresnapshot [flags]", SilenceErrors: true, - PreRunE: func(cmd *cobra.Command, _ []string) error { - return management.WaitForGetCluster(cmd.Context(), ctrl.ObjectKey{ - Name: clusterName, - Namespace: namespace, - }) - }, RunE: func(cmd *cobra.Command, _ []string) error { - ctx := cmd.Context() - contextLogger := log.FromContext(ctx) - - info := postgres.InitInfo{ - ClusterName: clusterName, - Namespace: namespace, - PgData: pgData, - PgWal: pgWal, + contextLogger := log.FromContext(cmd.Context()) + + // Canceling this context + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + // Step 1: build the manager + mgr, err := buildManager(clusterName, namespace) + if err != nil { + contextLogger.Error(err, "while building the manager") + return err + } + + // Step 1.1: add the local webserver to the manager + localSrv, err := webserver.NewLocalWebServer( + postgres.NewInstance().WithClusterName(clusterName).WithNamespace(namespace), + mgr.GetClient(), + mgr.GetEventRecorderFor("local-webserver"), + ) + if err != nil { + return err + } + if err = mgr.Add(localSrv); err != nil { + contextLogger.Error(err, "unable to add local webserver runnable") + return err + } + + // Step 2: add the restore process to the manager + restoreProcess := restoreRunnable{ + cli: mgr.GetClient(), + clusterName: clusterName, + namespace: namespace, + pgData: pgData, + pgWal: pgWal, + immediate: immediate, + cancel: cancel, + } + if mgr.Add(&restoreProcess) != nil { + contextLogger.Error(err, "while building the restore process") + return err } if backupLabel != "" { @@ -72,7 +106,7 @@ func NewCmd() *cobra.Command { if err != nil { return err } - info.BackupLabelFile = res + restoreProcess.backupLabelFile = res } if tablespaceMap != "" { @@ -80,15 +114,23 @@ func NewCmd() *cobra.Command { if err != nil { return err } - info.TablespaceMapFile = res + restoreProcess.tablespaceMapFile = res } - err := execute(ctx, info, immediate) - if err != nil { - contextLogger.Error(err, "Error while recovering Volume Snapshot backup") + // Step 3: start everything + if err := mgr.Start(ctx); err != nil { + contextLogger.Error(err, "restore error") + return err + } + + if !errors.Is(ctx.Err(), context.Canceled) { + contextLogger.Error(err, "error while recovering backup") + 
return err } - return err + + return nil }, + PostRunE: func(cmd *cobra.Command, _ []string) error { if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil { return err @@ -111,11 +153,32 @@ func NewCmd() *cobra.Command { return cmd } -func execute(ctx context.Context, info postgres.InitInfo, immediate bool) error { - typedClient, err := management.NewControllerRuntimeClient() - if err != nil { - return err - } - - return info.RestoreSnapshot(ctx, typedClient, immediate) +func buildManager(clusterName string, namespace string) (manager.Manager, error) { + return controllerruntime.NewManager(controllerruntime.GetConfigOrDie(), controllerruntime.Options{ + Scheme: scheme.BuildWithAllKnownScheme(), + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &apiv1.Cluster{}: { + Field: fields.OneTermEqualSelector("metadata.name", clusterName), + Namespaces: map[string]cache.Config{ + namespace: {}, + }, + }, + }, + }, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.Secret{}, + &corev1.ConfigMap{}, + // todo(armru): we should remove the backup endpoints from the local webserver + &apiv1.Backup{}, + }, + }, + }, + LeaderElection: false, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + }) } diff --git a/internal/cmd/manager/instance/restoresnapshot/restore.go b/internal/cmd/manager/instance/restoresnapshot/restore.go new file mode 100644 index 0000000000..1a065b12ce --- /dev/null +++ b/internal/cmd/manager/instance/restoresnapshot/restore.go @@ -0,0 +1,74 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package restoresnapshot + +import ( + "context" + "fmt" + + "github.com/cloudnative-pg/machinery/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/cloudnative-pg/cloudnative-pg/pkg/management" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +type restoreRunnable struct { + cli client.Client + clusterName string + namespace string + pgData string + pgWal string + backupLabelFile []byte + tablespaceMapFile []byte + immediate bool + cancel context.CancelFunc +} + +func (r *restoreRunnable) Start(ctx context.Context) error { + contextLogger := log.FromContext(ctx) + + // we will wait this way for the mgr and informers to be online + if err := management.WaitForGetClusterWithClient(ctx, r.cli, client.ObjectKey{ + Name: r.clusterName, + Namespace: r.namespace, + }); err != nil { + return fmt.Errorf("while waiting for API server connectivity: %w", err) + } + + info := postgres.InitInfo{ + ClusterName: r.clusterName, + Namespace: r.namespace, + PgData: r.pgData, + PgWal: r.pgWal, + BackupLabelFile: r.backupLabelFile, + TablespaceMapFile: r.tablespaceMapFile, + } + + if err := info.RestoreSnapshot(ctx, r.cli, r.immediate); err != nil { + contextLogger.Error(err, "Error while restoring a backup") + return err + } + + // the backup was restored correctly and we now ask + // the manager to quit + r.cancel() + return nil +} diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 37e2afceb6..1012fdf03a 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -86,6 +86,7 @@ var ( ) // RestoreSnapshot restores a PostgreSQL cluster from a volumeSnapshot +// nolint:gocognit,gocyclo func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, immediate bool) error { contextLogger := log.FromContext(ctx) @@ -144,9 +145,21 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm } } - backup, env, err := info.createBackupObjectForSnapshotRestore(ctx, cli, cluster) - if err != nil { - return err + var envs []string + restoreCmd := fmt.Sprintf( + "/controller/manager wal-restore --log-destination %s/%s.json %%f %%p", + postgresSpec.LogPath, postgresSpec.LogFileName) + config := fmt.Sprintf( + "recovery_target_action = promote\n"+ + "restore_command = '%s'\n", + restoreCmd) + + // nolint:nestif + if pluginConfiguration := cluster.GetRecoverySourcePlugin(); pluginConfiguration == nil { + envs, config, err = info.createEnvAndConfigForSnapshotRestore(ctx, cli, cluster) + if err != nil { + return err + } } if _, err := info.restoreCustomWalDir(ctx); err != nil { @@ -156,7 +169,13 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm if err := info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { return err } - + // we need a migration here, otherwise the server will not start up if + // we recover from a base which has postgresql.auto.conf + // the override.conf and include statement is present, what we need to do is to + // migrate the content + if _, err := info.GetInstance().migratePostgresAutoConfFile(ctx); err != nil { + return err + } if cluster.IsReplica() { server, ok := cluster.ExternalCluster(cluster.Spec.ReplicaCluster.Source) if !ok { @@ -178,32 +197,31 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm return err } - if err := info.writeRestoreWalConfig(ctx, backup, cluster); err != nil { + if err := 
info.writeCustomRestoreWalConfig(cluster, config); err != nil { return err } - return info.ConfigureInstanceAfterRestore(ctx, cluster, env) + return info.ConfigureInstanceAfterRestore(ctx, cluster, envs) } -// createBackupObjectForSnapshotRestore creates a fake Backup object that can be used during the -// snapshot restore process -func (info InitInfo) createBackupObjectForSnapshotRestore( +// createEnvAndConfigForSnapshotRestore creates env and config for snapshot restore +func (info InitInfo) createEnvAndConfigForSnapshotRestore( ctx context.Context, typedClient client.Client, cluster *apiv1.Cluster, -) (*apiv1.Backup, []string, error) { +) ([]string, string, error) { contextLogger := log.FromContext(ctx) sourceName := cluster.Spec.Bootstrap.Recovery.Source if sourceName == "" { - return nil, nil, fmt.Errorf("recovery source not specified") + return nil, "", fmt.Errorf("recovery source not specified") } contextLogger.Info("Recovering from external cluster", "sourceName", sourceName) server, found := cluster.ExternalCluster(sourceName) if !found { - return nil, nil, fmt.Errorf("missing external cluster: %v", sourceName) + return nil, "", fmt.Errorf("missing external cluster: %v", sourceName) } serverName := server.GetServerName() @@ -214,10 +232,10 @@ func (info InitInfo) createBackupObjectForSnapshotRestore( server.BarmanObjectStore, os.Environ()) if err != nil { - return nil, nil, err + return nil, "", err } - return &apiv1.Backup{ + backup := &apiv1.Backup{ Spec: apiv1.BackupSpec{ Cluster: apiv1.LocalObjectReference{ Name: serverName, @@ -231,7 +249,10 @@ func (info InitInfo) createBackupObjectForSnapshotRestore( ServerName: serverName, Phase: apiv1.BackupPhaseCompleted, }, - }, env, nil + } + + config, err := getRestoreWalConfig(ctx, backup) + return env, config, err } // Restore restores a PostgreSQL cluster from a backup into the object storage @@ -618,27 +639,6 @@ func (info InitInfo) loadBackupFromReference( return &backup, env, nil } -// writeRestoreWalConfig writes a `custom.conf` allowing PostgreSQL -// to complete the WAL recovery from the object storage and then start -// as a new primary -func (info InitInfo) writeRestoreWalConfig( - ctx context.Context, - backup *apiv1.Backup, - cluster *apiv1.Cluster, -) error { - conf, err := getRestoreWalConfig(ctx, backup) - if err != nil { - return err - } - recoveryFileContents := fmt.Sprintf( - "%s\n"+ - "%s", - conf, - cluster.Spec.Bootstrap.Recovery.RecoveryTarget.BuildPostgresOptions()) - - return info.writeRecoveryConfiguration(cluster, recoveryFileContents) -} - func (info InitInfo) writeCustomRestoreWalConfig(cluster *apiv1.Cluster, conf string) error { recoveryFileContents := fmt.Sprintf( "%s\n"+ From d1fbfdcfbc753a4d9b9285eb7db6764f0a19d0f1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 15:16:50 +0200 Subject: [PATCH 525/836] chore(deps): update kubernetes csi (main) (#7361) This PR contains the following updates: https://github.com/kubernetes-csi/external-attacher `v4.8.0` -> `v4.8.1` https://github.com/kubernetes-csi/external-resizer `v1.13.1` -> `v1.13.2` https://github.com/kubernetes-csi/external-snapshotter `v8.2.0` -> `v8.2.1` https://github.com/rook/rook `v1.16.3` -> `v1.16.6` --- .github/workflows/continuous-delivery.yml | 4 ++-- hack/setup-cluster.sh | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index a082bbdd72..547cc39243 100644 --- 
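One detail of the default (plugin-based) branch above is easy to misread: the doubled `%%` in the `restore_command` format string is how the literal `%f`/`%p` placeholders survive `fmt.Sprintf`, so that PostgreSQL, not Go, expands them at recovery time. A self-contained illustration with made-up paths (the real code uses `postgresSpec.LogPath` and `postgresSpec.LogFileName`):

```go
package main

import "fmt"

func main() {
	// Mirrors the Sprintf calls in RestoreSnapshot above; values are illustrative.
	logPath, logFileName := "/controller/log", "postgres"
	restoreCmd := fmt.Sprintf(
		"/controller/manager wal-restore --log-destination %s/%s.json %%f %%p",
		logPath, logFileName)
	fmt.Printf("recovery_target_action = promote\nrestore_command = '%s'\n", restoreCmd)
	// Output:
	// recovery_target_action = promote
	// restore_command = '/controller/manager wal-restore --log-destination /controller/log/postgres.json %f %p'
}
```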
a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,8 +37,8 @@ env: GOLANG_VERSION: "1.24.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.27.0" - ROOK_VERSION: "v1.16.3" - EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.0" + ROOK_VERSION: "v1.16.6" + EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.1" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" BUILD_PUSH_CACHE_FROM: "" diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 93b6cde34d..6bf155697d 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -29,10 +29,10 @@ fi # Defaults KIND_NODE_DEFAULT_VERSION=v1.32.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.16.1 -EXTERNAL_SNAPSHOTTER_VERSION=v8.2.0 +EXTERNAL_SNAPSHOTTER_VERSION=v8.2.1 EXTERNAL_PROVISIONER_VERSION=v5.2.0 -EXTERNAL_RESIZER_VERSION=v1.13.1 -EXTERNAL_ATTACHER_VERSION=v4.8.0 +EXTERNAL_RESIZER_VERSION=v1.13.2 +EXTERNAL_ATTACHER_VERSION=v4.8.1 K8S_VERSION=${K8S_VERSION-} KUBECTL_VERSION=${KUBECTL_VERSION-} CSI_DRIVER_HOST_PATH_VERSION=${CSI_DRIVER_HOST_PATH_VERSION:-$CSI_DRIVER_HOST_PATH_DEFAULT_VERSION} From 07152a145604bae55f7986af7648c971cfff69cb Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 16 Apr 2025 18:39:03 +0200 Subject: [PATCH 526/836] docs: release notes for 1.26.0-rc2 (#7378) Closes #7376 Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- docs/src/preview_version.md | 4 ++-- docs/src/release_notes/v1.26.md | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index 273f5b60af..d5b7c5c030 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -36,10 +36,10 @@ are not backwards compatible and could be removed entirely. There are currently no preview versions available. -The current preview version is **1.26.0-rc1**. +The current preview version is **1.26.0-rc2**. For more information on the current preview version and how to test, please view the links below: -- [Announcement](https://cloudnative-pg.io/releases/cloudnative-pg-1-26.0-rc1-released/) +- [Announcement](https://cloudnative-pg.io/releases/cloudnative-pg-1-26.0-rc2-released/) - [Documentation](https://cloudnative-pg.io/documentation/preview/) diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index b9ce2daf28..ae0aab986c 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -7,6 +7,32 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.26) on the release branch in GitHub. +## Version 1.26.0-rc2 + +**Release date:** 16 April 2025 + +### Enhancements + +- Introduced support for WAL recovery via CNPG-I plugins during snapshot + restore. (#7284) + +- Removed the `ENABLE_AZURE_PVC_UPDATES` configuration, as it is no longer + required to resize Azure volumes correctly. The Azure CSI driver includes the + necessary fix as of version [1.11.0](https://github.com/kubernetes-sigs/azuredisk-csi-driver/releases/tag/v1.11.0). (#7297) + +### Security + +- Set `imagePullPolicy` to `Always` for the operator deployment to ensure that + images are always pulled from the registry, reducing the risk of using + outdated or potentially unsafe local images. 
(#7250) + +### Fixes (since RC1) + +- Improved declarative major version upgrades by incorporating `pg_controldata` + information when creating the new data directory, ensuring a more reliable + and seamless upgrade process. (#7274) + + ## Version 1.26.0-rc1 **Release date:** Mar 28, 2025 From 75b325c1be790bc0fc620e3386acfd38365780c7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 20:09:23 +0200 Subject: [PATCH 527/836] Version tag to 1.26.0-rc2 (#7381) Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- docs/src/installation_upgrade.md | 4 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.26.0-rc2.yaml | 18009 +++++++++++++++++++++++++++++ 4 files changed, 18029 insertions(+), 20 deletions(-) create mode 100644 releases/cnpg-1.26.0-rc2.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 5d00b272a3..a9ab979354 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -8,12 +8,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc1.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc2.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc1.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc2.yaml ``` You can verify that with: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 90e2edc290..eb4d9ba3df 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -31,11 +31,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.26.0-rc1 release of the plugin, for an Intel based +For example, let's install the 1.26.0-rc2 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc1/kubectl-cnpg_1.26.0-rc1_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc2/kubectl-cnpg_1.26.0-rc2_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -46,17 +46,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.26.0-rc1) ... -Setting up cnpg (1.26.0-rc1) ... +Unpacking cnpg (1.26.0-rc2) ... +Setting up cnpg (1.26.0-rc2) ... ``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.26.0-rc1 release for an +As in the example for `.rpm` packages, let's install the 1.26.0-rc2 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc1/kubectl-cnpg_1.26.0-rc1_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc2/kubectl-cnpg_1.26.0-rc2_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -70,7 +70,7 @@ Dependencies resolved. 
Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.26.0-rc1 @commandline 20 M + cnpg x86_64 1.26.0-rc2 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc1 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc1 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc1 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc2 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc1 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc1 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc1 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc2 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc1","build":{"Version":"1.26.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc2","build":{"Version":"1.26.0-rc2+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc1","build":{"Version":"1.26.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc2","build":{"Version":"1.26.0-rc2+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 15ef5a12d4..66cfbf4ba5 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.26.0-rc1" + Version = "1.26.0-rc2" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.4" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc1" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.26.0-rc1" + buildVersion = "1.26.0-rc2" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.26.0-rc2.yaml b/releases/cnpg-1.26.0-rc2.yaml new file mode 100644 index 0000000000..c1d7b515ad --- /dev/null +++ b/releases/cnpg-1.26.0-rc2.yaml @@ -0,0 +1,18009 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. 
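Returning briefly to the `pkg/versions` hunk above: `buildVersion` and `buildCommit` are plain package-level strings precisely so the release pipeline can override them at link time with `-ldflags -X`; the in-source values are only fallbacks. A standalone sketch of the mechanism, where the `buildDate` variable is assumed by analogy with the `Date` field visible in the operator log excerpt earlier, and the module path is shortened to `main`:

```go
package main

import "fmt"

// Fallback values compiled into the binary; a release build typically
// overrides them at link time, for example:
//
//	go build -ldflags "-X main.buildVersion=1.26.0-rc2 -X main.buildCommit=cc9bab17"
var (
	buildVersion = "dev"
	buildCommit  = "none"
	buildDate    = "unknown" // assumed variable, mirroring the "Date" field in the logs
)

func main() {
	fmt.Printf("version=%s commit=%s date=%s\n", buildVersion, buildCommit, buildDate)
}
```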
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. 
If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
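This generated schema is derived from the Go types in `api/v1`, so a backup request can be expressed programmatically as well as in YAML. A hedged sketch: constant and field names such as `BackupMethodVolumeSnapshot` and `BackupTargetStandby` follow the published CNPG `api/v1` package and should be verified against the release in use.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// NewNightlyBackup requests an online volume-snapshot backup and asks the
// operator to prefer the most up-to-date standby as the backup target.
func NewNightlyBackup() apiv1.Backup {
	return apiv1.Backup{
		ObjectMeta: metav1.ObjectMeta{Name: "nightly", Namespace: "pg"},
		Spec: apiv1.BackupSpec{
			Cluster: apiv1.LocalObjectReference{Name: "cluster-example"},
			Method:  apiv1.BackupMethodVolumeSnapshot,
			Online:  ptr.To(true),
			Target:  apiv1.BackupTargetStandby,
		},
	}
}
```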
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
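The `major` of each catalog entry must be unique, which the CEL `x-kubernetes-validations` rule just after this description enforces at admission time. In Go terms (type names per the CNPG `api/v1` package; treat this as illustrative), a valid catalog looks like:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

// Two entries with two distinct major versions: this satisfies the CEL
// uniqueness rule; repeating a major would be rejected at admission.
var catalog = apiv1.ClusterImageCatalog{
	ObjectMeta: metav1.ObjectMeta{Name: "postgresql"},
	Spec: apiv1.ImageCatalogSpec{
		Images: []apiv1.CatalogImage{
			{Major: 16, Image: "ghcr.io/cloudnative-pg/postgresql:16.4"},
			{Major: 17, Image: "ghcr.io/cloudnative-pg/postgresql:17.4"},
		},
	},
}
```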
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See the Kubernetes
+ documentation for more info on that
+ type: string
+ type: object
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
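+ # Example (illustrative, commented out, not part of the generated schema):
+ # a minimal `affinity` stanza of a Cluster manifest wired to the fields
+ # documented above. The cluster name, taint key and values are hypothetical.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Cluster
+ #   metadata:
+ #     name: cluster-example          # hypothetical name
+ #   spec:
+ #     instances: 3
+ #     affinity:
+ #       enablePodAntiAffinity: true
+ #       podAntiAffinityType: required
+ #       topologyKey: kubernetes.io/hostname
+ #       tolerations:
+ #       - key: dedicated             # hypothetical taint key
+ #         operator: Equal
+ #         value: postgres
+ #         effect: NoSchedule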
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to back up the data files.
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2`, and `snappy`.
+ enum:
+ - bzip2
+ - gzip
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. 
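+ # Example (illustrative, commented out): a `backup` stanza combining the
+ # barmanObjectStore, retentionPolicy and target fields documented above.
+ # The bucket path and Secret names/keys are hypothetical placeholders.
+ #
+ #   spec:
+ #     backup:
+ #       retentionPolicy: "30d"
+ #       target: prefer-standby
+ #       barmanObjectStore:
+ #         destinationPath: s3://backups/cluster-example   # hypothetical
+ #         s3Credentials:
+ #           accessKeyId:
+ #             name: aws-creds        # hypothetical Secret
+ #             key: ACCESS_KEY_ID
+ #           secretAccessKey:
+ #             name: aws-creds
+ #             key: ACCESS_SECRET_KEY
+ #         wal:
+ #           compression: gzip
+ #           maxParallel: 4
+ #         data:
+ #           compression: gzip
+ #           jobs: 2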
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
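+ # Example (illustrative, commented out): the volume snapshot backup fields
+ # described above, with a hypothetical VolumeSnapshotClass name.
+ #
+ #   spec:
+ #     backup:
+ #       volumeSnapshot:
+ #         className: csi-snapclass   # hypothetical VolumeSnapshotClass
+ #         online: true
+ #         onlineConfiguration:
+ #           immediateCheckpoint: true
+ #           waitForArchive: true
+ #         snapshotOwnerReference: cluster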
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. 
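+ # Example (illustrative, commented out): a `bootstrap.initdb.import` stanza
+ # of type `microservice`, per the schema above. The database and the
+ # externalCluster name are hypothetical.
+ #
+ #   spec:
+ #     bootstrap:
+ #       initdb:
+ #         import:
+ #           type: microservice
+ #           databases:
+ #           - app
+ #           source:
+ #             externalCluster: pg-source   # hypothetical external cluster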
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
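+ # Example (illustrative, commented out): referencing post-init SQL from a
+ # Secret and a ConfigMap through the *SQLRefs fields above; the object
+ # names and keys are hypothetical.
+ #
+ #   spec:
+ #     bootstrap:
+ #       initdb:
+ #         database: app
+ #         owner: app
+ #         postInitApplicationSQLRefs:
+ #           secretRefs:
+ #           - name: bootstrap-sql-secret   # hypothetical Secret
+ #             key: schema.sql
+ #           configMapRefs:
+ #           - name: bootstrap-sql-cm       # hypothetical ConfigMap
+ #             key: seed.sql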
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
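+ # Example (illustrative, commented out): point-in-time recovery driven by
+ # the `recoveryTarget` fields above. The source name and the timestamp are
+ # hypothetical; `pg-source` must match an entry in `externalClusters`.
+ #
+ #   spec:
+ #     bootstrap:
+ #       recovery:
+ #         source: pg-source          # hypothetical external cluster
+ #         recoveryTarget:
+ #           targetTime: "2024-09-01T10:00:00Z"
+ #           exclusive: false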
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
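+ # Example (illustrative, commented out): pointing the cluster at a
+ # pre-created client CA and replication certificate, per the fields above;
+ # both Secret names are hypothetical.
+ #
+ #   spec:
+ #     certificates:
+ #       clientCASecret: client-ca      # hypothetical Secret with ca.crt/ca.key
+ #       replicationTLSSecret: repl-tls # hypothetical kubernetes.io/tls Secret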
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
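+ # Example (illustrative, commented out): passing environment variables to
+ # the instance pods with the `env` field above; the Secret name and key
+ # are hypothetical.
+ #
+ #   spec:
+ #     env:
+ #     - name: TZ
+ #       value: Europe/Rome
+ #     - name: AWS_CA_BUNDLE
+ #       valueFrom:
+ #         secretKeyRef:
+ #           name: proxy-ca           # hypothetical Secret
+ #           key: bundle.pem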
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
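+ # Example (illustrative, commented out): an `ephemeralVolumeSource` claim
+ # template using the PersistentVolumeClaim spec fields above; the storage
+ # class name is hypothetical.
+ #
+ #   spec:
+ #     ephemeralVolumeSource:
+ #       volumeClaimTemplate:
+ #         spec:
+ #           accessModes: ["ReadWriteOnce"]
+ #           storageClassName: standard    # hypothetical StorageClass
+ #           resources:
+ #             requests:
+ #               storage: 1Gi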
+                      type: string
+                  type: object
+              required:
+              - spec
+              type: object
+            type: object
+            ephemeralVolumesSizeLimit:
+              description: |-
+                EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+                volumes
+              properties:
+                shm:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  description: Shm is the size limit of the shared memory volume
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
+                temporaryData:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  description: TemporaryData is the size limit of the temporary
+                    data volume
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
+              type: object
+            externalClusters:
+              description: The list of external clusters which are used in the configuration
+              items:
+                description: |-
+                  ExternalCluster represents the connection parameters to an
+                  external cluster which is used in the other sections of the configuration
+                properties:
+                  barmanObjectStore:
+                    description: The configuration for the barman-cloud tool suite
+                    properties:
+                      azureCredentials:
+                        description: The credentials to use to upload data to Azure
+                          Blob Storage
+                        properties:
+                          connectionString:
+                            description: The connection string to be used
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          inheritFromAzureAD:
+                            description: Use the Azure AD based authentication without
+                              explicitly providing the keys.
+                            type: boolean
+                          storageAccount:
+                            description: The storage account where to upload data
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          storageKey:
+                            description: |-
+                              The storage account key to be used in conjunction
+                              with the storage account name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          storageSasToken:
+                            description: |-
+                              A shared-access-signature to be used in conjunction with
+                              the storage account name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                        type: object
+                      data:
+                        description: |-
+                          The configuration to be used to backup the data files
+                          When not defined, base backup files will be stored uncompressed and may
+                          be unencrypted in the object store, according to the bucket default
+                          policy.
+                        properties:
+                          additionalCommandArgs:
+                            description: |-
+                              AdditionalCommandArgs represents additional arguments that can be appended
+                              to the 'barman-cloud-backup' command-line invocation. These arguments
+                              provide flexibility to customize the backup process further according to
+                              specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+                              behavior during execution.
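+                            # Sketch (illustrative only): a `data` section compressing
+                            # base backups and uploading with four parallel jobs; the
+                            # extra command-line flag is hypothetical:
+                            #
+                            #   data:
+                            #     compression: gzip
+                            #     jobs: 4
+                            #     immediateCheckpoint: true
+                            #     additionalCommandArgs:
+                            #     - "--read-timeout=60"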
+                            items:
+                              type: string
+                            type: array
+                          compression:
+                            description: |-
+                              Compress a backup file (a tar file per tablespace) while streaming it
+                              to the object store. Available options are empty string (no
+                              compression, default), `gzip`, `bzip2`, and `snappy`.
+                            enum:
+                            - bzip2
+                            - gzip
+                            - snappy
+                            type: string
+                          encryption:
+                            description: |-
+                              Whether to force the encryption of files (if the bucket is
+                              not already configured for that).
+                              Allowed options are empty string (use the bucket policy, default),
+                              `AES256` and `aws:kms`
+                            enum:
+                            - AES256
+                            - aws:kms
+                            type: string
+                          immediateCheckpoint:
+                            description: |-
+                              Control whether the I/O workload for the backup initial checkpoint will
+                              be limited, according to the `checkpoint_completion_target` setting on
+                              the PostgreSQL server. If set to true, an immediate checkpoint will be
+                              used, meaning PostgreSQL will complete the checkpoint as soon as
+                              possible. `false` by default.
+                            type: boolean
+                          jobs:
+                            description: |-
+                              The number of parallel jobs to be used to upload the backup, defaults
+                              to 2
+                            format: int32
+                            minimum: 1
+                            type: integer
+                        type: object
+                      destinationPath:
+                        description: |-
+                          The path where to store the backup (i.e. s3://bucket/path/to/folder)
+                          this path, with different destination folders, will be used for WALs
+                          and for data
+                        minLength: 1
+                        type: string
+                      endpointCA:
+                        description: |-
+                          EndpointCA stores the CA bundle of the barman endpoint.
+                          Useful when using self-signed certificates to avoid
+                          errors with certificate issuer and barman-cloud-wal-archive
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                      endpointURL:
+                        description: |-
+                          Endpoint to be used to upload data to the cloud,
+                          overriding the automatic endpoint discovery
+                        type: string
+                      googleCredentials:
+                        description: The credentials to use to upload data to Google
+                          Cloud Storage
+                        properties:
+                          applicationCredentials:
+                            description: The secret containing the Google Cloud
+                              Storage JSON file with the credentials
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          gkeEnvironment:
+                            description: |-
+                              If set to true, it will be presumed that it's running inside a GKE
+                              environment; defaults to false.
+                            type: boolean
+                        type: object
+                      historyTags:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          HistoryTags is a list of key value pairs that will be passed to the
+                          Barman --history-tags option.
+                        type: object
+                      s3Credentials:
+                        description: The credentials to use to upload data to S3
+                        properties:
+                          accessKeyId:
+                            description: The reference to the access key id
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          inheritFromIAMRole:
+                            description: Use the role based authentication without
+                              explicitly providing the keys.
+                            type: boolean
+                          region:
+                            description: The reference to the secret containing
+                              the region name
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          secretAccessKey:
+                            description: The reference to the secret access key
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
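+                                # Sketch (illustrative only): wiring `s3Credentials` to
+                                # a hypothetical Secret named `aws-creds`, with a
+                                # hypothetical destination bucket:
+                                #
+                                #   destinationPath: s3://backups/cluster-example
+                                #   s3Credentials:
+                                #     accessKeyId:
+                                #       name: aws-creds
+                                #       key: ACCESS_KEY_ID
+                                #     secretAccessKey:
+                                #       name: aws-creds
+                                #       key: ACCESS_SECRET_KEY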
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                          sessionToken:
+                            description: The reference to the session token
+                            properties:
+                              key:
+                                description: The key to select
+                                type: string
+                              name:
+                                description: Name of the referent.
+                                type: string
+                            required:
+                            - key
+                            - name
+                            type: object
+                        type: object
+                      serverName:
+                        description: |-
+                          The server name on S3; the cluster name is used if this
+                          parameter is omitted
+                        type: string
+                      tags:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Tags is a list of key value pairs that will be passed to the
+                          Barman --tags option.
+                        type: object
+                      wal:
+                        description: |-
+                          The configuration for the backup of the WAL stream.
+                          When not defined, WAL files will be stored uncompressed and may be
+                          unencrypted in the object store, according to the bucket default policy.
+                        properties:
+                          archiveAdditionalCommandArgs:
+                            description: |-
+                              Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+                              command-line invocation. These arguments provide flexibility to customize
+                              the WAL archive process further, according to specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+                              behavior during execution.
+                            items:
+                              type: string
+                            type: array
+                          compression:
+                            description: |-
+                              Compress a WAL file before sending it to the object store. Available
+                              options are empty string (no compression, default), `gzip`, `bzip2`,
+                              `lz4`, `snappy`, `xz`, and `zstd`.
+                            enum:
+                            - bzip2
+                            - gzip
+                            - lz4
+                            - snappy
+                            - xz
+                            - zstd
+                            type: string
+                          encryption:
+                            description: |-
+                              Whether to force the encryption of files (if the bucket is
+                              not already configured for that).
+                              Allowed options are empty string (use the bucket policy, default),
+                              `AES256` and `aws:kms`
+                            enum:
+                            - AES256
+                            - aws:kms
+                            type: string
+                          maxParallel:
+                            description: |-
+                              Number of WAL files to be either archived in parallel (when the
+                              PostgreSQL instance is archiving to a backup object store) or
+                              restored in parallel (when a PostgreSQL standby is fetching WAL
+                              files from a recovery object store). If not specified, WAL files
+                              will be processed one at a time. It accepts a positive integer as a
+                              value - with 1 being the minimum accepted value.
+                            minimum: 1
+                            type: integer
+                          restoreAdditionalCommandArgs:
+                            description: |-
+                              Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                              command-line invocation. These arguments provide flexibility to customize
+                              the WAL restore process further, according to specific requirements or configurations.
+
+                              Example:
+                              In a scenario where specialized backup options are required, such as setting
+                              a specific timeout or defining custom behavior, users can use this field
+                              to specify additional command arguments.
+
+                              Note:
+                              It's essential to ensure that the provided arguments are valid and supported
+                              by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                              behavior during execution.
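+                            # Sketch (illustrative only): a `wal` section using zstd
+                            # compression and parallel archiving; the restore flag
+                            # shown is hypothetical:
+                            #
+                            #   wal:
+                            #     compression: zstd
+                            #     maxParallel: 8
+                            #     restoreAdditionalCommandArgs:
+                            #     - "--no-partial"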
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
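+                # Sketch (illustrative only): selecting PostgreSQL 16 from a
+                # hypothetical ImageCatalog named `postgresql`:
+                #
+                #   imageCatalogRef:
+                #     apiGroup: postgresql.cnpg.io
+                #     kind: ImageCatalog
+                #     name: postgresql
+                #     major: 16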
+                  type: string
+                required:
+                - name
+                type: object
+              type: array
+            inheritedMetadata:
+              description: Metadata that will be inherited by all objects related
+                to the Cluster
+              properties:
+                annotations:
+                  additionalProperties:
+                    type: string
+                  type: object
+                labels:
+                  additionalProperties:
+                    type: string
+                  type: object
+              type: object
+            instances:
+              default: 1
+              description: Number of instances required in the cluster
+              minimum: 1
+              type: integer
+            livenessProbeTimeout:
+              description: |-
+                LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+                to successfully respond to the liveness probe (default 30).
+                The Liveness probe failure threshold is derived from this value using the formula:
+                ceiling(livenessProbeTimeout / 10).
+              format: int32
+              type: integer
+            logLevel:
+              default: info
+              description: 'The instances'' log level, one of the following values:
+                error, warning, info (default), debug, trace'
+              enum:
+              - error
+              - warning
+              - info
+              - debug
+              - trace
+              type: string
+            managed:
+              description: The configuration that is used by the portions of PostgreSQL
+                that are managed by the instance manager
+              properties:
+                roles:
+                  description: Database roles managed by the `Cluster`
+                  items:
+                    description: |-
+                      RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+                      with the additional field Ensure specifying whether to ensure the presence or
+                      absence of the role in the database
+
+                      The defaults of the CREATE ROLE command are applied
+                      Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+                    properties:
+                      bypassrls:
+                        description: |-
+                          Whether a role bypasses every row-level security (RLS) policy.
+                          Default is `false`.
+                        type: boolean
+                      comment:
+                        description: Description of the role
+                        type: string
+                      connectionLimit:
+                        default: -1
+                        description: |-
+                          If the role can log in, this specifies how many concurrent
+                          connections the role can make. `-1` (the default) means no limit.
+                        format: int64
+                        type: integer
+                      createdb:
+                        description: |-
+                          When set to `true`, the role being defined will be allowed to create
+                          new databases. Specifying `false` (default) will deny a role the
+                          ability to create databases.
+                        type: boolean
+                      createrole:
+                        description: |-
+                          Whether the role will be permitted to create, alter, drop, comment
+                          on, change the security label for, and grant or revoke membership in
+                          other roles. Default is `false`.
+                        type: boolean
+                      disablePassword:
+                        description: DisablePassword indicates that a role's password
+                          should be set to NULL in Postgres
+                        type: boolean
+                      ensure:
+                        default: present
+                        description: Ensure the role is `present` or `absent` -
+                          defaults to "present"
+                        enum:
+                        - present
+                        - absent
+                        type: string
+                      inRoles:
+                        description: |-
+                          List of one or more existing roles to which this role will be
+                          immediately added as a new member. Default empty.
+                        items:
+                          type: string
+                        type: array
+                      inherit:
+                        default: true
+                        description: |-
+                          Whether a role "inherits" the privileges of roles it is a member of.
+                          Default is `true`.
+                        type: boolean
+                      login:
+                        description: |-
+                          Whether the role is allowed to log in. A role having the `login`
+                          attribute can be thought of as a user. Roles without this attribute
+                          are useful for managing database privileges, but are not users in
+                          the usual sense of the word. Default is `false`.
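+                        # Sketch (illustrative only): a managed role that can log in,
+                        # with its password in a hypothetical Secret:
+                        #
+                        #   managed:
+                        #     roles:
+                        #     - name: app_reader
+                        #       ensure: present
+                        #       login: true
+                        #       connectionLimit: 10
+                        #       passwordSecret:
+                        #         name: app-reader-password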
+                        type: boolean
+                      name:
+                        description: Name of the role
+                        type: string
+                      passwordSecret:
+                        description: |-
+                          Secret containing the password of the role (if present)
+                          If null, the password will be ignored unless DisablePassword is set
+                        properties:
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - name
+                        type: object
+                      replication:
+                        description: |-
+                          Whether a role is a replication role. A role must have this
+                          attribute (or be a superuser) in order to be able to connect to the
+                          server in replication mode (physical or logical replication) and in
+                          order to be able to create or drop replication slots. A role having
+                          the `replication` attribute is a very highly privileged role, and
+                          should only be used on roles actually used for replication. Default
+                          is `false`.
+                        type: boolean
+                      superuser:
+                        description: |-
+                          Whether the role is a `superuser` who can override all access
+                          restrictions within the database - superuser status is dangerous and
+                          should be used only when really needed. You must yourself be a
+                          superuser to create a new superuser. Default is `false`.
+                        type: boolean
+                      validUntil:
+                        description: |-
+                          Date and time after which the role's password is no longer valid.
+                          When omitted, the password will never expire (default).
+                        format: date-time
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  type: array
+                services:
+                  description: Services managed by the `Cluster`
+                  properties:
+                    additional:
+                      description: Additional is a list of additional managed services
+                        specified by the user.
+                      items:
+                        description: |-
+                          ManagedService represents a specific service managed by the cluster.
+                          It includes the type of service and its associated template specification.
+                        properties:
+                          selectorType:
+                            description: |-
+                              SelectorType specifies the type of selectors that the service will have.
+                              Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+                            enum:
+                            - rw
+                            - r
+                            - ro
+                            type: string
+                          serviceTemplate:
+                            description: ServiceTemplate is the template specification
+                              for the service.
+                            properties:
+                              metadata:
+                                description: |-
+                                  Standard object's metadata.
+                                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                                properties:
+                                  annotations:
+                                    additionalProperties:
+                                      type: string
+                                    description: |-
+                                      Annotations is an unstructured key value map stored with a resource that may be
+                                      set by external tools to store and retrieve arbitrary metadata. They are not
+                                      queryable and should be preserved when modifying objects.
+                                      More info: http://kubernetes.io/docs/user-guide/annotations
+                                    type: object
+                                  labels:
+                                    additionalProperties:
+                                      type: string
+                                    description: |-
+                                      Map of string keys and values that can be used to organize and categorize
+                                      (scope and select) objects. May match selectors of replication controllers
+                                      and services.
+                                      More info: http://kubernetes.io/docs/user-guide/labels
+                                    type: object
+                                  name:
+                                    description: The name of the resource. Only
+                                      supported for certain types
+                                    type: string
+                                type: object
+                              spec:
+                                description: |-
+                                  Specification of the desired behavior of the service.
+                                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                                properties:
+                                  allocateLoadBalancerNodePorts:
+                                    description: |-
+                                      allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                                      allocated for services with type LoadBalancer. Default is "true".
It
+                                      may be set to "false" if the cluster load-balancer does not rely on
+                                      NodePorts. If the caller requests specific NodePorts (by specifying a
+                                      value), those requests will be respected, regardless of this field.
+                                      This field may only be set for services with type LoadBalancer and will
+                                      be cleared if the type is changed to any other type.
+                                    type: boolean
+                                  clusterIP:
+                                    description: |-
+                                      clusterIP is the IP address of the service and is usually assigned
+                                      randomly. If an address is specified manually, is in-range (as per
+                                      system configuration), and is not in use, it will be allocated to the
+                                      service; otherwise creation of the service will fail. This field may not
+                                      be changed through updates unless the type field is also being changed
+                                      to ExternalName (which requires this field to be blank) or the type
+                                      field is being changed from ExternalName (in which case this field may
+                                      optionally be specified, as described above). Valid values are "None",
+                                      empty string (""), or a valid IP address. Setting this to "None" makes a
+                                      "headless service" (no virtual IP), which is useful when direct endpoint
+                                      connections are preferred and proxying is not required. Only applies to
+                                      types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                                      when creating a Service of type ExternalName, creation will fail. This
+                                      field will be wiped when updating a Service to type ExternalName.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    type: string
+                                  clusterIPs:
+                                    description: |-
+                                      ClusterIPs is a list of IP addresses assigned to this service, and are
+                                      usually assigned randomly. If an address is specified manually, is
+                                      in-range (as per system configuration), and is not in use, it will be
+                                      allocated to the service; otherwise creation of the service will fail.
+                                      This field may not be changed through updates unless the type field is
+                                      also being changed to ExternalName (which requires this field to be
+                                      empty) or the type field is being changed from ExternalName (in which
+                                      case this field may optionally be specified, as described above). Valid
+                                      values are "None", empty string (""), or a valid IP address. Setting
+                                      this to "None" makes a "headless service" (no virtual IP), which is
+                                      useful when direct endpoint connections are preferred and proxying is
+                                      not required. Only applies to types ClusterIP, NodePort, and
+                                      LoadBalancer. If this field is specified when creating a Service of type
+                                      ExternalName, creation will fail. This field will be wiped when updating
+                                      a Service to type ExternalName. If this field is not specified, it will
+                                      be initialized from the clusterIP field. If this field is specified,
+                                      clients must ensure that clusterIPs[0] and clusterIP have the same
+                                      value.
+
+                                      This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                                      These IPs must correspond to the values of the ipFamilies field. Both
+                                      clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  externalIPs:
+                                    description: |-
+                                      externalIPs is a list of IP addresses for which nodes in the cluster
+                                      will also accept traffic for this service. These IPs are not managed by
+                                      Kubernetes. The user is responsible for ensuring that traffic arrives
+                                      at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+                                      This field will be wiped when updating a Service to type ExternalName.
+
+                                      This field may hold a maximum of two entries (dual-stack families, in
+                                      either order). These families must correspond to the values of the
+                                      clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                      governed by the ipFamilyPolicy field.
+                                    items:
+                                      description: |-
+                                        IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                        to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  ipFamilyPolicy:
+                                    description: |-
+                                      IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                      this Service. If there is no value provided, then this field will be set
+                                      to SingleStack. Services can be "SingleStack" (a single IP family),
+                                      "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                      a single IP family on single-stack clusters), or "RequireDualStack"
+                                      (two IP families on dual-stack configured clusters, otherwise fail). The
+                                      ipFamilies and clusterIPs fields depend on the value of this field. This
+                                      field will be wiped when updating a service to type ExternalName.
+                                    type: string
+                                  loadBalancerClass:
+                                    description: |-
+                                      loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                      If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                      e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                      This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                      balancer implementation is used, today this is typically done through the cloud provider integration,
+                                      but should apply for any default implementation. If set, it is assumed that a load balancer
+                                      implementation is watching for Services with a matching class. Any default load balancer
+                                      implementation (e.g. cloud providers) should ignore Services that set this field.
+                                      This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                      Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                    type: string
+                                  loadBalancerIP:
+                                    description: |-
+                                      Only applies to Service Type: LoadBalancer.
+                                      This feature depends on whether the underlying cloud-provider supports specifying
+                                      the loadBalancerIP when a load balancer is created.
+                                      This field will be ignored if the cloud-provider does not support the feature.
+                                      Deprecated: This field was under-specified and its meaning varies across implementations.
+                                      Using it is non-portable and it may not support dual-stack.
+                                      Users are encouraged to use implementation-specific annotations when available.
+                                    type: string
+                                  loadBalancerSourceRanges:
+                                    description: |-
+                                      If specified and supported by the platform, traffic through the
+                                      cloud-provider load-balancer will be restricted to the specified client
+                                      IPs. This field will be ignored if the cloud-provider does not support
+                                      the feature.
+                                      More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  ports:
+                                    description: |-
+                                      The list of ports that are exposed by this service.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    items:
+                                      description: ServicePort contains information
+                                        on service's port.
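+                                      # Sketch (illustrative only): exposing the
+                                      # read-write endpoint through an additional
+                                      # LoadBalancer service; the service name is
+                                      # hypothetical:
+                                      #
+                                      #   additional:
+                                      #   - selectorType: rw
+                                      #     updateStrategy: patch
+                                      #     serviceTemplate:
+                                      #       metadata:
+                                      #         name: cluster-example-rw-lb
+                                      #       spec:
+                                      #         type: LoadBalancer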
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                                    type: string
+                                type: object
+                            type: object
+                          updateStrategy:
+                            default: patch
+                            description: UpdateStrategy describes how the service
+                              differences should be reconciled
+                            enum:
+                            - patch
+                            - replace
+                            type: string
+                        required:
+                        - selectorType
+                        - serviceTemplate
+                        type: object
+                      type: array
+                    disabledDefaultServices:
+                      description: |-
+                        DisabledDefaultServices is a list of service types that are disabled by default.
+                        Valid values are "r" and "ro", representing read and read-only services.
+                      items:
+                        description: |-
+                          ServiceSelectorType describes a valid value for generating the service selectors.
+                          It indicates which type of service the selector applies to, such as read-write, read, or read-only
+                        enum:
+                        - rw
+                        - r
+                        - ro
+                        type: string
+                      type: array
+                  type: object
+              type: object
+            maxSyncReplicas:
+              default: 0
+              description: |-
+                The target value for the synchronous replication quorum, which can be
+                decreased if the number of ready standbys is lower than this.
+                Undefined or 0 disables synchronous replication.
+              minimum: 0
+              type: integer
+            minSyncReplicas:
+              default: 0
+              description: |-
+                Minimum number of instances required in synchronous replication with the
+                primary. Undefined or 0 allows writes to complete when no standby is
+                available.
+              minimum: 0
+              type: integer
+            monitoring:
+              description: The configuration of the monitoring infrastructure of
+                this cluster
+              properties:
+                customQueriesConfigMap:
+                  description: The list of config maps containing the custom queries
+                  items:
+                    description: |-
+                      ConfigMapKeySelector contains enough information to let you locate
+                      the key of a ConfigMap
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                  type: array
+                customQueriesSecret:
+                  description: The list of secrets containing the custom queries
+                  items:
+                    description: |-
+                      SecretKeySelector contains enough information to let you locate
+                      the key of a Secret
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                  type: array
+                disableDefaultQueries:
+                  default: false
+                  description: |-
+                    Whether the default queries should be injected.
+                    Set it to `true` if you don't want to inject default queries into the cluster.
+                    Default: false.
+                  type: boolean
+                enablePodMonitor:
+                  default: false
+                  description: Enable or disable the `PodMonitor`
+                  type: boolean
+                podMonitorMetricRelabelings:
+                  description: The list of metric relabelings for the `PodMonitor`.
+                    Applied to samples before ingestion.
+                  items:
+                    description: |-
+                      RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+                      scraped samples and remote write samples.
+
+                      More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+                    properties:
+                      action:
+                        default: replace
+                        description: |-
+                          Action to perform based on the regex matching.
+
+                          `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+                          `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are
+                              almost certainly wrong.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            type: string
+                          optional:
+                            description: Specify whether the Secret or its key
+                              must be defined
+                            type: boolean
+                        required:
+                        - key
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      searchAttribute:
+                        description: Attribute to match against the username
+                        type: string
+                      searchFilter:
+                        description: Search filter to use when doing the search+bind
+                          authentication
+                        type: string
+                    type: object
+                  port:
+                    description: LDAP server port
+                    type: integer
+                  scheme:
+                    description: LDAP scheme to be used; possible options are
+                      `ldap` and `ldaps`
+                    enum:
+                    - ldap
+                    - ldaps
+                    type: string
+                  server:
+                    description: LDAP hostname or IP address
+                    type: string
+                  tls:
+                    description: Set to 'true' to enable LDAP over TLS. Defaults
+                      to 'false'
+                    type: boolean
+                type: object
+              parameters:
+                additionalProperties:
+                  type: string
+                description: PostgreSQL configuration options (postgresql.conf)
+                type: object
+              pg_hba:
+                description: |-
+                  PostgreSQL Host Based Authentication rules (lines to be appended
+                  to the pg_hba.conf file)
+                items:
+                  type: string
+                type: array
+              pg_ident:
+                description: |-
+                  PostgreSQL User Name Maps rules (lines to be appended
+                  to the pg_ident.conf file)
+                items:
+                  type: string
+                type: array
+              promotionTimeout:
+                description: |-
+                  Specifies the maximum number of seconds to wait when promoting an instance to primary.
+                  Default value is 40000000, greater than one year in seconds,
+                  big enough to simulate an infinite timeout
+                format: int32
+                type: integer
+              shared_preload_libraries:
+                description: List of shared preload libraries to add to the default
+                  ones
+                items:
+                  type: string
+                type: array
+              syncReplicaElectionConstraint:
+                description: |-
+                  Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+                  set up.
+                properties:
+                  enabled:
+                    description: This flag enables the constraints for sync replicas
+                    type: boolean
+                  nodeLabelsAntiAffinity:
+                    description: A list of node label values to extract and compare
+                      to evaluate if the pods reside in the same topology or not
+                    items:
+                      type: string
+                    type: array
+                required:
+                - enabled
+                type: object
+              synchronous:
+                description: Configuration of the PostgreSQL synchronous replication
+                  feature
+                properties:
+                  dataDurability:
+                    default: required
+                    description: |-
+                      If set to "required", data durability is strictly enforced. Write operations
+                      with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+                      block if there are insufficient healthy replicas, ensuring data persistence.
+                      If set to "preferred", data durability is maintained when healthy replicas
+                      are available, but the required number of instances will adjust dynamically
+                      if replicas become unavailable. This setting relaxes strict durability enforcement
+                      to allow for operational continuity. This setting is only applicable if both
+                      `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+                    enum:
+                    - required
+                    - preferred
+                    type: string
+                  maxStandbyNamesFromCluster:
+                    description: |-
+                      Specifies the maximum number of local cluster pods that can be
+                      automatically included in the `synchronous_standby_names` option in
+                      PostgreSQL.
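+                    # Sketch (illustrative only): quorum-based synchronous
+                    # replication waiting for one standby, relaxing durability
+                    # when replicas are unavailable:
+                    #
+                    #   synchronous:
+                    #     method: any
+                    #     number: 1
+                    #     dataDurability: preferred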
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
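+                    # Editorial note: a hedged sketch of the `probes` section defined
+                    # above; the `streaming` strategy and the `maximumLag` quantity come
+                    # from this schema, while the concrete values are hypothetical:
+                    #
+                    #   probes:
+                    #     readiness:
+                    #       type: streaming
+                    #       maximumLag: 32Mi
+                    #     startup:
+                    #       failureThreshold: 30
+                    #       periodSeconds: 10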
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
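+                              # Editorial note: an illustrative ClusterTrustBundle
+                              # projection (alpha; needs the ClusterTrustBundleProjection
+                              # feature gate); the signer name is a hypothetical placeholder:
+                              #
+                              #   - clusterTrustBundle:
+                              #       signerName: example.com/my-signer
+                              #       path: ca-bundle.pem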
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
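+                              # Editorial note: a minimal sketch of `projectedVolumeTemplate`
+                              # using the configMap source defined above; the ConfigMap name
+                              # and key are hypothetical. The projected files end up under
+                              # the `/projected` base folder:
+                              #
+                              #   projectedVolumeTemplate:
+                              #     sources:
+                              #     - configMap:
+                              #         name: app-settings
+                              #         items:
+                              #         - key: app.conf
+                              #           path: app.conf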
+                              type: string
+                            expirationSeconds:
+                              description: |-
+                                expirationSeconds is the requested duration of validity of the service
+                                account token. As the token approaches expiration, the kubelet volume
+                                plugin will proactively rotate the service account token. The kubelet will
+                                start trying to rotate the token if the token is older than 80 percent of
+                                its time to live or if the token is older than 24 hours. Defaults to 1 hour
+                                and must be at least 10 minutes.
+                              format: int64
+                              type: integer
+                            path:
+                              description: |-
+                                path is the path relative to the mount point of the file to project the
+                                token into.
+                              type: string
+                          required:
+                          - path
+                          type: object
+                      type: object
+                    type: array
+                    x-kubernetes-list-type: atomic
+                type: object
+              replica:
+                description: Replica cluster configuration
+                properties:
+                  enabled:
+                    description: |-
+                      If replica mode is enabled, this cluster will be a replica of an
+                      existing cluster. A replica cluster can be created from a recovery
+                      object store or via streaming through pg_basebackup.
+                      Refer to the Replica clusters page of the documentation for more information.
+                    type: boolean
+                  minApplyDelay:
+                    description: |-
+                      When replica mode is enabled, this parameter allows you to replay
+                      transactions only when the system time is at least the configured
+                      time past the commit time. This provides an opportunity to correct
+                      data loss errors. Note that when this parameter is set, a promotion
+                      token cannot be used.
+                    type: string
+                  primary:
+                    description: |-
+                      Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+                      topology specified in externalClusters
+                    type: string
+                  promotionToken:
+                    description: |-
+                      A demotion token generated by an external cluster used to
+                      check if the promotion requirements are met.
+                    type: string
+                  self:
+                    description: |-
+                      Self defines the name of this cluster. It is used to determine if this is a primary
+                      or a replica cluster, comparing it with `primary`
+                    type: string
+                  source:
+                    description: The name of the external cluster which is the replication
+                      origin
+                    minLength: 1
+                    type: string
+                required:
+                - source
+                type: object
+              replicationSlots:
+                default:
+                  highAvailability:
+                    enabled: true
+                description: Replication slots management configuration
+                properties:
+                  highAvailability:
+                    default:
+                      enabled: true
+                    description: Replication slots for high availability configuration
+                    properties:
+                      enabled:
+                        default: true
+                        description: |-
+                          If enabled (default), the operator will automatically manage replication slots
+                          on the primary instance and use them in streaming replication
+                          connections with all the standby instances that are part of the HA
+                          cluster. If disabled, the operator will not take advantage
+                          of replication slots in streaming connections with the replicas.
+                          This feature also controls replication slots in replica clusters,
+                          from the designated primary to its cascading replicas.
+                        type: boolean
+                      slotPrefix:
+                        default: _cnpg_
+                        description: |-
+                          Prefix for replication slots managed by the operator for HA.
+                          It may only contain lower case letters, numbers, and the underscore character.
+                          This can only be set at creation time. By default set to `_cnpg_`.
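+                        # Editorial note: an illustrative combination of the `replica`
+                        # and `replicationSlots` sections defined above; the source
+                        # cluster name is a hypothetical placeholder:
+                        #
+                        #   replica:
+                        #     enabled: true
+                        #     source: cluster-dc-a
+                        #   replicationSlots:
+                        #     highAvailability:
+                        #       enabled: true
+                        #       slotPrefix: _cnpg_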
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
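+                      # Editorial note: a hedged sketch of a typical constraint
+                      # (`topologyKey` and `whenUnsatisfiable` are defined just below;
+                      # the `cnpg.io/cluster` selector value is a hypothetical cluster name):
+                      #
+                      #   topologySpreadConstraints:
+                      #   - maxSkew: 1
+                      #     topologyKey: topology.kubernetes.io/zone
+                      #     whenUnsatisfiable: ScheduleAnyway
+                      #     labelSelector:
+                      #       matchLabels:
+                      #         cnpg.io/cluster: cluster-example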
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
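+                            # Editorial note: an illustrative storage layout combining the
+                            # `storage`, `walStorage`, and `tablespaces` sections described
+                            # in this spec; sizes and the storage class name are hypothetical:
+                            #
+                            #   storage:
+                            #     size: 10Gi
+                            #     storageClass: standard
+                            #   walStorage:
+                            #     size: 5Gi
+                            #   tablespaces:
+                            #   - name: analytics
+                            #     storage:
+                            #       size: 20Gi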
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ required:
+ - instances
+ type: object
+ x-kubernetes-validations:
+ - message: imageName and imageCatalogRef are mutually exclusive
+ rule: '!(has(self.imageCatalogRef) && has(self.imageName))'
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ availableArchitectures:
+ description: AvailableArchitectures reports the available architectures
+ of a cluster
+ items:
+ description: AvailableArchitecture represents the state of a cluster's
+ architecture
+ properties:
+ goArch:
+ description: GoArch is the name of the executable architecture
+ type: string
+ hash:
+ description: Hash is the hash of the executable
+ type: string
+ required:
+ - goArch
+ - hash
+ type: object
+ type: array
+ certificates:
+ description: The configuration for the CA and related certificates,
+ initialized with defaults.
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate.
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs; if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+ description: The commit hash of the operator that is currently running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the binary of the operator
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + majorVersionUpgradeFromImage: + description: |- + MajorVersionUpgradeFromImage contains the image that was + running before the major version upgrade started. + type: string + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name 
is the name of the plugin
+ type: string
+ operatorCapabilities:
+ description: |-
+ OperatorCapabilities are the list of capabilities of the
+ plugin regarding the reconciler
+ items:
+ type: string
+ type: array
+ restoreJobHookCapabilities:
+ description: |-
+ RestoreJobHookCapabilities are the list of capabilities of the
+ plugin regarding the RestoreJobHook management
+ items:
+ type: string
+ type: array
+ status:
+ description: Status contains the status reported by the plugin
+ through the SetStatusInCluster interface
+ type: string
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance; this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted.
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ applied:
+ description: Applied is true if the database was reconciled correctly
+ type: boolean
+ extensions:
+ description: Extensions is the status of the managed extensions
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ schemas:
+ description: Schemas is the status of the managed schemas
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.3
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly.
If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local.
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+                implementation is watching for Services with a matching class. Any default load balancer
+                implementation (e.g. cloud providers) should ignore Services that set this field.
+                This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+              type: string
+            loadBalancerIP:
+              description: |-
+                Only applies to Service Type: LoadBalancer.
+                This feature depends on whether the underlying cloud-provider supports specifying
+                the loadBalancerIP when a load balancer is created.
+                This field will be ignored if the cloud-provider does not support the feature.
+                Deprecated: This field was under-specified and its meaning varies across implementations.
+                Using it is non-portable and it may not support dual-stack.
+                Users are encouraged to use implementation-specific annotations when available.
+              type: string
+            loadBalancerSourceRanges:
+              description: |-
+                If specified and supported by the platform, traffic through the cloud-provider
+                load-balancer will be restricted to the specified client IPs. This field will be
+                ignored if the cloud-provider does not support the feature.
+                More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+              items:
+                type: string
+              type: array
+              x-kubernetes-list-type: atomic
+            ports:
+              description: |-
+                The list of ports that are exposed by this service.
+                More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+              items:
+                description: ServicePort contains information on service's
+                  port.
+                properties:
+                  appProtocol:
+                    description: |-
+                      The application protocol for this port.
+                      This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                      This field follows standard Kubernetes label syntax.
+                      Valid values are either:
+
+                      * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                      RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                      * Kubernetes-defined prefixed names:
+                        * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                        * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                        * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                      * Other protocols should use implementation-defined prefixed names such as
+                      mycompany.com/my-custom-protocol.
+                    type: string
+                  name:
+                    description: |-
+                      The name of this port within the service. This must be a DNS_LABEL.
+                      All ports within a ServiceSpec must have unique names. When considering
+                      the endpoints for a Service, this must match the 'name' field in the
+                      EndpointPort.
+                      Optional if only one ServicePort is defined on this service.
+                    type: string
+                  nodePort:
+                    description: |-
+                      The port on each node on which this service is exposed when type is
+                      NodePort or LoadBalancer. Usually assigned by the system. If a value is
+                      specified, in-range, and not in use it will be used, otherwise the
+                      operation will fail. If not specified, a port will be allocated if this
+                      Service requires one. If this field is specified when creating a
+                      Service which does not need it, creation will fail. This field will be
+                      wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource
+                              request, requiredDuringScheduling affinity expressions, etc.),
+                              compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node matches the corresponding matchExpressions; the
+                              node(s) with the highest sum are the most preferred.
+                            items:
+                              description: |-
+                                An empty preferred scheduling term matches all objects with implicit weight 0
+                                (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                              properties:
+                                preference:
+                                  description: A node selector term, associated
+                                    with the corresponding weight.
+                                  properties:
+                                    matchExpressions:
+                                      description: A list of node selector requirements
+                                        by node's labels.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the
+                                              selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchFields:
+                                      description: A list of node selector requirements
+                                        by node's fields.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the
+                                              selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                weight:
+                                  description: Weight associated with matching
+                                    the corresponding nodeSelectorTerm, in the
+                                    range 1-100.
+                                  format: int32
+                                  type: integer
+                              required:
+                              - preference
+                              - weight
+                              type: object
+                            type: array
+                            x-kubernetes-list-type: atomic
+                          requiredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              If the affinity requirements specified by this field are not met at
+                              scheduling time, the pod will not be scheduled onto the node.
+                              If the affinity requirements specified by this field cease to be met
+                              at some point during pod execution (e.g. due to an update), the system
+                              may or may not try to eventually evict the pod from its node.
+                            properties:
+                              nodeSelectorTerms:
+                                description: Required.
A list of node selector
+                                  terms. The terms are ORed.
+                                items:
+                                  description: |-
+                                    A null or empty node selector term matches no objects. The requirements of
+                                    them are ANDed.
+                                    The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                                  properties:
+                                    matchExpressions:
+                                      description: A list of node selector requirements
+                                        by node's labels.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the
+                                              selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchFields:
+                                      description: A list of node selector requirements
+                                        by node's fields.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the
+                                              selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            required:
+                            - nodeSelectorTerms
+                            type: object
+                            x-kubernetes-map-type: atomic
+                        type: object
+                      podAffinity:
+                        description: Describes pod affinity scheduling rules (e.g.
+                          co-locate this pod in the same node, zone, etc. as some
+                          other pod(s)).
+                        properties:
+                          preferredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              The scheduler will prefer to schedule pods to nodes that satisfy
+                              the affinity expressions specified by this field, but it may choose
+                              a node that violates one or more of the expressions. The node that is
+                              most preferred is the one with the greatest sum of weights, i.e.
+                              for each node that meets all of the scheduling requirements (resource
+                              request, requiredDuringScheduling affinity expressions, etc.),
+                              compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+                              node(s) with the highest sum are the most preferred.
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource
+                              request, requiredDuringScheduling anti-affinity expressions, etc.),
+                              compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+                              node(s) with the highest sum are the most preferred.
+                            items:
+                              description: The weights of all of the matched WeightedPodAffinityTerm
+                                fields are added per-node to find the most preferred
+                                node(s)
+                              properties:
+                                podAffinityTerm:
+                                  description: Required. A pod affinity term,
+                                    associated with the corresponding weight.
+                                  properties:
+                                    labelSelector:
+                                      description: |-
+                                        A label query over a set of resources, in this case pods.
+                                        If it's null, this PodAffinityTerm matches with no Pods.
+                                      properties:
+                                        matchExpressions:
+                                          description: matchExpressions is a list
+                                            of label selector requirements. The
+                                            requirements are ANDed.
+                                          items:
+                                            description: |-
+                                              A label selector requirement is a selector that contains values, a key, and an operator that
+                                              relates the key and values.
+                                            properties:
+                                              key:
+                                                description: key is the label
+                                                  key that the selector applies
+                                                  to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  operator represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  values is an array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. This array is replaced during a strategic
+                                                  merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                        matchLabels:
+                                          additionalProperties:
+                                            type: string
+                                          description: |-
+                                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                            map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                            operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                          type: object
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    matchLabelKeys:
+                                      description: |-
+                                        MatchLabelKeys is a set of pod label keys to select which pods will
+                                        be taken into consideration. The keys are used to lookup values from the
+                                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                        to select the group of existing pods which pods will be taken into consideration
+                                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                        pod labels will be ignored. The default value is empty.
+                                        The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                        Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                        This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    mismatchLabelKeys:
+                                      description: |-
+                                        MismatchLabelKeys is a set of pod label keys to select which pods will
+                                        be taken into consideration. The keys are used to lookup values from the
+                                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                        to select the group of existing pods which pods will be taken into consideration
+                                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                        pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". 
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                              first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                              at which time stdin is closed and remains closed until the container is restarted. If this
+                              flag is false, a container process that reads from stdin will never receive an EOF.
+                              Default is false.
+                            type: boolean
+                          targetContainerName:
+                            description: |-
+                              If set, the name of the container from PodSpec that this ephemeral container targets.
+                              The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+                              If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+                              The container runtime must implement support for this feature. If the runtime does not
+                              support namespace targeting then the result of setting this field is undefined.
+                            type: string
+                          terminationMessagePath:
+                            description: |-
+                              Optional: Path at which the file to which the container's termination message
+                              will be written is mounted into the container's filesystem.
+                              Message written is intended to be brief final status, such as an assertion failure message.
+                              Will be truncated by the node if greater than 4096 bytes. The total message length across
+                              all containers will be limited to 12kb.
+                              Defaults to /dev/termination-log.
+                              Cannot be updated.
+                            type: string
+                          terminationMessagePolicy:
+                            description: |-
+                              Indicate how the termination message should be populated. File will use the contents of
+                              terminationMessagePath to populate the container status message on both success and failure.
+                              FallbackToLogsOnError will use the last chunk of container log output if the termination
+                              message file is empty and the container exited with an error.
+                              The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                              Defaults to File.
+                              Cannot be updated.
+                            type: string
+                          tty:
+                            description: |-
+                              Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                              Default is false.
+                            type: boolean
+                          volumeDevices:
+                            description: volumeDevices is the list of block devices
+                              to be used by the container.
+                            items:
+                              description: volumeDevice describes a mapping of a
+                                raw block device within a container.
+                              properties:
+                                devicePath:
+                                  description: devicePath is the path inside of
+                                    the container that the device will be mapped
+                                    to.
+                                  type: string
+                                name:
+                                  description: name must match the name of a persistentVolumeClaim
+                                    in the pod
+                                  type: string
+                              required:
+                              - devicePath
+                              - name
+                              type: object
+                            type: array
+                            x-kubernetes-list-map-keys:
+                            - devicePath
+                            x-kubernetes-list-type: map
+                          volumeMounts:
+                            description: |-
+                              Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+                              Cannot be updated.
+                            items:
+                              description: VolumeMount describes a mounting of a
+                                Volume within a container.
+                              properties:
+                                mountPath:
+                                  description: |-
+                                    Path within the container at which the volume should be mounted. Must
+                                    not contain ':'.
+                                  type: string
+                                mountPropagation:
+                                  description: |-
+                                    mountPropagation determines how mounts are propagated from the host
+                                    to container and the other way around.
+                                    When not set, MountPropagationNone is used.
+                                    This field is beta in 1.10.
+                                    When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+                                    (which defaults to None).
+                                  type: string
+                                name:
+                                  description: This must match the Name of a Volume.
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+                mitigating container breakout vulnerabilities while still allowing users to run their
+                containers as root without actually having root privileges on the host.
+                This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+              type: boolean
+            hostname:
+              description: |-
+                Specifies the hostname of the Pod.
+                If not specified, the pod's hostname will be set to a system-defined value.
+              type: string
+            imagePullSecrets:
+              description: |-
+                ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+                If specified, these secrets will be passed to individual puller implementations for them to use.
+                More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+              items:
+                description: |-
+                  LocalObjectReference contains enough information to let you locate the
+                  referenced object inside the same namespace.
+                properties:
+                  name:
+                    default: ""
+                    description: |-
+                      Name of the referent.
+                      This field is effectively required, but due to backwards compatibility is
+                      allowed to be empty. Instances of this type with an empty value here are
+                      almost certainly wrong.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+              type: array
+              x-kubernetes-list-map-keys:
+              - name
+              x-kubernetes-list-type: map
+            initContainers:
+              description: |-
+                List of initialization containers belonging to the pod.
+                Init containers are executed in order prior to containers being started. If any
+                init container fails, the pod is considered to have failed and is handled according
+                to its restartPolicy. The name for an init container or normal container must be
+                unique among all containers.
+                Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+                The resourceRequirements of an init container are taken into account during scheduling
+                by finding the highest request/limit for each resource type, and then using the max
+                of that value or the sum of the normal containers. Limits are applied to init containers
+                in a similar fashion.
+                Init containers cannot currently be added or removed.
+                Cannot be updated.
+                More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+              items:
+                description: A single application container that you want
+                  to run within a pod.
+                properties:
+                  args:
+                    description: |-
+                      Arguments to the entrypoint.
+                      The container image's CMD is used if this is not provided.
+                      Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                      cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                      produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                      of whether the variable exists or not. Cannot be updated.
+                      More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  command:
+                    description: |-
+                      Entrypoint array. Not executed within a shell.
+                      The container image's ENTRYPOINT is used if this is not provided.
+                      Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable
+                      cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                      produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                      of whether the variable exists or not. Cannot be updated.
+                      More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  env:
+                    description: |-
+                      List of environment variables to set in the container.
+                      Cannot be updated.
+                    items:
+                      description: EnvVar represents an environment variable
+                        present in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                            Must be a C_IDENTIFIER.
+                          type: string
+                        value:
+                          description: |-
+                            Variable references $(VAR_NAME) are expanded
+                            using the previously defined environment variables in the container and
+                            any service environment variables. If a variable cannot be resolved,
+                            the reference in the input string will be unchanged. Double $$ are reduced
+                            to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                            "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                            Escaped references will never be expanded, regardless of whether the variable
+                            exists or not.
+                            Defaults to "".
+                          type: string
+                        valueFrom:
+                          description: Source for the environment variable's
+                            value. Cannot be used if value is not empty.
+                          properties:
+                            configMapKeyRef:
+                              description: Selects a key of a ConfigMap.
+                              properties:
+                                key:
+                                  description: The key to select.
+                                  type: string
+                                name:
+                                  default: ""
+                                  description: |-
+                                    Name of the referent.
+                                    This field is effectively required, but due to backwards compatibility is
+                                    allowed to be empty. Instances of this type with an empty value here are
+                                    almost certainly wrong.
+                                    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                  type: string
+                                optional:
+                                  description: Specify whether the ConfigMap
+                                    or its key must be defined
+                                  type: boolean
+                              required:
+                              - key
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            fieldRef:
+                              description: |-
+                                Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                              properties:
+                                apiVersion:
+                                  description: Version of the schema the
+                                    FieldPath is written in terms of, defaults
+                                    to "v1".
+                                  type: string
+                                fieldPath:
+                                  description: Path of the field to select
+                                    in the specified API version.
+                                  type: string
+                              required:
+                              - fieldPath
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            resourceFieldRef:
+                              description: |-
+                                Selects a resource of the container: only resources limits and requests
+                                (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+                            If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                            value overrides the value provided by the pod spec.
+                            Value must be non-negative integer. The value zero indicates stop immediately via
+                            the kill signal (no opportunity to shut down).
+                            This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                            Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                          format: int64
+                          type: integer
+                        timeoutSeconds:
+                          description: |-
+                            Number of seconds after which the probe times out.
+                            Defaults to 1 second. Minimum value is 1.
+                            More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                          format: int32
+                          type: integer
+                      type: object
+                    stdin:
+                      description: |-
+                        Whether this container should allocate a buffer for stdin in the container runtime. If this
+                        is not set, reads from stdin in the container will always result in EOF.
+                        Default is false.
+                      type: boolean
+                    stdinOnce:
+                      description: |-
+                        Whether the container runtime should close the stdin channel after it has been opened by
+                        a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                        sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                        first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                        at which time stdin is closed and remains closed until the container is restarted. If this
+                        flag is false, a container process that reads from stdin will never receive an EOF.
+                        Default is false.
+                      type: boolean
+                    terminationMessagePath:
+                      description: |-
+                        Optional: Path at which the file to which the container's termination message
+                        will be written is mounted into the container's filesystem.
+                        Message written is intended to be brief final status, such as an assertion failure message.
+                        Will be truncated by the node if greater than 4096 bytes. The total message length across
+                        all containers will be limited to 12kb.
+                        Defaults to /dev/termination-log.
+                        Cannot be updated.
+                      type: string
+                    terminationMessagePolicy:
+                      description: |-
+                        Indicate how the termination message should be populated. File will use the contents of
+                        terminationMessagePath to populate the container status message on both success and failure.
+                        FallbackToLogsOnError will use the last chunk of container log output if the termination
+                        message file is empty and the container exited with an error.
+                        The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                        Defaults to File.
+                        Cannot be updated.
+                      type: string
+                    tty:
+                      description: |-
+                        Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                        Default is false.
+                      type: boolean
+                    volumeDevices:
+                      description: volumeDevices is the list of block devices
+                        to be used by the container.
+                      items:
+                        description: volumeDevice describes a mapping of a
+                          raw block device within a container.
+                        properties:
+                          devicePath:
+                            description: devicePath is the path inside of
+                              the container that the device will be mapped
+                              to.
+                            type: string
+                          name:
+                            description: name must match the name of a persistentVolumeClaim
+                              in the pod
+                            type: string
+                        required:
+                        - devicePath
+                        - name
+                        type: object
+                      type: array
+                      x-kubernetes-list-map-keys:
+                      - devicePath
+                      x-kubernetes-list-type: map
+                    volumeMounts:
+                      description: |-
+                        Pod volumes to mount into the container's filesystem.
+                        Cannot be updated.
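+                    # Illustration (not part of the generated schema): a volumeMounts
+                    # entry as a user might write it against the item schema that
+                    # follows. The name and path are hypothetical, and the name must
+                    # match a pod volume.
+                    #   volumeMounts:
+                    #     - name: data
+                    #       mountPath: /var/lib/data
+                    #       readOnly: false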
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+                  More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                type: object
+                x-kubernetes-map-type: atomic
+              os:
+                description: |-
+                  Specifies the OS of the containers in the pod.
+                  Some pod and container fields are restricted if this is set.
+
+                  If the OS field is set to linux, the following fields must be unset:
+                  - securityContext.windowsOptions
+
+                  If the OS field is set to windows, the following fields must be unset:
+                  - spec.hostPID
+                  - spec.hostIPC
+                  - spec.hostUsers
+                  - spec.securityContext.appArmorProfile
+                  - spec.securityContext.seLinuxOptions
+                  - spec.securityContext.seccompProfile
+                  - spec.securityContext.fsGroup
+                  - spec.securityContext.fsGroupChangePolicy
+                  - spec.securityContext.sysctls
+                  - spec.shareProcessNamespace
+                  - spec.securityContext.runAsUser
+                  - spec.securityContext.runAsGroup
+                  - spec.securityContext.supplementalGroups
+                  - spec.securityContext.supplementalGroupsPolicy
+                  - spec.containers[*].securityContext.appArmorProfile
+                  - spec.containers[*].securityContext.seLinuxOptions
+                  - spec.containers[*].securityContext.seccompProfile
+                  - spec.containers[*].securityContext.capabilities
+                  - spec.containers[*].securityContext.readOnlyRootFilesystem
+                  - spec.containers[*].securityContext.privileged
+                  - spec.containers[*].securityContext.allowPrivilegeEscalation
+                  - spec.containers[*].securityContext.procMount
+                  - spec.containers[*].securityContext.runAsUser
+                  - spec.containers[*].securityContext.runAsGroup
+                properties:
+                  name:
+                    description: |-
+                      Name is the name of the operating system. The currently supported values are linux and windows.
+                      Additional values may be defined in the future and can be one of:
+                      https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+                      Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+                    type: string
+                required:
+                - name
+                type: object
+              overhead:
+                additionalProperties:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
+                description: |-
+                  Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+                  This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+                  the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+                  The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+                  set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+                  defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+                  More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+                type: object
+              preemptionPolicy:
+                description: |-
+                  PreemptionPolicy is the Policy for preempting pods with lower priority.
+                  One of Never, PreemptLowerPriority.
+                  Defaults to PreemptLowerPriority if unset.
+                type: string
+              priority:
+                description: |-
+                  The priority value. Various system components use this field to find the
+                  priority of the pod. When Priority Admission Controller is enabled, it
+                  prevents users from setting this field. The admission controller populates
+                  this field from PriorityClassName.
+                  The higher the value, the higher the priority.
+                format: int32
+                type: integer
+              priorityClassName:
+                description: |-
+                  If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+                      Note that this field cannot be set when spec.os.name is windows.
+                    format: int64
+                    type: integer
+                  seLinuxChangePolicy:
+                    description: |-
+                      seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+                      It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+                      Valid values are "MountOption" and "Recursive".
+
+                      "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+                      This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+
+                      "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+                      This requires all Pods that share the same volume to use the same SELinux label.
+                      It is not possible to share the same volume among privileged and unprivileged Pods.
+                      Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+                      whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+                      CSIDriver instance. Other volumes are always re-labelled recursively.
+                      "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+
+                      If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+                      If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+                      and "Recursive" for all other volumes.
+
+                      This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+
+                      All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+                      Note that this field cannot be set when spec.os.name is windows.
+                    type: string
+                  seLinuxOptions:
+                    description: |-
+                      The SELinux context to be applied to all containers.
+                      If unspecified, the container runtime will allocate a random SELinux context for each
+                      container. May also be set in SecurityContext. If set in
+                      both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+                      takes precedence for that container.
+                      Note that this field cannot be set when spec.os.name is windows.
+                    properties:
+                      level:
+                        description: Level is SELinux level label that
+                          applies to the container.
+                        type: string
+                      role:
+                        description: Role is a SELinux role label that
+                          applies to the container.
+                        type: string
+                      type:
+                        description: Type is a SELinux type label that
+                          applies to the container.
+                        type: string
+                      user:
+                        description: User is a SELinux user label that
+                          applies to the container.
+                        type: string
+                    type: object
+                  seccompProfile:
+                    description: |-
+                      The seccomp options to use by the containers in this pod.
+                      Note that this field cannot be set when spec.os.name is windows.
+                    properties:
+                      localhostProfile:
+                        description: |-
+                          localhostProfile indicates a profile defined in a file on the node should be used.
+                          The profile must be preconfigured on the node to work.
+                          Must be a descending path, relative to the kubelet's configured seccomp profile location.
+                          Must be set if type is "Localhost". Must NOT be set for any other type.
+                        type: string
+                      type:
+                        description: |-
+                          type indicates which kind of seccomp profile will be applied.
+                          Valid options are:
+
+                          Localhost - a profile defined in a file on the node should be used.
+                          RuntimeDefault - the container runtime default profile should be used.
+                          Unconfined - no profile should be applied.
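+              # Illustration (not part of the generated schema): a pod-level
+              # securityContext combining the fields above; the numeric IDs are
+              # arbitrary examples.
+              #   securityContext:
+              #     runAsNonRoot: true
+              #     runAsUser: 1000
+              #     fsGroup: 1000
+              #     fsGroupChangePolicy: OnRootMismatch
+              #     seccompProfile:
+              #       type: RuntimeDefault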
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
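+              # Illustration (not part of the generated schema): a namespaced sysctl
+              # set through the sysctls field above; the parameter name and value are
+              # hypothetical.
+              #   securityContext:
+              #     sysctls:
+              #       - name: net.ipv4.tcp_keepalive_time
+              #         value: "600"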
+                type: string
+              serviceAccountName:
+                description: |-
+                  ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+                  More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+                type: string
+              setHostnameAsFQDN:
+                description: |-
+                  If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+                  In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+                  In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+                  If a pod does not have FQDN, this has no effect.
+                  Default to false.
+                type: boolean
+              shareProcessNamespace:
+                description: |-
+                  Share a single process namespace between all of the containers in a pod.
+                  When this is set containers will be able to view and signal processes from other containers
+                  in the same pod, and the first process in each container will not be assigned PID 1.
+                  HostPID and ShareProcessNamespace cannot both be set.
+                  Optional: Default to false.
+                type: boolean
+              subdomain:
+                description: |-
+                  If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+                  If not specified, the pod will not have a domainname at all.
+                type: string
+              terminationGracePeriodSeconds:
+                description: |-
+                  Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+                  Value must be non-negative integer. The value zero indicates stop immediately via
+                  the kill signal (no opportunity to shut down).
+                  If this value is nil, the default grace period will be used instead.
+                  The grace period is the duration in seconds after the processes running in the pod are sent
+                  a termination signal and the time when the processes are forcibly halted with a kill signal.
+                  Set this value longer than the expected cleanup time for your process.
+                  Defaults to 30 seconds.
+                format: int64
+                type: integer
+              tolerations:
+                description: If specified, the pod's tolerations.
+                items:
+                  description: |-
+                    The pod this Toleration is attached to tolerates any taint that matches
+                    the triple <key,value,effect> using the matching operator <operator>.
+                  properties:
+                    effect:
+                      description: |-
+                        Effect indicates the taint effect to match. Empty means match all taint effects.
+                        When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                      type: string
+                    key:
+                      description: |-
+                        Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                        If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                      type: string
+                    operator:
+                      description: |-
+                        Operator represents a key's relationship to the value.
+                        Valid operators are Exists and Equal. Defaults to Equal.
+                        Exists is equivalent to wildcard for value, so that a pod can
+                        tolerate all taints of a particular category.
+                      type: string
+                    tolerationSeconds:
+                      description: |-
+                        TolerationSeconds represents the period of time the toleration (which must be
+                        of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                        it is not set, which means tolerate the taint forever (do not evict). Zero and
+                        negative values will be treated as 0 (evict immediately) by the system.
+                      format: int64
+                      type: integer
+                    value:
+                      description: |-
+                        Value is the taint value the toleration matches to.
+                        If the operator is Exists, the value should be empty, otherwise just a regular string.
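+              # Illustration (not part of the generated schema): a toleration matching
+              # a hypothetical "dedicated=postgres:NoSchedule" node taint, using the
+              # fields above.
+              #   tolerations:
+              #     - key: dedicated
+              #       operator: Equal
+              #       value: postgres
+              #       effect: NoSchedule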
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+                      | zone1 | zone2 | zone3 |
+                      | P P | P P | P |
+                      - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+                      scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+                      violate MaxSkew(1).
+                      - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+                      When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+                      to topologies that satisfy it.
+                      It's a required field. Default value is 1 and 0 is not allowed.
+                    format: int32
+                    type: integer
+                  minDomains:
+                    description: |-
+                      MinDomains indicates a minimum number of eligible domains.
+                      When the number of eligible domains with matching topology keys is less than minDomains,
+                      Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+                      And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+                      this value has no effect on scheduling.
+                      As a result, when the number of eligible domains is less than minDomains,
+                      scheduler won't schedule more than maxSkew Pods to those domains.
+                      If value is nil, the constraint behaves as if MinDomains is equal to 1.
+                      Valid values are integers greater than 0.
+                      When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+                      For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+                      labelSelector spread as 2/2/2:
+                      | zone1 | zone2 | zone3 |
+                      | P P | P P | P P |
+                      The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+                      In this situation, new pod with the same labelSelector cannot be scheduled,
+                      because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+                      it will violate MaxSkew.
+                    format: int32
+                    type: integer
+                  nodeAffinityPolicy:
+                    description: |-
+                      NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+                      when calculating pod topology spread skew. Options are:
+                      - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+                      - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+                      If this value is nil, the behavior is equivalent to the Honor policy.
+                      This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+                    type: string
+                  nodeTaintsPolicy:
+                    description: |-
+                      NodeTaintsPolicy indicates how we will treat node taints when calculating
+                      pod topology spread skew. Options are:
+                      - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+                      has a toleration, are included.
+                      - Ignore: node taints are ignored. All nodes are included.
+
+                      If this value is nil, the behavior is equivalent to the Ignore policy.
+                      This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+                    type: string
+                  topologyKey:
+                    description: |-
+                      TopologyKey is the key of node labels. Nodes that have a label with this key
+                      and identical values are considered to be in the same topology.
+                      We consider each <key, value> as a "bucket", and try to put balanced number
+                      of pods into each bucket.
+                      We define a domain as a particular instance of a topology.
+                      Also, we define an eligible domain as a domain whose nodes meet the requirements of
+                      nodeAffinityPolicy and nodeTaintsPolicy.
+                      e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+                      And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+                      It's a required field.
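+              # Illustration (not part of the generated schema): a zone-spread
+              # constraint built from the fields above; the label selector is a
+              # hypothetical example.
+              #   topologySpreadConstraints:
+              #     - maxSkew: 1
+              #       topologyKey: topology.kubernetes.io/zone
+              #       whenUnsatisfiable: DoNotSchedule
+              #       labelSelector:
+              #         matchLabels:
+              #           app: example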
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
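+                    # Illustration (not part of the generated schema): a volumes entry
+                    # using the deprecated awsElasticBlockStore fields above (operations
+                    # are redirected to the ebs.csi.aws.com CSI driver); the volumeID is
+                    # hypothetical.
+                    #   volumes:
+                    #     - name: data
+                    #       awsElasticBlockStore:
+                    #         volumeID: vol-0123456789abcdef0
+                    #         fsType: ext4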
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+                              YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                              If not specified, the volume defaultMode will be used.
+                              This might be in conflict with other options that affect the file
+                              mode, like fsGroup, and the result can be other mode bits set.
+                              format: int32
+                              type: integer
+                            path:
+                              description: |-
+                                path is the relative path of the file to map the key to.
+                                May not be an absolute path.
+                                May not contain the path element '..'.
+                                May not start with the string '..'.
+                              type: string
+                            required:
+                            - key
+                            - path
+                            type: object
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        name:
+                          default: ""
+                          description: |-
+                            Name of the referent.
+                            This field is effectively required, but due to backwards compatibility is
+                            allowed to be empty. Instances of this type with an empty value here are
+                            almost certainly wrong.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                          type: string
+                        optional:
+                          description: optional specify whether the ConfigMap
+                            or its keys must be defined
+                          type: boolean
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    csi:
+                      description: csi (Container Storage Interface) represents
+                        ephemeral storage that is handled by certain external
+                        CSI drivers.
+                      properties:
+                        driver:
+                          description: |-
+                            driver is the name of the CSI driver that handles this volume.
+                            Consult with your admin for the correct name as registered in the cluster.
+                          type: string
+                        fsType:
+                          description: |-
+                            fsType to mount. Ex. "ext4", "xfs", "ntfs".
+                            If not provided, the empty value is passed to the associated CSI driver
+                            which will determine the default filesystem to apply.
+                          type: string
+                        nodePublishSecretRef:
+                          description: |-
+                            nodePublishSecretRef is a reference to the secret object containing
+                            sensitive information to pass to the CSI driver to complete the CSI
+                            NodePublishVolume and NodeUnpublishVolume calls.
+                            This field is optional, and may be empty if no secret is required. If the
+                            secret object contains more than one secret, all secret references are passed.
+                          properties:
+                            name:
+                              default: ""
+                              description: |-
+                                Name of the referent.
+                                This field is effectively required, but due to backwards compatibility is
+                                allowed to be empty. Instances of this type with an empty value here are
+                                almost certainly wrong.
+                                More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                              type: string
+                          type: object
+                          x-kubernetes-map-type: atomic
+                        readOnly:
+                          description: |-
+                            readOnly specifies a read-only configuration for the volume.
+                            Defaults to false (read/write).
+                          type: boolean
+                        volumeAttributes:
+                          additionalProperties:
+                            type: string
+                          description: |-
+                            volumeAttributes stores driver-specific properties that are passed to the CSI
+                            driver. Consult your driver's documentation for supported values.
+                          type: object
+                      required:
+                      - driver
+                      type: object
+                    downwardAPI:
+                      description: downwardAPI represents downward API about
+                        the pod that should populate this volume
+                      properties:
+                        defaultMode:
+                          description: |-
+                            Optional: mode bits used to set permissions on created files by default.
+                            Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                            YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                            Defaults to 0644.
+                            Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+                      More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
+                  type: object
+                ephemeral:
+                  description: |-
+                    ephemeral represents a volume that is handled by a cluster storage driver.
+                    The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+                    and deleted when the pod is removed.
+
+                    Use this if:
+                    a) the volume is only needed while the pod runs,
+                    b) features of normal volumes like restoring from snapshot or capacity
+                    tracking are needed,
+                    c) the storage driver is specified through a storage class, and
+                    d) the storage driver supports dynamic volume provisioning through
+                    a PersistentVolumeClaim (see EphemeralVolumeSource for more
+                    information on the connection between this volume type
+                    and PersistentVolumeClaim).
+
+                    Use PersistentVolumeClaim or one of the vendor-specific
+                    APIs for volumes that persist for longer than the lifecycle
+                    of an individual pod.
+
+                    Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+                    be used that way - see the documentation of the driver for
+                    more information.
+
+                    A pod can use both types of ephemeral volumes and
+                    persistent volumes at the same time.
+                  properties:
+                    volumeClaimTemplate:
+                      description: |-
+                        Will be used to create a stand-alone PVC to provision the volume.
+                        The pod in which this EphemeralVolumeSource is embedded will be the
+                        owner of the PVC, i.e. the PVC will be deleted together with the
+                        pod. The name of the PVC will be `<pod name>-<volume name>` where
+                        `<volume name>` is the name from the `PodSpec.Volumes` array
+                        entry. Pod validation will reject the pod if the concatenated name
+                        is not valid for a PVC (for example, too long).
+
+                        An existing PVC with that name that is not owned by the pod
+                        will *not* be used for the pod to avoid using an unrelated
+                        volume by mistake. Starting the pod is then blocked until
+                        the unrelated PVC is removed. If such a pre-created PVC is
+                        meant to be used by the pod, the PVC has to be updated with an
+                        owner reference to the pod once the pod exists. Normally
+                        this should not be necessary, but it may be useful when
+                        manually reconstructing a broken cluster.
+
+                        This field is read-only and no changes will be made by Kubernetes
+                        to the PVC after it has been created.
+
+                        Required, must not be nil.
+                      properties:
+                        metadata:
+                          description: |-
+                            May contain labels and annotations that will be copied into the PVC
+                            when creating it. No other fields are allowed and will be rejected during
+                            validation.
+                          type: object
+                        spec:
+                          description: |-
+                            The specification for the PersistentVolumeClaim. The entire content is
+                            copied unchanged into the PVC that gets created from this
+                            template. The same fields as in a PersistentVolumeClaim
+                            are also valid here.
+                          properties:
+                            accessModes:
+                              description: |-
+                                accessModes contains the desired access modes the volume should have.
+                                More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            dataSource:
+                              description: |-
+                                dataSource field can be used to specify either:
+                                * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                                * An existing PVC (PersistentVolumeClaim)
+                                If the provisioner or an external controller can support the specified data source,
+                                it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+                                will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+                                If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+                                will be set by the persistentvolume controller if it exists.
+                                If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                                set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                                exists.
+                                More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                                (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+                              type: string
+                            volumeMode:
+                              description: |-
+                                volumeMode defines what type of volume is required by the claim.
+                                Value of Filesystem is implied when not included in claim spec.
+                              type: string
+                            volumeName:
+                              description: volumeName is the binding reference
+                                to the PersistentVolume backing this claim.
+                              type: string
+                          type: object
+                      required:
+                      - spec
+                      type: object
+                  type: object
+                fc:
+                  description: fc represents a Fibre Channel resource
+                    that is attached to a kubelet's host machine and then
+                    exposed to the pod.
+                  properties:
+                    fsType:
+                      description: |-
+                        fsType is the filesystem type to mount.
+                        Must be a filesystem type supported by the host operating system.
+                        Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                      type: string
+                    lun:
+                      description: 'lun is Optional: FC target lun number'
+                      format: int32
+                      type: integer
+                    readOnly:
+                      description: |-
+                        readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+                        the ReadOnly setting in VolumeMounts.
+                      type: boolean
+                    targetWWNs:
+                      description: 'targetWWNs is Optional: FC target
+                        worldwide names (WWNs)'
+                      items:
+                        type: string
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    wwids:
+                      description: |-
+                        wwids Optional: FC volume world wide identifiers (wwids)
+                        Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+                      items:
+                        type: string
+                      type: array
+                      x-kubernetes-list-type: atomic
+                  type: object
+                flexVolume:
+                  description: |-
+                    flexVolume represents a generic volume resource that is
+                    provisioned/attached using an exec based plugin.
+                    Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+                  properties:
+                    driver:
+                      description: driver is the name of the driver to
+                        use for this volume.
+                      type: string
+                    fsType:
+                      description: |-
+                        fsType is the filesystem type to mount.
+                        Must be a filesystem type supported by the host operating system.
+                        Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+                      type: string
+                    options:
+                      additionalProperties:
+                        type: string
+                      description: 'options is Optional: this field holds
+                        extra command options if any.'
+                      type: object
+                    readOnly:
+                      description: |-
+                        readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+                        the ReadOnly setting in VolumeMounts.
+                      type: boolean
+                    secretRef:
+                      description: |-
+                        secretRef is Optional: secretRef is reference to the secret object containing
+                        sensitive information to pass to the plugin scripts. This may be
+                        empty if no secret object is specified. If the secret object
+                        contains more than one secret, all secrets are passed to the plugin
+                        scripts.
+                      properties:
+                        name:
+                          default: ""
+                          description: |-
+                            Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
+                      type: string
+                  required:
+                  - repository
+                  type: object
+                glusterfs:
+                  description: |-
+                    glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+                    Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+                    More info: https://examples.k8s.io/volumes/glusterfs/README.md
+                  properties:
+                    endpoints:
+                      description: |-
+                        endpoints is the endpoint name that details Glusterfs topology.
+                        More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                      type: string
+                    path:
+                      description: |-
+                        path is the Glusterfs volume path.
+                        More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                      type: string
+                    readOnly:
+                      description: |-
+                        readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+                        Defaults to false.
+                        More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                      type: boolean
+                  required:
+                  - endpoints
+                  - path
+                  type: object
+                hostPath:
+                  description: |-
+                    hostPath represents a pre-existing file or directory on the host
+                    machine that is directly exposed to the container. This is generally
+                    used for system agents or other privileged things that are allowed
+                    to see the host machine. Most containers will NOT need this.
+                    More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                  properties:
+                    path:
+                      description: |-
+                        path of the directory on the host.
+                        If the path is a symlink, it will follow the link to the real path.
+                        More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                      type: string
+                    type:
+                      description: |-
+                        type for HostPath Volume
+                        Defaults to ""
+                        More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                      type: string
+                  required:
+                  - path
+                  type: object
+                image:
+                  description: |-
+                    image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+                    The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+                    - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+                    - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+                    - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+                    The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+                    A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+                    The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+                    The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+                    The volume will be mounted read-only (ro) and non-executable files (noexec).
+                    Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+                    The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+                  properties:
+                    pullPolicy:
+                      description: |-
+                        Policy for pulling OCI objects. 
Possible values are:
+                        Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+                        Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+                        IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+                        Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+                      type: string
+                    reference:
+                      description: |-
+                        Required: Image or artifact reference to be used.
+                        Behaves in the same way as pod.spec.containers[*].image.
+                        Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+                        More info: https://kubernetes.io/docs/concepts/containers/images
+                        This field is optional to allow higher level config management to default or override
+                        container images in workload controllers like Deployments and StatefulSets.
+                      type: string
+                  type: object
+                iscsi:
+                  description: |-
+                    iscsi represents an ISCSI Disk resource that is attached to a
+                    kubelet's host machine and then exposed to the pod.
+                    More info: https://examples.k8s.io/volumes/iscsi/README.md
+                  properties:
+                    chapAuthDiscovery:
+                      description: chapAuthDiscovery defines whether support
+                        iSCSI Discovery CHAP authentication
+                      type: boolean
+                    chapAuthSession:
+                      description: chapAuthSession defines whether support
+                        iSCSI Session CHAP authentication
+                      type: boolean
+                    fsType:
+                      description: |-
+                        fsType is the filesystem type of the volume that you want to mount.
+                        Tip: Ensure that the filesystem type is supported by the host operating system.
+                        Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                        More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+                      type: string
+                    initiatorName:
+                      description: |-
+                        initiatorName is the custom iSCSI Initiator Name.
+                        If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+                        <target portal>:<volume name> will be created for the connection.
+                      type: string
+                    iqn:
+                      description: iqn is the target iSCSI Qualified Name.
+                      type: string
+                    iscsiInterface:
+                      default: default
+                      description: |-
+                        iscsiInterface is the interface Name that uses an iSCSI transport.
+                        Defaults to 'default' (tcp).
+                      type: string
+                    lun:
+                      description: lun represents iSCSI Target Lun number.
+                      format: int32
+                      type: integer
+                    portals:
+                      description: |-
+                        portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+                        is other than default (typically TCP ports 860 and 3260).
+                      items:
+                        type: string
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    readOnly:
+                      description: |-
+                        readOnly here will force the ReadOnly setting in VolumeMounts.
+                        Defaults to false.
+                      type: boolean
+                    secretRef:
+                      description: secretRef is the CHAP Secret for iSCSI
+                        target and initiator authentication
+                      properties:
+                        name:
+                          default: ""
+                          description: |-
+                            Name of the referent.
+                            This field is effectively required, but due to backwards compatibility is
+                            allowed to be empty. Instances of this type with an empty value here are
+                            almost certainly wrong.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    targetPortal:
+                      description: |-
+                        targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+                                More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                              type: string
+                            optional:
+                              description: optional field specify whether
+                                the Secret or its key must be defined
+                              type: boolean
+                          type: object
+                          x-kubernetes-map-type: atomic
+                        serviceAccountToken:
+                          description: serviceAccountToken is information
+                            about the serviceAccountToken data to project
+                          properties:
+                            audience:
+                              description: |-
+                                audience is the intended audience of the token. A recipient of a token
+                                must identify itself with an identifier specified in the audience of the
+                                token, and otherwise should reject the token. The audience defaults to the
+                                identifier of the apiserver.
+                              type: string
+                            expirationSeconds:
+                              description: |-
+                                expirationSeconds is the requested duration of validity of the service
+                                account token. As the token approaches expiration, the kubelet volume
+                                plugin will proactively rotate the service account token. The kubelet will
+                                start trying to rotate the token if the token is older than 80 percent of
+                                its time to live or if the token is older than 24 hours. Defaults to 1 hour
+                                and must be at least 10 minutes.
+                              format: int64
+                              type: integer
+                            path:
+                              description: |-
+                                path is the path relative to the mount point of the file to project the
+                                token into.
+                              type: string
+                          required:
+                          - path
+                          type: object
+                      type: object
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  type: object
+                quobyte:
+                  description: |-
+                    quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+                    Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+                  properties:
+                    group:
+                      description: |-
+                        group to map volume access to
+                        Default is no group
+                      type: string
+                    readOnly:
+                      description: |-
+                        readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+                        Defaults to false.
+                      type: boolean
+                    registry:
+                      description: |-
+                        registry represents a single or multiple Quobyte Registry services
+                        specified as a string as host:port pair (multiple entries are separated with commas)
+                        which acts as the central registry for volumes
+                      type: string
+                    tenant:
+                      description: |-
+                        tenant owning the given Quobyte volume in the Backend
+                        Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+                      type: string
+                    user:
+                      description: |-
+                        user to map volume access to
+                        Defaults to serviceaccount user
+                      type: string
+                    volume:
+                      description: volume is a string that references
+                        an already created Quobyte volume by name.
+                      type: string
+                  required:
+                  - registry
+                  - volume
+                  type: object
+                rbd:
+                  description: |-
+                    rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+                    Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+                    More info: https://examples.k8s.io/volumes/rbd/README.md
+                  properties:
+                    fsType:
+                      description: |-
+                        fsType is the filesystem type of the volume that you want to mount.
+                        Tip: Ensure that the filesystem type is supported by the host operating system.
+                        Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                        More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+                      type: string
+                    image:
+                      description: |-
+                        image is the rados image name.
+                        More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+                      type: string
+                    keyring:
+                      default: /etc/ceph/keyring
+                      description: |-
+                        keyring is the path to key ring for RBDUser.
+                        Default is /etc/ceph/keyring. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to be immediately start after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup. 
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule + format: date-time + type: string + lastScheduleTime: + description: Information when was the last time that backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + 
rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: cnpg-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: vbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: vcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-database
+ failurePolicy: Fail
+ name: vdatabase.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - databases
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-pooler
+ failurePolicy: Fail
+ name: vpooler.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - poolers
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: vscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None

From 45f26728eee003d68664fc222b68db00f8c9b27e Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Tue, 22 Apr 2025 10:26:39 +0200
Subject: [PATCH 528/836] fix(catalog): ensure image catalog takes priority over image names (#7387)

When evaluating which PostgreSQL major version is being used, ensure that
the major version set in the image catalog takes priority over the image
name specified in the cluster itself.
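For illustration, a minimal sketch (not part of the diff below; it assumes the
api/v1 package context touched by this patch, with corev1 being
k8s.io/api/core/v1): a cluster that sets both an image name and a catalog
reference now reports the catalog's major version:

    cluster := Cluster{
        Spec: ClusterSpec{
            ImageName: "ghcr.io/cloudnative-pg/postgresql:15.2",
            ImageCatalogRef: &ImageCatalogRef{
                TypedLocalObjectReference: corev1.TypedLocalObjectReference{
                    Kind: "ImageCatalog",
                    Name: "postgresql", // hypothetical catalog name
                },
                Major: 16,
            },
        },
    }
    // The catalog's major version wins over Spec.ImageName (and Status.Image).
    v, err := cluster.GetPostgresqlVersion() // v == 16.0 when err is nil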
Closes: #7386

Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Co-authored-by: Armando Ruocco
---
 api/v1/cluster_funcs.go | 8 ++++----
 api/v1/cluster_funcs_test.go | 29 +++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go
index c2f27f7ba9..1be7def3b5 100644
--- a/api/v1/cluster_funcs.go
+++ b/api/v1/cluster_funcs.go
@@ -402,6 +402,10 @@ func (cluster *Cluster) SetInContext(ctx context.Context) context.Context {
// ghcr.io/cloudnative-pg/postgresql:14.0 corresponds to version (14,0)
// ghcr.io/cloudnative-pg/postgresql:13.2 corresponds to version (13,2)
func (cluster *Cluster) GetPostgresqlVersion() (version.Data, error) {
+ if cluster.Spec.ImageCatalogRef != nil {
+ return version.FromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major))
+ }
+
 if cluster.Status.Image != "" {
 return version.FromTag(reference.New(cluster.Status.Image).Tag)
 }
@@ -410,10 +414,6 @@
 return version.FromTag(reference.New(cluster.Spec.ImageName).Tag)
 }

- if cluster.Spec.ImageCatalogRef != nil {
- return version.FromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major))
- }
-
 // Fallback for unit tests where a cluster is created without status or defaults
 return version.FromTag(reference.New(configuration.Current.PostgresImageName).Tag)
}
diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go
index a9607b0516..a229d2ff06 100644
--- a/api/v1/cluster_funcs_test.go
+++ b/api/v1/cluster_funcs_test.go
@@ -809,6 +809,35 @@ var _ = Describe("PostgreSQL version detection", func() {
 }
 Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(16, 0)))
 })
+
+ It("correctly prioritizes ImageCatalogRef over Status.Image and Spec.ImageName", func() {
+ cluster := Cluster{
+ Spec: ClusterSpec{
+ ImageName: "ghcr.io/cloudnative-pg/postgresql:14.1",
+ ImageCatalogRef: &ImageCatalogRef{
+ TypedLocalObjectReference: corev1.TypedLocalObjectReference{
+ Name: "test-catalog",
+ Kind: "ImageCatalog",
+ },
+ Major: 16,
+ },
+ },
+ Status: ClusterStatus{
+ Image: "ghcr.io/cloudnative-pg/postgresql:15.2",
+ },
+ }
+
+ // ImageCatalogRef should take precedence
+ Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(16, 0)))
+
+ // Remove ImageCatalogRef, Status.Image should take precedence
+ cluster.Spec.ImageCatalogRef = nil
+ Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(15, 2)))
+
+ // Remove Status.Image, Spec.ImageName should be used
+ cluster.Status.Image = ""
+ Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(14, 1)))
+ })
 })

 var _ = Describe("Default Metrics", func() {

From 1db692d811d417d44aeaa10888202ca960067c17 Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Tue, 22 Apr 2025 10:59:46 +0100
Subject: [PATCH 529/836] fix: off by 1 error in parallel wal archive (#7389)

Fix an off-by-one error in which the number of WAL files archived in
parallel was the WAL file the archive command was called on plus
maxParallel, effectively using one more worker than requested.
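Concretely, with maxParallel set to 4 the archiver used to gather 4 additional
ready WAL files on top of the one the archive command was invoked on, archiving
5 files per call; it now gathers maxParallel - 1. A minimal sketch of the
corrected accounting (illustrative variable names, not taken from the diff):

    requested := 1                  // the WAL file archive_command was invoked on
    additional := maxParallel - 1   // extra ready WAL files gathered from pg_wal
    total := requested + additional // == maxParallel, as configured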
Closes #7390 Signed-off-by: Francesco Canovai Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- pkg/management/postgres/archiver/archiver.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go index 3d34379585..6d8519f761 100644 --- a/pkg/management/postgres/archiver/archiver.go +++ b/pkg/management/postgres/archiver/archiver.go @@ -180,11 +180,6 @@ func internalRun( return fmt.Errorf("failed to get envs: %w", err) } - maxParallel := 1 - if cluster.Spec.Backup.BarmanObjectStore.Wal != nil { - maxParallel = cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel - } - // Create the archiver var walArchiver *barmanArchiver.WALArchiver if walArchiver, err = barmanArchiver.New( @@ -210,7 +205,7 @@ func internalRun( return fmt.Errorf("while testing the existence of the WAL file in the spool directory: %w", err) } if isDeletedFromSpool { - contextLog.Info("Archived WAL file (parallel)", + contextLog.Info("WAL file already archived, skipping", "walName", walName, "currentPrimary", cluster.Status.CurrentPrimary, "targetPrimary", cluster.Status.TargetPrimary) @@ -221,7 +216,7 @@ func internalRun( walFilesList := walUtils.GatherReadyWALFiles( ctx, walUtils.GatherReadyWALFilesConfig{ - MaxResults: maxParallel, + MaxResults: getMaxResult(cluster), SkipWALs: []string{walName}, PgDataPath: pgData, }, @@ -230,6 +225,7 @@ func internalRun( // Ensure the requested WAL file is always the first one being // archived walFilesList.Ready = append([]string{walName}, walFilesList.Ready...) + contextLog.Debug("WAL files to archive", "walFilesListReady", walFilesList.Ready) options, err := walArchiver.BarmanCloudWalArchiveOptions( ctx, cluster.Spec.Backup.BarmanObjectStore, cluster.Name) @@ -256,6 +252,13 @@ func internalRun( return walStatus[0].Err } +func getMaxResult(cluster *apiv1.Cluster) int { + if cluster.Spec.Backup.BarmanObjectStore.Wal != nil && cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel > 0 { + return cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel - 1 + } + return 0 +} + // archiveWALViaPlugins requests every capable plugin to archive the passed // WAL file, and returns an error if a configured plugin fails to do so. 
// It will not return an error if there's no plugin capable of WAL archiving From 9514a7ffe69ab3170a2e44399b1af8841d84e73d Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 22 Apr 2025 14:38:48 +0200 Subject: [PATCH 530/836] chore: change default PgBouncer version to 1.24.1 (#7399) Fix: #7397 Signed-off-by: Leonardo Cecchi --- pkg/specs/pgbouncer/deployments.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go index cb25082c2f..bc0993f8bd 100644 --- a/pkg/specs/pgbouncer/deployments.go +++ b/pkg/specs/pgbouncer/deployments.go @@ -42,7 +42,7 @@ import ( const ( // DefaultPgbouncerImage is the name of the pgbouncer image used by default - DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.24.0" + DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.24.1" ) // Deployment create the deployment of pgbouncer, given From 467849c4d82f3a77a54f68c8458f4d14e5b028a7 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Tue, 22 Apr 2025 15:01:54 +0100 Subject: [PATCH 531/836] feat: drop support for pg12 (#7396) Drop conditional logic specific to PostgreSQL 12 and previous versions which have reached end-of-life and are already out of support. Closes #7395 Signed-off-by: Francesco Canovai Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Leonardo Cecchi Co-authored-by: Armando Ruocco --- docs/src/database_import.md | 5 +- docs/src/postgresql_conf.md | 10 ++-- .../cmd/manager/instance/pgbasebackup/cmd.go | 23 ++------ .../cmd/manager/instance/run/lifecycle/run.go | 50 +++++----------- internal/controller/cluster_image_test.go | 4 +- internal/webhook/v1/cluster_webhook.go | 13 +++-- internal/webhook/v1/cluster_webhook_test.go | 17 ++++++ pkg/management/postgres/join.go | 20 ++----- .../postgres/logpipe/logpipe_test.go | 16 ----- .../logpipe/testdata/two_lines_12.csv | 2 - pkg/management/postgres/probes_test.go | 7 --- pkg/postgres/configuration.go | 58 ++++++++----------- pkg/postgres/configuration_test.go | 13 ----- tests/e2e/replication_slot_test.go | 14 ----- 14 files changed, 84 insertions(+), 168 deletions(-) delete mode 100644 pkg/management/postgres/logpipe/testdata/two_lines_12.csv diff --git a/docs/src/database_import.md b/docs/src/database_import.md index 1c19a501ff..1fb3e6a9ad 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -18,8 +18,8 @@ As a result, the instructions in this section are suitable for both: - importing one or more databases from an existing PostgreSQL instance, even outside Kubernetes - importing the database from any PostgreSQL version to one that is either the - same or newer, enabling *major upgrades* of PostgreSQL (e.g. from version 11.x - to version 15.x) + same or newer, enabling *major upgrades* of PostgreSQL (e.g. from version 13.x + to version 17.x) !!! Warning When performing major upgrades of PostgreSQL you are responsible for making @@ -316,4 +316,3 @@ upgrades. For more details, including limitations and best practices, refer to the [Logical Replication](logical_replication.md) section in the documentation. 
- diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index 6c1660ac86..d547767aa3 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -65,7 +65,7 @@ operator by applying the following sections in this order: The **global default parameters** are: ```text -archive_mode = 'on' +archive_timeout = '5min' dynamic_shared_memory_type = 'posix' full_page_writes = 'on' logging_collector = 'on' @@ -78,9 +78,11 @@ log_truncate_on_rotation = 'false' max_parallel_workers = '32' max_replication_slots = '32' max_worker_processes = '32' -shared_memory_type = 'mmap' # for PostgreSQL >= 12 only -wal_keep_size = '512MB' # for PostgreSQL >= 13 only -wal_keep_segments = '32' # for PostgreSQL <= 12 only +shared_memory_type = 'mmap' +shared_preload_libraries = '' +ssl_max_protocol_version = 'TLSv1.3' +ssl_min_protocol_version = 'TLSv1.3' +wal_keep_size = '512MB' wal_level = 'logical' wal_log_hints = 'on' wal_sender_timeout = '5s' diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go index d3f7487984..8196994f71 100644 --- a/internal/cmd/manager/instance/pgbasebackup/cmd.go +++ b/internal/cmd/manager/instance/pgbasebackup/cmd.go @@ -105,8 +105,6 @@ func NewCmd() *cobra.Command { // bootstrapUsingPgbasebackup creates a new data dir from the configuration func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { - contextLogger := log.FromContext(ctx) - var cluster apiv1.Cluster err := env.client.Get(ctx, ctrl.ObjectKey{Namespace: env.info.Namespace, Name: env.info.ClusterName}, &cluster) if err != nil { @@ -134,22 +132,13 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error { return err } - pgVersion, err := cluster.GetPostgresqlVersion() - if err != nil { - contextLogger.Warning( - "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", - "imageName", cluster.Status.Image, - "err", err) - } else if pgVersion.Major() >= 12 { - // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. - // A short timeout could not be enough in case the instance is slow to send data, - // like when the I/O is overloaded. - connectionString += " options='-c wal_sender_timeout=0s'" - } + // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. + // A short timeout could not be enough in case the instance is slow to send data, + // like when the I/O is overloaded. 
+ connectionString += " options='-c wal_sender_timeout=0s'" - err = postgres.ClonePgData(ctx, connectionString, env.info.PgData, env.info.PgWal) - if err != nil { - return err + if err := postgres.ClonePgData(ctx, connectionString, env.info.PgData, env.info.PgWal); err != nil { + return fmt.Errorf("while cloning pgdata: %w", err) } if cluster.IsReplica() { diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go index acce529c1c..f5be4fae8f 100644 --- a/internal/cmd/manager/instance/run/lifecycle/run.go +++ b/internal/cmd/manager/instance/run/lifecycle/run.go @@ -22,16 +22,15 @@ package lifecycle import ( "context" "database/sql" + "errors" "fmt" "sync" - "github.com/blang/semver" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/pgx/v5" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" - postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" ) var identifierStreamingReplicationUser = pgx.Identifier{apiv1.StreamingReplicationUser}.Sanitize() @@ -153,11 +152,6 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan return nil } - pgVersion, err := postgresutils.GetPgdataVersion(instance.PgData) - if err != nil { - return fmt.Errorf("while getting major version: %w", err) - } - db, err := instance.GetSuperUserDB() if err != nil { return fmt.Errorf("while getting a connection to the instance: %w", err) @@ -177,14 +171,12 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan return fmt.Errorf("creating a new transaction to setup the instance: %w", err) } - hasSuperuser, err := configureStreamingReplicaUser(tx) - if err != nil { + if err := configureStreamingReplicaUser(tx); err != nil { _ = tx.Rollback() return err } - err = configurePgRewindPrivileges(pgVersion, hasSuperuser, tx) - if err != nil { + if err = configurePgRewindPrivileges(tx); err != nil { _ = tx.Rollback() return err } @@ -194,28 +186,28 @@ func configureInstancePermissions(ctx context.Context, instance *postgres.Instan // configureStreamingReplicaUser makes sure the streaming replication user exists // and has the required rights -func configureStreamingReplicaUser(tx *sql.Tx) (bool, error) { - var hasLoginRight, hasReplicationRight, hasSuperuser bool - row := tx.QueryRow("SELECT rolcanlogin, rolreplication, rolsuper FROM pg_catalog.pg_roles WHERE rolname = $1", +func configureStreamingReplicaUser(tx *sql.Tx) error { + var hasLoginRight, hasReplicationRight bool + row := tx.QueryRow("SELECT rolcanlogin, rolreplication FROM pg_catalog.pg_roles WHERE rolname = $1", apiv1.StreamingReplicationUser) - err := row.Scan(&hasLoginRight, &hasReplicationRight, &hasSuperuser) + err := row.Scan(&hasLoginRight, &hasReplicationRight) if err != nil { - if err != sql.ErrNoRows { - return false, fmt.Errorf("while creating streaming replication user: %w", err) + if !errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("while getting streaming replication user privileges: %w", err) } _, err = tx.Exec(fmt.Sprintf( "CREATE USER %v REPLICATION", identifierStreamingReplicationUser)) if err != nil { - return false, fmt.Errorf("CREATE USER %v error: %w", apiv1.StreamingReplicationUser, err) + return fmt.Errorf("CREATE USER %v error: %w", apiv1.StreamingReplicationUser, err) } _, err = tx.Exec(fmt.Sprintf( "COMMENT ON ROLE %v IS 'Special user for streaming replication created by CloudNativePG'", 
identifierStreamingReplicationUser)) if err != nil { - return false, fmt.Errorf("COMMENT ON ROLE %v error: %w", apiv1.StreamingReplicationUser, err) + return fmt.Errorf("COMMENT ON ROLE %v error: %w", apiv1.StreamingReplicationUser, err) } } @@ -224,28 +216,14 @@ func configureStreamingReplicaUser(tx *sql.Tx) (bool, error) { "ALTER USER %v LOGIN REPLICATION", identifierStreamingReplicationUser)) if err != nil { - return false, fmt.Errorf("ALTER USER %v error: %w", apiv1.StreamingReplicationUser, err) + return fmt.Errorf("ALTER USER %v error: %w", apiv1.StreamingReplicationUser, err) } } - return hasSuperuser, nil + return nil } // configurePgRewindPrivileges ensures that the StreamingReplicationUser has enough rights to execute pg_rewind -func configurePgRewindPrivileges(pgVersion semver.Version, hasSuperuser bool, tx *sql.Tx) error { - // We need the superuser bit for the streaming-replication user since pg_rewind in PostgreSQL <= 10 - // will require it. - if pgVersion.Major <= 10 { - if !hasSuperuser { - _, err := tx.Exec(fmt.Sprintf( - "ALTER USER %v SUPERUSER", - identifierStreamingReplicationUser)) - if err != nil { - return fmt.Errorf("ALTER USER %v error: %w", apiv1.StreamingReplicationUser, err) - } - } - return nil - } - +func configurePgRewindPrivileges(tx *sql.Tx) error { // Ensure the user has rights to execute the functions needed for pg_rewind var hasPgRewindPrivileges bool row := tx.QueryRow( diff --git a/internal/controller/cluster_image_test.go b/internal/controller/cluster_image_test.go index e76c58e9a8..5e9644d579 100644 --- a/internal/controller/cluster_image_test.go +++ b/internal/controller/cluster_image_test.go @@ -150,8 +150,8 @@ var _ = Describe("Cluster image detection", func() { Spec: apiv1.ImageCatalogSpec{ Images: []apiv1.CatalogImage{ { - Image: "postgres:11.2", - Major: 11, + Image: "postgres:17.4", + Major: 17, }, }, }, diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index b5292fa860..6301dda225 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -107,7 +107,8 @@ func (v *ClusterCustomValidator) ValidateCreate(_ context.Context, obj runtime.O if !ok { return nil, fmt.Errorf("expected a Cluster object but got %T", obj) } - clusterLog.Info("Validation for Cluster upon creation", "name", cluster.GetName(), "namespace", cluster.GetNamespace()) + clusterLog.Info("Validation for Cluster upon creation", "name", cluster.GetName(), "namespace", + cluster.GetNamespace()) allErrs := v.validate(cluster) allWarnings := v.getAdmissionWarnings(cluster) @@ -136,7 +137,8 @@ func (v *ClusterCustomValidator) ValidateUpdate( return nil, fmt.Errorf("expected a Cluster object for the oldObj but got %T", oldObj) } - clusterLog.Info("Validation for Cluster upon update", "name", cluster.GetName(), "namespace", cluster.GetNamespace()) + clusterLog.Info("Validation for Cluster upon update", "name", cluster.GetName(), "namespace", + cluster.GetNamespace()) // applying defaults before validating updates to set any new default oldCluster.SetDefaults() @@ -162,7 +164,8 @@ func (v *ClusterCustomValidator) ValidateDelete(_ context.Context, obj runtime.O if !ok { return nil, fmt.Errorf("expected a Cluster object but got %T", obj) } - clusterLog.Info("Validation for Cluster upon deletion", "name", cluster.GetName(), "namespace", cluster.GetNamespace()) + clusterLog.Info("Validation for Cluster upon deletion", "name", cluster.GetName(), "namespace", + cluster.GetNamespace()) // TODO(user): fill in 
your validation logic upon object deletion. @@ -954,12 +957,12 @@ func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.E // validateImageName function return result } - if pgVersion.Major() < 12 { + if pgVersion.Major() < 13 { result = append(result, field.Invalid( field.NewPath("spec", "imageName"), r.Spec.ImageName, - "Unsupported PostgreSQL version. Versions 12 or newer are supported")) + "Unsupported PostgreSQL version. Versions 13 or newer are supported")) } info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index ae22f9d074..c266615717 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -1030,6 +1030,23 @@ var _ = Describe("configuration change validation", func() { Expect(v.validateConfiguration(cluster)).To(HaveLen(1)) }) + It("rejects PostgreSQL version lower than 13", func() { + v := &ClusterCustomValidator{} + + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:12", + }, + } + + result := v.validateConfiguration(cluster) + + Expect(result).To(HaveLen(1)) + Expect(result[0].Field).To(Equal("spec.imageName")) + Expect(result[0].Detail).To(ContainSubstring("Unsupported PostgreSQL version")) + Expect(result[0].Detail).To(ContainSubstring("Versions 13 or newer are supported")) + }) + It("should disallow changing wal_level to minimal for existing clusters", func() { oldCluster := &apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/management/postgres/join.go b/pkg/management/postgres/join.go index fda8bdcdcb..d800ceaad7 100644 --- a/pkg/management/postgres/join.go +++ b/pkg/management/postgres/join.go @@ -77,29 +77,21 @@ func ClonePgData(ctx context.Context, connectionString, targetPgData, walDir str func (info InitInfo) Join(ctx context.Context, cluster *apiv1.Cluster) error { primaryConnInfo := buildPrimaryConnInfo(info.ParentNode, info.PodName) + " dbname=postgres connect_timeout=5" - pgVersion, err := cluster.GetPostgresqlVersion() - if err != nil { - log.Warning( - "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11", - "image", cluster.Status.Image, - "err", err) - } else if pgVersion.Major() >= 12 { - // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. - // A short timeout could not be enough in case the instance is slow to send data, - // like when the I/O is overloaded. - primaryConnInfo += " options='-c wal_sender_timeout=0s'" - } + // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions. + // A short timeout could not be enough in case the instance is slow to send data, + // like when the I/O is overloaded. 
+ primaryConnInfo += " options='-c wal_sender_timeout=0s'" coredumpFilter := cluster.GetCoredumpFilter() if err := system.SetCoredumpFilter(coredumpFilter); err != nil { return err } - if err = ClonePgData(ctx, primaryConnInfo, info.PgData, info.PgWal); err != nil { + if err := ClonePgData(ctx, primaryConnInfo, info.PgData, info.PgWal); err != nil { return err } slotName := cluster.GetSlotNameFromInstanceName(info.PodName) - _, err = UpdateReplicaConfiguration(info.PgData, info.GetPrimaryConnInfo(), slotName) + _, err := UpdateReplicaConfiguration(info.PgData, info.GetPrimaryConnInfo(), slotName) return err } diff --git a/pkg/management/postgres/logpipe/logpipe_test.go b/pkg/management/postgres/logpipe/logpipe_test.go index 28fc736b23..1f62200338 100644 --- a/pkg/management/postgres/logpipe/logpipe_test.go +++ b/pkg/management/postgres/logpipe/logpipe_test.go @@ -57,22 +57,6 @@ var _ = Describe("CSV file reader", func() { Expect(spy.records).To(HaveLen(2)) }) - It("can read multiple CSV lines on PostgreSQL version <= 12", func(ctx SpecContext) { - f, err := os.Open("testdata/two_lines_12.csv") - defer func() { - _ = f.Close() - }() - Expect(err).ToNot(HaveOccurred()) - - spy := SpyRecordWriter{} - p := LogPipe{ - record: &LoggingRecord{}, - fieldsValidator: LogFieldValidator, - } - Expect(p.streamLogFromCSVFile(ctx, f, &spy)).To(Succeed()) - Expect(spy.records).To(HaveLen(2)) - }) - It("can read multiple CSV lines on PostgreSQL version == 14", func(ctx SpecContext) { f, err := os.Open("testdata/two_lines_14.csv") defer func() { diff --git a/pkg/management/postgres/logpipe/testdata/two_lines_12.csv b/pkg/management/postgres/logpipe/testdata/two_lines_12.csv deleted file mode 100644 index 84b39d6ef1..0000000000 --- a/pkg/management/postgres/logpipe/testdata/two_lines_12.csv +++ /dev/null @@ -1,2 +0,0 @@ -2021-05-10 06:25:24.239 UTC,,,9298,,601c20b5.2452,61853,,2021-02-04 16:28:37 UTC,,0,LOG,00000,"checkpoint starting: time",,,,,,,,,"" -2021-05-10 06:25:30.200 UTC,,,9298,,601c20b5.2452,61854,,2021-02-04 16:28:37 UTC,,0,LOG,00000,"checkpoint complete: wrote 59 buffers (0.0%); 0 WAL file(s) added, 0 removed, 1 recycled; write=5.937 s, sync=0.004 s, total=5.961 s; sync files=7, longest=0.002 s, average=0.000 s; distance=16415 kB, estimate=16910 kB",,,,,,,,,"" \ No newline at end of file diff --git a/pkg/management/postgres/probes_test.go b/pkg/management/postgres/probes_test.go index bf1abc688e..627823e784 100644 --- a/pkg/management/postgres/probes_test.go +++ b/pkg/management/postgres/probes_test.go @@ -93,13 +93,6 @@ var _ = Describe("probes", func() { }) Context("Fill basebackup stats", func() { - It("does nothing in case of that major version is less than 13 ", func() { - instance := &Instance{ - pgVersion: &semver.Version{Major: 12}, - } - Expect(instance.fillBasebackupStats(nil, nil)).To(Succeed()) - }) - It("set the information", func() { instance := (&Instance{ pgVersion: &semver.Version{Major: 13}, diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 12fabdfc20..936de3c1e0 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -475,64 +475,52 @@ var ( CnpgConfigurationSettings = ConfigurationSettings{ GlobalDefaultSettings: SettingsCollection{ "archive_timeout": "5min", + "dynamic_shared_memory_type": "posix", "full_page_writes": "on", - "max_parallel_workers": "32", - "max_worker_processes": "32", - "max_replication_slots": "32", "logging_collector": "on", "log_destination": "csvlog", + "log_directory": LogPath, + "log_filename": 
LogFileName, "log_rotation_age": "0", "log_rotation_size": "0", "log_truncate_on_rotation": "false", - "log_directory": LogPath, - "log_filename": LogFileName, - "dynamic_shared_memory_type": "posix", - "wal_sender_timeout": "5s", - "wal_receiver_timeout": "5s", + "max_parallel_workers": "32", + "max_worker_processes": "32", + "max_replication_slots": "32", + "shared_memory_type": "mmap", + "ssl_max_protocol_version": "TLSv1.3", + "ssl_min_protocol_version": "TLSv1.3", + "wal_keep_size": "512MB", "wal_level": "logical", ParameterWalLogHints: "on", + "wal_sender_timeout": "5s", + "wal_receiver_timeout": "5s", // Workaround for PostgreSQL not behaving correctly when // a default value is not explicit in the postgresql.conf and // the parameter cannot be changed without a restart. SharedPreloadLibraries: "", }, - DefaultSettings: map[VersionRange]SettingsCollection{ - {MajorVersionRangeUnlimited, version.New(12, 0)}: { - "wal_keep_segments": "32", - }, - {version.New(12, 0), version.New(13, 0)}: { - "wal_keep_segments": "32", - "shared_memory_type": "mmap", - }, - {version.New(13, 0), MajorVersionRangeUnlimited}: { - "wal_keep_size": "512MB", - "shared_memory_type": "mmap", - }, - {version.New(12, 0), MajorVersionRangeUnlimited}: { - "ssl_max_protocol_version": "TLSv1.3", - "ssl_min_protocol_version": "TLSv1.3", - }, - }, MandatorySettings: SettingsCollection{ - "listen_addresses": "*", - "unix_socket_directories": SocketDirectory, - "hot_standby": "true", "archive_command": fmt.Sprintf( "/controller/manager wal-archive --log-destination %s/%s.json %%p", LogPath, LogFileName), - "port": fmt.Sprint(ServerPort), - "ssl": "on", - "ssl_cert_file": ServerCertificateLocation, - "ssl_key_file": ServerKeyLocation, - "ssl_ca_file": ClientCACertificateLocation, - "restart_after_crash": "false", + "hot_standby": "true", + "listen_addresses": "*", + "port": fmt.Sprint(ServerPort), + "restart_after_crash": "false", + "ssl": "on", + "ssl_cert_file": ServerCertificateLocation, + "ssl_key_file": ServerKeyLocation, + "ssl_ca_file": ClientCACertificateLocation, + "unix_socket_directories": SocketDirectory, }, } ) // CreateHBARules will create the content of pg_hba.conf file given // the rules set by the cluster spec -func CreateHBARules(hba []string, +func CreateHBARules( + hba []string, defaultAuthenticationMethod, ldapConfigString string, ) (string, error) { var hbaContent bytes.Buffer diff --git a/pkg/postgres/configuration_test.go b/pkg/postgres/configuration_test.go index 8d1bacac00..68adc286d3 100644 --- a/pkg/postgres/configuration_test.go +++ b/pkg/postgres/configuration_test.go @@ -83,19 +83,6 @@ var _ = Describe("PostgreSQL configuration creation", func() { Expect(confFile).To(ContainSubstring("log_destination = 'stderr'\nshared_buffers = '128KB'\n")) }) - When("version is 10", func() { - It("will use appropriate settings", func() { - info := ConfigurationInfo{ - Settings: CnpgConfigurationSettings, - Version: version.New(10, 0), - UserSettings: settings, - IncludingMandatory: true, - } - config := CreatePostgresqlConfiguration(info) - Expect(config.GetConfig("wal_keep_segments")).To(Equal("32")) - }) - }) - When("version is 13", func() { It("will use appropriate settings", func() { info := ConfigurationInfo{ diff --git a/tests/e2e/replication_slot_test.go b/tests/e2e/replication_slot_test.go index e858ab5378..9db01a5c60 100644 --- a/tests/e2e/replication_slot_test.go +++ b/tests/e2e/replication_slot_test.go @@ -73,13 +73,6 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() 
{ }, 10, 2).Should(BeTrue()) }) - if env.PostgresVersion == 11 { - // We need to take into account the fact that on PostgreSQL 11 - // it is required to rolling restart the cluster to - // enable or disable the feature once the cluster is created. - AssertClusterRollingRestart(namespace, clusterName) - } - By("checking Primary HA slots exist and are active", func() { primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) @@ -167,13 +160,6 @@ var _ = Describe("Replication Slot", Label(tests.LabelReplication), func() { }, 10, 2).Should(BeFalse()) }) - if env.PostgresVersion == 11 { - // We need to take into account the fact that on PostgreSQL 11 - // it is required to rolling restart the cluster to - // enable or disable the feature once the cluster is created. - AssertClusterRollingRestart(namespace, clusterName) - } - By("verifying slots have been removed from the cluster's pods", func() { pods, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(err).ToNot(HaveOccurred()) From faa4b3988d39c20a0233dd4b5a8fee535a500b1d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 22 Apr 2025 18:03:17 +0200 Subject: [PATCH 532/836] chore(deps): update dependency redhat-openshift-ecosystem/openshift-preflight to v1.13.0 (main) (#7374) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5942e74070..5f57effbb3 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,7 @@ SPELLCHECK_VERSION ?= 0.48.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 OPM_VERSION ?= v1.51.0 -PREFLIGHT_VERSION ?= 1.12.1 +PREFLIGHT_VERSION ?= 1.13.0 OPENSHIFT_VERSIONS ?= v4.12-v4.18 ARCH ?= amd64 From 1decc91317eb54515036c1bb294646dabe7b3bc0 Mon Sep 17 00:00:00 2001 From: Lauri Tirkkonen Date: Wed, 23 Apr 2025 22:02:45 +0900 Subject: [PATCH 533/836] fix(docs): typo in the architecture section (#7383) Signed-off-by: Lauri Tirkkonen --- docs/src/architecture.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/architecture.md b/docs/src/architecture.md index 74fddca5e7..486ddd538e 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -136,7 +136,7 @@ the [replica cluster feature](replica_cluster.md)). ![Example of a Kubernetes architecture with only 2 data centers](./images/k8s-architecture-2-az.png) !!! Hint - If you are at en early stage of your Kubernetes journey, please share this + If you are at an early stage of your Kubernetes journey, please share this document with your infrastructure team. The two data centers setup might be simply the result of a "lift-and-shift" transition to Kubernetes from a traditional bare-metal or VM based infrastructure, and the benefits From c7e8d50bcb0e2b204b045d3f28186ec5f32c7b95 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 23 Apr 2025 15:37:39 +0200 Subject: [PATCH 534/836] chore: update OpenShift metadata version to 4.19 (#7358) Closes #7408 Signed-off-by: Jonathan Gonzalez V. 
--- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5f57effbb3..a12f4785bc 100644 --- a/Makefile +++ b/Makefile @@ -60,7 +60,7 @@ WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 OPM_VERSION ?= v1.51.0 PREFLIGHT_VERSION ?= 1.13.0 -OPENSHIFT_VERSIONS ?= v4.12-v4.18 +OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 export CONTROLLER_IMG From f3ad144020f70d089cbaabaf1b59fc899b4be55b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 24 Apr 2025 11:43:43 +0200 Subject: [PATCH 535/836] chore(deps): update all non-major github action (main) (#7406) --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/latest-postgres-version-check.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/release-publish.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 32265c9841..776f436482 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -74,7 +74,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@45775bd8235c68ba998cffa5171334d58593da47 # v3 + uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3 with: languages: "go" build-mode: manual @@ -91,6 +91,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@45775bd8235c68ba998cffa5171334d58593da47 # v3 + uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index a5936961f9..9918c35517 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -611,7 +611,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3 + uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index bc9f639252..d08ffa2e6e 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -20,7 +20,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Set up Python 3.9 - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: python-version: 3.9 diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index b820e2097c..6824cbb620 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3 + uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 974016b255..beb74413fe 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -75,7 +75,7 @@ jobs: /src/docs/src/${{ env.FILE }} - name: Release - uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2 + uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2 with: body_path: release_notes.md draft: false diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index ac03598042..b418bd68ae 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -26,7 +26,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3 + uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 with: sarif_file: snyk-static.sarif @@ -39,6 +39,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47 # v3 + uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 with: sarif_file: snyk-test.sarif From af30a6c737fcf8078aa121b59415cd67d64d1b7a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 24 Apr 2025 14:10:05 +0200 Subject: [PATCH 536/836] chore(deps): update backup test tools (main) (#7417) This PR contains the following updates: https://redirect.github.com/vmware-tanzu/velero `1.15.2` -> `1.16.0` https://redirect.github.com/vmware-tanzu/velero-plugin-for-aws `1.11.1` -> `1.12.0` --- .github/workflows/continuous-delivery.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 547cc39243..ce5af9c641 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1313,8 +1313,8 @@ jobs: name: Setup Velero uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 env: - VELERO_VERSION: "v1.15.2" - VELERO_AWS_PLUGIN_VERSION: "v1.11.1" + VELERO_VERSION: "v1.16.0" + VELERO_AWS_PLUGIN_VERSION: "v1.12.0" with: timeout_minutes: 10 max_attempts: 3 From 04a9fc2bf2f154f643cb5aad9632246d7f9008e6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 24 Apr 2025 14:58:13 +0200 Subject: [PATCH 537/836] chore(deps): update dependency rook/rook to v1.17.1 (main) (#7422) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index ce5af9c641..508be4c21c 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ env: GOLANG_VERSION: "1.24.x" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.27.0" - ROOK_VERSION: "v1.16.6" + ROOK_VERSION: "v1.17.1" EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.1" OPERATOR_IMAGE_NAME: 
"ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" From 941c5d72f1c1f5751ee3fd6fca21b94607476d00 Mon Sep 17 00:00:00 2001 From: smiyc <36233521+smiyc@users.noreply.github.com> Date: Thu, 24 Apr 2025 17:08:40 +0200 Subject: [PATCH 538/836] adding 1.26.0-rc2 to git template (#7430) fixes #7429 Signed-off-by: Daniel Chambre Signed-off-by: Daniel Chambre --- .github/ISSUE_TEMPLATE/bug.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index e200839b06..23d7a85e8e 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -48,6 +48,7 @@ body: label: Version description: What is the version of CloudNativePG you are running? options: + - "1.26.0-rc2" - "1.26.0-rc1" - "1.25 (latest patch)" - "1.24 (latest patch)" From 005e82a175d78c2f3d452ce7565bbbd8ea12ef43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Apr 2025 13:23:24 +0200 Subject: [PATCH 539/836] chore(deps): bump golang.org/x/net from 0.37.0 to 0.38.0 in the go_modules group across 1 directory (#7382) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2996122f47..3ee11d1a58 100644 --- a/go.mod +++ b/go.mod @@ -99,7 +99,7 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect golang.org/x/crypto v0.36.0 // indirect - golang.org/x/net v0.37.0 // indirect + golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sync v0.12.0 // indirect golang.org/x/sys v0.32.0 // indirect diff --git a/go.sum b/go.sum index 70d8eff2f0..4f6877fc07 100644 --- a/go.sum +++ b/go.sum @@ -230,8 +230,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 96d905d85b686a142317a0089f44a329e3786734 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 27 Apr 2025 09:54:23 +0200 Subject: [PATCH 540/836] chore(deps): update kindest/node docker tag to v1.33.0 (main) (#7427) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 11d91182e8..1b9341a978 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -33,7 +33,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.32.3 +KIND_NODE_DEFAULT_VERSION=v1.33.0 export 
K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION}
export CLUSTER_ENGINE=kind
export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-}
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index 6bf155697d..0488a0fc62 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -27,7 +27,7 @@ if [ "${DEBUG-}" = true ]; then
 fi

 # Defaults
-KIND_NODE_DEFAULT_VERSION=v1.32.3
+KIND_NODE_DEFAULT_VERSION=v1.33.0
 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.16.1
 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.1
 EXTERNAL_PROVISIONER_VERSION=v5.2.0

From 036f696d49b954149babf4dc752e30d8c961628f Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Mon, 28 Apr 2025 17:40:45 +0200
Subject: [PATCH 541/836] fix(probes): wrong comparison in custom probes (#7442)

A strict less-than comparison never matches when the measured lag equals
the configured maximum; in particular, a fully caught-up replica (lag 0)
checked against a maximum lag of 0 was never considered ready. Using
less-than-or-equal lets the boundary value pass.

Closes: #7440

Signed-off-by: Leonardo Cecchi

---
 pkg/management/postgres/webserver/probes/streaming.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/management/postgres/webserver/probes/streaming.go b/pkg/management/postgres/webserver/probes/streaming.go
index aaeee79366..dc69ee0c44 100644
--- a/pkg/management/postgres/webserver/probes/streaming.go
+++ b/pkg/management/postgres/webserver/probes/streaming.go
@@ -68,7 +68,7 @@ func (c pgStreamingChecker) IsHealthy(ctx context.Context, instance *postgres.In
 	THEN true
 	WHEN (SELECT coalesce(setting, '') = '' FROM pg_catalog.pg_settings WHERE name = 'primary_conninfo')
 	THEN true
-	WHEN (SELECT value FROM lag) < $1
+	WHEN (SELECT value FROM lag) <= $1
 	THEN true
 	ELSE false
 	END AS ready_to_start,

From e56753ac49bf30135912c07f554a1db5344c746a Mon Sep 17 00:00:00 2001
From: Floor Drees
Date: Mon, 28 Apr 2025 17:44:16 +0200
Subject: [PATCH 542/836] docs: update README.md with KubeCon Europe 2025 talks (#7301)

Signed-off-by: Floor Drees

---
 README.md | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 1cf028331f..f234194746 100644
--- a/README.md
+++ b/README.md
@@ -114,10 +114,13 @@ organization to this list!
### CloudNativePG at KubeCon -- March 21 2024, KubeCon Europe 2024 in Paris: ["Scaling Heights: Mastering Postgres Database Vertical Scalability with Kubernetes Storage Magic"](https://kccnceu2024.sched.com/event/1YeM4/scaling-heights-mastering-postgres-database-vertical-scalability-with-kubernetes-storage-magic-gabriele-bartolini-edb-gari-singh-google) (Gari Singh, Google & Gabriele Bartolini, EDB) -- March 19 2024, Data on Kubernetes Day at KubeCon Europe 2024 in Paris: ["From Zero to Hero: Scaling Postgres in Kubernetes Using the Power of CloudNativePG"](https://colocatedeventseu2024.sched.com/event/1YFha/from-zero-to-hero-scaling-postgres-in-kubernetes-using-the-power-of-cloudnativepg-gabriele-bartolini-edb) (Gabriele Bartolini, EDB) -- 7 November 2023, KubeCon North America 2023 in Chicago: ["Disaster Recovery with Very Large Postgres Databases (in Kubernetes)"](https://kccncna2023.sched.com/event/1R2ml/disaster-recovery-with-very-large-postgres-databases-gabriele-bartolini-edb-michelle-au-google) (Michelle Au, Google & Gabriele Bartolini, EDB) -- 27 October 2022, KubeCon North America 2022 in Detroit: ["Data On Kubernetes, Deploying And Running PostgreSQL And Patterns For Databases In a Kubernetes Cluster"](https://kccncna2022.sched.com/event/182GB/data-on-kubernetes-deploying-and-running-postgresql-and-patterns-for-databases-in-a-kubernetes-cluster-chris-milsted-ondat-gabriele-bartolini-edb) (Chris Milsted, Ondat & Gabriele Bartolini, EDB) +- April 4 2025, KubeCon Europe in London: ["Consistent Volume Group Snapshots, Unraveling the Magic"](https://sched.co/1tx8g) - Leonardo Cecchi (EDB) and Xing Yang (VMware) +- April 2 2025, KubeCon Europe in London: ["The Future of Data on Kubernetes - From Database Management To AI Foundation"](https://kccnceu2025.sched.com/event/1txEy/the-future-of-data-on-kubernetes-from-database-management-to-ai-foundation-melissa-logan-constantia-nimisha-mehta-confluent-gabriele-bartolini-edb-brian-kaufman-google) - Gabriele Bartolini (EDB), Melissa Logan (Constantia), Nimisha Mehta (Confluent), Brian Kaufman (Google) +- April 1 2025, Data on Kubernetes Day: ["The Next Wave Of Data On Kubernetes: Winning Over The Enterprise"](https://colocatedeventseu2025.sched.com/event/1ub0S/sponsored-keynote-the-next-wave-of-data-on-kubernetes-winning-over-the-enterprise-simon-metson-enterprisedb) - Simon Metson, EDB +- March 21 2024, KubeCon Europe 2024 in Paris: ["Scaling Heights: Mastering Postgres Database Vertical Scalability with Kubernetes Storage Magic"](https://kccnceu2024.sched.com/event/1YeM4/scaling-heights-mastering-postgres-database-vertical-scalability-with-kubernetes-storage-magic-gabriele-bartolini-edb-gari-singh-google) - Gari Singh, Google & Gabriele Bartolini, EDB +- March 19 2024, Data on Kubernetes Day at KubeCon Europe 2024 in Paris: ["From Zero to Hero: Scaling Postgres in Kubernetes Using the Power of CloudNativePG"](https://colocatedeventseu2024.sched.com/event/1YFha/from-zero-to-hero-scaling-postgres-in-kubernetes-using-the-power-of-cloudnativepg-gabriele-bartolini-edb) - Gabriele Bartolini, EDB +- 7 November 2023, KubeCon North America 2023 in Chicago: ["Disaster Recovery with Very Large Postgres Databases (in Kubernetes)"](https://kccncna2023.sched.com/event/1R2ml/disaster-recovery-with-very-large-postgres-databases-gabriele-bartolini-edb-michelle-au-google) - Michelle Au, Google & Gabriele Bartolini, EDB +- 27 October 2022, KubeCon North America 2022 in Detroit: ["Data On Kubernetes, Deploying And Running PostgreSQL And Patterns For Databases 
In a Kubernetes Cluster"](https://kccncna2022.sched.com/event/182GB/data-on-kubernetes-deploying-and-running-postgresql-and-patterns-for-databases-in-a-kubernetes-cluster-chris-milsted-ondat-gabriele-bartolini-edb) - Chris Milsted, Ondat & Gabriele Bartolini, EDB ### Useful links From 3c9243d4c355f7b85cbfbc36cdd7dffb32b63001 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Mon, 28 Apr 2025 17:53:33 +0200 Subject: [PATCH 543/836] ci: select a single UBI tag during the OLM release process (#7411) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #7385 Signed-off-by: Niccolò Fei Signed-off-by: Francesco Canovai Co-authored-by: Francesco Canovai --- .github/workflows/release-publish.yml | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index beb74413fe..a21c122e23 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -96,7 +96,7 @@ jobs: author_name: ${{ steps.build-meta.outputs.author_name }} author_email: ${{ steps.build-meta.outputs.author_email }} platforms: ${{ env.PLATFORMS }} - ubi_img: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }} + olm_img: ${{ steps.olm-image.outputs.olm_image }} steps: - name: Checkout @@ -214,12 +214,23 @@ jobs: # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. - - name: Sign images + - + name: Sign images run: | images=$(echo '${{ steps.bake-push.outputs.metadata }}' | jq -r '.[] | (."image.name" | sub(",.*";"" )) + "@" + ."containerimage.digest"' ) cosign sign --yes ${images} + - + # Bake returns all the tags for a target's variant in a comma separated list. + # We only care about a single tag for OLM, so we remove the "latest" tag and + # pick the first entry in order from what's left in the list + name: Image for OLM + id: olm-image + env: + ubi_tags: ${{ fromJSON(steps.bake-push.outputs.metadata)['ubi']['image.name'] }} + run: | + echo "olm_image=$(echo "$ubi_tags" | tr ',' '\n' | grep -v 'latest' | sed 's/^ *//g' | head -n 1)" >> $GITHUB_OUTPUT olm-bundle: name: Create OLM bundle and catalog @@ -261,11 +272,11 @@ jobs: - name: Set bundle variables env: - UBI_IMG: ${{ needs.release-binaries.outputs.ubi_img }} + OLM_IMG: ${{ needs.release-binaries.outputs.olm_img }} run: | - echo "CONTROLLER_IMG=${UBI_IMG}" >> $GITHUB_ENV - echo "BUNDLE_IMG=${UBI_IMG}-bundle" >> $GITHUB_ENV - echo "CATALOG_IMG=${UBI_IMG}-catalog" >> $GITHUB_ENV + echo "CONTROLLER_IMG=${OLM_IMG}" >> $GITHUB_ENV + echo "BUNDLE_IMG=${OLM_IMG}-bundle" >> $GITHUB_ENV + echo "CATALOG_IMG=${OLM_IMG}-catalog" >> $GITHUB_ENV - name: Create bundle env: From 91aeecb5841e0a605e157cc27c58211ef2208643 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 29 Apr 2025 23:09:10 +0200 Subject: [PATCH 544/836] fix(plugin): expand `logs pretty` command buffer size (#7281) # Release Notes `cnpg logs pretty` command now has a bigger default buffer size. 
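The plugin reads log lines through a `bufio.Scanner`, whose default maximum
token size is 64 KiB, so a single log line longer than that would make the
scanner stop with `bufio.ErrTooLong`. The following standalone sketch (not
the plugin code itself) illustrates the failure mode and how `Buffer` raises
the limit, mirroring the one-line change below:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// A single log line longer than bufio's default 64 KiB token limit.
	longLine := strings.Repeat("x", 80*1024)

	// Default scanner: Scan returns false and Err reports
	// "bufio.Scanner: token too long".
	s := bufio.NewScanner(strings.NewReader(longLine))
	for s.Scan() {
	}
	fmt.Println("default buffer:", s.Err())

	// Scanner configured as in this patch: start from a small 4 KiB
	// buffer and let it grow up to 1 MiB per line.
	s = bufio.NewScanner(strings.NewReader(longLine))
	s.Buffer(make([]byte, 0, 4096), 1024*1024)
	for s.Scan() {
	}
	fmt.Println("1 MiB buffer:", s.Err()) // <nil>
}
```

The 4 KiB initial buffer keeps the common case cheap, while the 1 MiB cap
bounds the memory used for pathological lines.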
Closes #7183 Signed-off-by: Armando Ruocco --- internal/cmd/plugin/logs/pretty/pretty.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/cmd/plugin/logs/pretty/pretty.go b/internal/cmd/plugin/logs/pretty/pretty.go index 23c6636aca..52e4e7384d 100644 --- a/internal/cmd/plugin/logs/pretty/pretty.go +++ b/internal/cmd/plugin/logs/pretty/pretty.go @@ -110,6 +110,7 @@ Should be empty or one of error, warning, info, debug, or trace.`) // decode progressively decodes the logs func (bf *prettyCmd) decode(ctx context.Context, reader io.Reader, recordChannel chan<- logRecord) { scanner := bufio.NewScanner(reader) + scanner.Buffer(make([]byte, 0, 4096), 1024*1024) for scanner.Scan() { select { From e8c8dcd45a17e4e732afe3fd885bff400cea75bd Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 30 Apr 2025 09:19:41 +0200 Subject: [PATCH 545/836] refactor(restore): remove duplicate code (#7388) Signed-off-by: Armando Ruocco --- pkg/management/postgres/restore.go | 47 +++++++----------------------- 1 file changed, 11 insertions(+), 36 deletions(-) diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 1012fdf03a..ee19920f29 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -166,6 +166,16 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm return err } + return info.concludeRestore(ctx, cli, cluster, config, envs) +} + +func (info InitInfo) concludeRestore( + ctx context.Context, + cli client.Client, + cluster *apiv1.Cluster, + config string, + envs []string, +) error { if err := info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { return err } @@ -334,42 +344,7 @@ func (info InitInfo) Restore(ctx context.Context, cli client.Client) error { envs = env } - if err := info.WriteInitialPostgresqlConf(ctx, cluster); err != nil { - return err - } - // we need a migration here, otherwise the server will not start up if - // we recover from a base which has postgresql.auto.conf - // the override.conf and include statement is present, what we need to do is to - // migrate the content - if _, err := info.GetInstance().migratePostgresAutoConfFile(ctx); err != nil { - return err - } - if cluster.IsReplica() { - server, ok := cluster.ExternalCluster(cluster.Spec.ReplicaCluster.Source) - if !ok { - return fmt.Errorf("missing external cluster: %v", cluster.Spec.ReplicaCluster.Source) - } - - connectionString, err := external.ConfigureConnectionToServer( - ctx, cli, info.Namespace, &server) - if err != nil { - return err - } - - // TODO: Using a replication slot on replica cluster is not supported (yet?) 
- _, err = UpdateReplicaConfiguration(info.PgData, connectionString, "") - return err - } - - if err := info.WriteRestoreHbaConf(ctx); err != nil { - return err - } - - if err := info.writeCustomRestoreWalConfig(cluster, config); err != nil { - return err - } - - return info.ConfigureInstanceAfterRestore(ctx, cluster, envs) + return info.concludeRestore(ctx, cli, cluster, config, envs) } func (info InitInfo) ensureArchiveContainsLastCheckpointRedoWAL( From cf27b87fb0b0eef5afe911c2a1467d698f3a0736 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Wed, 30 Apr 2025 09:58:44 +0200 Subject: [PATCH 546/836] docs(fix): typo in role name (#7450) Fix a typo, missing "g" in `cnp_pooler_pgbouncer` -> `cnpg_pooler_pgbouncer` Signed-off-by: Pierrick Chovelon --- docs/src/database_import.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/database_import.md b/docs/src/database_import.md index 1fb3e6a9ad..5642977fa3 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -232,7 +232,7 @@ There are a few things you need to be aware of when using the `monolith` type: - Any role that is required by the imported databases must be specified inside `initdb.import.roles`, with the limitations below: - The following roles, if present, are not imported: - `postgres`, `streaming_replica`, `cnp_pooler_pgbouncer` + `postgres`, `streaming_replica`, `cnpg_pooler_pgbouncer` - The `SUPERUSER` option is removed from any imported role - Wildcard `"*"` can be used as the only element in the `databases` and/or `roles` arrays to import every object of the kind; When matching databases From 56daaadb63ea86f19b00ce7e0eb6ecb7bb93c2af Mon Sep 17 00:00:00 2001 From: Peggie Date: Wed, 30 Apr 2025 13:13:56 +0200 Subject: [PATCH 547/836] feat: Public Cloud K8S versions update (#7135) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. 
Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 7 +++---- .github/eks_versions.json | 3 +-- .github/gke_versions.json | 3 +-- .github/kind_versions.json | 3 ++- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index 9ba3fc5a09..a332bfe43c 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,6 +1,5 @@ [ - "1.32.0", - "1.31.5", - "1.30.9", - "1.29.9" + "1.32.3", + "1.31.7", + "1.30.9" ] diff --git a/.github/eks_versions.json b/.github/eks_versions.json index 49228d19da..5c66ae5056 100644 --- a/.github/eks_versions.json +++ b/.github/eks_versions.json @@ -1,6 +1,5 @@ [ "1.32", "1.31", - "1.30", - "1.29" + "1.30" ] diff --git a/.github/gke_versions.json b/.github/gke_versions.json index 49228d19da..5c66ae5056 100644 --- a/.github/gke_versions.json +++ b/.github/gke_versions.json @@ -1,6 +1,5 @@ [ "1.32", "1.31", - "1.30", - "1.29" + "1.30" ] diff --git a/.github/kind_versions.json b/.github/kind_versions.json index 096cd24228..7d090afa05 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,5 +1,6 @@ [ - "v1.32.2", + "v1.33.0", + "v1.32.3", "v1.31.6", "v1.30.10", "v1.29.14", From 2fb0189d4efa1a927684d30ebcdfa56e5f74e8ae Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Thu, 1 May 2025 11:55:41 +0200 Subject: [PATCH 548/836] docs: add "CNCF Project Integration" section for External Secrets (#7294) Introduce a new documentation section detailing the integration between CloudNativePG and the `external-secrets` project. This covers automated password rotation and secret management for PostgreSQL users using External Secrets and using Vault to store the secrets. Closes #7286 Signed-off-by: Gabriele Bartolini Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Jonathan Battiato Co-authored-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Battiato --- .wordlist-en-custom.txt | 4 + docs/mkdocs.yml | 2 + docs/src/cncf-projects/external-secrets.md | 261 +++++++++++++++++++++ 3 files changed, 267 insertions(+) create mode 100644 docs/src/cncf-projects/external-secrets.md diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 512c2ba1f8..cbe1a23d91 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -144,6 +144,7 @@ EDB EKS EOF EOL +ESO EmbeddedObjectMetadata EnablePDB EncryptionType @@ -210,6 +211,7 @@ Istio's JSON Jihyuk Jitendra +KV Karpenter KinD Krew @@ -365,6 +367,7 @@ PublicationTargetAllTables PublicationTargetObject PublicationTargetTable PullPolicy +PushSecret QoS Quaresima QuickStart @@ -429,6 +432,7 @@ Seccomp SeccompProfile SecretKeySelector SecretRefs +SecretStore SecretVersion SecretsResourceVersion SecurityProfiles diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 2ef9d1cc85..5cf758e60d 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -74,5 +74,7 @@ nav: - supported_releases.md - preview_version.md - release_notes.md + - CNCF Projects Integrations: + - cncf-projects/external-secrets.md - Appendixes: - appendixes/object_stores.md diff --git a/docs/src/cncf-projects/external-secrets.md b/docs/src/cncf-projects/external-secrets.md new file mode 100644 index 0000000000..7be49dd3fc --- /dev/null +++ b/docs/src/cncf-projects/external-secrets.md @@ -0,0 +1,261 @@ +# External Secrets + +[External Secrets](https://external-secrets.io/latest/) is a CNCF Sandbox +project, accepted in 2022 under the sponsorship of TAG Security. 
+ +## About + +The **External Secrets Operator (ESO)** is a Kubernetes operator that enhances +secret management by decoupling the storage of secrets from Kubernetes itself. +It enables seamless synchronization between external secret management systems +and native Kubernetes `Secret` resources. + +ESO supports a wide range of backends, including: + +- [HashiCorp Vault](https://www.vaultproject.io/) +- [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) +- [Google Secret Manager](https://cloud.google.com/secret-manager) +- [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/) +- [IBM Cloud Secrets Manager](https://www.ibm.com/cloud/secrets-manager) + +…and many more. For a full and up-to-date list of supported providers, refer to +the [official External Secrets documentation](https://external-secrets.io/latest/). + +## Integration with PostgreSQL and CloudNativePG + +When it comes to PostgreSQL databases, External Secrets integrates seamlessly +with [CloudNativePG](https://cloudnative-pg.io/) in two major use cases: + +- **Automated password management:** ESO can handle the automatic generation + and rotation of database user passwords stored in Kubernetes `Secret` + resources, ensuring that applications running inside the cluster always have + access to up-to-date credentials. + +- **Cross-platform secret access:** It enables transparent synchronization of + those passwords with an external Key Management Service (KMS) via a + `SecretStore` resources. This allows applications and developers outside the + Kubernetes cluster—who may not have access to Kubernetes secrets—to retrieve + the database credentials directly from the external KMS. + +## Example: Automated Password Management with External Secrets + +Let’s walk through how to automatically rotate the password of the `app` user +every 24 hours in the `cluster-example` Postgres cluster from the +[quickstart guide](../quickstart.md#part-3-deploy-a-postgresql-cluster). + +!!! Important + Before proceeding, ensure that the `cluster-example` Postgres cluster is up + and running in your environment. + +By default, CloudNativePG generates and manages a Kubernetes `Secret` named +`cluster-example-app`, which contains the credentials for the `app` user in the +`cluster-example` cluster. You can read more about this in the +[“Connecting from an application” section](../applications.md#secrets). + +With External Secrets, the goal is to: + +1. Define a `Password` generator that specifies how to generate the password. +2. Create an `ExternalSecret` resource that keeps the `cluster-example-app` + secret in sync by updating only the `password` and `pgpass` fields. + +### Creating the Password Generator + +The following example creates a +[`Password` generator](https://external-secrets.io/main/api/generator/password/) +resource named `pg-password-generator` in the `default` namespace. You can +customize the name and properties to suit your needs: + +```yaml +apiVersion: generators.external-secrets.io/v1alpha1 +kind: Password +metadata: + name: pg-password-generator +spec: + length: 42 + digits: 5 + symbols: 5 + symbolCharacters: "-_$@" + noUpper: false + allowRepeat: true +``` + +This specification defines the characteristics of the generated password, +including its length and the inclusion of digits, symbols, and uppercase +letters. + +### Creating the External Secret + +The example below creates an `ExternalSecret` resource named +`cluster-example-app-secret`, which refreshes the password every 24 hours. 
It +uses a `Merge` policy to update only the specified fields (`password`, `pgpass`, +`jdbc-uri` and `uri`) in the `cluster-example-app` secret. + +```yaml +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: cluster-example-app-secret +spec: + refreshInterval: "24h" + target: + name: cluster-example-app + creationPolicy: Merge + template: + metadata: + labels: + cnpg.io/reload: "true" + data: + password: "{{ .password }}" + pgpass: "cluster-example-rw:5432:app:app:{{ .password }}" + jdbc-uri: "jdbc:postgresql://cluster-example-rw.default:5432/app?password={{ .password }}&user=app" + uri: "postgresql://app:{{ .password }}@cluster-example-rw.default:5432/app" + dataFrom: + - sourceRef: + generatorRef: + apiVersion: generators.external-secrets.io/v1alpha1 + kind: Password + name: pg-password-generator +``` + +The label `cnpg.io/reload: "true"` ensures that CloudNativePG triggers a reload +of the user password in the database when the secret changes. + +### Verifying the Configuration + +To check that the `ExternalSecret` is correctly synchronizing: + +```sh +kubectl get es cluster-example-app-secret +``` + +To observe the password being refreshed in real time, temporarily reduce the +`refreshInterval` to `30s` and run the following command repeatedly: + +```sh +kubectl get secret cluster-example-app \ + -o jsonpath="{.data.password}" | base64 -d +``` + +You should see the password change every 30 seconds, confirming that the +rotation is working correctly. + +### There's More + +While the example above focuses on the default `cluster-example-app` secret +created by CloudNativePG, the same approach can be extended to manage any +custom secrets or PostgreSQL users you create to regularly rotate their +password. + + +## Example: Integration with an External KMS + +A widely used Key Management Service (KMS) provider in the CNCF ecosystem is +[HashiCorp Vault](https://www.vaultproject.io/). + +In this example, we'll demonstrate how to integrate CloudNativePG, +External Secrets Operator, and HashiCorp Vault to automatically rotate +a PostgreSQL password and securely store it in Vault. + +!!! Important + This example assumes that HashiCorp Vault is already installed and properly + configured in your environment, and that your team has the necessary expertise + to operate it. There are various ways to deploy Vault, and detailing them is + outside the scope of CloudNativePG. While it's possible to run Vault inside + Kubernetes, it is more commonly deployed externally. For detailed instructions, + consult the [HashiCorp Vault documentation](https://www.vaultproject.io/docs). + +Continuing from the previous example, we will now create the necessary +`SecretStore` and `PushSecret` resources to complete the integration with +Vault. + +### Creating the `SecretStore` + +In this example, we assume that HashiCorp Vault is accessible from within the +namespace at `http://vault.vault.svc:8200`, and that a Kubernetes `Secret` +named `vault-token` exists in the same namespace, containing the token used to +authenticate with Vault. + +```yaml +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: vault-backend +spec: + provider: + vault: + server: "http://vault.vault.svc:8200" + path: "secrets" + # Specifies the Vault KV secret engine version ("v1" or "v2"). + # Defaults to "v2" if not set. + version: "v2" + auth: + # References a Kubernetes Secret that contains the Vault token. 
        # See: https://www.vaultproject.io/docs/auth/token
        tokenSecretRef:
          name: "vault-token"
          key: "token"
---
apiVersion: v1
kind: Secret
metadata:
  name: vault-token
data:
  token: aHZzLioqKioqKio= # hvs.*******
```

This configuration creates a `SecretStore` resource named `vault-backend`.

!!! Important
    This example uses basic token-based authentication, which is suitable for
    testing, API, and CLI use cases. While it is the default method enabled in
    Vault, it is not recommended for production environments. For production,
    consider using more secure authentication methods. Refer to the
    [External Secrets Operator documentation](https://external-secrets.io/latest/provider/hashicorp-vault/)
    for a full list of supported authentication mechanisms.

!!! Info
    HashiCorp Vault must have a KV secrets engine enabled at the `secrets` path
    with version `v2`. If your Vault instance uses a different path or version,
    be sure to update the `path` and `version` fields accordingly.

### Creating the `PushSecret`

The `PushSecret` resource is used to push a Kubernetes `Secret` to HashiCorp
Vault. In this simplified example, we'll push the credentials for the `app`
user of the sample cluster `cluster-example`.

For more details on configuring `PushSecret`, refer to the
[External Secrets Operator documentation](https://external-secrets.io/latest/api/pushsecret/).

```yaml
apiVersion: external-secrets.io/v1alpha1
kind: PushSecret
metadata:
  name: pushsecret-example
spec:
  deletionPolicy: Delete
  refreshInterval: 24h
  secretStoreRefs:
    - name: vault-backend
      kind: SecretStore
  selector:
    secret:
      name: cluster-example-app
  data:
    - match:
        remoteRef:
          remoteKey: cluster-example-app
```

In this example, the `PushSecret` resource instructs the External Secrets
Operator to push the Kubernetes `Secret` named `cluster-example-app` to
HashiCorp Vault (from the previous example). The `remoteKey` defines the name
under which the secret will be stored in Vault, using the `SecretStore` named
`vault-backend`.

### Verifying the Configuration

To verify that the `PushSecret` is functioning correctly, navigate to the
HashiCorp Vault UI. In the `kv` secrets engine at the path `secrets`, you
should find a secret named `cluster-example-app`, corresponding to the
`remoteKey` defined above.
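
If you prefer the command line to the Vault UI, the same check can be done with
`kubectl` and the `vault` CLI. The following is a minimal sketch: it assumes
the `vault` binary is installed, that `VAULT_ADDR` points at your Vault server,
and that you are logged in with a token allowed to read the `secrets` KV
engine.

```sh
# Check the synchronization status reported by the PushSecret resource
kubectl get pushsecret pushsecret-example

# Read the pushed credentials from the KV v2 engine mounted at "secrets"
vault kv get secrets/cluster-example-app
```

If the `PushSecret` reports a synced status and the entry appears in Vault, the
integration is working end to end.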
From 8583a94b354cad76db48fbf2d99298d8d57fa6c0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 3 May 2025 17:55:27 +0200 Subject: [PATCH 549/836] chore(deps): update all non-major github action (main) (#7437) This PR contains the following updates: https://github.com/actions/download-artifact `95815c3` -> `d3f86a1` https://github.com/docker/bake-action `4ba453f` -> `76f9fa3` https://github.com/docker/build-push-action `471d1dc` -> `14487ce` https://github.com/github/codeql-action `28deaed` -> `60168ef` --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-delivery.yml | 8 ++++---- .github/workflows/continuous-integration.yml | 6 +++--- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/release-publish.yml | 6 +++--- .github/workflows/snyk.yml | 4 ++-- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 776f436482..65c187afe4 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -74,7 +74,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/init@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 with: languages: "go" build-mode: manual @@ -91,6 +91,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/analyze@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 508be4c21c..b447bc08ec 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -365,7 +365,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/bake-action@4ba453fbc2db7735392b93edf935aaf9b1e8f747 # v6 + uses: docker/bake-action@76f9fa3a758507623da19f6092dc4089a7e61592 # v6 id: bake-push env: environment: "testing" @@ -450,7 +450,7 @@ jobs: # NOTE: we only fire this in TEST DEPTH = 4, as that is the level of the # upgrade test name: Build and push image for upgrade test - uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6 + uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6 id: build-prime if: | always() && !cancelled() && @@ -504,7 +504,7 @@ jobs: rm -fr manifests/operator-manifest.yaml - name: Prepare the operator manifest - uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: name: operator-manifest.yaml path: manifests @@ -2124,7 +2124,7 @@ jobs: run: mkdir test-artifacts - name: Download all artifacts to the directory - uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: path: test-artifacts pattern: testartifacts-* diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 9918c35517..39a7250c5c 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -553,7 +553,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: 
docker/bake-action@4ba453fbc2db7735392b93edf935aaf9b1e8f747 # v6 + uses: docker/bake-action@76f9fa3a758507623da19f6092dc4089a7e61592 # v6 id: bake-push env: environment: "testing" @@ -611,7 +611,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork @@ -837,7 +837,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Download the bundle - uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: name: bundle diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 6824cbb620..dc92ed236c 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index a21c122e23..14a1ece5f0 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -194,7 +194,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/bake-action@4ba453fbc2db7735392b93edf935aaf9b1e8f747 # v6 + uses: docker/bake-action@76f9fa3a758507623da19f6092dc4089a7e61592 # v6 id: bake-push env: environment: "production" @@ -315,7 +315,7 @@ jobs: persist-credentials: false - name: Download the bundle - uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: name: bundle @@ -398,7 +398,7 @@ jobs: git config user.name "${{ needs.release-binaries.outputs.author_name }}" - name: Download the bundle - uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4 + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: name: bundle - diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index b418bd68ae..f9372b1fc3 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -26,7 +26,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 with: sarif_file: snyk-static.sarif @@ -39,6 +39,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 with: sarif_file: snyk-test.sarif From be28e8e03ba7c4265e53be0cf7c2bd14ab087515 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 5 May 2025 10:08:44 +0200 Subject: [PATCH 550/836] 
chore(deps): update dependency operator-framework/operator-registry to v1.53.0 (main) (#7477) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a12f4785bc..fcd092976b 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ GORELEASER_VERSION ?= v2.8.2 SPELLCHECK_VERSION ?= 0.48.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 -OPM_VERSION ?= v1.51.0 +OPM_VERSION ?= v1.53.0 PREFLIGHT_VERSION ?= 1.13.0 OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 From a83fd53e97c1173fe83941eaf83a6b9bc41b3e84 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Mon, 5 May 2025 11:09:38 +0200 Subject: [PATCH 551/836] fix: replace MajorVersionUpgradeFromImage with PGDataImageInfo in status (#7403) Include both the major version and the image used to run the data directory in the status. This addresses a failure scenario during major upgrades when using an ImageCatalog. If the images in the catalog do not follow the CloudNativePG tag conventions, the operator cannot reliably determine the starting major version, causing the upgrade to fail. By explicitly storing both the image and its associated major version, we ensure the upgrade process has the necessary context. Closes #7391 Signed-off-by: Francesco Canovai Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 4 + api/v1/cluster_defaults.go | 4 +- api/v1/cluster_funcs.go | 27 +++-- api/v1/cluster_funcs_test.go | 25 ++--- api/v1/cluster_types.go | 13 ++- api/v1/zz_generated.deepcopy.go | 21 +++- .../bases/postgresql.cnpg.io_clusters.yaml | 19 +++- docs/src/cloudnative-pg.v1.md | 38 ++++++- .../manager/instance/upgrade/execute/cmd.go | 26 ++--- internal/controller/cluster_image.go | 105 +++++++++--------- internal/controller/cluster_image_test.go | 41 +++---- .../management/controller/instance_startup.go | 8 +- internal/webhook/v1/cluster_webhook.go | 33 ++---- internal/webhook/v1/cluster_webhook_test.go | 81 ++++++++++++++ pkg/management/postgres/configuration.go | 13 +-- pkg/management/postgres/configuration_test.go | 14 ++- pkg/management/postgres/instance.go | 16 +-- pkg/management/postgres/utils/version.go | 13 +-- pkg/postgres/configuration.go | 34 ++---- pkg/postgres/configuration_test.go | 44 ++++---- pkg/reconciler/majorupgrade/job.go | 4 +- pkg/reconciler/majorupgrade/job_test.go | 9 +- pkg/reconciler/majorupgrade/reconciler.go | 26 +++-- .../majorupgrade/reconciler_test.go | 7 +- pkg/resources/status/transactions.go | 7 +- 25 files changed, 368 insertions(+), 264 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index cbe1a23d91..cf12432d8a 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -201,6 +201,7 @@ IfNotPresent ImageCatalog ImageCatalogRef ImageCatalogSpec +ImageInfo ImportSource InfoSec Innocenti @@ -298,6 +299,7 @@ PGAudit PGDATA PGDG PGData +PGDataImageInfo PGSQL PKI PODNAME @@ -968,6 +970,7 @@ lsn lt lz macOS +majorVersion majorVersionUpgradeFromImage malcolm mallocs @@ -1075,6 +1078,7 @@ pgAdmin pgBouncer pgBouncerIntegration pgBouncerSecrets +pgDataImageInfo pgDumpExtraOptions pgRestoreExtraOptions pgSQL diff --git a/api/v1/cluster_defaults.go b/api/v1/cluster_defaults.go index 64f51e0411..a851f3cedd 100644 --- a/api/v1/cluster_defaults.go +++ b/api/v1/cluster_defaults.go @@ -85,13 +85,13 @@ func (r *Cluster) setDefaults(preserveUserSettings bool) { r.Spec.Backup.Target = DefaultBackupTarget } - psqlVersion, err := 
r.GetPostgresqlVersion() + psqlVersion, err := r.GetPostgresqlMajorVersion() if err == nil { // The validation error will be already raised by the // validateImageName function info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - Version: psqlVersion, + MajorVersion: psqlVersion, UserSettings: r.Spec.PostgresConfiguration.Parameters, IsReplicaCluster: r.IsReplica(), PreserveFixedSettingsFromUser: preserveUserSettings, diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 1be7def3b5..17de306af4 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -24,7 +24,6 @@ import ( "fmt" "regexp" "slices" - "strconv" "strings" "time" @@ -395,27 +394,27 @@ func (cluster *Cluster) SetInContext(ctx context.Context) context.Context { return context.WithValue(ctx, contextutils.ContextKeyCluster, cluster) } -// GetPostgresqlVersion gets the PostgreSQL image version detecting it from the +// GetPostgresqlMajorVersion gets the PostgreSQL image major version detecting it from the // image name or from the ImageCatalogRef. -// Example: -// -// ghcr.io/cloudnative-pg/postgresql:14.0 corresponds to version (14,0) -// ghcr.io/cloudnative-pg/postgresql:13.2 corresponds to version (13,2) -func (cluster *Cluster) GetPostgresqlVersion() (version.Data, error) { +func (cluster *Cluster) GetPostgresqlMajorVersion() (int, error) { if cluster.Spec.ImageCatalogRef != nil { - return version.FromTag(strconv.Itoa(cluster.Spec.ImageCatalogRef.Major)) - } - - if cluster.Status.Image != "" { - return version.FromTag(reference.New(cluster.Status.Image).Tag) + return cluster.Spec.ImageCatalogRef.Major, nil } if cluster.Spec.ImageName != "" { - return version.FromTag(reference.New(cluster.Spec.ImageName).Tag) + imgVersion, err := version.FromTag(reference.New(cluster.Spec.ImageName).Tag) + if err != nil { + return 0, fmt.Errorf("cannot parse image name %q: %w", cluster.Spec.ImageName, err) + } + return int(imgVersion.Major()), nil //nolint:gosec } // Fallback for unit tests where a cluster is created without status or defaults - return version.FromTag(reference.New(configuration.Current.PostgresImageName).Tag) + imgVersion, err := version.FromTag(reference.New(configuration.Current.PostgresImageName).Tag) + if err != nil { + return 0, fmt.Errorf("cannot parse default image name %q: %w", configuration.Current.PostgresImageName, err) + } + return int(imgVersion.Major()), nil //nolint:gosec } // GetImagePullSecret get the name of the pull secret to use diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index a229d2ff06..95fbeba05c 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -24,7 +24,6 @@ import ( "time" barmanCatalog "github.com/cloudnative-pg/barman-cloud/pkg/catalog" - "github.com/cloudnative-pg/machinery/pkg/postgres/version" "github.com/cloudnative-pg/machinery/pkg/stringset" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -778,16 +777,16 @@ var _ = Describe("A config map resource version", func() { var _ = Describe("PostgreSQL version detection", func() { tests := []struct { - imageName string - postgresVersion version.Data + imageName string + postgresMajorVersion int }{ { "ghcr.io/cloudnative-pg/postgresql:14.0", - version.New(14, 0), + 14, }, { - "ghcr.io/cloudnative-pg/postgresql:13.2", - version.New(13, 2), + "ghcr.io/cloudnative-pg/postgresql:17.4", + 17, }, } @@ -795,7 +794,7 @@ var _ = Describe("PostgreSQL version detection", func() { cluster := Cluster{} for _, test := range tests 
{ cluster.Spec.ImageName = test.imageName - Expect(cluster.GetPostgresqlVersion()).To(Equal(test.postgresVersion)) + Expect(cluster.GetPostgresqlMajorVersion()).To(Equal(test.postgresMajorVersion)) } }) It("correctly extract PostgreSQL versions from ImageCatalogRef", func() { @@ -807,7 +806,7 @@ var _ = Describe("PostgreSQL version detection", func() { }, Major: 16, } - Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(16, 0))) + Expect(cluster.GetPostgresqlMajorVersion()).To(Equal(16)) }) It("correctly prioritizes ImageCatalogRef over Status.Image and Spec.ImageName", func() { @@ -828,15 +827,11 @@ var _ = Describe("PostgreSQL version detection", func() { } // ImageCatalogRef should take precedence - Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(16, 0))) - - // Remove ImageCatalogRef, Status.Image should take precedence - cluster.Spec.ImageCatalogRef = nil - Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(15, 2))) + Expect(cluster.GetPostgresqlMajorVersion()).To(Equal(16)) // Remove Status.Image, Spec.ImageName should be used - cluster.Status.Image = "" - Expect(cluster.GetPostgresqlVersion()).To(Equal(version.New(14, 1))) + cluster.Spec.ImageCatalogRef = nil + Expect(cluster.GetPostgresqlMajorVersion()).To(Equal(14)) }) }) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 3b7fd86cca..e765ad51a1 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -940,10 +940,9 @@ type ClusterStatus struct { // +optional Image string `json:"image,omitempty"` - // MajorVersionUpgradeFromImage contains the image that was - // running before the major version upgrade started. + // PGDataImageInfo contains the details of the latest image that has run on the current data directory. // +optional - MajorVersionUpgradeFromImage *string `json:"majorVersionUpgradeFromImage,omitempty"` + PGDataImageInfo *ImageInfo `json:"pgDataImageInfo,omitempty"` // PluginStatus is the status of the loaded plugins // +optional @@ -961,6 +960,14 @@ type ClusterStatus struct { DemotionToken string `json:"demotionToken,omitempty"` } +// ImageInfo contains the information about a PostgreSQL image +type ImageInfo struct { + // Image is the image name + Image string `json:"image"` + // MajorVersion is the major version of the image + MajorVersion int `json:"majorVersion"` +} + // SwitchReplicaClusterStatus contains all the statuses regarding the switch of a cluster to a replica cluster type SwitchReplicaClusterStatus struct { // InProgress indicates if there is an ongoing procedure of switching a cluster to a replica cluster. diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 5943b91175..6ec7f7fa4e 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -958,9 +958,9 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.MajorVersionUpgradeFromImage != nil { - in, out := &in.MajorVersionUpgradeFromImage, &out.MajorVersionUpgradeFromImage - *out = new(string) + if in.PGDataImageInfo != nil { + in, out := &in.PGDataImageInfo, &out.PGDataImageInfo + *out = new(ImageInfo) **out = **in } if in.PluginStatus != nil { @@ -1424,6 +1424,21 @@ func (in *ImageCatalogSpec) DeepCopy() *ImageCatalogSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageInfo) DeepCopyInto(out *ImageInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageInfo. +func (in *ImageInfo) DeepCopy() *ImageInfo { + if in == nil { + return nil + } + out := new(ImageInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Import) DeepCopyInto(out *Import) { *out = *in diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 93aec9fc60..af488bffca 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -6173,11 +6173,6 @@ spec: description: ID of the latest generated node (used to avoid node name clashing) type: integer - majorVersionUpgradeFromImage: - description: |- - MajorVersionUpgradeFromImage contains the image that was - running before the major version upgrade started. - type: string managedRolesStatus: description: ManagedRolesStatus reports the state of the managed roles in the cluster @@ -6220,6 +6215,20 @@ spec: description: OnlineUpdateEnabled shows if the online upgrade is enabled inside the cluster type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. + properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object phase: description: Current phase of the cluster type: string diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 5fd53c54e1..3112b4c5eb 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2258,12 +2258,11 @@ This field is reported when .spec.failoverDelay is populated or dur

Image contains the image name used by the pods

-majorVersionUpgradeFromImage
-string +pgDataImageInfo
+ImageInfo -

MajorVersionUpgradeFromImage contains the image that was -running before the major version upgrade started.

+

PGDataImageInfo contains the details of the latest image that has run on the current data directory.

pluginStatus
@@ -2998,6 +2997,37 @@ of WAL archiving and backups for this external cluster

+## ImageInfo {#postgresql-cnpg-io-v1-ImageInfo} + + +**Appears in:** + +- [ClusterStatus](#postgresql-cnpg-io-v1-ClusterStatus) + + +

ImageInfo contains the information about a PostgreSQL image

+ + + + + + + + + + + + +
FieldDescription
image [Required]
+string +
+

Image is the image name

+
majorVersion [Required]
+int +
+

MajorVersion is the major version of the image

+
+ ## Import {#postgresql-cnpg-io-v1-Import} diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index 9945052264..f85170ad2b 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -31,13 +31,11 @@ import ( "strings" "time" - "github.com/blang/semver" "github.com/cloudnative-pg/machinery/pkg/env" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/cloudnative-pg/machinery/pkg/postgres/version" "github.com/spf13/cobra" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime/pkg/client" @@ -191,7 +189,7 @@ func upgradeSubCommand( return fmt.Errorf("error while getting old data directory control data: %w", err) } - targetVersion, err := cluster.GetPostgresqlVersion() + targetVersion, err := cluster.GetPostgresqlMajorVersion() if err != nil { return fmt.Errorf("error while getting the target version from the cluster object: %w", err) } @@ -208,17 +206,17 @@ func upgradeSubCommand( contextLogger.Info("Checking if we have anything to update") // Read pg_version from both the old and new data directories - oldVersion, err := postgresutils.GetPgdataVersion(pgData) + oldVersion, err := postgresutils.GetMajorVersionFromPgData(pgData) if err != nil { return fmt.Errorf("error while reading the old version: %w", err) } - newVersion, err := postgresutils.GetPgdataVersion(newDataDir) + newVersion, err := postgresutils.GetMajorVersionFromPgData(newDataDir) if err != nil { return fmt.Errorf("error while reading the new version: %w", err) } - if oldVersion.Equals(newVersion) { + if oldVersion == newVersion { contextLogger.Info("Versions are the same, no need to upgrade") if err := os.RemoveAll(newDataDir); err != nil { return fmt.Errorf("failed to remove the directory: %w", err) @@ -283,7 +281,7 @@ func getControlData(binDir, pgData string) (map[string]string, error) { return utils.ParsePgControldataOutput(string(out)), nil } -func runInitDB(destDir string, walDir *string, pgControlData map[string]string, targetVersion version.Data) error { +func runInitDB(destDir string, walDir *string, pgControlData map[string]string, targetMajorVersion int) error { // Invoke initdb to generate a data directory options := []string{ "--username", @@ -302,7 +300,7 @@ func runInitDB(destDir string, walDir *string, pgControlData map[string]string, return err } - options, err = tryAddDataChecksums(pgControlData, targetVersion, options) + options, err = tryAddDataChecksums(pgControlData, targetMajorVersion, options) if err != nil { return err } @@ -323,7 +321,7 @@ func runInitDB(destDir string, walDir *string, pgControlData map[string]string, // TODO: refactor it should be a method of pgControlData func tryAddDataChecksums( pgControlData map[string]string, - targetVersion version.Data, + targetMajorVersion int, options []string, ) ([]string, error) { dataPageChecksumVersion, ok := pgControlData[utils.PgControlDataDataPageChecksumVersion] @@ -333,7 +331,7 @@ func tryAddDataChecksums( if dataPageChecksumVersion != "1" { // In postgres 18 we will have to set "--no-data-checksums" if checksums are disabled (they are enabled by default) - if targetVersion.Major() >= 18 { + if targetMajorVersion >= 18 { return append(options, "--no-data-checksums"), nil } return options, nil @@ -374,11 +372,11 @@ func 
prepareConfigurationFiles(ctx context.Context, cluster apiv1.Cluster, destD tmpCluster := cluster.DeepCopy() tmpCluster.Spec.PostgresConfiguration.Parameters["max_slot_wal_keep_size"] = "-1" - pgVersion, err := postgresutils.GetPgdataVersion(destDir) + pgMajorVersion, err := postgresutils.GetMajorVersionFromPgData(destDir) if err != nil { return fmt.Errorf("error while reading the new data directory version: %w", err) } - if pgVersion.Major >= 18 { + if pgMajorVersion >= 18 { tmpCluster.Spec.PostgresConfiguration.Parameters["idle_replication_slot_timeout"] = "0" } @@ -422,7 +420,7 @@ func runPgUpgrade(oldDataDir string, pgUpgrade string, newDataDir string, oldBin func moveDataInPlace( ctx context.Context, pgData string, - oldVersion semver.Version, + oldMajor int, newDataDir string, newWalDir *string, ) error { @@ -473,7 +471,7 @@ func moveDataInPlace( contextLogger.Info("Cleaning up the previous version directory from tablespaces") if err := removeMatchingPaths(ctx, - path.Join(pgData, "pg_tblspc", "*", fmt.Sprintf("PG_%v_*", oldVersion.Major))); err != nil { + path.Join(pgData, "pg_tblspc", "*", fmt.Sprintf("PG_%v_*", oldMajor))); err != nil { return fmt.Errorf("error while removing the old tablespaces directories: %w", err) } diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index db955e65d2..f9d6ba8dd0 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -47,87 +47,88 @@ import ( func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.Cluster) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx) - image, err := r.getConfiguredImage(ctx, cluster) + requestedImageInfo, err := r.getRequestedImageInfo(ctx, cluster) if err != nil { return &ctrl.Result{}, r.RegisterPhase(ctx, cluster, apiv1.PhaseImageCatalogError, err.Error()) } - currentDataImage := getCurrentPgDataImage(&cluster.Status) - // Case 1: the cluster is being initialized and there is still no // running image. In this case, we should simply apply the image selected by the user. - if currentDataImage == "" { + if cluster.Status.PGDataImageInfo == nil { return nil, status.PatchWithOptimisticLock( ctx, r.Client, cluster, - status.SetImage(image), - status.SetMajorVersionUpgradeFromImage(nil), + status.SetImage(requestedImageInfo.Image), + status.SetPGDataImageInfo(&requestedImageInfo), ) } // Case 2: there's a running image. The code checks if the user selected // an image of the same major version or if a change in the major // version has been requested. 
- currentVersion, err := version.FromTag(reference.New(currentDataImage).Tag) - if err != nil { - contextLogger.Error(err, "While parsing current major versions") - return nil, err + if requestedImageInfo.Image == cluster.Status.PGDataImageInfo.Image { + // The requested image is the same as the current one, no action needed + return nil, nil } - requestedVersion, err := version.FromTag(reference.New(image).Tag) - if err != nil { - contextLogger.Error(err, "While parsing requested major versions") - return nil, err - } + currentMajorVersion := cluster.Status.PGDataImageInfo.MajorVersion + requestedMajorVersion := requestedImageInfo.MajorVersion - var majorVersionUpgradeFromImage *string - switch { - case currentVersion.Major() < requestedVersion.Major(): - // The current major version is older than the requested one - majorVersionUpgradeFromImage = ¤tDataImage - case currentVersion.Major() == requestedVersion.Major(): - // The major versions are the same, cancel the update - majorVersionUpgradeFromImage = nil - default: + if currentMajorVersion > requestedMajorVersion { + // Major version downgrade requested. This is not allowed. contextLogger.Info( - "Cannot downgrade the PostgreSQL major version. Forcing the current image.", - "currentImage", currentDataImage, - "requestedImage", image) - image = currentDataImage + "Cannot downgrade the PostgreSQL major version. Forcing the current requestedImageInfo.", + "currentImage", cluster.Status.PGDataImageInfo.Image, + "requestedImage", requestedImageInfo) + return nil, fmt.Errorf("cannot downgrade the PostgreSQL major version from %d to %d", + currentMajorVersion, requestedMajorVersion) } + if currentMajorVersion < requestedMajorVersion { + // Major version upgrade requested + return nil, status.PatchWithOptimisticLock( + ctx, + r.Client, + cluster, + status.SetImage(requestedImageInfo.Image), + ) + } + + // The major versions are the same, but the images are different. + // This is a minor version upgrade/downgrade. return nil, status.PatchWithOptimisticLock( ctx, r.Client, cluster, - status.SetImage(image), - status.SetMajorVersionUpgradeFromImage(majorVersionUpgradeFromImage), - ) + status.SetImage(requestedImageInfo.Image), + status.SetPGDataImageInfo(&requestedImageInfo)) } -// getCurrentPgDataImage returns Postgres image that was able to run the cluster -// PGDATA correctly last time. -// This is important in the context of major upgrade because it contains the -// image with the "old" major version even when there are no Pods available. 
-func getCurrentPgDataImage(status *apiv1.ClusterStatus) string { - if status.MajorVersionUpgradeFromImage != nil { - return *status.MajorVersionUpgradeFromImage +func getImageInfoFromImage(image string) (apiv1.ImageInfo, error) { + // Parse the version from the tag + imageVersion, err := version.FromTag(reference.New(image).Tag) + if err != nil { + return apiv1.ImageInfo{}, fmt.Errorf("cannot parse version from image %s: %w", image, err) } - return status.Image + return apiv1.ImageInfo{ + Image: image, + MajorVersion: int(imageVersion.Major()), //nolint:gosec + }, nil } -func (r *ClusterReconciler) getConfiguredImage(ctx context.Context, cluster *apiv1.Cluster) (string, error) { +func (r *ClusterReconciler) getRequestedImageInfo( + ctx context.Context, cluster *apiv1.Cluster, +) (apiv1.ImageInfo, error) { contextLogger := log.FromContext(ctx) - // If ImageName is defined and different from the current image in the status, we update the status - if cluster.Spec.ImageName != "" { - return cluster.Spec.ImageName, nil - } - if cluster.Spec.ImageCatalogRef == nil { - return "", fmt.Errorf("ImageName is not defined and no catalog is referenced") + if cluster.Spec.ImageName != "" { + return getImageInfoFromImage(cluster.Spec.ImageName) + } + + return apiv1.ImageInfo{}, fmt.Errorf("ImageName is not defined and no catalog is referenced") } contextLogger = contextLogger.WithValues("catalogRef", cluster.Spec.ImageCatalogRef) @@ -142,13 +143,13 @@ func (r *ClusterReconciler) getConfiguredImage(ctx context.Context, cluster *api catalog = &apiv1.ImageCatalog{} default: contextLogger.Info("Unknown catalog kind") - return "", fmt.Errorf("invalid image catalog type") + return apiv1.ImageInfo{}, fmt.Errorf("invalid image catalog type") } apiGroup := cluster.Spec.ImageCatalogRef.APIGroup if apiGroup == nil || *apiGroup != apiv1.SchemeGroupVersion.Group { contextLogger.Info("Unknown catalog group") - return "", fmt.Errorf("invalid image catalog group") + return apiv1.ImageInfo{}, fmt.Errorf("invalid image catalog group") } // Get the referenced catalog @@ -159,10 +160,10 @@ func (r *ClusterReconciler) getConfiguredImage(ctx context.Context, cluster *api r.Recorder.Eventf(cluster, "Warning", "DiscoverImage", "Cannot get %v/%v", catalogKind, catalogName) contextLogger.Info("catalog not found", "catalogKind", catalogKind, "catalogName", catalogName) - return "", fmt.Errorf("catalog %s/%s not found", catalogKind, catalogName) + return apiv1.ImageInfo{}, fmt.Errorf("catalog %s/%s not found", catalogKind, catalogName) } - return "", err + return apiv1.ImageInfo{}, err } // Catalog found, we try to find the image for the major version @@ -178,10 +179,10 @@ func (r *ClusterReconciler) getConfiguredImage(ctx context.Context, cluster *api catalogName) contextLogger.Info("cannot find requested major version", "requestedMajorVersion", requestedMajorVersion) - return "", fmt.Errorf("selected major version is not available in the catalog") + return apiv1.ImageInfo{}, fmt.Errorf("selected major version is not available in the catalog") } - return catalogImage, nil + return apiv1.ImageInfo{Image: catalogImage, MajorVersion: requestedMajorVersion}, nil } func (r *ClusterReconciler) getClustersForImageCatalogsToClustersMapper( diff --git a/internal/controller/cluster_image_test.go b/internal/controller/cluster_image_test.go index 5e9644d579..11123505b2 100644 --- a/internal/controller/cluster_image_test.go +++ b/internal/controller/cluster_image_test.go @@ -25,7 +25,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -74,7 +73,8 @@ var _ = Describe("Cluster image detection", func() { Expect(result).To(BeNil()) Expect(cluster.Status.Image).To(Equal("postgres:15.2")) - Expect(cluster.Status.MajorVersionUpgradeFromImage).To(BeNil()) + Expect(cluster.Status.PGDataImageInfo.Image).To(Equal("postgres:15.2")) + Expect(cluster.Status.PGDataImageInfo.MajorVersion).To(Equal(15)) }) It("gets the image from an image catalog", func(ctx SpecContext) { @@ -118,7 +118,8 @@ var _ = Describe("Cluster image detection", func() { Expect(result).To(BeNil()) Expect(cluster.Status.Image).To(Equal("postgres:15.2")) - Expect(cluster.Status.MajorVersionUpgradeFromImage).To(BeNil()) + Expect(cluster.Status.PGDataImageInfo.Image).To(Equal("postgres:15.2")) + Expect(cluster.Status.PGDataImageInfo.MajorVersion).To(Equal(15)) }) It("gets the name from the image catalog, but the catalog is incomplete", func(ctx SpecContext) { @@ -176,13 +177,18 @@ var _ = Describe("Cluster image detection", func() { }, Status: apiv1.ClusterStatus{ Image: "postgres:16.2", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:16.2", + MajorVersion: 16, + }, }, } r := newFakeReconcilerFor(cluster, nil) result, err := r.reconcileImage(ctx, cluster) - Expect(err).Error().ShouldNot(HaveOccurred()) + Expect(err).Error().Should(HaveOccurred()) + Expect(err).Error().Should(MatchError("cannot downgrade the PostgreSQL major version from 16 to 15")) Expect(result).To(BeNil()) Expect(cluster.Status.Image).To(Equal("postgres:16.2")) @@ -199,6 +205,10 @@ var _ = Describe("Cluster image detection", func() { }, Status: apiv1.ClusterStatus{ Image: "postgres:16.2", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:16.2", + MajorVersion: 16, + }, }, } @@ -209,26 +219,7 @@ var _ = Describe("Cluster image detection", func() { Expect(result).To(BeNil()) Expect(cluster.Status.Image).To(Equal("postgres:17.2")) - Expect(cluster.Status.MajorVersionUpgradeFromImage).ToNot(BeNil()) - Expect(*cluster.Status.MajorVersionUpgradeFromImage).To(Equal("postgres:16.2")) - }) -}) - -var _ = Describe("Major version tracking with getCurrentPgDataImage", func() { - It("returns the current major version if no major version update has been requested", func() { - status := &apiv1.ClusterStatus{ - Image: "postgres:15.2", - } - - Expect(getCurrentPgDataImage(status)).To(Equal("postgres:15.2")) - }) - - It("returns the old major version if a major version update has been requested", func() { - status := &apiv1.ClusterStatus{ - Image: "postgres:15.2", - MajorVersionUpgradeFromImage: ptr.To("postgres:14.3"), - } - - Expect(getCurrentPgDataImage(status)).To(Equal("postgres:14.3")) + Expect(cluster.Status.PGDataImageInfo.Image).To(Equal("postgres:16.2")) + Expect(cluster.Status.PGDataImageInfo.MajorVersion).To(Equal(16)) }) }) diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index cd45a89373..bd170dcba3 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -33,7 +33,6 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/archiver" - "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" 
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs" ) @@ -111,11 +110,6 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context return err } - pgVersion, err := utils.GetPgdataVersion(r.instance.PgData) - if err != nil { - return err - } - // Clean up any stale pid file before executing pg_rewind err = r.instance.CleanUpStalePid() if err != nil { @@ -136,7 +130,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context return fmt.Errorf("while ensuring all WAL files are archived: %w", err) } - err = r.instance.Rewind(ctx, pgVersion) + err = r.instance.Rewind(ctx) if err != nil { return fmt.Errorf("while exucuting pg_rewind: %w", err) } diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 6301dda225..0b235cf0ba 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -951,13 +951,13 @@ func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.E "Can't have both legacy synchronous replica configuration and new one")) } - pgVersion, err := r.GetPostgresqlVersion() + pgMajor, err := r.GetPostgresqlMajorVersion() if err != nil { // The validation error will be already raised by the // validateImageName function return result } - if pgVersion.Major() < 13 { + if pgMajor < 13 { result = append(result, field.Invalid( field.NewPath("spec", "imageName"), @@ -966,7 +966,7 @@ func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.E } info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - Version: pgVersion, + MajorVersion: pgMajor, UserSettings: r.Spec.PostgresConfiguration.Parameters, IsReplicaCluster: r.IsReplica(), IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta), @@ -1232,8 +1232,6 @@ func validateSyncReplicaElectionConstraint(constraints apiv1.SyncReplicaElection // to a new one. 
func (v *ClusterCustomValidator) validateImageChange(r, old *apiv1.Cluster) field.ErrorList { var result field.ErrorList - var newVersion, oldVersion version.Data - var err error var fieldPath *field.Path if r.Spec.ImageCatalogRef != nil { fieldPath = field.NewPath("spec", "imageCatalogRef", "major") @@ -1241,45 +1239,36 @@ func (v *ClusterCustomValidator) validateImageChange(r, old *apiv1.Cluster) fiel fieldPath = field.NewPath("spec", "imageName") } - newCluster := r.DeepCopy() - newCluster.Status.Image = "" - newVersion, err = newCluster.GetPostgresqlVersion() + newVersion, err := r.GetPostgresqlMajorVersion() if err != nil { // The validation error will be already raised by the // validateImageName function return result } - old = old.DeepCopy() - if old.Status.MajorVersionUpgradeFromImage != nil { - old.Status.Image = *old.Status.MajorVersionUpgradeFromImage - } - oldVersion, err = old.GetPostgresqlVersion() - if err != nil { - // The validation error will be already raised by the - // validateImageName function + if old.Status.PGDataImageInfo == nil { return result } + oldVersion := old.Status.PGDataImageInfo.MajorVersion - if oldVersion.Major() > newVersion.Major() { + if oldVersion > newVersion { result = append( result, field.Invalid( fieldPath, - fmt.Sprintf("%v", newVersion.Major()), - fmt.Sprintf("can't downgrade from majors %v to %v", - oldVersion.Major(), newVersion.Major()))) + strconv.Itoa(newVersion), + fmt.Sprintf("can't downgrade from major %v to %v", oldVersion, newVersion))) } // TODO: Upgrading to versions 14 and 15 would require carrying information around about the collation used. // See https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=9637badd9. // This is not implemented yet, and users should not upgrade to old versions anyway, so we are blocking it. 
- if oldVersion.Major() < newVersion.Major() && newVersion.Major() < 16 { + if oldVersion < newVersion && newVersion < 16 { result = append( result, field.Invalid( fieldPath, - fmt.Sprintf("%v", newVersion.Major()), + strconv.Itoa(newVersion), "major upgrades are only supported to version 16 or higher")) } return result diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index c266615717..a652acedbc 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -1224,8 +1224,17 @@ var _ = Describe("validate image name change", func() { Context("using image name", func() { It("doesn't complain with no changes", func() { + defaultVersion, err := pgversion.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(err).ToNot(HaveOccurred()) clusterOld := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{}, + Status: apiv1.ClusterStatus{ + Image: versions.DefaultImageName, + PGDataImageInfo: &apiv1.ImageInfo{ + Image: versions.DefaultImageName, + MajorVersion: int(defaultVersion.Major()), + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{}, @@ -1238,6 +1247,13 @@ var _ = Describe("validate image name change", func() { Spec: apiv1.ClusterSpec{ ImageName: "postgres:17.0", }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:17.0", + MajorVersion: 17, + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ @@ -1252,6 +1268,13 @@ var _ = Describe("validate image name change", func() { Spec: apiv1.ClusterSpec{ ImageName: "postgres:17.1", }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:17.1", + MajorVersion: 17, + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ @@ -1273,6 +1296,13 @@ var _ = Describe("validate image name change", func() { Major: 16, }, }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: 16, + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ @@ -1294,6 +1324,13 @@ var _ = Describe("validate image name change", func() { Spec: apiv1.ClusterSpec{ ImageName: "postgres:16.1", }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:16.1", + MajorVersion: 16, + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ @@ -1313,6 +1350,13 @@ var _ = Describe("validate image name change", func() { Spec: apiv1.ClusterSpec{ ImageName: "postgres:17.1", }, + Status: apiv1.ClusterStatus{ + Image: "postgres:17.1", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "postgres:17.1", + MajorVersion: 17, + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ @@ -1328,8 +1372,17 @@ var _ = Describe("validate image name change", func() { Expect(v.validateImageChange(clusterNew, clusterOld)).To(HaveLen(1)) }) It("complains going from default imageName to different major imageCatalogRef", func() { + defaultVersion, err := pgversion.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(err).ToNot(HaveOccurred()) clusterOld := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{}, + Status: apiv1.ClusterStatus{ + Image: versions.DefaultImageName, + PGDataImageInfo: &apiv1.ImageInfo{ + Image: versions.DefaultImageName, + MajorVersion: int(defaultVersion.Major()), + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ @@ -1378,6 +1431,13 @@ var _ = Describe("validate image name change", func() { Major: 17, }, 
}, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: 17, + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ @@ -1397,6 +1457,13 @@ var _ = Describe("validate image name change", func() { Major: 17, }, }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: 17, + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ @@ -1416,6 +1483,13 @@ var _ = Describe("validate image name change", func() { Major: 18, }, }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: 18, + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{}, @@ -1437,6 +1511,13 @@ var _ = Describe("validate image name change", func() { Major: int(version.Major()), }, }, + Status: apiv1.ClusterStatus{ + Image: "test", + PGDataImageInfo: &apiv1.ImageInfo{ + Image: "test", + MajorVersion: int(version.Major()), + }, + }, } clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{}, diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 9ae0787382..a4abbdffc9 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -30,7 +30,6 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/cloudnative-pg/machinery/pkg/postgres/version" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" @@ -70,12 +69,12 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster( cluster *apiv1.Cluster, preserveUserSettings bool, ) (bool, error) { - pgVersion, err := postgresutils.GetPgdataVersion(instance.PgData) + pgMajor, err := postgresutils.GetMajorVersionFromPgData(instance.PgData) if err != nil { return false, err } - postgresConfiguration, sha256 := createPostgresqlConfiguration(cluster, preserveUserSettings, pgVersion.Major) + postgresConfiguration, sha256 := createPostgresqlConfiguration(cluster, preserveUserSettings, pgMajor) postgresConfigurationChanged, err := InstallPgDataFileContent( ctx, instance.PgData, @@ -93,7 +92,7 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster( // GeneratePostgresqlHBA generates the pg_hba.conf content with the LDAP configuration if configured. 
func (instance *Instance) GeneratePostgresqlHBA(cluster *apiv1.Cluster, ldapBindPassword string) (string, error) { - version, err := cluster.GetPostgresqlVersion() + majorVersion, err := cluster.GetPostgresqlMajorVersion() if err != nil { return "", err } @@ -106,7 +105,7 @@ func (instance *Instance) GeneratePostgresqlHBA(cluster *apiv1.Cluster, ldapBind // See: // https://www.postgresql.org/docs/14/release-14.html defaultAuthenticationMethod := "scram-sha-256" - if version.Major() < 14 { + if majorVersion < 14 { defaultAuthenticationMethod = "md5" } @@ -382,11 +381,11 @@ func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (chan func createPostgresqlConfiguration( cluster *apiv1.Cluster, preserveUserSettings bool, - majorVersion uint64, + majorVersion int, ) (string, string) { info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, - Version: version.New(majorVersion, 0), + MajorVersion: majorVersion, UserSettings: cluster.Spec.PostgresConfiguration.Parameters, IncludingSharedPreloadLibraries: true, AdditionalSharedPreloadLibraries: cluster.Spec.PostgresConfiguration.AdditionalLibraries, diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go index 7b1dc538ac..54bceec9da 100644 --- a/pkg/management/postgres/configuration_test.go +++ b/pkg/management/postgres/configuration_test.go @@ -124,6 +124,7 @@ var _ = Describe("testing the building of the ldap config string", func() { var _ = Describe("Test building of the list of temporary tablespaces", func() { defaultVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) Expect(err).ToNot(HaveOccurred()) + defaultMajor := int(defaultVersion.Major()) clusterWithoutTablespaces := apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -175,17 +176,17 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() { } It("doesn't set temp_tablespaces if there are no declared tablespaces", func() { - config, _ := createPostgresqlConfiguration(&clusterWithoutTablespaces, true, defaultVersion.Major()) + config, _ := createPostgresqlConfiguration(&clusterWithoutTablespaces, true, defaultMajor) Expect(config).ToNot(ContainSubstring("temp_tablespaces")) }) It("doesn't set temp_tablespaces if there are no temporary tablespaces", func() { - config, _ := createPostgresqlConfiguration(&clusterWithoutTemporaryTablespaces, true, defaultVersion.Major()) + config, _ := createPostgresqlConfiguration(&clusterWithoutTemporaryTablespaces, true, defaultMajor) Expect(config).ToNot(ContainSubstring("temp_tablespaces")) }) It("sets temp_tablespaces when there are temporary tablespaces", func() { - config, _ := createPostgresqlConfiguration(&clusterWithTemporaryTablespaces, true, defaultVersion.Major()) + config, _ := createPostgresqlConfiguration(&clusterWithTemporaryTablespaces, true, defaultMajor) Expect(config).To(ContainSubstring("temp_tablespaces = 'other_temporary_tablespace,temporary_tablespace'")) }) }) @@ -193,6 +194,7 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() { var _ = Describe("recovery_min_apply_delay", func() { defaultVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) Expect(err).ToNot(HaveOccurred()) + defaultMajor := int(defaultVersion.Major()) primaryCluster := apiv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -242,21 +244,21 @@ var _ = Describe("recovery_min_apply_delay", func() { It("do not set recovery_min_apply_delay in primary clusters", func() { 
Expect(primaryCluster.IsReplica()).To(BeFalse()) - config, _ := createPostgresqlConfiguration(&primaryCluster, true, defaultVersion.Major()) + config, _ := createPostgresqlConfiguration(&primaryCluster, true, defaultMajor) Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay")) }) It("set recovery_min_apply_delay in replica clusters when set", func() { Expect(replicaCluster.IsReplica()).To(BeTrue()) - config, _ := createPostgresqlConfiguration(&replicaCluster, true, defaultVersion.Major()) + config, _ := createPostgresqlConfiguration(&replicaCluster, true, defaultMajor) Expect(config).To(ContainSubstring("recovery_min_apply_delay = '3600s'")) }) It("do not set recovery_min_apply_delay in replica clusters when not set", func() { Expect(replicaClusterWithNoDelay.IsReplica()).To(BeTrue()) - config, _ := createPostgresqlConfiguration(&replicaClusterWithNoDelay, true, defaultVersion.Major()) + config, _ := createPostgresqlConfiguration(&replicaClusterWithNoDelay, true, defaultMajor) Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay")) }) }) diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 4116862840..44e03b571a 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -1034,7 +1034,7 @@ func (instance *Instance) removePgControlFileBackup() error { // Rewind uses pg_rewind to align this data directory with the contents of the primary node. // If postgres major version is >= 13, add "--restore-target-wal" option -func (instance *Instance) Rewind(ctx context.Context, postgresVersion semver.Version) error { +func (instance *Instance) Rewind(ctx context.Context) error { contextLogger := log.FromContext(ctx) // Signal the liveness probe that we are running pg_rewind before starting postgres @@ -1052,17 +1052,13 @@ func (instance *Instance) Rewind(ctx context.Context, postgresVersion semver.Ver "--target-pgdata", instance.PgData, } - // As PostgreSQL 13 introduces support of restore from the WAL archive in pg_rewind, - // let’s automatically use it, if possible - if postgresVersion.Major >= 13 { - // make sure restore_command is set in override.conf - if _, err := configurePostgresOverrideConfFile(instance.PgData, primaryConnInfo, ""); err != nil { - return err - } - - options = append(options, "--restore-target-wal") + // make sure restore_command is set in override.conf + if _, err := configurePostgresOverrideConfFile(instance.PgData, primaryConnInfo, ""); err != nil { + return err } + options = append(options, "--restore-target-wal") + // Make sure PostgreSQL control file is not empty err := instance.managePgControlFileBackup() if err != nil { diff --git a/pkg/management/postgres/utils/version.go b/pkg/management/postgres/utils/version.go index 91b489f9d5..f7da31f517 100644 --- a/pkg/management/postgres/utils/version.go +++ b/pkg/management/postgres/utils/version.go @@ -53,18 +53,13 @@ func parseVersionNum(versionNum string) (*semver.Version, error) { }, nil } -// GetPgdataVersion read the PG_VERSION file in the data directory +// GetMajorVersionFromPgData read the PG_VERSION file in the data directory // returning the major version of the database -func GetPgdataVersion(pgData string) (semver.Version, error) { +func GetMajorVersionFromPgData(pgData string) (int, error) { content, err := os.ReadFile(path.Join(pgData, "PG_VERSION")) // #nosec if err != nil { - return semver.Version{}, err + return 0, err } - major, err := strconv.ParseUint(strings.TrimSpace(string(content)), 10, 64) - if err != 
nil { - return semver.Version{}, err - } - - return semver.Version{Major: major}, nil + return strconv.Atoi(strings.TrimSpace(string(content))) } diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 936de3c1e0..3c577d6a98 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -28,8 +28,6 @@ import ( "strings" "text/template" "time" - - "github.com/cloudnative-pg/machinery/pkg/postgres/version" ) // WalLevelValue a value that is assigned to the 'wal_level' configuration field @@ -249,16 +247,13 @@ var hbaTemplate = template.Must(template.New("pg_hba.conf").Parse(hbaTemplateStr // identTemplate is the template used to create the HBA configuration var identTemplate = template.Must(template.New("pg_ident.conf").Parse(identTemplateString)) -// MajorVersionRangeUnlimited is used to represent an unbound limit in a MajorVersionRange -var MajorVersionRangeUnlimited = version.Data{} - -// VersionRange is used to represent a range of PostgreSQL versions -type VersionRange struct { - // The minimum limit of PostgreSQL major version, extreme included - Min version.Data +// MajorVersionRange represents a range of PostgreSQL major versions. +type MajorVersionRange struct { + // Min is the inclusive lower bound of the PostgreSQL major version range. + Min int - // The maximum limit of PostgreSQL version, extreme excluded, or MajorVersionRangeUnlimited - Max version.Data + // Max is the exclusive upper bound of the PostgreSQL major version range. + Max int } // SettingsCollection is a collection of PostgreSQL settings @@ -274,7 +269,7 @@ type ConfigurationSettings struct { // The following settings are like GlobalPostgresSettings // but are relative only to certain PostgreSQL versions - DefaultSettings map[VersionRange]SettingsCollection + DefaultSettings map[MajorVersionRange]SettingsCollection // The following settings are applied to the final PostgreSQL configuration, // even if the user specified something different @@ -294,7 +289,7 @@ type ConfigurationInfo struct { Settings ConfigurationSettings // The PostgreSQL version - Version version.Data + MajorVersion int // The list of user-level settings UserSettings map[string]string @@ -642,7 +637,7 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { configuration.OverwriteConfig(key, value) } - if info.Version.Major() >= 17 { + if info.MajorVersion >= 17 { configuration.OverwriteConfig("allow_alter_system", info.getAlterSystemEnabledValue()) } } @@ -711,14 +706,9 @@ func setDefaultConfigurations(info ConfigurationInfo, configuration *PgConfigura // apply settings relative to a certain PostgreSQL version for constraints, settings := range info.Settings.DefaultSettings { - if constraints.Min == MajorVersionRangeUnlimited || - constraints.Min == info.Version || - constraints.Min.Less(info.Version) { - if constraints.Max == MajorVersionRangeUnlimited || - info.Version.Less(constraints.Max) { - for key, value := range settings { - configuration.OverwriteConfig(key, value) - } + if constraints.Min <= info.MajorVersion && info.MajorVersion < constraints.Max { + for key, value := range settings { + configuration.OverwriteConfig(key, value) } } } diff --git a/pkg/postgres/configuration_test.go b/pkg/postgres/configuration_test.go index 68adc286d3..d27c8becc1 100644 --- a/pkg/postgres/configuration_test.go +++ b/pkg/postgres/configuration_test.go @@ -23,8 +23,6 @@ import ( "strings" "time" - "github.com/cloudnative-pg/machinery/pkg/postgres/version" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -37,7 +35,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("apply the default settings", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(10, 0), + MajorVersion: 17, UserSettings: settings, IncludingMandatory: true, } @@ -48,8 +46,8 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("enforce the mandatory values", func() { info := ConfigurationInfo{ - Settings: CnpgConfigurationSettings, - Version: version.New(10, 0), + Settings: CnpgConfigurationSettings, + MajorVersion: 17, UserSettings: map[string]string{ "hot_standby": "off", }, @@ -62,7 +60,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("generate a config file", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(10, 0), + MajorVersion: 17, UserSettings: settings, IncludingMandatory: true, } @@ -87,7 +85,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("will use appropriate settings", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, UserSettings: settings, IncludingMandatory: true, } @@ -102,7 +100,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("will set archive_mode to always", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, UserSettings: settings, IncludingMandatory: true, IsReplicaCluster: true, @@ -116,7 +114,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("will set archive_mode to on", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, UserSettings: settings, IncludingMandatory: true, IsReplicaCluster: false, @@ -129,7 +127,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("adds shared_preload_library correctly", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, AdditionalSharedPreloadLibraries: []string{"some_library", "another_library", ""}, @@ -143,8 +141,8 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("checks if PreserveFixedSettingsFromUser works properly", func() { info := ConfigurationInfo{ - Settings: CnpgConfigurationSettings, - Version: version.New(10, 0), + Settings: CnpgConfigurationSettings, + MajorVersion: 13, UserSettings: map[string]string{ "ssl": "off", "recovery_target_name": "test", @@ -187,7 +185,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("can properly set allow_alter_system to on", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: true, - Version: version.New(17, 0), + MajorVersion: 17, IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -197,7 +195,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("can properly set allow_alter_system to off", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: false, - Version: version.New(18, 0), + MajorVersion: 18, IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -209,7 +207,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("should not set allow_alter_system", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: false, - Version: version.New(14, 0), + MajorVersion: 14, 
IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -220,7 +218,7 @@ var _ = Describe("PostgreSQL configuration creation", func() { It("should not set allow_alter_system", func() { info := ConfigurationInfo{ IsAlterSystemEnabled: true, - Version: version.New(14, 0), + MajorVersion: 14, IncludingMandatory: true, } config := CreatePostgresqlConfiguration(info) @@ -299,7 +297,7 @@ var _ = Describe("pgaudit", func() { It("adds pgaudit to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, UserSettings: map[string]string{"pgaudit.something": "something"}, IncludingSharedPreloadLibraries: true, IncludingMandatory: true, @@ -316,7 +314,7 @@ var _ = Describe("pgaudit", func() { It("adds pg_stat_statements to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, UserSettings: map[string]string{"pg_stat_statements.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, @@ -332,8 +330,8 @@ var _ = Describe("pgaudit", func() { It("adds pg_stat_statements and pgaudit to shared_preload_library", func() { info := ConfigurationInfo{ - Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + Settings: CnpgConfigurationSettings, + MajorVersion: 13, UserSettings: map[string]string{ "pg_stat_statements.something": "something", "pgaudit.somethingelse": "somethingelse", @@ -353,7 +351,7 @@ var _ = Describe("pg_failover_slots", func() { It("adds pg_failover_slots to shared_preload_library", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, @@ -370,7 +368,7 @@ var _ = Describe("recovery_min_apply_delay", func() { It("is not added when zero", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, @@ -383,7 +381,7 @@ var _ = Describe("recovery_min_apply_delay", func() { It("is added to the configuration when specified", func() { info := ConfigurationInfo{ Settings: CnpgConfigurationSettings, - Version: version.New(13, 0), + MajorVersion: 13, UserSettings: map[string]string{"pg_failover_slots.something": "something"}, IncludingMandatory: true, IncludingSharedPreloadLibraries: true, diff --git a/pkg/reconciler/majorupgrade/job.go b/pkg/reconciler/majorupgrade/job.go index f72690ab37..0c1ec4811b 100644 --- a/pkg/reconciler/majorupgrade/job.go +++ b/pkg/reconciler/majorupgrade/job.go @@ -54,8 +54,6 @@ func getTargetImageFromMajorUpgradeJob(job *batchv1.Job) (string, bool) { // createMajorUpgradeJobDefinition creates a job to upgrade the primary node to a new Postgres major version func createMajorUpgradeJobDefinition(cluster *apiv1.Cluster, nodeSerial int) *batchv1.Job { - oldImage := *cluster.Status.MajorVersionUpgradeFromImage - prepareCommand := []string{ "/controller/manager", "instance", @@ -65,7 +63,7 @@ func createMajorUpgradeJobDefinition(cluster *apiv1.Cluster, nodeSerial int) *ba } oldVersionInitContainer := corev1.Container{ Name: "prepare", - Image: oldImage, + Image: cluster.Status.PGDataImageInfo.Image, ImagePullPolicy: 
cluster.Spec.ImagePullPolicy, Command: prepareCommand, VolumeMounts: specs.CreatePostgresVolumeMounts(*cluster), diff --git a/pkg/reconciler/majorupgrade/job_test.go b/pkg/reconciler/majorupgrade/job_test.go index 6f15aee5ee..329140d5c1 100644 --- a/pkg/reconciler/majorupgrade/job_test.go +++ b/pkg/reconciler/majorupgrade/job_test.go @@ -30,7 +30,10 @@ import ( ) var _ = Describe("Major upgrade Job generation", func() { - oldImageName := "postgres:16" + oldImageInfo := &apiv1.ImageInfo{ + Image: "postgres:16", + MajorVersion: 16, + } newImageName := "postgres:17" cluster := apiv1.Cluster{ @@ -41,8 +44,8 @@ var _ = Describe("Major upgrade Job generation", func() { }, }, Status: apiv1.ClusterStatus{ - Image: newImageName, - MajorVersionUpgradeFromImage: &oldImageName, + Image: newImageName, + PGDataImageInfo: oldImageInfo.DeepCopy(), }, } diff --git a/pkg/reconciler/majorupgrade/reconciler.go b/pkg/reconciler/majorupgrade/reconciler.go index ed150dc81b..b75e8c6f0f 100644 --- a/pkg/reconciler/majorupgrade/reconciler.go +++ b/pkg/reconciler/majorupgrade/reconciler.go @@ -64,15 +64,14 @@ func Reconcile( return majorVersionUpgradeHandleCompletion(ctx, c, cluster, majorUpgradeJob, pvcs) } - if cluster.Status.MajorVersionUpgradeFromImage == nil { - return nil, nil - } - - desiredVersion, err := cluster.GetPostgresqlVersion() + requestedMajor, err := cluster.GetPostgresqlMajorVersion() if err != nil { - contextLogger.Error(err, "Unable to retrieve the new PostgreSQL version") + contextLogger.Error(err, "Unable to retrieve the requested PostgreSQL version") return nil, err } + if requestedMajor <= cluster.Status.PGDataImageInfo.MajorVersion { + return nil, nil + } primaryNodeSerial, err := getPrimarySerial(pvcs) if err != nil || primaryNodeSerial == 0 { @@ -81,10 +80,10 @@ func Reconcile( } contextLogger.Info("Reconciling in-place major version upgrades", - "primaryNodeSerial", primaryNodeSerial, "desiredVersion", desiredVersion.Major()) + "primaryNodeSerial", primaryNodeSerial, "requestedMajor", requestedMajor) err = registerPhase(ctx, c, cluster, apiv1.PhaseMajorUpgrade, - fmt.Sprintf("Upgrading cluster to major version %v", desiredVersion.Major())) + fmt.Sprintf("Upgrading cluster to major version %v", requestedMajor)) if err != nil { return nil, err } @@ -232,11 +231,20 @@ func majorVersionUpgradeHandleCompletion( return nil, ErrIncoherentMajorUpgradeJob } + requestedMajor, err := cluster.GetPostgresqlMajorVersion() + if err != nil { + contextLogger.Error(err, "Unable to retrieve the requested PostgreSQL version") + return nil, err + } + if err := status.PatchWithOptimisticLock( ctx, c, cluster, - status.SetMajorVersionUpgradeFromImage(&jobImage), + status.SetPGDataImageInfo(&apiv1.ImageInfo{ + Image: jobImage, + MajorVersion: requestedMajor, + }), ); err != nil { contextLogger.Error(err, "Unable to update cluster status after major upgrade completed.") return nil, err diff --git a/pkg/reconciler/majorupgrade/reconciler_test.go b/pkg/reconciler/majorupgrade/reconciler_test.go index ea7cdfb81c..3a307575ab 100644 --- a/pkg/reconciler/majorupgrade/reconciler_test.go +++ b/pkg/reconciler/majorupgrade/reconciler_test.go @@ -65,6 +65,9 @@ var _ = Describe("Major upgrade job status reconciliation", func() { ObjectMeta: metav1.ObjectMeta{ Name: "cluster-example", }, + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:16", + }, } pvcs := []corev1.PersistentVolumeClaim{ buildPrimaryPVC(1), @@ -100,8 +103,8 @@ var _ = Describe("Major upgrade job status reconciliation", func() { } // the upgrade has been 
marked as done - Expect(cluster.Status.MajorVersionUpgradeFromImage).ToNot(BeNil()) - Expect(*cluster.Status.MajorVersionUpgradeFromImage).To(Equal("postgres:16")) + Expect(cluster.Status.PGDataImageInfo.Image).To(Equal("postgres:16")) + Expect(cluster.Status.PGDataImageInfo.MajorVersion).To(Equal(16)) // the job has been deleted var tempJob batchv1.Job diff --git a/pkg/resources/status/transactions.go b/pkg/resources/status/transactions.go index 4c9f678f28..d758ee3477 100644 --- a/pkg/resources/status/transactions.go +++ b/pkg/resources/status/transactions.go @@ -70,10 +70,9 @@ func SetImage(image string) Transaction { } } -// SetMajorVersionUpgradeFromImage is a transaction that sets the cluster as upgrading to a newer major version -// starting from the provided image -func SetMajorVersionUpgradeFromImage(image *string) Transaction { +// SetPGDataImageInfo is a transaction that sets the PGDataImageInfo +func SetPGDataImageInfo(imageInfo *apiv1.ImageInfo) Transaction { return func(cluster *apiv1.Cluster) { - cluster.Status.MajorVersionUpgradeFromImage = image + cluster.Status.PGDataImageInfo = imageInfo } } From 68260d2694d9aa5827af4b77b8821d1e6b7c2c92 Mon Sep 17 00:00:00 2001 From: Zach Stone Date: Mon, 5 May 2025 06:03:06 -0400 Subject: [PATCH 552/836] docs: add Giant Swarm to ADOPTERS.md (#7471) :wave: CloudNativePG is a really great project! We'd be happy to be listed as an adopter Signed-off-by: Zach Stone --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index a9a556254c..b2c4ad1816 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -62,3 +62,4 @@ This list is sorted in chronological order, based on the submission date. | [Obmondo](https://obmondo.com) | @Obmondo | 2024-11-25 | At Obmondo we use CloudNativePG in our open-source Kubernetes meta-management platform called [KubeAid](https://kubeaid.io/) to easily manage all PostgreSQL databases across clusters from a centralized interface. | | [Mirakl](https://www.mirakl.com/) | @ThomasBoussekey | 2025-02-03 | CloudNativePG is our default hosting solution for marketplace instances. With over 300 CloudNativePG clusters managing 8 TB of data, we have developed highly customizable Helm charts that support connection pooling, logical replication, and many other advanced features. | | [Bitnami](https://bitnami.com) | [@carrodher](https://github.com/carrodher) | 2025-03-04 | Bitnami provides CloudNativePG as part of its open-source [Helm charts catalog](https://github.com/bitnami/charts), enabling users to easily deploy PostgreSQL clusters on Kubernetes. Additionally, CloudNativePG is available through [Tanzu Application Catalog](https://www.vmware.com/products/app-platform/tanzu-application-catalog) and [Bitnami Premium](https://www.arrow.com/globalecs/na/vendors/bitnami-premium/), where customers can benefit from advanced security and compliance features such as VEX, SBOM, SLSA3, and CVE scanning. | +| [Giant Swarm](https://www.giantswarm.io/) | [@stone-z](https://github.com/stone-z) | 2025-05-02 | Giant Swarm's full-service Kubernetes security and observability platforms are powered by PostgreSQL clusters delightfully managed with CloudNativePG. 
| From 1c9cec9538066d30d7c0466e05ddc43c184f8e19 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 6 May 2025 10:07:25 +0200 Subject: [PATCH 553/836] refactor: centralize `pgControlData` access logic (#7377) Signed-off-by: Armando Ruocco --- .../manager/instance/upgrade/execute/cmd.go | 23 ++-- .../management/controller/instance_token.go | 6 +- pkg/management/postgres/instance.go | 11 +- pkg/management/postgres/webserver/remote.go | 2 +- .../backup/volumesnapshot/reconciler.go | 4 +- .../replicaclusterswitch/shutdown_wal.go | 8 +- pkg/utils/parser.go | 118 ++++++++++++++---- pkg/utils/parser_test.go | 6 +- 8 files changed, 117 insertions(+), 61 deletions(-) diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index f85170ad2b..97376d66b9 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -318,15 +318,14 @@ func runInitDB(destDir string, walDir *string, pgControlData map[string]string, return nil } -// TODO: refactor it should be a method of pgControlData func tryAddDataChecksums( - pgControlData map[string]string, + pgControlData utils.PgControlData, targetMajorVersion int, options []string, ) ([]string, error) { - dataPageChecksumVersion, ok := pgControlData[utils.PgControlDataDataPageChecksumVersion] - if !ok { - return nil, fmt.Errorf("no '%s' section into pg_controldata output", utils.PgControlDataDataPageChecksumVersion) + dataPageChecksumVersion, err := pgControlData.GetDataPageChecksumVersion() + if err != nil { + return nil, err } if dataPageChecksumVersion != "1" { @@ -340,18 +339,10 @@ func tryAddDataChecksums( return append(options, "--data-checksums"), nil } -// TODO: refactor it should be a method of pgControlData -func tryAddWalSegmentSize(pgControlData map[string]string, options []string) ([]string, error) { - walSegmentSizeString, ok := pgControlData[utils.PgControlDataBytesPerWALSegment] - if !ok { - return nil, fmt.Errorf("no '%s' section into pg_controldata output", utils.PgControlDataBytesPerWALSegment) - } - - walSegmentSize, err := strconv.Atoi(walSegmentSizeString) +func tryAddWalSegmentSize(pgControlData utils.PgControlData, options []string) ([]string, error) { + walSegmentSize, err := pgControlData.GetBytesPerWALSegment() if err != nil { - return nil, fmt.Errorf( - "wrong '%s' pg_controldata value (not an integer): '%s' %w", - utils.PgControlDataBytesPerWALSegment, walSegmentSizeString, err) + return nil, fmt.Errorf("error while reading the WAL segment size: %w", err) } param := "--wal-segsize=" + strconv.Itoa(walSegmentSize/(1024*1024)) diff --git a/internal/management/controller/instance_token.go b/internal/management/controller/instance_token.go index 564e62a7aa..dbc94e90ed 100644 --- a/internal/management/controller/instance_token.go +++ b/internal/management/controller/instance_token.go @@ -75,9 +75,9 @@ func (r *InstanceReconciler) verifyPromotionToken(cluster *apiv1.Cluster) error } parsedControlData := utils.ParsePgControldataOutput(out) - currentTimelineIDString := parsedControlData[utils.PgControlDataKeyLatestCheckpointTimelineID] - currentSystemIdentifier := parsedControlData[utils.PgControlDataKeyDatabaseSystemIdentifier] - replayLSNString := parsedControlData[utils.PgControlDataKeyLatestCheckpointREDOLocation] + currentTimelineIDString := parsedControlData.GetLatestCheckpointTimelineID() + currentSystemIdentifier := parsedControlData.GetDatabaseSystemIdentifier() + replayLSNString := 
parsedControlData.GetLatestCheckpointREDOLocation() return promotiontoken.ValidateAgainstInstanceStatus(promotionToken, currentSystemIdentifier, currentTimelineIDString, replayLSNString) diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 44e03b571a..4698b4e438 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -294,16 +294,9 @@ func (instance *Instance) CheckHasDiskSpaceForWAL(ctx context.Context) (bool, er } pgControlData := utils.ParsePgControldataOutput(pgControlDataString) - walSegmentSizeString, ok := pgControlData[utils.PgControlDataBytesPerWALSegment] - if !ok { - return false, fmt.Errorf("no 'Bytes per WAL segment' section into pg_controldata output") - } - - walSegmentSize, err := strconv.Atoi(walSegmentSizeString) + walSegmentSize, err := pgControlData.GetBytesPerWALSegment() if err != nil { - return false, fmt.Errorf( - "wrong 'Bytes per WAL segment' pg_controldata value (not an integer): '%s' %w", - walSegmentSizeString, err) + return false, err } walDirectory := path.Join(instance.PgData, pgWalDirectory) diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index d9c6d545dd..cf6656e93e 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -484,7 +484,7 @@ func (ws *remoteWebserverEndpoints) pgArchivePartial(w http.ResponseWriter, req } data := utils.ParsePgControldataOutput(out) - walFile := data[utils.PgControlDataKeyREDOWALFile] + walFile := data.GetREDOWALFile() if walFile == "" { sendBadRequestJSONResponse(w, "COULD_NOT_PARSE_REDOWAL_FILE", "") return diff --git a/pkg/reconciler/backup/volumesnapshot/reconciler.go b/pkg/reconciler/backup/volumesnapshot/reconciler.go index 47ffb826a4..7900e03359 100644 --- a/pkg/reconciler/backup/volumesnapshot/reconciler.go +++ b/pkg/reconciler/backup/volumesnapshot/reconciler.go @@ -99,11 +99,11 @@ func (se *Reconciler) enrichSnapshot( if data, err := se.instanceStatusClient.GetPgControlDataFromInstance(ctx, targetPod); err == nil { vs.Annotations[utils.PgControldataAnnotationName] = data pgControlData := utils.ParsePgControldataOutput(data) - timelineID, ok := pgControlData[utils.PgControlDataKeyLatestCheckpointTimelineID] + timelineID, ok := pgControlData.TryGetLatestCheckpointTimelineID() if ok { vs.Labels[utils.BackupTimelineLabelName] = timelineID } - startWal, ok := pgControlData[utils.PgControlDataKeyREDOWALFile] + startWal, ok := pgControlData.TryGetREDOWALFile() if ok { vs.Annotations[utils.BackupStartWALAnnotationName] = startWal // TODO: once we have online volumesnapshot backups, this should change diff --git a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go index 1a04d63280..bfa948aff8 100644 --- a/pkg/reconciler/replicaclusterswitch/shutdown_wal.go +++ b/pkg/reconciler/replicaclusterswitch/shutdown_wal.go @@ -70,7 +70,7 @@ func generateDemotionToken( return "", fmt.Errorf("could not get pg_controldata from Pod %s: %w", primaryInstance.Pod.Name, err) } parsed := utils.ParsePgControldataOutput(rawPgControlData) - pgDataState := parsed[utils.PgControlDataDatabaseClusterStateKey] + pgDataState := parsed.GetDatabaseClusterState() if !utils.PgDataState(pgDataState).IsShutdown(ctx) { // PostgreSQL is still not shut down, waiting @@ -78,7 +78,7 @@ func generateDemotionToken( return "", errPostgresNotShutDown } - token, err := utils.CreatePromotionToken(parsed) + token, err := 
parsed.CreatePromotionToken() if err != nil { return "", err } @@ -92,9 +92,9 @@ func generateDemotionToken( return "", fmt.Errorf("could not archive shutdown checkpoint wal file: %w", err) } - if parsed[utils.PgControlDataKeyREDOWALFile] != partialArchiveWALName { + if parsed.GetREDOWALFile() != partialArchiveWALName { return "", fmt.Errorf("unexpected shutdown checkpoint wal file archived, expected: %s, got: %s", - parsed[utils.PgControlDataKeyREDOWALFile], + parsed.GetREDOWALFile(), partialArchiveWALName, ) } diff --git a/pkg/utils/parser.go b/pkg/utils/parser.go index f039ebcb57..3cba855094 100644 --- a/pkg/utils/parser.go +++ b/pkg/utils/parser.go @@ -24,6 +24,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "strconv" "strings" "github.com/cloudnative-pg/machinery/pkg/log" @@ -34,37 +35,108 @@ import ( type pgControlDataKey = string const ( - // PgControlDataKeyLatestCheckpointTimelineID is the + // pgControlDataKeyLatestCheckpointTimelineID is the // latest checkpoint's TimeLineID pg_controldata entry - PgControlDataKeyLatestCheckpointTimelineID pgControlDataKey = "Latest checkpoint's TimeLineID" + pgControlDataKeyLatestCheckpointTimelineID pgControlDataKey = "Latest checkpoint's TimeLineID" - // PgControlDataKeyREDOWALFile is the latest checkpoint's + // pgControlDataKeyREDOWALFile is the latest checkpoint's // REDO WAL file pg_controldata entry - PgControlDataKeyREDOWALFile pgControlDataKey = "Latest checkpoint's REDO WAL file" + pgControlDataKeyREDOWALFile pgControlDataKey = "Latest checkpoint's REDO WAL file" - // PgControlDataKeyDatabaseSystemIdentifier is the database + // pgControlDataKeyDatabaseSystemIdentifier is the database // system identifier pg_controldata entry - PgControlDataKeyDatabaseSystemIdentifier pgControlDataKey = "Database system identifier" + pgControlDataKeyDatabaseSystemIdentifier pgControlDataKey = "Database system identifier" - // PgControlDataKeyLatestCheckpointREDOLocation is the latest + // pgControlDataKeyLatestCheckpointREDOLocation is the latest // checkpoint's REDO location pg_controldata entry - PgControlDataKeyLatestCheckpointREDOLocation pgControlDataKey = "Latest checkpoint's REDO location" + pgControlDataKeyLatestCheckpointREDOLocation pgControlDataKey = "Latest checkpoint's REDO location" - // PgControlDataKeyTimeOfLatestCheckpoint is the time + // pgControlDataKeyTimeOfLatestCheckpoint is the time // of latest checkpoint pg_controldata entry - PgControlDataKeyTimeOfLatestCheckpoint pgControlDataKey = "Time of latest checkpoint" + pgControlDataKeyTimeOfLatestCheckpoint pgControlDataKey = "Time of latest checkpoint" - // PgControlDataDatabaseClusterStateKey is the status + // pgControlDataDatabaseClusterStateKey is the status // of the latest primary that run on this data directory. 
- PgControlDataDatabaseClusterStateKey pgControlDataKey = "Database cluster state" + pgControlDataDatabaseClusterStateKey pgControlDataKey = "Database cluster state" - // PgControlDataDataPageChecksumVersion reports whether the checksums are enabled in the cluster - PgControlDataDataPageChecksumVersion pgControlDataKey = "Data page checksum version" + // pgControlDataDataPageChecksumVersion reports whether the checksums are enabled in the cluster + pgControlDataDataPageChecksumVersion pgControlDataKey = "Data page checksum version" - // PgControlDataBytesPerWALSegment reports the size of the WAL segments - PgControlDataBytesPerWALSegment pgControlDataKey = "Bytes per WAL segment" + // pgControlDataBytesPerWALSegment reports the size of the WAL segments + pgControlDataBytesPerWALSegment pgControlDataKey = "Bytes per WAL segment" ) +// PgControlData represents the parsed output of pg_controldata +type PgControlData map[pgControlDataKey]string + +// GetLatestCheckpointTimelineID returns the latest checkpoint's TimeLineID +func (p PgControlData) GetLatestCheckpointTimelineID() string { + return p[pgControlDataKeyLatestCheckpointTimelineID] +} + +// TryGetLatestCheckpointTimelineID returns the latest checkpoint's TimeLineID +func (p PgControlData) TryGetLatestCheckpointTimelineID() (string, bool) { + v, ok := p[pgControlDataKeyLatestCheckpointTimelineID] + return v, ok +} + +// GetREDOWALFile returns the latest checkpoint's REDO WAL file +func (p PgControlData) GetREDOWALFile() string { + return p[pgControlDataKeyREDOWALFile] +} + +// TryGetREDOWALFile returns the latest checkpoint's REDO WAL file +func (p PgControlData) TryGetREDOWALFile() (string, bool) { + v, ok := p[pgControlDataKeyREDOWALFile] + return v, ok +} + +// GetDatabaseSystemIdentifier returns the database system identifier +func (p PgControlData) GetDatabaseSystemIdentifier() string { + return p[pgControlDataKeyDatabaseSystemIdentifier] +} + +// GetLatestCheckpointREDOLocation returns the latest checkpoint's REDO location +func (p PgControlData) GetLatestCheckpointREDOLocation() string { + return p[pgControlDataKeyLatestCheckpointREDOLocation] +} + +// GetTimeOfLatestCheckpoint returns the time of latest checkpoint +func (p PgControlData) GetTimeOfLatestCheckpoint() string { + return p[pgControlDataKeyTimeOfLatestCheckpoint] +} + +// GetDatabaseClusterState returns the status of the latest primary that ran on this data directory +func (p PgControlData) GetDatabaseClusterState() string { + return p[pgControlDataDatabaseClusterStateKey] +} + +// GetDataPageChecksumVersion returns whether the checksums are enabled in the cluster +func (p PgControlData) GetDataPageChecksumVersion() (string, error) { + value, ok := p[pgControlDataDataPageChecksumVersion] + if !ok { + return "", fmt.Errorf("no '%s' section in pg_controldata output", pgControlDataDataPageChecksumVersion) + } + return value, nil +} + +// GetBytesPerWALSegment returns the size of the WAL segments +func (p PgControlData) GetBytesPerWALSegment() (int, error) { + value, ok := p[pgControlDataBytesPerWALSegment] + if !ok { + return 0, fmt.Errorf("no '%s' section in pg_controldata output", pgControlDataBytesPerWALSegment) + } + + walSegmentSize, err := strconv.Atoi(value) + if err != nil { + return 0, fmt.Errorf( + "wrong '%s' pg_controldata value (not an integer): '%s' %w", + pgControlDataBytesPerWALSegment, value, err) + } + + return walSegmentSize, nil +} + // PgDataState represents the "Database cluster state" field of pg_controldata type PgDataState string @@ -88,7 +160,7 
@@ func (state PgDataState) IsShutdown(ctx context.Context) bool { } // ParsePgControldataOutput parses a pg_controldata output into a map of key-value pairs -func ParsePgControldataOutput(data string) map[pgControlDataKey]string { +func ParsePgControldataOutput(data string) PgControlData { pairs := make(map[string]string) lines := strings.Split(data, "\n") for _, line := range lines { @@ -236,13 +308,13 @@ var ( ) // CreatePromotionToken translates a parsed pgControlData into a JSON token -func CreatePromotionToken(pgDataMap map[string]string) (string, error) { +func (p PgControlData) CreatePromotionToken() (string, error) { content := PgControldataTokenContent{ - LatestCheckpointTimelineID: pgDataMap[PgControlDataKeyLatestCheckpointTimelineID], - REDOWALFile: pgDataMap[PgControlDataKeyREDOWALFile], - DatabaseSystemIdentifier: pgDataMap[PgControlDataKeyDatabaseSystemIdentifier], - LatestCheckpointREDOLocation: pgDataMap[PgControlDataKeyLatestCheckpointREDOLocation], - TimeOfLatestCheckpoint: pgDataMap[PgControlDataKeyTimeOfLatestCheckpoint], + LatestCheckpointTimelineID: p.GetLatestCheckpointTimelineID(), + REDOWALFile: p.GetREDOWALFile(), + DatabaseSystemIdentifier: p.GetDatabaseSystemIdentifier(), + LatestCheckpointREDOLocation: p.GetLatestCheckpointREDOLocation(), + TimeOfLatestCheckpoint: p.GetTimeOfLatestCheckpoint(), OperatorVersion: versions.Info.Version, } diff --git a/pkg/utils/parser_test.go b/pkg/utils/parser_test.go index 9e34831005..f550566ea9 100644 --- a/pkg/utils/parser_test.go +++ b/pkg/utils/parser_test.go @@ -109,7 +109,7 @@ var _ = Describe("promotion token creation", func() { return err } - token, err := CreatePromotionToken(parsedControlData) + token, err := parsedControlData.CreatePromotionToken() Expect(err).ToNot(HaveOccurred()) Expect(token).ToNot(BeEmpty()) Expect(decodeBase64(token)).To(Succeed()) @@ -120,7 +120,7 @@ var _ = Describe("promotion token parser", func() { It("parses a newly generated promotion token", func() { parsedControlData := ParsePgControldataOutput(fakeControlData) - token, err := CreatePromotionToken(parsedControlData) + token, err := parsedControlData.CreatePromotionToken() Expect(err).ToNot(HaveOccurred()) tokenContent, err := ParsePgControldataToken(token) @@ -155,7 +155,7 @@ var _ = Describe("promotion token validation", func() { It("validates a newly generated promotion token", func() { parsedControlData := ParsePgControldataOutput(fakeControlData) - token, err := CreatePromotionToken(parsedControlData) + token, err := parsedControlData.CreatePromotionToken() Expect(err).ToNot(HaveOccurred()) tokenContent, err := ParsePgControldataToken(token) From 9cd40c117ebae964b7205339afab56efd491a685 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 6 May 2025 10:27:34 +0200 Subject: [PATCH 554/836] ci(security): implement least privilege for GitHub Workflows (#7254) We should implement the least privilege principle for all the workflows in our repo following the recommendations from the OSSF Closes #7252 --------- Signed-off-by: Jonathan Gonzalez V. 
Signed-off-by: Jonathan Battiato Signed-off-by: Francesco Canovai Co-authored-by: Jonathan Battiato Co-authored-by: Francesco Canovai --- .github/workflows/backport.yml | 6 ++++++ .github/workflows/chatops.yml | 2 ++ .github/workflows/close-inactive-issues.yml | 3 ++- .github/workflows/codeql-analysis.yml | 6 +++++- .github/workflows/continuous-delivery.yml | 7 +++++++ .github/workflows/continuous-integration.yml | 7 +++++++ .github/workflows/k8s-versions-check.yml | 5 +---- .../workflows/latest-postgres-version-check.yml | 2 ++ .github/workflows/pr_verify_linked_issue.yml | 4 ++-- .github/workflows/refresh-licenses.yml | 2 ++ .github/workflows/registry-clean.yml | 2 ++ .github/workflows/release-pr.yml | 4 ++++ .github/workflows/release-publish.yml | 16 +++++++++++----- .github/workflows/release-tag.yml | 2 ++ .github/workflows/require-labels.yml | 5 ++--- .github/workflows/snyk.yml | 4 ++++ .github/workflows/spellcheck.yml | 2 ++ .github/workflows/sync-api.yml | 2 ++ 18 files changed, 65 insertions(+), 16 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 3ea607af9c..cbcefd2ed1 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -8,6 +8,8 @@ on: branches: - main +permissions: read-all + env: GOLANG_VERSION: "1.24.x" @@ -21,6 +23,8 @@ jobs: !contains(github.event.pull_request.labels.*.name, 'backport-requested') && !contains(github.event.pull_request.labels.*.name, 'do not backport') runs-on: ubuntu-24.04 + permissions: + pull-requests: write steps: - name: Label the pull request @@ -142,6 +146,8 @@ jobs: PR: ${{ github.event.pull_request.number }} COMMIT: ${{ needs.back-porting-pr.outputs.commit }} runs-on: ubuntu-24.04 + permissions: + issues: write steps: - name: create ticket uses: dacbd/create-issue-action@cdb57ab6ff8862aa09fee2be6ba77a59581921c2 # v2 diff --git a/.github/workflows/chatops.yml b/.github/workflows/chatops.yml index 4e16d70498..0da6dd096e 100644 --- a/.github/workflows/chatops.yml +++ b/.github/workflows/chatops.yml @@ -9,6 +9,8 @@ on: issue_comment: types: [created] +permissions: read-all + jobs: ok-to-merge: if: | diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml index ce0a2b4a1d..2e9057f3f9 100644 --- a/.github/workflows/close-inactive-issues.yml +++ b/.github/workflows/close-inactive-issues.yml @@ -5,12 +5,13 @@ on: schedule: - cron: "30 1 * * *" +permissions: read-all + jobs: close-issues: runs-on: ubuntu-latest permissions: issues: write - #pull-requests: write steps: - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9 with: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 65c187afe4..100695f532 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -30,6 +30,8 @@ on: schedule: - cron: '24 0 * * 5' +permissions: read-all + # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.24.x" @@ -38,6 +40,9 @@ jobs: duplicate_runs: runs-on: ubuntu-24.04 name: Skip duplicate runs + permissions: + actions: write + contents: read continue-on-error: true outputs: should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }} @@ -58,7 +63,6 @@ jobs: if: | needs.duplicate_runs.outputs.should_skip != 'true' permissions: - actions: read contents: read security-events: write diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 
b447bc08ec..f8fda00f3f 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -32,6 +32,8 @@ on: schedule: - cron: '0 1 * * *' +permissions: read-all + # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.24.x" @@ -65,6 +67,8 @@ jobs: # Note: this is a workaround since we can't directly schedule-run a workflow from a non default branch smoke_test_release_branches: runs-on: ubuntu-24.04 + permissions: + actions: write name: smoke test release-* branches when it's a scheduled run if: github.event_name == 'schedule' strategy: @@ -86,6 +90,9 @@ jobs: github.event.issue.pull_request && startsWith(github.event.comment.body, '/test') name: Retrieve command + permissions: + pull-requests: write + contents: read runs-on: ubuntu-24.04 outputs: github_ref: ${{ steps.refs.outputs.head_sha }} diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 39a7250c5c..93327eace2 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -14,6 +14,8 @@ on: schedule: - cron: '0 1 * * *' +permissions: read-all + # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.24.x" @@ -48,6 +50,8 @@ jobs: # Note: this is a workaround since we can't directly schedule-run a workflow from a non default branch smoke_test_release_branches: runs-on: ubuntu-24.04 + permissions: + actions: write name: smoke test release-* branches when it's a scheduled run if: github.event_name == 'schedule' strategy: @@ -68,6 +72,9 @@ jobs: duplicate_runs: runs-on: ubuntu-24.04 name: Skip duplicate runs + permissions: + actions: write + contents: read continue-on-error: true outputs: should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }} diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index d3e2e6b68c..d45406ad5a 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -16,10 +16,7 @@ on: description: 'Limit to the specified engines list (eks, aks, gke, kind, ocp)' required: false -permissions: - contents: write - pull-requests: write - issues: read +permissions: read-all defaults: run: diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index d08ffa2e6e..1d1d0e7f3d 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -8,6 +8,8 @@ on: - cron: "30 0 * * *" workflow_dispatch: +permissions: read-all + defaults: run: shell: "bash -Eeuo pipefail -x {0}" diff --git a/.github/workflows/pr_verify_linked_issue.yml b/.github/workflows/pr_verify_linked_issue.yml index fb724912dd..a0a32f4e33 100644 --- a/.github/workflows/pr_verify_linked_issue.yml +++ b/.github/workflows/pr_verify_linked_issue.yml @@ -15,6 +15,8 @@ on: - labeled - unlabeled +permissions: read-all + jobs: verify_linked_issue: runs-on: ubuntu-latest @@ -25,5 +27,3 @@ jobs: uses: hattan/verify-linked-issue-action@2d8e2e47a462cc7b07ba5e6cab6f9d57bd36672e # v1.1.5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index 8979611ec4..56c140520c 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -6,6 +6,8 @@ on: schedule: - cron: "30 0 * * 1" +permissions: 
read-all + env: GOLANG_VERSION: "1.24.x" diff --git a/.github/workflows/registry-clean.yml b/.github/workflows/registry-clean.yml index 028f95b3b3..ed6c707813 100644 --- a/.github/workflows/registry-clean.yml +++ b/.github/workflows/registry-clean.yml @@ -14,6 +14,8 @@ env: jobs: clean-ghcr: name: delete old testing container images + permissions: + packages: write runs-on: ubuntu-latest steps: - name: Delete '-testing' operator images in ${{ env.IMAGE_NAME }} diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index 10c53d1171..d1c4c06320 100644 --- a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -2,6 +2,10 @@ name: release-pr +permissions: + contents: write + pull-requests: write + on: push: branches: diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 14a1ece5f0..5a210016ed 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -7,15 +7,12 @@ on: tags: - v* +permissions: read-all + env: GOLANG_VERSION: "1.24.x" REGISTRY: "ghcr.io" -permissions: - contents: write - packages: write - id-token: write - jobs: check-version: @@ -51,6 +48,8 @@ jobs: release: name: Create Github release runs-on: ubuntu-24.04 + permissions: + contents: write needs: - check-version steps: @@ -89,6 +88,10 @@ jobs: release-binaries: name: Build containers runs-on: ubuntu-24.04 + permissions: + packages: write + contents: read + id-token: write needs: - check-version outputs: @@ -235,6 +238,9 @@ jobs: olm-bundle: name: Create OLM bundle and catalog runs-on: ubuntu-24.04 + permissions: + contents: read + packages: write needs: - check-version - release-binaries diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml index a22254936a..8483977c66 100644 --- a/.github/workflows/release-tag.yml +++ b/.github/workflows/release-tag.yml @@ -11,6 +11,8 @@ on: paths: - 'pkg/versions/versions.go' +permissions: read-all + jobs: tag: runs-on: ubuntu-24.04 diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index 078a677012..4c428c81ec 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -10,8 +10,7 @@ on: - labeled - unlabeled -env: - REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} +permissions: read-all jobs: require-labels: @@ -23,4 +22,4 @@ jobs: with: any_of: "ok to merge :ok_hand:" none_of: "do not merge" - repo_token: ${{ env.REPO_TOKEN }} + repo_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index f9372b1fc3..b8b4171223 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -8,6 +8,10 @@ on: - main workflow_dispatch: +permissions: + security-events: write + contents: read + jobs: security: name: Security scan diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 8693cf2439..ff7eb7dc6c 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -3,6 +3,8 @@ on: push: workflow_dispatch: +permissions: read-all + jobs: # Check code for non-inclusive language diff --git a/.github/workflows/sync-api.yml b/.github/workflows/sync-api.yml index c592679497..84ec9a8d86 100644 --- a/.github/workflows/sync-api.yml +++ b/.github/workflows/sync-api.yml @@ -5,6 +5,8 @@ on: branches: - main +permissions: read-all + jobs: trigger-sync: runs-on: ubuntu-latest From 7dbb44ff7301361209db0a712d309fc5a4ca2ed3 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Tue, 
6 May 2025 10:48:47 +0200 Subject: [PATCH 555/836] test(e2e): do not remove the stern directory on success (#7461) Fix a problem when the upgrade test suite fails and the following one succeeds, removing the directory with the failed test logs. Closes #6891 Signed-off-by: Francesco Canovai --- tests/e2e/suite_test.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go index 6dd39ef207..1df88660e5 100644 --- a/tests/e2e/suite_test.go +++ b/tests/e2e/suite_test.go @@ -25,7 +25,6 @@ import ( "testing" "time" - "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/onsi/ginkgo/v2/types" "github.com/thoas/go-funk" appsv1 "k8s.io/api/apps/v1" @@ -153,15 +152,6 @@ var _ = SynchronizedBeforeSuite(func() []byte { minioEnv.Client = objs["minio"] }) -var _ = ReportAfterSuite("Gathering failed reports", func(report Report) { - // Keep the logs of the operator and the clusters in case of failure - // If everything is skipped, env has not been initialized, and we'll have nothing to clean up - if report.SuiteSucceeded && env != nil { - err := fileutils.RemoveDirectory(namespaces.SternLogDirectory) - Expect(err).ToNot(HaveOccurred()) - } -}) - var _ = BeforeEach(func() { labelsForTestsBreakingTheOperator := []string{"upgrade", "disruptive"} breakingLabelsInCurrentTest := funk.Join(CurrentSpecReport().Labels(), From 8401a2637ac1554dcce5e698e88cf651ecf6301d Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Wed, 7 May 2025 10:50:41 +0200 Subject: [PATCH 556/836] fix: fail fast during WAL archiving on primary demotion (#7483) If the primary server is unexpectedly demoted, fail fast during WAL archiving checks, avoiding unnecessary retries, and improving backup efficiency. Closes #7482 Signed-off-by: Francesco Canovai --- pkg/management/postgres/wal.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/pkg/management/postgres/wal.go b/pkg/management/postgres/wal.go index 457bfb5fec..91fe4898aa 100644 --- a/pkg/management/postgres/wal.go +++ b/pkg/management/postgres/wal.go @@ -37,7 +37,7 @@ var errNoWalArchivePresent = errors.New("no wal-archive present") // On primary, it could run even before the first WAL has completed. For this reason it // could require a WAL switch, to quicken the check. // On standby, the mere existence of the standby guarantees that a WAL file has already been generated -// by the pg_basebakup used to prime the standby data directory, so we check only if the WAL +// by the pg_basebackup used to prime the standby data directory, so we check only if the WAL // archive process is not failing. func ensureWalArchiveIsWorking(instance *Instance) error { isPrimary, err := instance.IsPrimary() @@ -46,7 +46,7 @@ func ensureWalArchiveIsWorking(instance *Instance) error { } if isPrimary { - return newWalArchiveBootstrapperForPrimary().ensureFirstWalArchived(retryUntilWalArchiveWorking) + return newWalArchiveBootstrapperForPrimary().ensureFirstWalArchived(instance, retryUntilWalArchiveWorking) } return newWalArchiveAnalyzerForReplicaInstance(instance.GetPrimaryConnInfo()). 
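// The next hunk replaces the unconditional retry with a fail-fast check,
// using a sentinel error so the loop aborts as soon as the instance stops
// being primary. A minimal standalone sketch of that pattern follows; the
// retryOnError helper and the simulated probe are hypothetical stand-ins
// under stated assumptions, not CloudNativePG code.
package main

import (
	"errors"
	"fmt"
	"time"
)

// Sentinel error: once observed, retrying is pointless.
var errPrimaryDemoted = errors.New("primary was demoted while waiting for the first wal-archive")

// retryOnError retries fn until it succeeds, attempts run out, or the
// retriable predicate rejects the error (the fail-fast path).
func retryOnError(attempts int, delay time.Duration, retriable func(error) bool, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		if !retriable(err) {
			return err // fail fast: do not sleep and try again
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	calls := 0
	err := retryOnError(10, 10*time.Millisecond,
		func(err error) bool { return !errors.Is(err, errPrimaryDemoted) },
		func() error {
			calls++
			if calls > 2 { // simulate a demotion on the third attempt
				return errPrimaryDemoted
			}
			return errors.New("wal archive not working yet")
		})
	// Prints "3 primary was demoted ..." instead of burning all 10 attempts.
	fmt.Println(calls, err)
}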
@@ -146,8 +146,18 @@ func newWalArchiveBootstrapperForPrimary() *walArchiveBootstrapper { } } -func (w *walArchiveBootstrapper) ensureFirstWalArchived(backoff wait.Backoff) error { - return retry.OnError(backoff, resources.RetryAlways, func() error { +var errPrimaryDemoted = errors.New("primary was demoted while waiting for the first wal-archive") + +func (w *walArchiveBootstrapper) ensureFirstWalArchived(instance *Instance, backoff wait.Backoff) error { + return retry.OnError(backoff, func(err error) bool { return !errors.Is(err, errPrimaryDemoted) }, func() error { + isPrimary, err := instance.IsPrimary() + if err != nil { + return fmt.Errorf("error checking primary: %w", err) + } + if !isPrimary { + return errPrimaryDemoted + } + db, err := w.dbFactory() if err != nil { return err From a5272a0edb344a5f2c6f2bd122de81eab2288445 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 7 May 2025 12:02:11 +0200 Subject: [PATCH 557/836] ci(security): implement least privilege for GitHub Workflows (#7498) Add some missing permissions in the previous PR related. Default read permissions for registry-clean.yml Default read permissions for release-pr.yml Closes #7252 Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/codeql-analysis.yml | 3 --- .github/workflows/continuous-integration.yml | 4 ---- .github/workflows/registry-clean.yml | 2 ++ .github/workflows/release-pr.yml | 7 ++++--- .github/workflows/snyk.yml | 6 +++--- 5 files changed, 9 insertions(+), 13 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 100695f532..ae8bbe1649 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -40,9 +40,6 @@ jobs: duplicate_runs: runs-on: ubuntu-24.04 name: Skip duplicate runs - permissions: - actions: write - contents: read continue-on-error: true outputs: should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }} diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 93327eace2..cbf2f9e28d 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -58,7 +58,6 @@ jobs: fail-fast: false matrix: branch: [release-1.22, release-1.24, release-1.25] - steps: - name: Invoke workflow with inputs uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 @@ -72,9 +71,6 @@ jobs: duplicate_runs: runs-on: ubuntu-24.04 name: Skip duplicate runs - permissions: - actions: write - contents: read continue-on-error: true outputs: should_skip: ${{ steps.skip_check.outputs.should_skip == 'true' && github.ref != 'refs/heads/main' }} diff --git a/.github/workflows/registry-clean.yml b/.github/workflows/registry-clean.yml index ed6c707813..ac3df3175d 100644 --- a/.github/workflows/registry-clean.yml +++ b/.github/workflows/registry-clean.yml @@ -11,6 +11,8 @@ env: IMAGE_NAME: "cloudnative-pg-testing" CONTAINER_IMAGE_NAMES: "pgbouncer-testing, postgresql-testing, postgis-testing" +permissions: read-all + jobs: clean-ghcr: name: delete old testing container images diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index d1c4c06320..82310cd171 100644 --- a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -2,9 +2,7 @@ name: release-pr -permissions: - contents: write - pull-requests: write +permissions: read-all on: push: @@ -14,6 +12,9 @@ on: jobs: pull-request: runs-on: ubuntu-24.04 + 
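One way to keep this least-privilege policy from regressing would be a small repository check that every workflow file declares a top-level `permissions` key. A hedged sketch in Go — it assumes gopkg.in/yaml.v3, which is already in this repository's go.mod; the glob pattern and exit behavior are illustrative choices, not part of this patch:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

func main() {
	// Only *.yml is matched here; extend to *.yaml if a repo uses both.
	files, err := filepath.Glob(filepath.Join(".github", "workflows", "*.yml"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	ok := true
	for _, file := range files {
		data, err := os.ReadFile(file)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		var workflow map[string]any
		if err := yaml.Unmarshal(data, &workflow); err != nil {
			fmt.Fprintf(os.Stderr, "%s: %v\n", file, err)
			os.Exit(1)
		}
		// The OSSF recommendation implemented by this series: an explicit
		// top-level permissions block in every workflow.
		if _, found := workflow["permissions"]; !found {
			fmt.Printf("%s: missing top-level permissions\n", file)
			ok = false
		}
	}
	if !ok {
		os.Exit(1)
	}
}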
permissions: + pull-requests: write + contents: write steps: - name: Checkout diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index b8b4171223..2633dc340a 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -8,14 +8,14 @@ on: - main workflow_dispatch: -permissions: - security-events: write - contents: read +permissions: read-all jobs: security: name: Security scan runs-on: ubuntu-24.04 + permissions: + security-events: write steps: - name: Checkout code uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 From d6ca557f83a257c4e85072c2296739d637ebca01 Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Thu, 8 May 2025 11:45:28 +0200 Subject: [PATCH 558/836] docs: fix missing storage spec in tablespaces definitions (#7502) The example given wasn't working because of missing storage clause. Signed-off-by: Pierrick Chovelon --- docs/src/tablespaces.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/src/tablespaces.md b/docs/src/tablespaces.md index 86e5c0847b..cd214d1713 100644 --- a/docs/src/tablespaces.md +++ b/docs/src/tablespaces.md @@ -147,11 +147,13 @@ spec: size: 10Gi tablespaces: - name: current - size: 100Gi - storageClass: fastest + storage: + size: 100Gi + storageClass: fastest - name: this_year - size: 500Gi - storageClass: balanced + storage: + size: 500Gi + storageClass: balanced ``` The `yardbirds` cluster example requests 4 persistent volume claims using From 6c0f3eb5b150c3f935b2eeaf6ab5d0a6480b3269 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Thu, 8 May 2025 14:48:44 +0200 Subject: [PATCH 559/836] fix: don't fail on missing wal in plugin wal restore (#7507) Fix a problem where, if the plugins could not find a wal using the plugins defined in the cluster, the restore command would exit with an error, without trying to use the in-tree barmanObjectStore. Closes #7499 Signed-off-by: Francesco Canovai Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Leonardo Cecchi Co-authored-by: Armando Ruocco --- internal/cmd/manager/walrestore/cmd.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index c520fe4482..e18145c52a 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -72,6 +72,8 @@ func NewCmd() *cobra.Command { SilenceErrors: true, Args: cobra.ExactArgs(2), RunE: func(cobraCmd *cobra.Command, args []string) error { + // TODO: The command is triggered by PG, resulting in the loss of stdout logs. + // TODO: We need to implement a logpipe to prevent this. contextLog := log.WithName("wal-restore") ctx := log.IntoContext(cobraCmd.Context(), contextLog) err := run(ctx, pgData, podName, args) @@ -123,9 +125,19 @@ func run(ctx context.Context, pgData string, podName string, args []string) erro walFound, err := restoreWALViaPlugins(ctx, cluster, walName, path.Join(pgData, destinationPath)) if err != nil { - return err + // With the current implementation, this happens when both of the following conditions are met: + // + // 1. At least one CNPG-i plugin that implements the WAL service is present. + // 2. No plugin can restore the WAL file because: + // a) The requested WAL could not be found + // b) The plugin failed in the restoration process. + // + // When this happens, `walFound` is false, prompting us to revert to the in-tree barman-cloud support. 
+ contextLog.Trace("could not restore WAL via plugins", "wal", walName, "error", err) } if walFound { + // This happens only if a CNPG-i plugin was able to restore + // the requested WAL. return nil } From 61560a50392f9a4ae804b7d306ec769aefdda958 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Thu, 8 May 2025 16:36:55 +0200 Subject: [PATCH 560/836] fix(cnpg-i): avoid timeout in local CNPG-i entrypoints (#7496) Closes #7495 Signed-off-by: Leonardo Cecchi --- internal/cnpi/plugin/connection/connection.go | 5 +++-- internal/cnpi/plugin/connection/remote.go | 2 +- internal/cnpi/plugin/connection/unix.go | 14 +------------- pkg/management/postgres/restore.go | 4 ---- pkg/management/postgres/webserver/local.go | 4 ---- 5 files changed, 5 insertions(+), 24 deletions(-) diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go index 1eb3a79794..fdfae176b5 100644 --- a/internal/cnpi/plugin/connection/connection.go +++ b/internal/cnpi/plugin/connection/connection.go @@ -36,8 +36,9 @@ import ( "google.golang.org/grpc" ) -// defaultTimeout is the timeout applied by default to every GRPC call -const defaultTimeout = 30 * time.Second +// defaultNetworkCallTimeout is the timeout applied by default to every GRPC +// call to a plugin in a different Pod +const defaultNetworkCallTimeout = 30 * time.Second // Protocol represents a way to connect to a plugin type Protocol interface { diff --git a/internal/cnpi/plugin/connection/remote.go b/internal/cnpi/plugin/connection/remote.go index 159469a74e..15d0fa93a9 100644 --- a/internal/cnpi/plugin/connection/remote.go +++ b/internal/cnpi/plugin/connection/remote.go @@ -41,7 +41,7 @@ func (p *ProtocolTCP) Dial(_ context.Context) (Handler, error) { p.Address, grpc.WithTransportCredentials(credentials.NewTLS(p.TLSConfig)), grpc.WithUnaryInterceptor( - timeout.UnaryClientInterceptor(defaultTimeout), + timeout.UnaryClientInterceptor(defaultNetworkCallTimeout), ), ) } diff --git a/internal/cnpi/plugin/connection/unix.go b/internal/cnpi/plugin/connection/unix.go index fa986f5396..fe39771fde 100644 --- a/internal/cnpi/plugin/connection/unix.go +++ b/internal/cnpi/plugin/connection/unix.go @@ -23,14 +23,10 @@ package connection import ( "context" "fmt" - "time" "github.com/cloudnative-pg/machinery/pkg/log" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/timeout" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - - contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) // ProtocolUnix is for plugins that are reachable over a @@ -44,15 +40,7 @@ func (p ProtocolUnix) Dial(ctx context.Context) (Handler, error) { contextLogger.Debug("Connecting to plugin via local socket", "path", dialPath) - timeoutValue := defaultTimeout - value, ok := ctx.Value(contextutils.GRPCTimeoutKey).(time.Duration) - if ok { - contextLogger.Debug("Using custom timeout value", "timeout", value) - timeoutValue = value - } - return grpc.NewClient( dialPath, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithUnaryInterceptor(timeout.UnaryClientInterceptor(timeoutValue))) + grpc.WithTransportCredentials(insecure.NewCredentials())) } diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index ee19920f29..0b2041cf8d 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -60,7 +60,6 @@ import ( postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" 
"github.com/cloudnative-pg/cloudnative-pg/pkg/system" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" - contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) var ( @@ -1040,9 +1039,6 @@ func restoreViaPlugin( ) (*restore.RestoreResponse, error) { contextLogger := log.FromContext(ctx) - // TODO: timeout should be configurable by the user - ctx = context.WithValue(ctx, contextutils.GRPCTimeoutKey, 100*time.Minute) - plugins := repository.New() defer plugins.Close() diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go index e62186aa86..80e046fcf7 100644 --- a/pkg/management/postgres/webserver/local.go +++ b/pkg/management/postgres/webserver/local.go @@ -26,7 +26,6 @@ import ( "fmt" "net/http" "strings" - "time" "github.com/cloudnative-pg/machinery/pkg/log" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -39,7 +38,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/resources/status" - contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) type localWebserverEndpoints struct { @@ -238,8 +236,6 @@ func (ws *localWebserverEndpoints) startPluginBackup( cluster *apiv1.Cluster, backup *apiv1.Backup, ) { - // TODO: timeout should be configurable by the user - ctx = context.WithValue(ctx, contextutils.GRPCTimeoutKey, 100*time.Minute) NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder).Start(ctx) } From 6d96e54850c588a42238bdaab164d37898e2dcf9 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Thu, 8 May 2025 17:44:19 +0200 Subject: [PATCH 561/836] fix(kubectl-cnpg): require plugin-name for plugin backups (#7506) Closes #7513 Signed-off-by: Leonardo Cecchi --- internal/cmd/plugin/backup/cmd.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go index c88d689c85..b3c49395e1 100644 --- a/internal/cmd/plugin/backup/cmd.go +++ b/internal/cmd/plugin/backup/cmd.go @@ -110,7 +110,12 @@ func NewCmd() *cobra.Command { return fmt.Errorf("backup-method: %s is not supported by the backup command", backupMethod) } - if backupMethod != string(apiv1.BackupMethodPlugin) { + if backupMethod == string(apiv1.BackupMethodPlugin) { + if len(pluginName) == 0 { + return fmt.Errorf("plugin-name is required when backup method in %s", + apiv1.BackupMethodPlugin) + } + } else { if len(pluginName) > 0 { return fmt.Errorf("plugin-name is allowed only when backup method in %s", apiv1.BackupMethodPlugin) From 6e86f70896aa352a16db1b2826a88c9bc252925a Mon Sep 17 00:00:00 2001 From: Qua Jones <90266631+quajones@users.noreply.github.com> Date: Fri, 9 May 2025 04:45:47 -0400 Subject: [PATCH 562/836] fix(instance_replica): include dbname in primary_conninfo (#7298) Includes the `dbname=postgres` argument on every call of `GetPrimaryConnInfo()` and avoid adding this argument on every call as it was before. Due to this the function `writeReplicaConfigurationForReplica()` was missing the argument Closes #5917 Signed-off-by: Qua Jones Signed-off-by: Qua Jones Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. 
--- pkg/management/postgres/instance.go | 6 +++--- pkg/management/postgres/instance_replica.go | 3 ++- pkg/management/postgres/wal.go | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 4698b4e438..8ea2efc439 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -841,7 +841,7 @@ func (instance *Instance) Demote(ctx context.Context, cluster *apiv1.Cluster) er // WaitForPrimaryAvailable waits until we can connect to the primary func (instance *Instance) WaitForPrimaryAvailable(ctx context.Context) error { - primaryConnInfo := instance.GetPrimaryConnInfo() + " dbname=postgres connect_timeout=5" + primaryConnInfo := instance.GetPrimaryConnInfo() + " connect_timeout=5" log.Info("Waiting for the new primary to be available", "primaryConnInfo", primaryConnInfo) @@ -1041,7 +1041,7 @@ func (instance *Instance) Rewind(ctx context.Context) error { primaryConnInfo := instance.GetPrimaryConnInfo() options := []string{ "-P", - "--source-server", primaryConnInfo + " dbname=postgres", + "--source-server", primaryConnInfo, "--target-pgdata", instance.PgData, } @@ -1300,7 +1300,7 @@ func (instance *Instance) DropConnections() error { // GetPrimaryConnInfo returns the DSN to reach the primary func (instance *Instance) GetPrimaryConnInfo() string { - result := buildPrimaryConnInfo(instance.GetClusterName()+"-rw", instance.GetPodName()) + result := buildPrimaryConnInfo(instance.GetClusterName()+"-rw", instance.GetPodName()) + " dbname=postgres" standbyTCPUserTimeout := os.Getenv("CNPG_STANDBY_TCP_USER_TIMEOUT") if len(standbyTCPUserTimeout) > 0 { diff --git a/pkg/management/postgres/instance_replica.go b/pkg/management/postgres/instance_replica.go index 236348dc65..c8d3bc6354 100644 --- a/pkg/management/postgres/instance_replica.go +++ b/pkg/management/postgres/instance_replica.go @@ -64,7 +64,8 @@ func (instance *Instance) RefreshReplicaConfiguration( func (instance *Instance) writeReplicaConfigurationForReplica(cluster *apiv1.Cluster) (changed bool, err error) { slotName := cluster.GetSlotNameFromInstanceName(instance.GetPodName()) - return UpdateReplicaConfiguration(instance.PgData, instance.GetPrimaryConnInfo(), slotName) + primaryConnInfo := instance.GetPrimaryConnInfo() + return UpdateReplicaConfiguration(instance.PgData, primaryConnInfo, slotName) } func (instance *Instance) writeReplicaConfigurationForDesignatedPrimary( diff --git a/pkg/management/postgres/wal.go b/pkg/management/postgres/wal.go index 91fe4898aa..0a3d486b8d 100644 --- a/pkg/management/postgres/wal.go +++ b/pkg/management/postgres/wal.go @@ -65,7 +65,7 @@ func newWalArchiveAnalyzerForReplicaInstance(primaryConnInfo string) *walArchive dbFactory: func() (*sql.DB, error) { db, openErr := sql.Open( "pgx", - fmt.Sprintf("%s dbname=%s", primaryConnInfo, "postgres"), + primaryConnInfo, ) if openErr != nil { log.Error(openErr, "can not open postgres database") From 85f01bab28b1cce20239f20dc89791a8bc71a2ae Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 9 May 2025 12:05:26 +0200 Subject: [PATCH 563/836] fix(deps): update all non-major go dependencies (main) (#7484) This PR contains the following updates: https://redirect.github.com/goreleaser/goreleaser `v2.8.2` -> `v2.9.0` https://redirect.github.com/grpc-ecosystem/go-grpc-middleware `v2.3.1` -> `v2.3.2` golang.org/x/term `v0.31.0` -> `v0.32.0` https://redirect.github.com/grpc/grpc-go `v1.71.1` -> 
`v1.72.0` --- Makefile | 2 +- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Makefile b/Makefile index fcd092976b..5fa03df63b 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.17.3 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca -GORELEASER_VERSION ?= v2.8.2 +GORELEASER_VERSION ?= v2.9.0 SPELLCHECK_VERSION ?= 0.48.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 diff --git a/go.mod b/go.mod index 3ee11d1a58..cd8fc33c48 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.11 github.com/go-logr/logr v1.4.2 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 github.com/jackc/pgx/v5 v5.7.4 github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 @@ -35,8 +35,8 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/term v0.31.0 - google.golang.org/grpc v1.71.1 + golang.org/x/term v0.32.0 + google.golang.org/grpc v1.72.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.3 k8s.io/apiextensions-apiserver v0.32.3 @@ -102,13 +102,13 @@ require ( golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.32.0 // indirect + golang.org/x/sys v0.33.0 // indirect golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect - google.golang.org/protobuf v1.36.5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/go.sum b/go.sum index 4f6877fc07..526be7eb65 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 h1:KcFzXwzM/kGhIRHvc8jdixfIJjVzuUJdnv+5xsPutog= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -244,10 +244,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= @@ -266,12 +266,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= -google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= -google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= +google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= +google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From 23df322cafebd5308a9d6363bfb9e88f5771b127 Mon Sep 17 00:00:00 2001 From: Artem Timofeev <39891735+atimofeev@users.noreply.github.com> Date: Fri, 9 May 2025 12:36:55 +0200 Subject: [PATCH 564/836] docs(fix): image_catalog major version inconsistencies (#7511) Short-term fix for #7510 Signed-off-by: Artem Timofeev <39891735+atimofeev@users.noreply.github.com> --- docs/src/image_catalog.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index 
4d19ecafe1..d3a9479b57 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -33,6 +33,8 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 + image: ghcr.io/cloudnative-pg/postgresql:16.8 + - major: 17 image: ghcr.io/cloudnative-pg/postgresql:17.4 ``` @@ -48,6 +50,8 @@ spec: - major: 15 image: ghcr.io/cloudnative-pg/postgresql:15.6 - major: 16 + image: ghcr.io/cloudnative-pg/postgresql:16.8 + - major: 17 image: ghcr.io/cloudnative-pg/postgresql:17.4 ``` From c598afe224952e788647b16e1c592e0f10c53b10 Mon Sep 17 00:00:00 2001 From: Ani Ravi <5902976+aniravi24@users.noreply.github.com> Date: Fri, 9 May 2025 06:37:38 -0400 Subject: [PATCH 565/836] docs: fix value for skipEmptyWalArchiveCheck annotation (#7509) The documentation says the value is supposed to be set to `true`, but it's actually supposed to be set to `enabled`. It is correct elsewhere. Signed-off-by: Ani Ravi <5902976+aniravi24@users.noreply.github.com> --- docs/src/labels_annotations.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md index 30c0a2e963..290a116bcd 100644 --- a/docs/src/labels_annotations.md +++ b/docs/src/labels_annotations.md @@ -213,7 +213,7 @@ CloudNativePG manages the following predefined annotations: : Contains the latest cluster `reload` time. `reload` is triggered by the user through a plugin. `cnpg.io/skipEmptyWalArchiveCheck` -: When set to `true` on a `Cluster` resource, the operator disables the check +: When set to `enabled` on a `Cluster` resource, the operator disables the check that ensures that the WAL archive is empty before writing data. Use at your own risk. From 6f2f207c6f16edff06256d4069e7951ec0e607bb Mon Sep 17 00:00:00 2001 From: Pascal Bourdier Date: Fri, 9 May 2025 12:38:34 +0200 Subject: [PATCH 566/836] fix(docs): typo in backup plugin message (#7501) Signed-off-by: Pascal Bourdier --- internal/cmd/plugin/backup/cmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go index b3c49395e1..f1bd825d21 100644 --- a/internal/cmd/plugin/backup/cmd.go +++ b/internal/cmd/plugin/backup/cmd.go @@ -211,7 +211,7 @@ func NewCmd() *cobra.Command { ) backupSubcommand.Flags().StringVar(&waitForArchive, "wait-for-archive", "", - "Set the '.spec.onlineConfiguratoin.waitForArchive' field of the "+ + "Set the '.spec.onlineConfiguration.waitForArchive' field of the "+ "Backup resource. If not specified, the value in the "+ "'.spec.backup.volumeSnapshot.onlineConfiguration' field will be used. 
"+ optionalAcceptedValues, From fe5b0253d5ed70a591bfd67b195875c4b97a3d22 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 9 May 2025 13:57:16 +0200 Subject: [PATCH 567/836] chore(deps): update dependency operator-framework/operator-registry to v1.54.0 (main) (#7520) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5fa03df63b..8ccadec256 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ GORELEASER_VERSION ?= v2.9.0 SPELLCHECK_VERSION ?= 0.48.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 -OPM_VERSION ?= v1.53.0 +OPM_VERSION ?= v1.54.0 PREFLIGHT_VERSION ?= 1.13.0 OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 From bdf3f8f5461184fdd2731ac62581d1abee89f8ed Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 9 May 2025 15:09:21 +0200 Subject: [PATCH 568/836] fix(deps): update module github.com/cloudnative-pg/cnpg-i to v0.2.1 (main) (#7367) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cd8fc33c48..3cefab3fe6 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.3.1 - github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb + github.com/cloudnative-pg/cnpg-i v0.2.1 github.com/cloudnative-pg/machinery v0.2.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 diff --git a/go.sum b/go.sum index 526be7eb65..5f42b0b4a0 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.3.1 h1:kzkY77k2lN/caoyh7ibXDSZjJeSJTNvnVt6Gfa8Iq5M= github.com/cloudnative-pg/barman-cloud v0.3.1/go.mod h1:4HL3AjY9oEl2Ed0HSkyvTZEQPhwyFOaAnuCz9lfVeYQ= -github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb h1:FPORwCxjZwlnKnF7dOkuOAz0GBSQ3Hrn+8lm4uMiWeM= -github.com/cloudnative-pg/cnpg-i v0.1.1-0.20250321093050-de4ab51537cb/go.mod h1:n+kbHm3rzRCY5IJKuE1tGMbG6JaeYz8yycYoLt7BeKo= +github.com/cloudnative-pg/cnpg-i v0.2.1 h1:g96BE1ojdiFtDwtb7tg5wUF9a2kAh0eVg4SkjsO8jnk= +github.com/cloudnative-pg/cnpg-i v0.2.1/go.mod h1:kPfJpPGAKN1/2xvwBcC3WzMP46pj3sKLHLNB8NHr77U= github.com/cloudnative-pg/machinery v0.2.0 h1:x8OAwxdeL/6wkbxqorz+nX6UovTyx7/TBeCfiRebR2o= github.com/cloudnative-pg/machinery v0.2.0/go.mod h1:Kg8W8Tb/1UFGGtw3hR8S5SytSWddlHaCnJSgBo4x/nc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= From 87c7a6a97857c82934f6ec78733d2959787cfcf7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 9 May 2025 16:06:13 +0200 Subject: [PATCH 569/836] chore(deps): update actions/setup-go digest to d35c59a (main) (#7515) --- .github/workflows/backport.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 12 ++++++------ .github/workflows/continuous-integration.yml | 16 ++++++++-------- .github/workflows/refresh-licenses.yml | 2 +- .github/workflows/release-publish.yml | 4 ++-- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index cbcefd2ed1..43f4102c0a 
100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -93,7 +93,7 @@ jobs: token: ${{ secrets.REPO_GHA_PAT }} - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ae8bbe1649..b59f4d6125 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -68,7 +68,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f8fda00f3f..22fbc92889 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -282,7 +282,7 @@ jobs: fetch-depth: 0 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -654,7 +654,7 @@ jobs: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -881,7 +881,7 @@ jobs: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -1228,7 +1228,7 @@ jobs: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -1613,7 +1613,7 @@ jobs: ref: ${{ needs.evaluate_options.outputs.git_ref }} - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -1924,7 +1924,7 @@ jobs: fetch-depth: 0 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index cbf2f9e28d..276ad20031 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -154,7 +154,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: # Disable setup-go caching. 
Cache is better handled by the golangci-lint action cache: false @@ -285,7 +285,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -324,7 +324,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -359,7 +359,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -429,7 +429,7 @@ jobs: fetch-depth: 0 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -668,7 +668,7 @@ jobs: cache-image: false - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -715,7 +715,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -794,7 +794,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index 56c140520c..b1e2c5e104 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -20,7 +20,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 5a210016ed..a57a5559f7 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -109,7 +109,7 @@ jobs: fetch-depth: 0 - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true @@ -261,7 +261,7 @@ jobs: platforms: ${{ needs.release-binaries.outputs.platforms }} - name: Install Go - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5 + uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 with: go-version: ${{ env.GOLANG_VERSION }} check-latest: true From ec0d81218e37e90738628484883971d2e5a012fd Mon Sep 17 00:00:00 
00 2001 From: "Jonathan Gonzalez V." Date: Fri, 9 May 2025 18:26:56 +0200 Subject: [PATCH 570/836] chore(golangci): migrate to golangci-lint v2 (#7534) The new version of golangci-lint v2 found some issues that need to be addressed before we can even try to update to the latest version. Disabled checks: * The De Morgan's law simplification check was disabled in staticcheck (QF1001) * The merge of a conditional assignment into the variable declaration was disabled (QF1007) Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/continuous-integration.yml | 4 +- .golangci.yml | 150 +++++++----------- api/v1/backup_funcs.go | 2 +- api/v1/base_funcs.go | 2 +- api/v1/cluster_funcs.go | 2 +- internal/cmd/manager/walrestore/cmd.go | 8 +- internal/cmd/plugin/status/status.go | 2 +- internal/cnpi/plugin/mapping.go | 2 +- internal/controller/backup_predicates.go | 2 +- .../controller/cluster_controller_test.go | 2 +- internal/controller/cluster_create.go | 16 +- internal/controller/cluster_create_test.go | 26 +-- internal/controller/cluster_delete_test.go | 2 +- internal/controller/cluster_image.go | 2 +- internal/controller/cluster_restore.go | 2 +- internal/controller/cluster_upgrade.go | 6 +- internal/controller/cluster_upgrade_test.go | 6 +- internal/controller/plugin_controller.go | 2 +- internal/controller/pooler_update_test.go | 4 +- .../controller/database_controller.go | 2 +- .../controller/publication_controller.go | 2 +- .../management/controller/roles/postgres.go | 8 +- .../controller/subscription_controller.go | 2 +- internal/plugin/resources/instance.go | 2 +- internal/webhook/v1/cluster_webhook.go | 2 +- pkg/certs/certs.go | 6 +- pkg/configfile/configfile_test.go | 4 +- .../postgres/logpipe/CSVReadWriter.go | 2 +- pkg/management/postgres/logpipe/pgaudit.go | 4 +- .../webserver/client/remote/instance.go | 14 +- pkg/reconciler/hibernation/status_test.go | 2 +- pkg/reconciler/instance/metadata.go | 8 +- pkg/reconciler/majorupgrade/reconciler.go | 2 +- .../persistentvolumeclaim/metadata.go | 4 +- .../persistentvolumeclaim/status.go | 4 +- .../persistentvolumeclaim/storagesource.go | 4 +- pkg/specs/pods.go | 6 +- pkg/specs/podspec_diff.go | 2 +- pkg/specs/roles.go | 18 +-- pkg/utils/labels_annotations_test.go | 2 +- tests/e2e/apparmor_test.go | 2 +- tests/e2e/asserts_test.go | 2 +- tests/e2e/nodeselector_test.go | 2 +- tests/e2e/operator_unavailable_test.go | 2 +- tests/e2e/tablespaces_test.go | 24 +-- tests/utils/sternmultitailer/multitailer.go | 2 +- 46 files changed, 171 insertions(+), 205 deletions(-)
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 276ad20031..8b6a8878c9 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -19,7 +19,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: GOLANG_VERSION: "1.24.x" - GOLANGCI_LINT_VERSION: "v1.64.8" + GOLANGCI_LINT_VERSION: "v2.1.6" KUBEBUILDER_VERSION: "2.3.1" KIND_VERSION: "v0.27.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" @@ -162,7 +162,7 @@ jobs: check-latest: true - name: Run golangci-lint - uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # v6 + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8 with: version: ${{ env.GOLANGCI_LINT_VERSION }}
diff --git a/.golangci.yml b/.golangci.yml index 07c5b16eb7..071690d162 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,45 +1,25 @@ -linters-settings: - lll: - line-length: 120 - gci: - sections:
- standard - - default - - prefix(github.com/cloudnative-pg/cloudnative-pg) - - blank - - dot - gosec: - excludes: - - G101 # remove this exclude when https://github.com/securego/gosec/issues/1001 is fixed - +version: "2" linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true + default: none enable: - asciicheck - bodyclose + - copyloopvar - dogsled - dupl - durationcheck - errcheck - - copyloopvar - - gci + - ginkgolinter - gocognit - goconst - gocritic - gocyclo - - gofmt - - gofumpt - goheader - - goimports - gomoddirectives - gomodguard - goprintffuncname - gosec - - gosimple - govet - - ginkgolinter - importas - ineffassign - lll @@ -53,78 +33,64 @@ linters: - rowserrcheck - sqlclosecheck - staticcheck - - stylecheck - thelper - tparallel - - typecheck - unconvert - unparam - unused - wastedassign - whitespace - - # to be checked: - # - errorlint - # - forbidigo - # - forcetypeassert - # - goerr113 - # - ifshort - # - nilerr - # - nlreturn - # - noctx - # - nolintlint - # - paralleltest - # - promlinter - # - tagliatelle - # - wrapcheck - - # don't enable: - # - cyclop - # - depguard - # - exhaustive - # - exhaustivestruct - # - funlen - # - gochecknoglobals - # - gochecknoinits - # - godot - # - godox - # - gomnd - # - testpackage - # - wsl - - # deprecated: - # - deadcode - # - golint - # - interfacer - # - maligned - # - scopelint - # - structcheck - # - varcheck - -run: - timeout: 5m - -issues: - exclude-rules: - # Allow dot imports for ginkgo and gomega - - source: ginkgo|gomega - linters: - - revive - text: "should not use dot imports" - # Exclude some linters from running on tests files. 
- - path: _test\.go - linters: - - goconst - # Exclude lll issues for lines with long annotations - - linters: - - lll - source: "//\\s*\\+" - # We have no control of this in zz_generated files and it looks like that excluding those files is not enough - # so we disable "ST1016: methods on the same type should have the same receiver name" in api directory - - linters: - - stylecheck - text: "ST1016:" - path: api/ - exclude-use-default: false - exclude-files: - - zz_generated.* + settings: + gosec: + excludes: + - G101 + staticcheck: + checks: + - all + - '-QF1001' + - '-QF1007' + lll: + line-length: 120 + exclusions: + generated: lax + rules: + - linters: + - revive + text: should not use dot imports + source: ginkgo|gomega + - linters: + - goconst + path: _test\.go + - linters: + - lll + source: //\s*\+ + - linters: + - staticcheck + path: api/ + text: 'ST1016:' + paths: + - zz_generated.* + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + settings: + gci: + sections: + - standard + - default + - prefix(github.com/cloudnative-pg/cloudnative-pg) + - blank + - dot + exclusions: + generated: lax + paths: + - zz_generated.* + - third_party$ + - builtin$ + - examples$ diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go index b3ba528d60..f53f8e34b0 100644 --- a/api/v1/backup_funcs.go +++ b/api/v1/backup_funcs.go @@ -194,7 +194,7 @@ func (list *BackupList) SortByName() { func (list *BackupList) SortByReverseCreationTime() { // Sort the list of backups in reverse creation time sort.Slice(list.Items, func(i, j int) bool { - return list.Items[i].CreationTimestamp.Time.Compare(list.Items[j].CreationTimestamp.Time) > 0 + return list.Items[i].CreationTimestamp.Compare(list.Items[j].CreationTimestamp.Time) > 0 }) } diff --git a/api/v1/base_funcs.go b/api/v1/base_funcs.go index abde3703d6..09c3698ef4 100644 --- a/api/v1/base_funcs.go +++ b/api/v1/base_funcs.go @@ -34,7 +34,7 @@ func SecretKeySelectorToCore(selector *SecretKeySelector) *corev1.SecretKeySelec return &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ - Name: selector.LocalObjectReference.Name, + Name: selector.Name, }, Key: selector.Key, } diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 17de306af4..3e1b08c584 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -1261,7 +1261,7 @@ func (cluster *Cluster) GetServerCASecretObjectKey() types.NamespacedName { // is configured, false otherwise func (backupConfiguration *BackupConfiguration) IsBarmanBackupConfigured() bool { return backupConfiguration != nil && backupConfiguration.BarmanObjectStore != nil && - backupConfiguration.BarmanObjectStore.BarmanCredentials.ArePopulated() + backupConfiguration.BarmanObjectStore.ArePopulated() } // IsBarmanEndpointCASet returns true if we have a CA bundle for the endpoint diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index e18145c52a..2e1043f791 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -353,9 +353,9 @@ func GetRecoverConfiguration( return "", nil, nil, ErrNoBackupConfigured } configuration := externalCluster.BarmanObjectStore - if configuration.EndpointCA != nil && configuration.BarmanCredentials.AWS != nil { + if configuration.EndpointCA != nil && configuration.AWS != nil { env = append(env, fmt.Sprintf("AWS_CA_BUNDLE=%s", postgres.BarmanRestoreEndpointCACertificateLocation)) - } else if configuration.EndpointCA != 
nil && configuration.BarmanCredentials.Azure != nil { + } else if configuration.EndpointCA != nil && configuration.Azure != nil { env = append(env, fmt.Sprintf("REQUESTS_CA_BUNDLE=%s", postgres.BarmanRestoreEndpointCACertificateLocation)) } return externalCluster.Name, env, externalCluster.BarmanObjectStore, nil @@ -365,9 +365,9 @@ func GetRecoverConfiguration( // back up this cluster if cluster.Spec.Backup != nil && cluster.Spec.Backup.BarmanObjectStore != nil { configuration := cluster.Spec.Backup.BarmanObjectStore - if configuration.EndpointCA != nil && configuration.BarmanCredentials.AWS != nil { + if configuration.EndpointCA != nil && configuration.AWS != nil { env = append(env, fmt.Sprintf("AWS_CA_BUNDLE=%s", postgres.BarmanBackupEndpointCACertificateLocation)) - } else if configuration.EndpointCA != nil && configuration.BarmanCredentials.Azure != nil { + } else if configuration.EndpointCA != nil && configuration.Azure != nil { env = append(env, fmt.Sprintf("REQUESTS_CA_BUNDLE=%s", postgres.BarmanBackupEndpointCACertificateLocation)) } return cluster.Name, env, cluster.Spec.Backup.BarmanObjectStore, nil diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 4f8300d6e2..84720d7aa8 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -358,7 +358,7 @@ func (fullStatus *PostgresqlStatus) printHibernationInfo() { hibernationStatus.AddLine("Status", "Active") } hibernationStatus.AddLine("Message", hibernationCondition.Message) - hibernationStatus.AddLine("Time", hibernationCondition.LastTransitionTime.Time.UTC()) + hibernationStatus.AddLine("Time", hibernationCondition.LastTransitionTime.UTC()) fmt.Println(aurora.Green("Hibernation")) hibernationStatus.Print() diff --git a/internal/cnpi/plugin/mapping.go b/internal/cnpi/plugin/mapping.go index 28617ed66b..a408a3024a 100644 --- a/internal/cnpi/plugin/mapping.go +++ b/internal/cnpi/plugin/mapping.go @@ -38,7 +38,7 @@ const ( ) // ToOperationType_Type converts an OperationVerb into a lifecycle.OperationType_Type -// nolint: revive,stylecheck +// nolint: revive,staticcheck func (o OperationVerb) ToOperationType_Type() (lifecycle.OperatorOperationType_Type, error) { switch o { case OperationVerbPatch: diff --git a/internal/controller/backup_predicates.go b/internal/controller/backup_predicates.go index ee5c541351..147af6fb89 100644 --- a/internal/controller/backup_predicates.go +++ b/internal/controller/backup_predicates.go @@ -73,7 +73,7 @@ func (r *BackupReconciler) mapClustersToBackup() handler.MapFunc { return nil } var backups apiv1.BackupList - err := r.Client.List(ctx, &backups, + err := r.List(ctx, &backups, client.MatchingFields{ backupPhase: apiv1.BackupPhaseRunning, }, diff --git a/internal/controller/cluster_controller_test.go b/internal/controller/cluster_controller_test.go index ec4c0214c1..24f567d814 100644 --- a/internal/controller/cluster_controller_test.go +++ b/internal/controller/cluster_controller_test.go @@ -266,7 +266,7 @@ var _ = Describe("Updating target primary", func() { By("checking that the third instance exists even if the cluster has two instances", func() { var expectedPod corev1.Pod instanceName := specs.GetInstanceName(cluster.Name, 3) - err := env.clusterReconciler.Client.Get(ctx, types.NamespacedName{ + err := env.clusterReconciler.Get(ctx, types.NamespacedName{ Name: instanceName, Namespace: cluster.Namespace, }, &expectedPod) diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 
1df2691714..76f459c2fc 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -26,6 +26,7 @@ import ( "slices" "time" + "github.com/cloudnative-pg/machinery/pkg/log" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/sethvargo/go-password/password" batchv1 "k8s.io/api/batch/v1" @@ -49,7 +50,6 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/versions" - "github.com/cloudnative-pg/machinery/pkg/log" ) // createPostgresClusterObjects ensures that we have the required global objects @@ -343,7 +343,7 @@ func (r *ClusterReconciler) reconcileManagedServices(ctx context.Context, cluste // we delete the old managed services not appearing anymore in the spec var livingServices corev1.ServiceList - if err := r.Client.List(ctx, &livingServices, client.InNamespace(cluster.Namespace), client.MatchingLabels{ + if err := r.List(ctx, &livingServices, client.InNamespace(cluster.Namespace), client.MatchingLabels{ utils.IsManagedLabelName: "true", utils.ClusterLabelName: cluster.Name, }); err != nil { @@ -389,13 +389,13 @@ func (r *ClusterReconciler) serviceReconciler( ) var livingService corev1.Service - err := r.Client.Get(ctx, types.NamespacedName{Name: proposed.Name, Namespace: proposed.Namespace}, &livingService) + err := r.Get(ctx, types.NamespacedName{Name: proposed.Name, Namespace: proposed.Namespace}, &livingService) if apierrs.IsNotFound(err) { if !enabled { return nil } contextLogger.Info("creating service") - return r.Client.Create(ctx, proposed) + return r.Create(ctx, proposed) } if err != nil { return err @@ -412,7 +412,7 @@ func (r *ClusterReconciler) serviceReconciler( if !enabled { contextLogger.Info("deleting service, due to not being managed anymore") - return r.Client.Delete(ctx, &livingService) + return r.Delete(ctx, &livingService) } var shouldUpdate bool @@ -448,11 +448,11 @@ func (r *ClusterReconciler) serviceReconciler( if strategy == apiv1.ServiceUpdateStrategyPatch { contextLogger.Info("reconciling service") // we update to ensure that we substitute the selectors - return r.Client.Update(ctx, &livingService) + return r.Update(ctx, &livingService) } contextLogger.Info("deleting the service") - if err := r.Client.Delete(ctx, &livingService); err != nil { + if err := r.Delete(ctx, &livingService); err != nil { return err } @@ -1251,7 +1251,7 @@ func (r *ClusterReconciler) joinReplicaInstance( "job", job.Name, "primary", false, "storageSource", storageSource, - "role", job.Spec.Template.ObjectMeta.Labels[utils.JobRoleLabelName], + "role", job.Spec.Template.Labels[utils.JobRoleLabelName], ) r.Recorder.Eventf(cluster, "Normal", "CreatingInstance", diff --git a/internal/controller/cluster_create_test.go b/internal/controller/cluster_create_test.go index 0e0f0a0318..9db569a615 100644 --- a/internal/controller/cluster_create_test.go +++ b/internal/controller/cluster_create_test.go @@ -185,13 +185,13 @@ var _ = Describe("cluster_create unit tests", func() { svc.Spec.Selector = map[string]string{ "outdated": "selector", } - err := env.clusterReconciler.Client.Create(ctx, svc) + err := env.clusterReconciler.Create(ctx, svc) Expect(err).ToNot(HaveOccurred()) } checkService := func(before *corev1.Service, expectedLabels map[string]string) { var afterChangesService corev1.Service - err := env.clusterReconciler.Client.Get(ctx, types.NamespacedName{ + err := env.clusterReconciler.Get(ctx, 
types.NamespacedName{ Name: before.Name, Namespace: before.Namespace, }, &afterChangesService) @@ -309,7 +309,7 @@ var _ = Describe("cluster_create unit tests", func() { By("executing createOrPatchServiceAccount (patch)", func() { By("setting owner reference to nil", func() { - sa.ObjectMeta.OwnerReferences = nil + sa.OwnerReferences = nil err := env.client.Update(context.Background(), sa) Expect(err).ToNot(HaveOccurred()) }) @@ -899,9 +899,9 @@ var _ = Describe("createOrPatchClusterCredentialSecret", func() { Expect(err).NotTo(HaveOccurred()) // Assuming secretName is the name of the existing secret - proposed.ObjectMeta.Name = secretName - proposed.ObjectMeta.Labels = map[string]string{"old": "label"} - proposed.ObjectMeta.Annotations = map[string]string{"old": "annotation"} + proposed.Name = secretName + proposed.Labels = map[string]string{"old": "label"} + proposed.Annotations = map[string]string{"old": "annotation"} err = createOrPatchClusterCredentialSecret(ctx, cli, proposed) Expect(err).NotTo(HaveOccurred()) @@ -1007,7 +1007,7 @@ var _ = Describe("createOrPatchOwnedPodDisruptionBudget", func() { }) It("should update the existing PodDisruptionBudget if the metadata is different", func() { - pdb.ObjectMeta.Labels["newlabel"] = "newvalue" + pdb.Labels["newlabel"] = "newvalue" err = reconciler.createOrPatchOwnedPodDisruptionBudget(ctx, cluster, pdb) Expect(err).ShouldNot(HaveOccurred()) @@ -1295,19 +1295,19 @@ var _ = Describe("Service Reconciling", func() { It("should create the default services", func() { err := reconciler.reconcilePostgresServices(ctx, &cluster) Expect(err).NotTo(HaveOccurred()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadWriteName(), Namespace: cluster.Namespace}, &corev1.Service{}, ) Expect(err).ToNot(HaveOccurred()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadName(), Namespace: cluster.Namespace}, &corev1.Service{}, ) Expect(err).ToNot(HaveOccurred()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadOnlyName(), Namespace: cluster.Namespace}, &corev1.Service{}, @@ -1323,19 +1323,19 @@ var _ = Describe("Service Reconciling", func() { } err := reconciler.reconcilePostgresServices(ctx, &cluster) Expect(err).NotTo(HaveOccurred()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadWriteName(), Namespace: cluster.Namespace}, &corev1.Service{}, ) Expect(apierrs.IsNotFound(err)).To(BeTrue()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadName(), Namespace: cluster.Namespace}, &corev1.Service{}, ) Expect(apierrs.IsNotFound(err)).To(BeTrue()) - err = reconciler.Client.Get( + err = reconciler.Get( ctx, types.NamespacedName{Name: cluster.GetServiceReadOnlyName(), Namespace: cluster.Namespace}, &corev1.Service{}, diff --git a/internal/controller/cluster_delete_test.go b/internal/controller/cluster_delete_test.go index 34bbf1c95b..cb42c79dd0 100644 --- a/internal/controller/cluster_delete_test.go +++ b/internal/controller/cluster_delete_test.go @@ -116,7 +116,7 @@ var _ = Describe("ensures that deleteDanglingMonitoringQueries works correctly", cluster.Spec.Monitoring = &apiv1.MonitoringConfiguration{ DisableDefaultQueries: ptr.To(false), } - err := crReconciler.Client.Update(context.Background(), cluster) + err := crReconciler.Update(context.Background(), cluster) 
Expect(err).ToNot(HaveOccurred()) }) diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index f9d6ba8dd0..2c9fcf5eb5 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -154,7 +154,7 @@ func (r *ClusterReconciler) getRequestedImageInfo( // Get the referenced catalog catalogName := cluster.Spec.ImageCatalogRef.Name - err := r.Client.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: catalogName}, catalog) + err := r.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: catalogName}, catalog) if err != nil { if apierrs.IsNotFound(err) { r.Recorder.Eventf(cluster, "Warning", "DiscoverImage", "Cannot get %v/%v", diff --git a/internal/controller/cluster_restore.go b/internal/controller/cluster_restore.go index 590ad4db7f..7205e50815 100644 --- a/internal/controller/cluster_restore.go +++ b/internal/controller/cluster_restore.go @@ -311,7 +311,7 @@ func getNodeSerialsFromPVCs( highestSerial = serial } - instanceRole, _ := utils.GetInstanceRole(pvc.ObjectMeta.Labels) + instanceRole, _ := utils.GetInstanceRole(pvc.Labels) if instanceRole == specs.ClusterRoleLabelPrimary { primarySerial = serial } diff --git a/internal/controller/cluster_upgrade.go b/internal/controller/cluster_upgrade.go index 98b15a45fe..53f68a0fa5 100644 --- a/internal/controller/cluster_upgrade.go +++ b/internal/controller/cluster_upgrade.go @@ -258,7 +258,7 @@ func (r *ClusterReconciler) updateRestartAnnotation( primaryPod.Annotations = make(map[string]string) } primaryPod.Annotations[utils.ClusterRestartAnnotationName] = clusterRestart - if err := r.Client.Patch(ctx, &primaryPod, client.MergeFrom(original)); err != nil { + if err := r.Patch(ctx, &primaryPod, client.MergeFrom(original)); err != nil { return err } } @@ -398,7 +398,7 @@ func isPodNeedingRollout( // check if the pod has a valid podSpec func hasValidPodSpec(pod *corev1.Pod) bool { - podSpecAnnotation, hasStoredPodSpec := pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName] + podSpecAnnotation, hasStoredPodSpec := pod.Annotations[utils.PodSpecAnnotationName] if !hasStoredPodSpec { return false } @@ -604,7 +604,7 @@ func checkPodSpecIsOutdated( pod *corev1.Pod, cluster *apiv1.Cluster, ) (rollout, error) { - podSpecAnnotation, ok := pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName] + podSpecAnnotation, ok := pod.Annotations[utils.PodSpecAnnotationName] if !ok { return rollout{}, nil } diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index 87808f8164..b9b97e792d 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -762,14 +762,14 @@ var _ = Describe("hasValidPodSpec", func() { It("should return true", func() { podSpec := &corev1.PodSpec{} podSpecBytes, _ := json.Marshal(podSpec) - pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName] = string(podSpecBytes) + pod.Annotations[utils.PodSpecAnnotationName] = string(podSpecBytes) Expect(hasValidPodSpec(pod)).To(BeTrue()) }) }) Context("and the PodSpecAnnotation is invalid", func() { It("should return false", func() { - pod.ObjectMeta.Annotations[utils.PodSpecAnnotationName] = "invalid JSON" + pod.Annotations[utils.PodSpecAnnotationName] = "invalid JSON" Expect(hasValidPodSpec(pod)).To(BeFalse()) }) }) @@ -794,7 +794,7 @@ var _ = Describe("Cluster upgrade with podSpec reconciliation disabled", func() }) It("skips the rollout if the annotation that disables PodSpec reconciliation is set", func(ctx 
SpecContext) { - cluster.ObjectMeta.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" + cluster.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" pod, err := specs.NewInstance(ctx, cluster, 1, true) Expect(err).ToNot(HaveOccurred()) diff --git a/internal/controller/plugin_controller.go b/internal/controller/plugin_controller.go index 8f0885aca9..5b2175b804 100644 --- a/internal/controller/plugin_controller.go +++ b/internal/controller/plugin_controller.go @@ -233,7 +233,7 @@ func (r *PluginReconciler) mapSecretToPlugin(ctx context.Context, obj client.Obj logger := log.FromContext(ctx) var services corev1.ServiceList - if err := r.Client.List( + if err := r.List( ctx, &services, client.HasLabels{utils.PluginNameLabelName}, diff --git a/internal/controller/pooler_update_test.go b/internal/controller/pooler_update_test.go index 36cd44771d..820e7a1753 100644 --- a/internal/controller/pooler_update_test.go +++ b/internal/controller/pooler_update_test.go @@ -300,7 +300,7 @@ var _ = Describe("unit test of pooler_update reconciliation logic", func() { pooler := newFakePooler(env.client, cluster) res := &poolerManagedResources{Deployment: nil, Cluster: cluster} By("setting the reconcilePodSpec annotation to disabled on the pooler ", func() { - pooler.ObjectMeta.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" + pooler.Annotations[utils.ReconcilePodSpecAnnotationName] = "disabled" pooler.Spec.Template = &apiv1.PodTemplateSpec{ Spec: corev1.PodSpec{ TerminationGracePeriodSeconds: ptr.To(int64(100)), @@ -347,7 +347,7 @@ var _ = Describe("unit test of pooler_update reconciliation logic", func() { }) By("enable again, making sure pooler change updates the deployment", func() { - delete(pooler.ObjectMeta.Annotations, utils.ReconcilePodSpecAnnotationName) + delete(pooler.Annotations, utils.ReconcilePodSpecAnnotationName) beforeDep := getPoolerDeployment(ctx, env.client, pooler) pooler.Spec.Template.Spec.TerminationGracePeriodSeconds = ptr.To(int64(300)) err := env.poolerReconciler.updateDeployment(ctx, pooler, res) diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index 8fcb2181fc..30580b9013 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -82,7 +82,7 @@ func (r *DatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c // Get the database object var database apiv1.Database - if err := r.Client.Get(ctx, client.ObjectKey{ + if err := r.Get(ctx, client.ObjectKey{ Namespace: req.Namespace, Name: req.Name, }, &database); err != nil { diff --git a/internal/management/controller/publication_controller.go b/internal/management/controller/publication_controller.go index f1fe600164..1b32ef99b4 100644 --- a/internal/management/controller/publication_controller.go +++ b/internal/management/controller/publication_controller.go @@ -69,7 +69,7 @@ func (r *PublicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Get the publication object var publication apiv1.Publication - if err := r.Client.Get(ctx, client.ObjectKey{ + if err := r.Get(ctx, client.ObjectKey{ Namespace: req.Namespace, Name: req.Name, }, &publication); err != nil { diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go index d8d8c32ff5..82b3b986b6 100644 --- a/internal/management/controller/roles/postgres.go +++ b/internal/management/controller/roles/postgres.go @@ -310,7 
+310,7 @@ func appendInRoleOptions(role DatabaseRole, query *strings.Builder) { quotedInRoles[i] = pgx.Identifier{inRole}.Sanitize() } - query.WriteString(fmt.Sprintf(" IN ROLE %s ", strings.Join(quotedInRoles, ","))) + fmt.Fprintf(query, " IN ROLE %s ", strings.Join(quotedInRoles, ",")) } } @@ -357,7 +357,7 @@ func appendRoleOptions(role DatabaseRole, query *strings.Builder) { query.WriteString(" NOSUPERUSER") } - query.WriteString(fmt.Sprintf(" CONNECTION LIMIT %d", role.ConnectionLimit)) + fmt.Fprintf(query, " CONNECTION LIMIT %d", role.ConnectionLimit) } func appendPasswordOption(role DatabaseRole, query *strings.Builder) { @@ -369,7 +369,7 @@ func appendPasswordOption(role DatabaseRole, query *strings.Builder) { case !role.password.Valid: query.WriteString(" PASSWORD NULL") default: - query.WriteString(fmt.Sprintf(" PASSWORD %s", pq.QuoteLiteral(role.password.String))) + fmt.Fprintf(query, " PASSWORD %s", pq.QuoteLiteral(role.password.String)) } if role.ValidUntil.Valid { @@ -379,6 +379,6 @@ func appendPasswordOption(role DatabaseRole, query *strings.Builder) { } else { value = role.ValidUntil.InfinityModifier.String() } - query.WriteString(fmt.Sprintf(" VALID UNTIL %s", pq.QuoteLiteral(value))) + fmt.Fprintf(query, " VALID UNTIL %s", pq.QuoteLiteral(value)) } } diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go index 6056488455..16450f0772 100644 --- a/internal/management/controller/subscription_controller.go +++ b/internal/management/controller/subscription_controller.go @@ -62,7 +62,7 @@ func (r *SubscriptionReconciler) Reconcile(ctx context.Context, req ctrl.Request // Get the subscription object var subscription apiv1.Subscription - if err := r.Client.Get(ctx, client.ObjectKey{ + if err := r.Get(ctx, client.ObjectKey{ Namespace: req.Namespace, Name: req.Name, }, &subscription); err != nil { diff --git a/internal/plugin/resources/instance.go b/internal/plugin/resources/instance.go index c1a2400eed..a483dd53ba 100644 --- a/internal/plugin/resources/instance.go +++ b/internal/plugin/resources/instance.go @@ -62,7 +62,7 @@ func GetInstancePods(ctx context.Context, clusterName string) ([]corev1.Pod, cor var managedPods []corev1.Pod var primaryPod corev1.Pod for idx := range pods.Items { - for _, owner := range pods.Items[idx].ObjectMeta.OwnerReferences { + for _, owner := range pods.Items[idx].OwnerReferences { if owner.Kind == apiv1.ClusterKind && owner.Name == clusterName { managedPods = append(managedPods, pods.Items[idx]) if specs.IsPodPrimary(pods.Items[idx]) { diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 0b235cf0ba..0aecfbc183 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -1858,7 +1858,7 @@ func (v *ClusterCustomValidator) validateReplicaMode(r *apiv1.Cluster) field.Err } else if r.Spec.Bootstrap.PgBaseBackup == nil && r.Spec.Bootstrap.Recovery == nil && // this is needed because we only want to validate this during cluster creation, currently if we would have // to enable this logic only during creation and not cluster changes it would require a meaningful refactor - len(r.ObjectMeta.ResourceVersion) == 0 { + len(r.ResourceVersion) == 0 { result = append(result, field.Invalid( field.NewPath("spec", "replicaCluster"), replicaClusterConf, diff --git a/pkg/certs/certs.go b/pkg/certs/certs.go index 73d3a3f13a..7c177bfae3 100644 --- a/pkg/certs/certs.go +++ b/pkg/certs/certs.go @@ -194,11 +194,11 
@@ func (pair KeyPair) createAndSignPairWithValidity( } leafTemplate.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement - switch { - case usage == CertTypeClient: + switch usage { + case CertTypeClient: leafTemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} - case usage == CertTypeServer: + case CertTypeServer: leafTemplate.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} leafTemplate.KeyUsage |= x509.KeyUsageKeyEncipherment diff --git a/pkg/configfile/configfile_test.go b/pkg/configfile/configfile_test.go index e3ad955fbf..6a7706b7c7 100644 --- a/pkg/configfile/configfile_test.go +++ b/pkg/configfile/configfile_test.go @@ -23,10 +23,10 @@ import ( "os" "path/filepath" + "github.com/cloudnative-pg/machinery/pkg/fileutils" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - - "github.com/cloudnative-pg/machinery/pkg/fileutils" ) var _ = Describe("update Postgres configuration files", func() { diff --git a/pkg/management/postgres/logpipe/CSVReadWriter.go b/pkg/management/postgres/logpipe/CSVReadWriter.go index 22fdae76be..298d7e7958 100644 --- a/pkg/management/postgres/logpipe/CSVReadWriter.go +++ b/pkg/management/postgres/logpipe/CSVReadWriter.go @@ -58,7 +58,7 @@ func (r *CSVRecordReadWriter) Read() ([]string, error) { for _, allowedFields := range r.allowedFieldsPerRecord { if len(record) == allowedFields { - r.Reader.FieldsPerRecord = allowedFields + r.FieldsPerRecord = allowedFields return record, nil } } diff --git a/pkg/management/postgres/logpipe/pgaudit.go b/pkg/management/postgres/logpipe/pgaudit.go index 14a392af3f..c46d14bb87 100644 --- a/pkg/management/postgres/logpipe/pgaudit.go +++ b/pkg/management/postgres/logpipe/pgaudit.go @@ -67,7 +67,7 @@ func (r *PgAuditLoggingDecorator) FromCSV(content []string) NamedRecord { return r.LoggingRecord } - _, err := r.CSVReadWriter.Write([]byte(record)) + _, err := r.Write([]byte(record)) if err != nil { return r.LoggingRecord } @@ -76,7 +76,7 @@ func (r *PgAuditLoggingDecorator) FromCSV(content []string) NamedRecord { return r.LoggingRecord } - r.LoggingRecord.Message = "" + r.Message = "" r.Audit.fromCSV(auditContent) return r } diff --git a/pkg/management/postgres/webserver/client/remote/instance.go b/pkg/management/postgres/webserver/client/remote/instance.go index 4ed248df92..da29fa4242 100644 --- a/pkg/management/postgres/webserver/client/remote/instance.go +++ b/pkg/management/postgres/webserver/client/remote/instance.go @@ -190,8 +190,8 @@ func (r *instanceClientImpl) GetPgControlDataFromInstance( if err != nil { return "", err } - r.Client.Timeout = defaultRequestTimeout - resp, err := r.Client.Do(req) + r.Timeout = defaultRequestTimeout + resp, err := r.Do(req) if err != nil { return "", err } @@ -250,8 +250,8 @@ func (r *instanceClientImpl) UpgradeInstanceManager( } req.Body = binaryFileStream - r.Client.Timeout = noRequestTimeout - resp, err := r.Client.Do(req) + r.Timeout = noRequestTimeout + resp, err := r.Do(req) // This is the desired response. The instance manager will // synchronously update and this call won't return. if isEOF(err) { @@ -299,8 +299,8 @@ func (r *instanceClientImpl) rawInstanceStatusRequest( return result } - r.Client.Timeout = defaultRequestTimeout - resp, err := r.Client.Do(req) + r.Timeout = defaultRequestTimeout + resp, err := r.Do(req) if err != nil { result.Error = err return result @@ -379,7 +379,7 @@ func (r *instanceClientImpl) ArchivePartialWAL(ctx context.Context, pod *corev1. 
if err != nil { return "", err } - resp, err := r.Client.Do(req) + resp, err := r.Do(req) if err != nil { return "", err } diff --git a/pkg/reconciler/hibernation/status_test.go b/pkg/reconciler/hibernation/status_test.go index 481753052c..9e52b3f864 100644 --- a/pkg/reconciler/hibernation/status_test.go +++ b/pkg/reconciler/hibernation/status_test.go @@ -47,7 +47,7 @@ var _ = Describe("Hibernation annotation management", func() { } Expect(isHibernationEnabled(&cluster)).To(BeTrue()) - cluster.ObjectMeta.Annotations[utils.HibernationAnnotationName] = HibernationOff + cluster.Annotations[utils.HibernationAnnotationName] = HibernationOff Expect(isHibernationEnabled(&cluster)).To(BeFalse()) }) }) diff --git a/pkg/reconciler/instance/metadata.go b/pkg/reconciler/instance/metadata.go index 213237e8d9..08a97b7346 100644 --- a/pkg/reconciler/instance/metadata.go +++ b/pkg/reconciler/instance/metadata.go @@ -187,11 +187,11 @@ func updateRoleLabels( // it is important to note that even if utils.ClusterRoleLabelName is deprecated, // we still ensure that the values are aligned between the two fields - podRole, hasRole := instance.ObjectMeta.Labels[utils.ClusterRoleLabelName] - newPodRole, newHasRole := instance.ObjectMeta.Labels[utils.ClusterInstanceRoleLabelName] + podRole, hasRole := instance.Labels[utils.ClusterRoleLabelName] + newPodRole, newHasRole := instance.Labels[utils.ClusterInstanceRoleLabelName] - switch { - case instance.Name == cluster.Status.CurrentPrimary: + switch instance.Name { + case cluster.Status.CurrentPrimary: if !hasRole || podRole != specs.ClusterRoleLabelPrimary || !newHasRole || newPodRole != specs.ClusterRoleLabelPrimary { contextLogger.Info("Setting primary label", "pod", instance.Name) diff --git a/pkg/reconciler/majorupgrade/reconciler.go b/pkg/reconciler/majorupgrade/reconciler.go index b75e8c6f0f..40f9a4c2a6 100644 --- a/pkg/reconciler/majorupgrade/reconciler.go +++ b/pkg/reconciler/majorupgrade/reconciler.go @@ -282,7 +282,7 @@ func getPrimarySerial( pvcs []corev1.PersistentVolumeClaim, ) (int, error) { for _, pvc := range pvcs { - instanceRole, _ := utils.GetInstanceRole(pvc.ObjectMeta.Labels) + instanceRole, _ := utils.GetInstanceRole(pvc.Labels) if instanceRole != specs.ClusterRoleLabelPrimary { continue } diff --git a/pkg/reconciler/persistentvolumeclaim/metadata.go b/pkg/reconciler/persistentvolumeclaim/metadata.go index 85ae5b65cc..01108a4e35 100644 --- a/pkg/reconciler/persistentvolumeclaim/metadata.go +++ b/pkg/reconciler/persistentvolumeclaim/metadata.go @@ -90,10 +90,10 @@ func reconcileInstanceRoleLabel( instanceReconciler := metadataReconciler{ name: "instance-role", isUpToDate: func(pvc *corev1.PersistentVolumeClaim) bool { - if pvc.ObjectMeta.Labels[utils.ClusterRoleLabelName] != instanceRole { + if pvc.Labels[utils.ClusterRoleLabelName] != instanceRole { return false } - if pvc.ObjectMeta.Labels[utils.ClusterInstanceRoleLabelName] != instanceRole { + if pvc.Labels[utils.ClusterInstanceRoleLabelName] != instanceRole { return false } diff --git a/pkg/reconciler/persistentvolumeclaim/status.go b/pkg/reconciler/persistentvolumeclaim/status.go index 3ca29d16dc..1cd8688a30 100644 --- a/pkg/reconciler/persistentvolumeclaim/status.go +++ b/pkg/reconciler/persistentvolumeclaim/status.go @@ -110,7 +110,7 @@ func EnrichStatus( } // There's no point in reattaching ignored PVCs - if pvc.ObjectMeta.DeletionTimestamp != nil { + if pvc.DeletionTimestamp != nil { continue } @@ -162,7 +162,7 @@ func classifyPVC( instanceName string, ) status { // PVC to ignore - if 
pvc.ObjectMeta.DeletionTimestamp != nil || hasUnknownStatus(ctx, pvc) { + if pvc.DeletionTimestamp != nil || hasUnknownStatus(ctx, pvc) { return ignored } diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource.go b/pkg/reconciler/persistentvolumeclaim/storagesource.go index 350e59f8c2..8f808f9142 100644 --- a/pkg/reconciler/persistentvolumeclaim/storagesource.go +++ b/pkg/reconciler/persistentvolumeclaim/storagesource.go @@ -92,7 +92,7 @@ func GetCandidateStorageSourceForReplica( if result := getCandidateSourceFromBackupList( ctx, - cluster.ObjectMeta.CreationTimestamp, + cluster.CreationTimestamp, backupList, ); result != nil { return result @@ -129,7 +129,7 @@ func getCandidateSourceFromBackupList( continue } - if backup.ObjectMeta.CreationTimestamp.Before(&clusterCreationTime) { + if backup.CreationTimestamp.Before(&clusterCreationTime) { contextLogger.Info( "skipping backup as a potential recovery storage source candidate " + "because if was created before the Cluster object") diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go index 08defc95f8..127b6f2040 100644 --- a/pkg/specs/pods.go +++ b/pkg/specs/pods.go @@ -280,9 +280,9 @@ func createPostgresContainers(cluster apiv1.Cluster, envConfig EnvConfig, enable } if enableHTTPS { - containers[0].StartupProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS - containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS - containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS + containers[0].StartupProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + containers[0].LivenessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + containers[0].ReadinessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS containers[0].Command = append(containers[0].Command, "--status-port-tls") } diff --git a/pkg/specs/podspec_diff.go b/pkg/specs/podspec_diff.go index 38523a142a..4e4e04403c 100644 --- a/pkg/specs/podspec_diff.go +++ b/pkg/specs/podspec_diff.go @@ -199,7 +199,7 @@ func doContainersMatch(currentContainer, targetContainer corev1.Container) (bool }, "resources": func() bool { // semantic equality will compare the two objects semantically, not only numbers - return equality.Semantic.Equalities.DeepEqual( + return equality.Semantic.DeepEqual( currentContainer.Resources, targetContainer.Resources, ) diff --git a/pkg/specs/roles.go b/pkg/specs/roles.go index 8b18c95e10..096b2d1079 100644 --- a/pkg/specs/roles.go +++ b/pkg/specs/roles.go @@ -296,13 +296,13 @@ func externalClusterSecrets(cluster apiv1.Cluster) []string { if barmanObjStore := server.BarmanObjectStore; barmanObjStore != nil { result = append( result, - s3CredentialsSecrets(barmanObjStore.BarmanCredentials.AWS)...) + s3CredentialsSecrets(barmanObjStore.AWS)...) result = append( result, - azureCredentialsSecrets(barmanObjStore.BarmanCredentials.Azure)...) + azureCredentialsSecrets(barmanObjStore.Azure)...) result = append( result, - googleCredentialsSecrets(barmanObjStore.BarmanCredentials.Google)...) + googleCredentialsSecrets(barmanObjStore.Google)...) if barmanObjStore.EndpointCA != nil { result = append(result, barmanObjStore.EndpointCA.Name) } @@ -319,13 +319,13 @@ func backupSecrets(cluster apiv1.Cluster, backupOrigin *apiv1.Backup) []string { if cluster.Spec.Backup != nil && cluster.Spec.Backup.BarmanObjectStore != nil { result = append( result, - s3CredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.BarmanCredentials.AWS)...) + s3CredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.AWS)...) 
result = append( result, - azureCredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.BarmanCredentials.Azure)...) + azureCredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.Azure)...) result = append( result, - googleCredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.BarmanCredentials.Google)...) + googleCredentialsSecrets(cluster.Spec.Backup.BarmanObjectStore.Google)...) } // Secrets needed by Barman, if set @@ -338,13 +338,13 @@ func backupSecrets(cluster apiv1.Cluster, backupOrigin *apiv1.Backup) []string { if backupOrigin != nil { result = append( result, - s3CredentialsSecrets(backupOrigin.Status.BarmanCredentials.AWS)...) + s3CredentialsSecrets(backupOrigin.Status.AWS)...) result = append( result, - azureCredentialsSecrets(backupOrigin.Status.BarmanCredentials.Azure)...) + azureCredentialsSecrets(backupOrigin.Status.Azure)...) result = append( result, - googleCredentialsSecrets(backupOrigin.Status.BarmanCredentials.Google)...) + googleCredentialsSecrets(backupOrigin.Status.Google)...) } return result diff --git a/pkg/utils/labels_annotations_test.go b/pkg/utils/labels_annotations_test.go index a6975fb096..d93796af5b 100644 --- a/pkg/utils/labels_annotations_test.go +++ b/pkg/utils/labels_annotations_test.go @@ -155,7 +155,7 @@ var _ = Describe("Annotate pods management", func() { } AnnotateAppArmor(&pod.ObjectMeta, &pod.Spec, annotations) - _, isPresent := pod.ObjectMeta.Annotations[appArmorPostgres] + _, isPresent := pod.Annotations[appArmorPostgres] Expect(isPresent).To(BeFalse()) }) }) diff --git a/tests/e2e/apparmor_test.go b/tests/e2e/apparmor_test.go index dd2ab68201..8d75e5b932 100644 --- a/tests/e2e/apparmor_test.go +++ b/tests/e2e/apparmor_test.go @@ -61,7 +61,7 @@ var _ = Describe("AppArmor support", Serial, Label(tests.LabelNoOpenshift, tests // Gathers the pod list using annotations podList, _ := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) for _, pod := range podList.Items { - annotation := pod.ObjectMeta.Annotations[pkgutils.AppArmorAnnotationPrefix+"/"+specs.PostgresContainerName] + annotation := pod.Annotations[pkgutils.AppArmorAnnotationPrefix+"/"+specs.PostgresContainerName] Expect(annotation).ShouldNot(BeEmpty(), fmt.Sprintf("annotation for apparmor is not on pod %v", specs.PostgresContainerName)) Expect(annotation).Should(BeEquivalentTo("runtime/default"), diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 68bd5d55b4..e28d510f66 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -2160,7 +2160,7 @@ func assertPodIsRecreated(namespace, poolerSampleFile string) { } if len(podList.Items) == 1 { if utils.IsPodActive(podList.Items[0]) && utils.IsPodReady(podList.Items[0]) { - if !(podNameBeforeDelete == podList.Items[0].GetName()) { + if podNameBeforeDelete != podList.Items[0].GetName() { return true, err } } diff --git a/tests/e2e/nodeselector_test.go b/tests/e2e/nodeselector_test.go index a422db46cc..a771194863 100644 --- a/tests/e2e/nodeselector_test.go +++ b/tests/e2e/nodeselector_test.go @@ -126,7 +126,7 @@ var _ = Describe("nodeSelector", Label(tests.LabelPodScheduling), func() { for _, nodeDetails := range nodeList.Items { if (nodeDetails.Spec.Unschedulable != true) && (len(nodeDetails.Spec.Taints) == 0) { - nodeName = nodeDetails.ObjectMeta.Name + nodeName = nodeDetails.Name break } } diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index 2ee70fa746..e170c92071 100644 --- a/tests/e2e/operator_unavailable_test.go +++ 
b/tests/e2e/operator_unavailable_test.go @@ -169,7 +169,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te podList := &corev1.PodList{} err := env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(operatorNamespace)) Expect(err).ToNot(HaveOccurred()) - operatorPodName = podList.Items[0].ObjectMeta.Name + operatorPodName = podList.Items[0].Name // Force-delete the operator and the primary quickDelete := &ctrlclient.DeleteOptions{ diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 0dfdf25237..4c3b4c04eb 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -94,8 +94,8 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, // Verify that the tablespace exists on the primary pod of a cluster hasTablespaceAndOwner := func(cluster *apiv1.Cluster, tablespace, owner string) (bool, error) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { return false, err @@ -915,8 +915,8 @@ func AssertClusterHasMountPointsAndVolumesForTablespaces( numTablespaces int, timeout int, ) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name podMountPaths := func(pod corev1.Pod) (bool, []string) { var hasPostgresContainer bool var mountPaths []string @@ -987,8 +987,8 @@ func getDatabasUserUID(cluster *apiv1.Cluster, dbContainer *corev1.Container) in } func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeout int) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name By("checking all the required PVCs were created", func() { Eventually(func(g Gomega) { pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) @@ -1051,8 +1051,8 @@ func AssertClusterHasPvcsAndDataDirsForTablespaces(cluster *apiv1.Cluster, timeo } func AssertDatabaseContainsTablespaces(cluster *apiv1.Cluster, timeout int) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name By("checking the expected tablespaces are in the database", func() { Eventually(func(g Gomega) { instances, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) @@ -1081,8 +1081,8 @@ func AssertDatabaseContainsTablespaces(cluster *apiv1.Cluster, timeout int) { } func AssertTempTablespaceContent(cluster *apiv1.Cluster, timeout int, content string) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name By("checking the expected setting in a new PG session", func() { Eventually(func(g Gomega) { primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) @@ -1107,8 +1107,8 @@ func AssertTempTablespaceContent(cluster *apiv1.Cluster, timeout int, content st } func AssertTempTablespaceBehavior(cluster *apiv1.Cluster, expectedTempTablespaceName string) { - namespace := cluster.ObjectMeta.Namespace - clusterName := cluster.ObjectMeta.Name + namespace := cluster.Namespace + clusterName := cluster.Name primary, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) if err != nil { diff --git 
a/tests/utils/sternmultitailer/multitailer.go b/tests/utils/sternmultitailer/multitailer.go index b6e06baa0f..ad2adf5998 100644 --- a/tests/utils/sternmultitailer/multitailer.go +++ b/tests/utils/sternmultitailer/multitailer.go @@ -162,7 +162,7 @@ func outputWriter(baseDir string, logReader io.Reader) { continue } - _, err = file.WriteString(fmt.Sprintf("%v\n", logLine.Message)) + _, err = fmt.Fprintf(file, "%v\n", logLine.Message) if err != nil { fmt.Printf("could not write message to file %v: %v\n", file.Name(), err) continue From bd6ee38ab152e0fed1c8f87795984fd455c57ed7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 9 May 2025 21:02:04 +0200 Subject: [PATCH 571/836] fix(deps): update k8s.io/utils digest to 0f33e8f (main) (#7472) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3cefab3fe6..67a61e0b79 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( k8s.io/apimachinery v0.32.3 k8s.io/cli-runtime v0.32.3 k8s.io/client-go v0.32.3 - k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e + k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 5f42b0b4a0..931f79b1a2 100644 --- a/go.sum +++ b/go.sum @@ -297,8 +297,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 h1:jgJW5IePPXLGB8e/1wvd0Ich9QE97RvvF3a8J3fP/Lg= +k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= From 1e53f90d64995c3441e2e859b99d7b5ec704b640 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Sun, 11 May 2025 16:42:58 +0200 Subject: [PATCH 572/836] chore(volumesnapshot): correct `getBackupVolumeSnapshots` parameter typo (#7410) Signed-off-by: Armando Ruocco --- pkg/reconciler/backup/volumesnapshot/resources.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/reconciler/backup/volumesnapshot/resources.go b/pkg/reconciler/backup/volumesnapshot/resources.go index 2342ebc5b6..04a4aa761b 100644 --- a/pkg/reconciler/backup/volumesnapshot/resources.go +++ b/pkg/reconciler/backup/volumesnapshot/resources.go @@ -105,7 +105,7 @@ func getBackupVolumeSnapshots( ctx context.Context, cli client.Client, namespace string, - backupLabelName string, + backupName string, ) (slice, error) { var list storagesnapshotv1.VolumeSnapshotList @@ -113,7 +113,7 @@ func getBackupVolumeSnapshots( ctx, &list, client.InNamespace(namespace), - client.MatchingLabels{utils.BackupNameLabelName: backupLabelName}, + client.MatchingLabels{utils.BackupNameLabelName: backupName}, ); err != nil { return nil, err } From 
629b6ba8ab970c5de216c81df3bbc2da0c576275 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Mon, 12 May 2025 14:59:58 +0200 Subject: [PATCH 573/836] docs: update quickstart and monitoring pages with Grafana dashboard (#7350) Closes #7349 Signed-off-by: Jaime Silvela --- docs/src/monitoring.md | 111 ++++++++++++++++++++++++++--------------- docs/src/quickstart.md | 58 +++++++++++---------- 2 files changed, 105 insertions(+), 64 deletions(-) diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md index 9c72548899..8209ddd97c 100644 --- a/docs/src/monitoring.md +++ b/docs/src/monitoring.md @@ -4,7 +4,7 @@ !!! Important Installing Prometheus and Grafana is beyond the scope of this project. We assume they are correctly installed in your system. However, for - experimentation we provide instructions in + experimentation we provide instructions in [Part 4 of the Quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana). ## Monitoring Instances @@ -55,10 +55,10 @@ by specifying a list of one or more databases in the `target_databases` option. with Prometheus and Grafana, you can find a quick setup guide in [Part 4 of the quickstart](quickstart.md#part-4-monitor-clusters-with-prometheus-and-grafana) -### Prometheus Operator example +### Monitoring with the Prometheus operator A specific PostgreSQL cluster can be monitored using the -[Prometheus Operator's](https://github.com/prometheus-operator/prometheus-operator) resource +[Prometheus Operator's](https://github.com/prometheus-operator/prometheus-operator) resource [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/v0.75.1/Documentation/api.md#podmonitor). A `PodMonitor` that correctly points to the Cluster can be automatically created by the operator by setting @@ -637,7 +637,6 @@ The possible values for `usage` are: | `DURATION` | use this column as a text duration (in milliseconds) | | `HISTOGRAM` | use this column as a histogram | - Please visit the ["Metric Types" page](https://prometheus.io/docs/concepts/metric_types/) from the Prometheus documentation for more information. @@ -653,7 +652,6 @@ cnpg__{= ... } 8080:8080 +``` + +With port forwarding active, the metrics are easily viewable on a browser at +[`localhost:8080/metrics`](http://localhost:8080/metrics). + +### Using curl + +Create the `curl` pod with the following command: ```yaml +kubectl apply -f - <:9187/metrics @@ -793,14 +822,15 @@ kubectl exec -ti curl -- curl -s ${POD_IP}:9187/metrics ``` If you enabled TLS metrics, run instead: + ```shell kubectl exec -ti curl -- curl -sk https://${POD_IP}:9187/metrics ``` -In case you want to access the metrics of the operator, you need to point +To access the metrics of the operator, you need to point to the pod where the operator is running, and use TCP port 8080 as target. -At the end of the inspection, please make sure you delete the `curl` pod: +When you're done inspecting metrics, please remember to delete the `curl` pod: ```shell kubectl delete -f curl.yaml @@ -827,12 +857,15 @@ section for context: In addition, we provide the "raw" sources for the Prometheus alert rules in the `alerts.yaml` file. -The [Grafana dashboard](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json) has a dedicated repository now. - -Note that, for the configuration of `kube-prometheus-stack`, other fields and -settings are available over what we provide in `kube-stack-config.yaml`. 
+A Grafana dashboard for CloudNativePG clusters and operator, is kept in the +dedicated repository [`cloudnative-pg/grafana-dashboards`](https://github.com/cloudnative-pg/grafana-dashboards/tree/main) +as a dashboard JSON configuration: +[`grafana-dashboard.json`](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json). +The file can be downloaded, and imported into Grafana +(menus: Dashboard > New > Import). -You can execute `helm show values prometheus-community/kube-prometheus-stack` -to view them. For further information, please refer to the +For a general reference on the settings available on `kube-prometheus-stack`, +you can execute `helm show values prometheus-community/kube-prometheus-stack`. +Please refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) -page. +page for more detail. diff --git a/docs/src/quickstart.md b/docs/src/quickstart.md index 79e4a35fac..a076829b17 100644 --- a/docs/src/quickstart.md +++ b/docs/src/quickstart.md @@ -166,7 +166,7 @@ In this section we show how to deploy Prometheus and Grafana for observability, and how to create a Grafana Dashboard to monitor CloudNativePG clusters, and a set of Prometheus Rules defining alert conditions. -We leverage the [Kube-Prometheus stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), +We leverage the [Kube-Prometheus stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart, which is maintained by the [Prometheus Community](https://github.com/prometheus-community). Please refer to the project website for additional documentation and background. @@ -184,7 +184,8 @@ If you don't have [Helm](https://helm.sh) installed yet, please follow the system. We need to add the `prometheus-community` helm chart repository, and then -install the *Kube Prometheus stack* using the sample configuration we provide: +install the *Kube Prometheus stack* with our sample configuration +[`kube-stack-config.yaml`](./samples/monitoring/kube-stack-config.yaml). We can accomplish this with the following commands: @@ -198,16 +199,17 @@ helm upgrade --install \ prometheus-community/kube-prometheus-stack ``` -After completion, you will have Prometheus, Grafana and Alert Manager installed with values from the -`kube-stack-config.yaml` file: +After completion, you will have Prometheus, Grafana, and Alert Manager, +configured with the `kube-stack-config.yaml` file: -- From the Prometheus installation, you will have the Prometheus Operator watching for **any** `PodMonitor` - (see [*monitoring*](monitoring.md)). -- The Grafana installation will be watching for a Grafana dashboard `ConfigMap`. +- From the Prometheus installation, you will have the Prometheus Operator + watching for **any** `PodMonitor` (see [*monitoring*](monitoring.md)). +- Alert Manager and Grafana are both enabled. !!! Seealso - For further information about the above command, refer to the [helm install](https://helm.sh/docs/helm/helm_install/) - documentation. + For further information about the above helm commands, refer to the [helm + install](https://helm.sh/docs/helm/helm_install/) + documentation. 
You can see several Custom Resources have been created: @@ -236,7 +238,7 @@ prometheus-community-kube-prometheus ClusterIP 9090/TCP ### Viewing with Prometheus -At this point, a CloudNativePG cluster deployed with Monitoring activated +At this point, a CloudNativePG cluster deployed with monitoring activated would be observable via Prometheus. For example, you could deploy a simple cluster with `PodMonitor` enabled: @@ -267,13 +269,15 @@ kubectl port-forward svc/prometheus-community-kube-prometheus 9090 Then access the Prometheus console locally at: [`http://localhost:9090/`](http://localhost:9090/) -Assuming that the monitoring stack was successfully deployed, and you have a Cluster with `enablePodMonitor: true`, -you should find a series of metrics relating to CloudNativePG clusters. Again, please -refer to the [*monitoring section*](monitoring.md) for more information. +You should find a series of metrics relating to CloudNativePG clusters. +Please refer to the [monitoring section](monitoring.md) for more information. ![local prometheus](images/prometheus-local.png) -You can now define some alerts by creating a `prometheusRule`: +You can also monitor the CloudNativePG operator by creating a PodMonitor to +target it. See the relevant section in the [monitoring page](monitoring.md#monitoring-the-operator-with-prometheus). + +You can define some alerts by creating a `prometheusRule`: ``` sh kubectl apply -f \ @@ -293,28 +297,32 @@ we just installed. ### Grafana Dashboard -In our "plain" installation, Grafana is deployed with no predefined dashboards. +In our installation so far, Grafana is deployed with no predefined dashboards. -You can port-forward: +To open Grafana, you can port-forward the grafana service: ``` sh kubectl port-forward svc/prometheus-community-grafana 3000:80 ``` -And access Grafana locally at [`http://localhost:3000/`](http://localhost:3000/) -providing the credentials `admin` as username, `prom-operator` as password (defined in `kube-stack-config.yaml`). +and access Grafana locally at [`http://localhost:3000/`](http://localhost:3000/) +providing the credentials `admin` as username, `prom-operator` as password +(defined in `kube-stack-config.yaml`). -CloudNativePG provides a default dashboard for Grafana as part of the official -[Helm chart](https://github.com/cloudnative-pg/charts). You can also download the +CloudNativePG provides a default dashboard for Grafana in the dedicated +[`grafana-dashboards` repository](https://github.com/cloudnative-pg/grafana-dashboards). +You can download the file [grafana-dashboard.json](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json) -file and manually importing it via the GUI. +and manually import it via the GUI (menu: Dashboards > New > Import). +You can now click on the `CloudNativePG` dashboard just created: + +![local grafana](images/grafana-local.png) !!! Warning Some graphs in the previous dashboard make use of metrics that are in alpha stage by the time this was created, like `kubelet_volume_stats_available_bytes` and `kubelet_volume_stats_capacity_bytes` producing some graphs to show `No data`. -![local grafana](images/grafana-local.png) - -Note that in our local setup, Prometheus and Grafana are configured to automatically discover -and monitor any CloudNativePG clusters deployed with the Monitoring feature enabled. 
+Note that in our local setup, Prometheus and Grafana are configured to +automatically discover and monitor any CloudNativePG clusters deployed with the +Monitoring feature enabled. From e9bcc48b14ca40056c41883737afb1b4246eb396 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 12 May 2025 16:59:24 +0200 Subject: [PATCH 574/836] feat(cluster,status): track latest `Instance` IP (#7546) This patch makes the operator track the latest known Pod IP in the Cluster status. The newly introduced field could be used by the instance manager to test the connectivity with the other PostgreSQL Instances. Related to #7465 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- api/v1/cluster_types.go | 2 ++ config/crd/bases/postgresql.cnpg.io_clusters.yaml | 3 +++ docs/src/cloudnative-pg.v1.md | 7 +++++++ internal/controller/cluster_status.go | 1 + 4 files changed, 13 insertions(+) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index e765ad51a1..9e3b35f9a6 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -982,6 +982,8 @@ type InstanceReportedState struct { // indicates on which TimelineId the instance is // +optional TimeLineID int `json:"timeLineID,omitempty"` + // IP address of the instance + IP string `json:"ip,omitempty"` } // ClusterConditionType defines types of cluster conditions diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index af488bffca..7b54737091 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -6125,6 +6125,9 @@ spec: description: InstanceReportedState describes the last reported state of an instance during a reconciliation loop properties: + ip: + description: IP address of the instance + type: string isPrimary: description: indicates if an instance is the primary one type: boolean diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 3112b4c5eb..4ffd8790ec 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -3193,6 +3193,13 @@ conflict with the operator's intended functionality or design.

<p>indicates on which TimelineId the instance is</p>
</td>
</tr>
+<tr><td><code>ip</code> <B>[Required]</B><br/>
+<code>string</code>
+</td>
+<td>
+<p>IP address of the instance</p>
+ + diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index 62d0aa56ca..6cf6d3c3f2 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -758,6 +758,7 @@ func (r *ClusterReconciler) updateClusterStatusThatRequiresInstancesState( cluster.Status.InstancesReportedState[apiv1.PodName(item.Pod.Name)] = apiv1.InstanceReportedState{ IsPrimary: item.IsPrimary, TimeLineID: item.TimeLineID, + IP: item.Pod.Status.PodIP, } } From 9ad673a0be15478dc48ccd0e9f95f0e1a5017d0c Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 12 May 2025 17:57:29 +0200 Subject: [PATCH 575/836] build: remove deprecated GoReleaser parameters (#7545) The parameters archives.builds and nfpms.builds were deprecated in the latest versions of GoReleaser and should be replaced by *.ids more info: * https://goreleaser.com/deprecations/#archivesbuilds * https://goreleaser.com/deprecations/#nfpmsbuilds Closes #7255 Signed-off-by: Jonathan Gonzalez V. --- .goreleaser.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.goreleaser.yml b/.goreleaser.yml index 191b7fa2b0..3dad477e41 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -95,7 +95,7 @@ archives: {{- if eq .Arch "amd64" }}x86_64 {{- else if eq .Arch "386" }}i386 {{- else }}{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ end }} - builds: + ids: - kubectl-cnpg nfpms: @@ -109,7 +109,7 @@ nfpms: homepage: https://github.com/cloudnative-pg/cloudnative-pg bindir: /usr/local/bin maintainer: 'Marco Nenciarini ' - builds: + ids: - kubectl-cnpg formats: - rpm From 0a18bcb19b1e900ae6220e0b2671a1bc1a15def4 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 12 May 2025 20:53:40 +0200 Subject: [PATCH 576/836] feat(instance): liveness probe isolation checker (#7466) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enhances the liveness probe logic for primary instances in HA clusters to detect network isolation scenarios. The probe first checks if the Pod can reach the API server. If the API server is unreachable, it then attempts to contact peer PostgreSQL instances in the same cluster via a REST entrypoint. If neither the API server nor any replicas are reachable, the liveness probe fails, prompting Kubernetes to restart the Pod. Upon restart, the instance manager will refuse to start PostgreSQL, as it cannot download the Cluster definition—preventing unsafe behavior in isolated environments. This behavior applies only to primary instances in clusters with HA replicas and is disabled by default. 
Closes: #7465 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Niccolò Fei Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Niccolò Fei Co-authored-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini --- docs/src/instance_manager.md | 42 ++++ internal/webhook/v1/cluster_webhook.go | 22 ++ internal/webhook/v1/cluster_webhook_test.go | 40 ++++ pkg/certs/tls.go | 15 +- .../postgres/webserver/probes/liveness.go | 171 +++++++++++++++ .../postgres/webserver/probes/pinger.go | 204 ++++++++++++++++++ pkg/management/postgres/webserver/remote.go | 16 +- pkg/management/url/url.go | 3 + pkg/utils/labels_annotations.go | 7 + .../cluster-self-fencing.yaml.template | 27 +++ tests/e2e/self_fencing_test.go | 142 ++++++++++++ 11 files changed, 683 insertions(+), 6 deletions(-) create mode 100644 pkg/management/postgres/webserver/probes/liveness.go create mode 100644 pkg/management/postgres/webserver/probes/pinger.go create mode 100644 tests/e2e/fixtures/self-fencing/cluster-self-fencing.yaml.template create mode 100644 tests/e2e/self_fencing_test.go diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index 0e1641d313..a914637b82 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -188,6 +188,48 @@ spec: failureThreshold: 10 ``` +### Primary Isolation (alpha) + +CloudNativePG 1.26 introduces an opt-in experimental behavior for the liveness +probe of a PostgreSQL primary, which will report a failure if **both** of the +following conditions are met: + +1. The instance manager cannot reach the Kubernetes API server +2. The instance manager cannot reach **any** other instance via the instance manager’s REST API + +The effect of this behavior is to consider an isolated primary to be not alive and subsequently **shut it down** when the liveness probe fails. + +It is **disabled by default** and can be enabled by adding the following +annotation to the `Cluster` resource: + +```yaml +metadata: + annotations: + alpha.cnpg.io/livenessPinger: '{"enabled": true}' +``` + +!!! Warning + This feature is experimental and will be introduced in a future CloudNativePG + release with a new API. If you decide to use it now, note that the API **will + change**. + +!!! Important + If you plan to enable this experimental feature, be aware that the default + liveness probe settings—automatically derived from `livenessProbeTimeout`—might + be aggressive (30 seconds). As such, we recommend explicitly setting the + liveness probe configuration to suit your environment. + +The annotation also accepts two optional network settings: `requestTimeout` +and `connectionTimeout`, both defaulting to `500` (in milliseconds). +In cloud environments, you may need to increase these values. +For example: + +```yaml +metadata: + annotations: + alpha.cnpg.io/livenessPinger: '{"enabled": true,"requestTimeout":1000,"connectionTimeout":1000}' +``` + ## Readiness Probe The readiness probe starts once the startup probe has successfully completed. 
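[Editor's illustration] To make the annotation contract documented above concrete, here is a minimal, self-contained Go sketch of how the `alpha.cnpg.io/livenessPinger` JSON payload decodes and how the timeout defaults are applied. It mirrors the `LivenessPingerCfg` struct and the defaulting logic introduced later in this patch; the `main` wrapper and the sample payload are illustrative only and are not part of the operator.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// livenessPingerCfg mirrors the LivenessPingerCfg struct added by this
// patch: "enabled" is mandatory, while the two timeouts are optional
// and expressed in milliseconds.
type livenessPingerCfg struct {
	Enabled           *bool `json:"enabled"`
	RequestTimeout    int   `json:"requestTimeout,omitempty"`
	ConnectionTimeout int   `json:"connectionTimeout,omitempty"`
}

func main() {
	// Sample annotation payload (illustrative): only "enabled" is set,
	// so both timeouts fall back to their defaults.
	raw := `{"enabled": true}`

	var cfg livenessPingerCfg
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err) // the admission webhook rejects malformed payloads
	}
	if cfg.Enabled == nil {
		panic("pinger config is missing the enabled field")
	}

	// Defaults as set in NewLivenessPingerConfigFromAnnotations below.
	if cfg.RequestTimeout == 0 {
		cfg.RequestTimeout = 500
	}
	if cfg.ConnectionTimeout == 0 {
		cfg.ConnectionTimeout = 1000
	}

	fmt.Printf("enabled=%v requestTimeout=%dms connectionTimeout=%dms\n",
		*cfg.Enabled, cfg.RequestTimeout, cfg.ConnectionTimeout)
}
```

Note that, per the constants in `pkg/management/postgres/webserver/probes/pinger.go` in this same patch, an omitted `requestTimeout` defaults to 500 ms while an omitted `connectionTimeout` defaults to 1000 ms.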
diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 0aecfbc183..c52d5cb59e 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -49,6 +49,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/probes" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" @@ -218,6 +219,7 @@ func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.Error v.validatePodPatchAnnotation, v.validatePromotionToken, v.validatePluginConfiguration, + v.validateLivenessPingerProbe, } for _, validate := range validations { @@ -2453,3 +2455,23 @@ func (v *ClusterCustomValidator) validatePluginConfiguration(r *apiv1.Cluster) f return errorList } + +func (v *ClusterCustomValidator) validateLivenessPingerProbe(r *apiv1.Cluster) field.ErrorList { + value, ok := r.Annotations[utils.LivenessPingerAnnotationName] + if !ok { + return nil + } + + _, err := probes.NewLivenessPingerConfigFromAnnotations(context.Background(), r.Annotations) + if err != nil { + return field.ErrorList{ + field.Invalid( + field.NewPath("metadata", "annotations", utils.LivenessPingerAnnotationName), + value, + fmt.Sprintf("error decoding liveness pinger config: %s", err.Error()), + ), + } + } + + return nil +} diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index a652acedbc..d92b6a9008 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -5089,3 +5089,43 @@ var _ = Describe("validatePluginConfiguration", func() { Expect(v.validatePluginConfiguration(cluster)).To(BeNil()) }) }) + +var _ = Describe("", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("returns no errors if the liveness pinger annotation is not present", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + } + Expect(v.validateLivenessPingerProbe(cluster)).To(BeNil()) + }) + + It("returns no errors if the liveness pinger annotation is valid", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.LivenessPingerAnnotationName: `{"connectionTimeout": 1000, "requestTimeout": 5000, "enabled": true}`, + }, + }, + } + Expect(v.validateLivenessPingerProbe(cluster)).To(BeNil()) + }) + + It("returns an error if the liveness pinger annotation is invalid", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.LivenessPingerAnnotationName: `{"requestTimeout": 5000}`, + }, + }, + } + errs := v.validateLivenessPingerProbe(cluster) + Expect(errs).To(HaveLen(1)) + Expect(errs[0].Error()).To(ContainSubstring("error decoding liveness pinger config")) + }) +}) diff --git a/pkg/certs/tls.go b/pkg/certs/tls.go index 30c5b25738..68b41ebcf3 100644 --- a/pkg/certs/tls.go +++ b/pkg/certs/tls.go @@ -57,9 +57,18 @@ func newTLSConfigFromSecret( // for the -rw service, which would cause a name verification error. 
caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCertificate) + + return NewTLSConfigFromCertPool(caCertPool), nil +} + +// NewTLSConfigFromCertPool creates a tls.Config object from X509 cert pool +// containing the expected server CA +func NewTLSConfigFromCertPool( + certPool *x509.CertPool, +) *tls.Config { tlsConfig := tls.Config{ MinVersion: tls.VersionTLS13, - RootCAs: caCertPool, + RootCAs: certPool, InsecureSkipVerify: true, //#nosec G402 -- we are verifying the certificate ourselves VerifyPeerCertificate: func(rawCerts [][]byte, _ [][]*x509.Certificate) error { // Code adapted from https://go.dev/src/crypto/tls/handshake_client.go#L986 @@ -77,7 +86,7 @@ func newTLSConfigFromSecret( } opts := x509.VerifyOptions{ - Roots: caCertPool, + Roots: certPool, Intermediates: x509.NewCertPool(), } @@ -93,7 +102,7 @@ func newTLSConfigFromSecret( }, } - return &tlsConfig, nil + return &tlsConfig } // NewTLSConfigForContext creates a tls.config with the provided data and returns an expanded context that contains diff --git a/pkg/management/postgres/webserver/probes/liveness.go b/pkg/management/postgres/webserver/probes/liveness.go new file mode 100644 index 0000000000..0449b76752 --- /dev/null +++ b/pkg/management/postgres/webserver/probes/liveness.go @@ -0,0 +1,171 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package probes + +import ( + "context" + "fmt" + "net/http" + + "github.com/cloudnative-pg/machinery/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" +) + +type livenessExecutor struct { + cli client.Client + instance *postgres.Instance + + lastestKnownCluster *apiv1.Cluster +} + +// NewLivenessChecker creates a new instance of the liveness probe checker +func NewLivenessChecker( + cli client.Client, + instance *postgres.Instance, +) Checker { + return &livenessExecutor{ + cli: cli, + instance: instance, + } +} + +// tryRefreshLatestCluster refreshes the latest cluster definition, returns a bool indicating if the operation was +// successful +func (e *livenessExecutor) tryRefreshLatestCluster(ctx context.Context) bool { + var cluster apiv1.Cluster + err := e.cli.Get( + ctx, + client.ObjectKey{Namespace: e.instance.GetNamespaceName(), Name: e.instance.GetClusterName()}, + &cluster, + ) + if err != nil { + return false + } + + e.lastestKnownCluster = cluster.DeepCopy() + return true +} + +func (e *livenessExecutor) IsHealthy( + ctx context.Context, + w http.ResponseWriter, +) { + contextLogger := log.FromContext(ctx) + + isPrimary, isPrimaryErr := e.instance.IsPrimary() + if isPrimaryErr != nil { + contextLogger.Error( + isPrimaryErr, + "Error while checking the instance role, skipping automatic shutdown.") + _, _ = fmt.Fprint(w, "OK") + return + } + + if !isPrimary { + // There's no need to restart a replica if isolated + _, _ = fmt.Fprint(w, "OK") + return + } + + if clusterRefreshed := e.tryRefreshLatestCluster(ctx); clusterRefreshed { + // We correctly reached the API server but, as a failsafe measure, we + // exercise the reachability checker and leave a log message if something + // is not right. + // In this way a network configuration problem can be discovered as + // quickly as possible. + if err := evaluateLivenessPinger(ctx, e.lastestKnownCluster.DeepCopy()); err != nil { + contextLogger.Warning( + "Instance connectivity error - liveness probe failing but API server is reachable", + "err", + err.Error(), + ) + } + _, _ = fmt.Fprint(w, "OK") + return + } + + contextLogger = contextLogger.WithValues("apiServerReachable", false) + + if e.lastestKnownCluster == nil { + // We were never able to download a cluster definition. This should not + // happen because we check the API server connectivity as soon as the + // instance manager starts, before starting the probe web server. + // + // To be safe, we classify this instance manager to be not isolated and + // postpone any decision to a later liveness probe call. 
+ contextLogger.Warning( + "No cluster definition has been received, skipping automatic shutdown.") + + _, _ = fmt.Fprint(w, "OK") + return + } + + err := evaluateLivenessPinger(ctx, e.lastestKnownCluster.DeepCopy()) + if err != nil { + contextLogger.Error(err, "Instance connectivity error - liveness probe failing") + http.Error( + w, + fmt.Sprintf("liveness check failed: %s", err.Error()), + http.StatusInternalServerError, + ) + return + } + + contextLogger.Debug( + "Instance connectivity test succeeded - liveness probe succeeding", + "latestKnownInstancesReportedState", e.lastestKnownCluster.Status.InstancesReportedState, + ) + _, _ = fmt.Fprint(w, "OK") +} + +func evaluateLivenessPinger( + ctx context.Context, + cluster *apiv1.Cluster, +) error { + contextLogger := log.FromContext(ctx) + + cfg, err := NewLivenessPingerConfigFromAnnotations(ctx, cluster.Annotations) + if err != nil { + return err + } + if !cfg.isEnabled() { + contextLogger.Debug("pinger config not enabled, skipping") + return nil + } + + if cluster.Spec.Instances == 1 { + contextLogger.Debug("Only one instance present in the latest known cluster definition. Skipping automatic shutdown.") + return nil + } + + checker, err := buildInstanceReachabilityChecker(*cfg) + if err != nil { + return fmt.Errorf("failed to build instance reachability checker: %w", err) + } + + if err := checker.ensureInstancesAreReachable(cluster); err != nil { + return fmt.Errorf("liveness check failed: %w", err) + } + + return nil +} diff --git a/pkg/management/postgres/webserver/probes/pinger.go b/pkg/management/postgres/webserver/probes/pinger.go new file mode 100644 index 0000000000..a66470324f --- /dev/null +++ b/pkg/management/postgres/webserver/probes/pinger.go @@ -0,0 +1,204 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package probes + +import ( + "context" + "crypto/x509" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "os" + "time" + + "github.com/cloudnative-pg/machinery/pkg/log" + "k8s.io/utils/ptr" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + cnpgUrl "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" + postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" +) + +// LivenessPingerCfg if the configuration of the instance +// reachability checker +type LivenessPingerCfg struct { + Enabled *bool `json:"enabled"` + RequestTimeout int `json:"requestTimeout,omitempty"` + ConnectionTimeout int `json:"connectionTimeout,omitempty"` +} + +func (probe *LivenessPingerCfg) isEnabled() bool { + if probe == nil || probe.Enabled == nil { + return false + } + + return *probe.Enabled +} + +// NewLivenessPingerConfigFromAnnotations creates a new pinger configuration from the annotations +// in the cluster definition +func NewLivenessPingerConfigFromAnnotations( + ctx context.Context, + annotations map[string]string, +) (*LivenessPingerCfg, error) { + const ( + // defaultRequestTimeout is the default value of the request timeout + defaultRequestTimeout = 500 + + // defaultConnectionTimeout is the default value of the connection timeout + defaultConnectionTimeout = 1000 + ) + + contextLogger := log.FromContext(ctx) + + v, ok := annotations[utils.LivenessPingerAnnotationName] + if !ok { + contextLogger.Debug("pinger config not found in the cluster annotations") + return &LivenessPingerCfg{ + Enabled: ptr.To(false), + }, nil + } + + var cfg LivenessPingerCfg + if err := json.Unmarshal([]byte(v), &cfg); err != nil { + contextLogger.Error(err, "failed to unmarshal pinger config") + return nil, fmt.Errorf("while unmarshalling pinger config: %w", err) + } + + if cfg.Enabled == nil { + return nil, fmt.Errorf("pinger config is missing the enabled field") + } + + if cfg.RequestTimeout == 0 { + cfg.RequestTimeout = defaultRequestTimeout + } + if cfg.ConnectionTimeout == 0 { + cfg.ConnectionTimeout = defaultConnectionTimeout + } + + return &cfg, nil +} + +// pinger can check if a certain instance is reachable by using +// the failsafe REST endpoint +type pinger struct { + dialer *net.Dialer + client *http.Client + + config LivenessPingerCfg +} + +// buildInstanceReachabilityChecker creates a new instance reachability checker by loading +// the server CA certificate from the same location that will be used by PostgreSQL. +// In this case, we avoid using the API Server as it may be unreliable. 
+func buildInstanceReachabilityChecker(cfg LivenessPingerCfg) (*pinger, error) { + certificateLocation := postgresSpec.ServerCACertificateLocation + caCertificate, err := os.ReadFile(certificateLocation) //nolint:gosec + if err != nil { + return nil, fmt.Errorf("while reading server CA certificate [%s]: %w", certificateLocation, err) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCertificate) + + tlsConfig := certs.NewTLSConfigFromCertPool(caCertPool) + + dialer := &net.Dialer{Timeout: time.Duration(cfg.ConnectionTimeout) * time.Millisecond} + + client := http.Client{ + Transport: &http.Transport{ + DialContext: dialer.DialContext, + TLSClientConfig: tlsConfig, + }, + Timeout: time.Duration(cfg.RequestTimeout) * time.Millisecond, + } + + return &pinger{ + dialer: dialer, + client: &client, + config: cfg, + }, nil +} + +// ping checks if the instance with the passed coordinates is reachable +// by calling the failsafe endpoint. +func (e *pinger) ping(host, ip string) error { + failsafeURL := url.URL{ + Scheme: "https", + Host: fmt.Sprintf("%s:%d", ip, cnpgUrl.StatusPort), + Path: cnpgUrl.PathFailSafe, + } + + var res *http.Response + var err error + if res, err = e.client.Get(failsafeURL.String()); err != nil { + return &pingError{ + host: host, + err: err, + config: e.config, + } + } + + _ = res.Body.Close() + + return nil +} + +func (e pinger) ensureInstancesAreReachable(cluster *apiv1.Cluster) error { + for name, state := range cluster.Status.InstancesReportedState { + host := string(name) + ip := state.IP + if err := e.ping(host, ip); err != nil { + return err + } + } + + return nil +} + +// pingError is raised when the instance connectivity test failed. +type pingError struct { + host string + ip string + + config LivenessPingerCfg + + err error +} + +// Error implements the error interface +func (e *pingError) Error() string { + return fmt.Sprintf( + "instance connectivity error for instance [%s] with ip [%s] (requestTimeout:%v connectionTimeout:%v): %s", + e.host, + e.ip, + e.config.RequestTimeout, + e.config.ConnectionTimeout, + e.err.Error()) +} + +// Unwrap implements the error interface +func (e *pingError) Unwrap() error { + return e.err +} diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index cf6656e93e..ef76017ba9 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -66,6 +66,8 @@ type remoteWebserverEndpoints struct { instance *postgres.Instance currentBackup *backupConnection ongoingBackupRequest sync.Mutex + // livenessChecker is a stateful probe + livenessChecker probes.Checker } // StartBackupRequest the required data to execute the pg_start_backup @@ -97,11 +99,13 @@ func NewRemoteWebServer( } endpoints := remoteWebserverEndpoints{ - typedClient: typedClient, - instance: instance, + typedClient: typedClient, + instance: instance, + livenessChecker: probes.NewLivenessChecker(typedClient, instance), } serveMux := http.NewServeMux() + serveMux.HandleFunc(url.PathFailSafe, endpoints.failSafe) serveMux.HandleFunc(url.PathPgModeBackup, endpoints.backup) serveMux.HandleFunc(url.PathHealth, endpoints.isServerHealthy) serveMux.HandleFunc(url.PathReady, endpoints.isServerReady) @@ -222,10 +226,16 @@ func (ws *remoteWebserverEndpoints) isServerStartedUp(w http.ResponseWriter, req checker.IsHealthy(req.Context(), w) } -func (ws *remoteWebserverEndpoints) isServerHealthy(w http.ResponseWriter, _ *http.Request) { +// This is the failsafe 
entrypoint +func (ws *remoteWebserverEndpoints) failSafe(w http.ResponseWriter, _ *http.Request) { _, _ = fmt.Fprint(w, "OK") } +// This is the failsafe probe +func (ws *remoteWebserverEndpoints) isServerHealthy(w http.ResponseWriter, req *http.Request) { + ws.livenessChecker.IsHealthy(req.Context(), w) +} + // This is the readiness probe func (ws *remoteWebserverEndpoints) isServerReady(w http.ResponseWriter, req *http.Request) { if !ws.instance.CanCheckReadiness() { diff --git a/pkg/management/url/url.go b/pkg/management/url/url.go index c54cde8634..b3157163fd 100644 --- a/pkg/management/url/url.go +++ b/pkg/management/url/url.go @@ -34,6 +34,9 @@ const ( // PgBouncerMetricsPort is the port for the exporter of PgBouncer related metrics (HTTP) PgBouncerMetricsPort int32 = 9127 + // PathFailSafe is the path for the failsafe entrypoint + PathFailSafe string = "/failsafe" + // PathHealth is the URL path for Health State PathHealth string = "/healthz" diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index 37143c54bb..1ce7eddfe0 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -30,6 +30,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +// AlphaMetadataNamespace is the annotation and label namespace used by the alpha features of +// the operator +const AlphaMetadataNamespace = "alpha.cnpg.io" + // MetadataNamespace is the annotation and label namespace used by the operator const MetadataNamespace = "cnpg.io" @@ -105,6 +109,9 @@ const ( // PluginNameLabelName is the name of the label to be applied to services // to have them detected as CNPG-i plugins PluginNameLabelName = MetadataNamespace + "/pluginName" + + // LivenessPingerAnnotationName is the name of the pinger configuration + LivenessPingerAnnotationName = AlphaMetadataNamespace + "/livenessPinger" ) const ( diff --git a/tests/e2e/fixtures/self-fencing/cluster-self-fencing.yaml.template b/tests/e2e/fixtures/self-fencing/cluster-self-fencing.yaml.template new file mode 100644 index 0000000000..5235464f2a --- /dev/null +++ b/tests/e2e/fixtures/self-fencing/cluster-self-fencing.yaml.template @@ -0,0 +1,27 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postgresql-self-fencing + annotations: + alpha.cnpg.io/livenessPinger: '{"enabled": true}' +spec: + instances: 3 + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/self_fencing_test.go b/tests/e2e/self_fencing_test.go new file mode 100644 index 0000000000..072ed32791 --- /dev/null +++ b/tests/e2e/self_fencing_test.go @@ -0,0 +1,142 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Self-fencing with liveness probe", Serial, Label(tests.LabelDisruptive), func() { + const ( + level = tests.Lowest + sampleFile = fixturesDir + "/self-fencing/cluster-self-fencing.yaml.template" + namespacePrefix = "self-fencing" + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsLocal() { + Skip("This test is only run on local cluster") + } + }) + + It("will terminate an isolated primary", func() { + var namespace, clusterName, isolatedNode string + var err error + var oldPrimaryPod *corev1.Pod + + DeferCleanup(func() { + // Ensure the isolatedNode networking is re-established + if CurrentSpecReport().Failed() { + _, _, _ = run.Unchecked(fmt.Sprintf("docker network connect kind %v", isolatedNode)) + } + }) + + By("creating a Cluster", func() { + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) + Expect(err).ToNot(HaveOccurred()) + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, sampleFile, env) + }) + + By("setting up the environment", func() { + // Ensure the operator is not running on the same node as the primary. 
+ // If it is, we switch to a new primary + primaryPod, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + operatorPod, err := operator.GetPod(env.Ctx, env.Client) + Expect(err).NotTo(HaveOccurred()) + if primaryPod.Spec.NodeName == operatorPod.Spec.NodeName { + AssertSwitchover(namespace, clusterName, env) + } + }) + + By("disconnecting the node containing the primary", func() { + oldPrimaryPod, err = clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + isolatedNode = oldPrimaryPod.Spec.NodeName + _, _, err = run.Unchecked(fmt.Sprintf("docker network disconnect kind %v", isolatedNode)) + Expect(err).ToNot(HaveOccurred()) + }) + + By("verifying that a new primary has been promoted", func() { + AssertClusterEventuallyReachesPhase(namespace, clusterName, + []string{apiv1.PhaseFailOver}, 120) + Eventually(func(g Gomega) { + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster.Status.CurrentPrimary).ToNot(BeEquivalentTo(oldPrimaryPod.Name)) + }, testTimeouts[timeouts.NewPrimaryAfterFailover]).Should(Succeed()) + }) + + By("verifying that oldPrimary will self isolate", func() { + // Assert that the oldPrimary is eventually terminated + Eventually(func(g Gomega) { + out, _, err := run.Unchecked(fmt.Sprintf( + "docker exec %v crictl ps -a "+ + "--label io.kubernetes.pod.namespace=%s,io.kubernetes.pod.name=%s "+ + "--name postgres -s Exited -q", isolatedNode, namespace, oldPrimaryPod.Name)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(out).ToNot(BeEmpty()) + if out != "" { + GinkgoWriter.Printf("Container %s (%s) has been terminated\n", + oldPrimaryPod.Name, strings.TrimSpace(out)) + } + }, 120).Should(Succeed()) + }) + + By("reconnecting the isolated Node", func() { + _, _, err = run.Unchecked(fmt.Sprintf("docker network connect kind %v", isolatedNode)) + Expect(err).ToNot(HaveOccurred()) + + // Assert that the oldPrimary comes back as a replica + namespacedName := types.NamespacedName{ + Namespace: oldPrimaryPod.Namespace, + Name: oldPrimaryPod.Name, + } + timeout := 180 + Eventually(func() (bool, error) { + pod := corev1.Pod{} + err := env.Client.Get(env.Ctx, namespacedName, &pod) + return utils.IsPodActive(pod) && utils.IsPodReady(pod) && specs.IsPodStandby(pod), err + }, timeout).Should(BeTrue()) + }) + }) +}) From b2cea3dfc9edb7b83d3d4d9b8e326b1687a02331 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 13 May 2025 06:33:39 +0200 Subject: [PATCH 577/836] feat: in-tree barman-cloud support deprecation notice (#7500) The admission webhook warns when the in-tree Barman Cloud support is used in a cluster definition, advising the user to try the plugin. Additionally, the admission webhook now emits a warning when the retentionPolicy field is specified without an associated in-tree Barman Cloud configuration. 
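For illustration, this is roughly how the new deprecation warning surfaces on the client side, assuming kubectl's standard rendering of admission webhook warnings (the manifest file name here is hypothetical):

```console
$ kubectl apply -f cluster-with-barman.yaml
Warning: Native support for Barman Cloud backups and recovery is deprecated and will be completely removed in CloudNativePG 1.28.0. Found usage in: spec.backup.barmanObjectStore. Please migrate existing clusters to the new Barman Cloud Plugin to ensure a smooth transition.
cluster.postgresql.cnpg.io/cluster-with-barman created
```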
Closes #7065 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- internal/webhook/v1/cluster_webhook.go | 46 +++++++++++ internal/webhook/v1/cluster_webhook_test.go | 86 +++++++++++++++++++++ 2 files changed, 132 insertions(+) diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index c52d5cb59e..453de80997 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -2335,9 +2335,55 @@ func (v *ClusterCustomValidator) validatePgFailoverSlots(r *apiv1.Cluster) field func (v *ClusterCustomValidator) getAdmissionWarnings(r *apiv1.Cluster) admission.Warnings { list := getMaintenanceWindowsAdmissionWarnings(r) + list = append(list, getInTreeBarmanWarnings(r)...) + list = append(list, getRetentionPolicyWarnings(r)...) return append(list, getSharedBuffersWarnings(r)...) } +func getInTreeBarmanWarnings(r *apiv1.Cluster) admission.Warnings { + var result admission.Warnings + + var paths []string + + if r.Spec.Backup != nil && r.Spec.Backup.BarmanObjectStore != nil { + paths = append(paths, field.NewPath("spec", "backup", "barmanObjectStore").String()) + } + + for idx, externalCluster := range r.Spec.ExternalClusters { + if externalCluster.BarmanObjectStore != nil { + paths = append(paths, field.NewPath("spec", "externalClusters", fmt.Sprintf("%d", idx), + "barmanObjectStore").String()) + } + } + + if len(paths) > 0 { + pathsStr := strings.Join(paths, ", ") + result = append( + result, + fmt.Sprintf("Native support for Barman Cloud backups and recovery is deprecated and will be "+ + "completely removed in CloudNativePG 1.28.0. Found usage in: %s. "+ + "Please migrate existing clusters to the new Barman Cloud Plugin to ensure a smooth transition.", + pathsStr), + ) + } + return result +} + +func getRetentionPolicyWarnings(r *apiv1.Cluster) admission.Warnings { + var result admission.Warnings + + if r.Spec.Backup != nil && r.Spec.Backup.RetentionPolicy != "" && r.Spec.Backup.BarmanObjectStore == nil { + result = append( + result, + "Retention policies specified in .spec.backup.retentionPolicy are only used by the "+ + "in-tree barman-cloud support, which is not being used in this cluster. 
"+ + "Please use a backup plugin and migrate this configuration to the plugin configuration", + ) + } + + return result +} + func getSharedBuffersWarnings(r *apiv1.Cluster) admission.Warnings { var result admission.Warnings diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index d92b6a9008..2ab0e37a73 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -5129,3 +5129,89 @@ var _ = Describe("", func() { Expect(errs[0].Error()).To(ContainSubstring("error decoding liveness pinger config")) }) }) + +var _ = Describe("getInTreeBarmanWarnings", func() { + It("returns no warnings when BarmanObjectStore is not configured", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: nil, + ExternalClusters: nil, + }, + } + Expect(getInTreeBarmanWarnings(cluster)).To(BeEmpty()) + }) + + It("returns a warning when BarmanObjectStore is configured in backup", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}, + }, + }, + } + warnings := getInTreeBarmanWarnings(cluster) + Expect(warnings).To(HaveLen(1)) + Expect(warnings[0]).To(ContainSubstring("spec.backup.barmanObjectStore")) + }) + + It("returns warnings for multiple external clusters with BarmanObjectStore", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ExternalClusters: []apiv1.ExternalCluster{ + {BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}}, + {BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}}, + }, + }, + } + warnings := getInTreeBarmanWarnings(cluster) + Expect(warnings).To(HaveLen(1)) + Expect(warnings[0]).To(ContainSubstring("spec.externalClusters.0.barmanObjectStore")) + Expect(warnings[0]).To(ContainSubstring("spec.externalClusters.1.barmanObjectStore")) + }) + + It("returns warnings for both backup and external clusters with BarmanObjectStore", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}, + }, + ExternalClusters: []apiv1.ExternalCluster{ + {BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}}, + }, + }, + } + warnings := getInTreeBarmanWarnings(cluster) + Expect(warnings).To(HaveLen(1)) + Expect(warnings[0]).To(ContainSubstring("spec.backup.barmanObjectStore")) + Expect(warnings[0]).To(ContainSubstring("spec.externalClusters.0.barmanObjectStore")) + }) +}) + +var _ = Describe("getRetentionPolicyWarnings", func() { + It("returns no warnings if the retention policy is used with the in-tree backup support", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + RetentionPolicy: "this retention policy", + BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{}, + }, + }, + } + + warnings := getRetentionPolicyWarnings(cluster) + Expect(warnings).To(BeEmpty()) + }) + + It("return a warning when retention policies are declared and not used", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + Backup: &apiv1.BackupConfiguration{ + RetentionPolicy: "this retention policy", + }, + }, + } + + warnings := getRetentionPolicyWarnings(cluster) + Expect(warnings).To(HaveLen(1)) + }) +}) From 880f1ce54c448760c85a69b00fcf73ebec5c005a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 13 May 2025 14:28:38 +0200 Subject: 
[PATCH 578/836] feat: update default PostgreSQL version to 17.5 (#7556) Update default PostgreSQL version from 17.4 to 17.5 Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 20 ++++++++++---------- docs/src/bootstrap.md | 10 +++++----- docs/src/declarative_hibernation.md | 2 +- docs/src/image_catalog.md | 4 ++-- docs/src/kubectl-plugin.md | 4 ++-- docs/src/monitoring.md | 2 +- docs/src/postgis.md | 2 +- docs/src/postgres_upgrades.md | 2 +- docs/src/samples/cluster-example-full.yaml | 2 +- docs/src/scheduling.md | 2 +- docs/src/ssl_connections.md | 2 +- docs/src/troubleshooting.md | 4 ++-- pkg/versions/versions.go | 2 +- 13 files changed, 29 insertions(+), 29 deletions(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 24c0505e80..828288a6cb 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,22 +1,22 @@ { "17": [ - "17.4", - "17.2" + "17.5", + "17.4" ], "16": [ - "16.8", - "16.6" + "16.9", + "16.8" ], "15": [ - "15.12", - "15.10" + "15.13", + "15.12" ], "14": [ - "14.17", - "14.15" + "14.18", + "14.17" ], "13": [ - "13.20", - "13.18" + "13.21", + "13.20" ] } \ No newline at end of file diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index 342ccf08e8..ded39278c9 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -585,7 +585,7 @@ file on the source PostgreSQL instance: host replication streaming_replica all md5 ``` -The following manifest creates a new PostgreSQL 17.4 cluster, +The following manifest creates a new PostgreSQL 17.5 cluster, called `target-db`, using the `pg_basebackup` bootstrap method to clone an external PostgreSQL cluster defined as `source-db` (in the `externalClusters` array). As you can see, the `source-db` @@ -600,7 +600,7 @@ metadata: name: target-db spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.5 bootstrap: pg_basebackup: @@ -620,7 +620,7 @@ spec: ``` All the requirements must be met for the clone operation to work, including -the same PostgreSQL version (in our case 17.4). +the same PostgreSQL version (in our case 17.5). #### TLS certificate authentication @@ -635,7 +635,7 @@ in the same Kubernetes cluster. This example can be easily adapted to cover an instance that resides outside the Kubernetes cluster. -The manifest defines a new PostgreSQL 17.4 cluster called `cluster-clone-tls`, +The manifest defines a new PostgreSQL 17.5 cluster called `cluster-clone-tls`, which is bootstrapped using the `pg_basebackup` method from the `cluster-example` external cluster. 
The host is identified by the read/write service in the same cluster, while the `streaming_replica` user is authenticated @@ -650,7 +650,7 @@ metadata: name: cluster-clone-tls spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.5 bootstrap: pg_basebackup: diff --git a/docs/src/declarative_hibernation.md b/docs/src/declarative_hibernation.md index dba076d8f3..d84a5d5456 100644 --- a/docs/src/declarative_hibernation.md +++ b/docs/src/declarative_hibernation.md @@ -51,7 +51,7 @@ $ kubectl cnpg status Cluster Summary Name: cluster-example Namespace: default -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.4 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.5 Primary instance: cluster-example-2 Status: Cluster in healthy state Instances: 3 diff --git a/docs/src/image_catalog.md b/docs/src/image_catalog.md index d3a9479b57..02ce6da8c1 100644 --- a/docs/src/image_catalog.md +++ b/docs/src/image_catalog.md @@ -35,7 +35,7 @@ spec: - major: 16 image: ghcr.io/cloudnative-pg/postgresql:16.8 - major: 17 - image: ghcr.io/cloudnative-pg/postgresql:17.4 + image: ghcr.io/cloudnative-pg/postgresql:17.5 ``` **Example of a Cluster-Wide Catalog using `ClusterImageCatalog` Resource:** @@ -52,7 +52,7 @@ spec: - major: 16 image: ghcr.io/cloudnative-pg/postgresql:16.8 - major: 17 - image: ghcr.io/cloudnative-pg/postgresql:17.4 + image: ghcr.io/cloudnative-pg/postgresql:17.5 ``` A `Cluster` resource has the flexibility to reference either an `ImageCatalog` diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index eb4d9ba3df..50208b8651 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -1009,7 +1009,7 @@ it from the actual pod. This means that you will be using the `postgres` user. ```console $ kubectl cnpg psql cluster-example -psql (17.4 (Debian 17.4-1.pgdg110+1)) +psql (17.5 (Debian 17.5-1.pgdg110+1)) Type "help" for help. postgres=# @@ -1021,7 +1021,7 @@ select to work against a replica by using the `--replica` option: ```console $ kubectl cnpg psql --replica cluster-example -psql (17.4 (Debian 17.4-1.pgdg110+1)) +psql (17.5 (Debian 17.5-1.pgdg110+1)) Type "help" for help. diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md index 8209ddd97c..dddf3b135b 100644 --- a/docs/src/monitoring.md +++ b/docs/src/monitoring.md @@ -218,7 +218,7 @@ cnpg_collector_up{cluster="cluster-example"} 1 # HELP cnpg_collector_postgres_version Postgres version # TYPE cnpg_collector_postgres_version gauge -cnpg_collector_postgres_version{cluster="cluster-example",full="17.4"} 17.4 +cnpg_collector_postgres_version{cluster="cluster-example",full="17.5"} 17.5 # HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp # TYPE cnpg_collector_last_failed_backup_timestamp gauge diff --git a/docs/src/postgis.md b/docs/src/postgis.md index cfbcc3b976..087efcdf4c 100644 --- a/docs/src/postgis.md +++ b/docs/src/postgis.md @@ -106,7 +106,7 @@ values from the ones in this document): ```console $ kubectl cnpg psql postgis-example -- app -psql (17.4 (Debian 17.4-1.pgdg110+2)) +psql (17.5 (Debian 17.5-1.pgdg110+2)) Type "help" for help. 
app=# SELECT * FROM pg_available_extensions WHERE name ~ '^postgis' ORDER BY 1; diff --git a/docs/src/postgres_upgrades.md b/docs/src/postgres_upgrades.md index dac7a53480..2cd31dd9e8 100644 --- a/docs/src/postgres_upgrades.md +++ b/docs/src/postgres_upgrades.md @@ -17,7 +17,7 @@ version 17.1: Minor releases are fully compatible with earlier and later minor releases of the same major version. They include bug fixes and security updates but do not introduce changes to the internal storage format. -For example, PostgreSQL 17.1 is compatible with 17.0 and 17.4. +For example, PostgreSQL 17.1 is compatible with 17.0 and 17.5. ### Upgrading a Minor Version in CloudNativePG diff --git a/docs/src/samples/cluster-example-full.yaml b/docs/src/samples/cluster-example-full.yaml index 1551e5318c..f0237d6f79 100644 --- a/docs/src/samples/cluster-example-full.yaml +++ b/docs/src/samples/cluster-example-full.yaml @@ -35,7 +35,7 @@ metadata: name: cluster-example-full spec: description: "Example of cluster" - imageName: ghcr.io/cloudnative-pg/postgresql:17.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.5 # imagePullSecret is only required if the images are located in a private registry # imagePullSecrets: # - name: private_registry_access diff --git a/docs/src/scheduling.md b/docs/src/scheduling.md index abe3594a78..6b3f50fb86 100644 --- a/docs/src/scheduling.md +++ b/docs/src/scheduling.md @@ -41,7 +41,7 @@ metadata: name: cluster-example spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:17.4 + imageName: ghcr.io/cloudnative-pg/postgresql:17.5 affinity: enablePodAntiAffinity: true # Default value diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md index a9f910e4f2..34a960a7a2 100644 --- a/docs/src/ssl_connections.md +++ b/docs/src/ssl_connections.md @@ -174,7 +174,7 @@ Output: version -------------------------------------------------------------------------------------- ------------------ -PostgreSQL 17.4 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat +PostgreSQL 17.5 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 8.3.1 20191121 (Red Hat 8.3.1-5), 64-bit (1 row) ``` diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index ebcbf1045f..745a703fe3 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -221,7 +221,7 @@ Cluster in healthy state Name: cluster-example Namespace: default System ID: 7044925089871458324 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.4-3 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.5-3 Primary instance: cluster-example-1 Instances: 3 Ready instances: 3 @@ -289,7 +289,7 @@ kubectl describe cluster -n | grep "Image Name" Output: ```shell - Image Name: ghcr.io/cloudnative-pg/postgresql:17.4-3 + Image Name: ghcr.io/cloudnative-pg/postgresql:17.5-3 ``` !!! 
Note diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 66cfbf4ba5..a97567d158 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -26,7 +26,7 @@ const ( Version = "1.26.0-rc2" // DefaultImageName is the default image used by the operator to create pods - DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.4" + DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2" From 8b162e26a038c3cfbeff35c2ca2b77c4e9b20a71 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 13 May 2025 14:32:25 +0200 Subject: [PATCH 579/836] docs: release notes for 1.26.0-rc3 (#7549) Closes #7548 Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- .github/ISSUE_TEMPLATE/bug.yml | 3 +- docs/src/postgres_upgrades.md | 4 +- docs/src/preview_version.md | 2 +- docs/src/release_notes/v1.26.md | 67 +++++++++++++++++++++++++++- docs/src/supported_releases.md | 12 ++--- internal/controller/cluster_image.go | 3 +- 6 files changed, 76 insertions(+), 15 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 23d7a85e8e..9b7a3bed78 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -48,8 +48,7 @@ body: label: Version description: What is the version of CloudNativePG you are running? options: - - "1.26.0-rc2" - - "1.26.0-rc1" + - "1.26.0-rc3" - "1.25 (latest patch)" - "1.24 (latest patch)" - "trunk (main)" diff --git a/docs/src/postgres_upgrades.md b/docs/src/postgres_upgrades.md index 2cd31dd9e8..46f211875f 100644 --- a/docs/src/postgres_upgrades.md +++ b/docs/src/postgres_upgrades.md @@ -74,8 +74,8 @@ For details on supported image tags, see ### Upgrade Process 1. Shuts down all cluster pods to ensure data consistency. -2. Records the previous PostgreSQL version in the cluster’s status under - `.status.majorVersionUpgradeFromImage`. +2. Records the previous PostgreSQL version and image in the cluster’s status under + `.status.pgDataImageInfo`. 3. Initiates a new upgrade job, which: - Verifies that the binaries in the image and the data files align with a major upgrade request. diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index d5b7c5c030..1d2375721e 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -36,7 +36,7 @@ are not backwards compatible and could be removed entirely. There are currently no preview versions available. -The current preview version is **1.26.0-rc2**. +The current preview version is **1.26.0-rc3**. For more information on the current preview version and how to test, please view the links below: diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index ae0aab986c..a61ffb0083 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -7,9 +7,72 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.26) on the release branch in GitHub. +## Version 1.26.0-rc3 + +**Release date:** May 13, 2025 + +### Enhancements + +- Added a new field in the `status` of the `Cluster` resource to track the + latest known Pod IP (#7546). +- Introduced an opt-in experimental feature to enhance the liveness probe with + network isolation detection for primary instances. 
This feature can be + activated via the `alpha.cnpg.io/livenessPinger` annotation (#7466). +- Updated the default PostgreSQL version to 17.5 for new cluster + definitions. (#7556) + +### Fixes (since RC2) + +- Corrected replication lag comparison logic in custom health probes to accept + equality, enabling probes to succeed when lag is exactly zero (#7442). +- Fixed native replication slot synchronization and logical replication + failover for PostgreSQL 17 by appending the `dbname` parameter to + `primary_conninfo` in replica configurations (#7298). +- Improved performance and resilience of CNPG-I by removing timeouts for local + plugin operations, avoiding failures during longer backup or WAL archiving + executions (#7496). +- Fixed a regression in WAL restore operations that prevented fallback to the + in-tree `barmanObjectStore` configuration defined in the `externalCluster` + source when a plugin failed to locate a WAL file (#7507). +- Improved backup efficiency by introducing a fail-fast mechanism in WAL + archiving, allowing quicker detection of unexpected primary demotion and + avoiding unnecessary retries (#7483). +- Fixed an off-by-one error in parallel WAL archiving that could cause one + extra worker process to be spawned beyond the requested number (#7389). +- Enhanced declarative major version upgrade logic by prioritizing the declared + PostgreSQL version when using image catalogs. This change also replaces the + `majorVersionUpgradeFromImage` field with the new `pgDataImageInfo` object in + cluster status (#7387, #7403). +- `cnpg` plugin: + + - Increased the buffer size in the `logs pretty` command to better handle + larger log output (#7281). + - Ensured the `plugin-name` parameter is required for plugin-based backups + and disallowed for non-plugin backup methods (#7506). + +### Changes + +- Initiated deprecation of in-tree Barman Cloud support: + + - The `.spec.backup.barmanObjectStore` and `.spec.backup.retentionPolicy` + fields are now deprecated in favor of the external Barman Cloud Plugin, and a + warning is now emitted by the admission webhook when these fields are used in + the `Cluster` specification (#7500). + + - While support for in-tree object store backup and recovery remains + available in this release, it is planned for removal in version 1.28.0. + + - *We strongly encourage users to begin planning the migration of their + fleet to the new plugin-based approach. This release candidate provides + an opportunity to test the new plugin workflow ahead of the final 1.26.0 + release. Feedback is welcome*. + +- Updated the default PgBouncer version to **1.24.1** for new `Pooler` + deployments (#7399). + ## Version 1.26.0-rc2 -**Release date:** 16 April 2025 +**Release date:** April 16, 2025 ### Enhancements @@ -35,7 +98,7 @@ on the release branch in GitHub. ## Version 1.26.0-rc1 -**Release date:** Mar 28, 2025 +**Release date:** March 28, 2025 ### Important Changes diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 8be8ff34d5..c6c364f9e6 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -8,7 +8,7 @@ releases of CloudNativePG*. We are committed to providing support for the latest minor release, with a -dedication to launching a new minor release every two months. Each release +dedication to launching a new minor release every three months. 
Each release remains fully supported until reaching its designated "End of Life" date, as outlined in the [support status table for CloudNativePG releases](#support-status-of-cloudnativepg-releases). This includes an additional 3-month assistance window to facilitate seamless @@ -83,7 +83,7 @@ Git tags for versions are prefixed with `v`. | Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | |-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------| -| 1.25.x | Yes | Dec 23, 2024 | ~ Jun/July 2025 | 1.29, 1.30, 1.31, 1.32 | 1.27, 1.28 | 13 - 17 | +| 1.25.x | Yes | Dec 23, 2024 | ~ Aug/Sep 2025 | 1.29, 1.30, 1.31, 1.32 | 1.27, 1.28 | 13 - 17 | | 1.24.x | Yes | Aug 22, 2024 | Mar 23, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 | | main | No, development only | | | | | 13 - 17 | @@ -122,10 +122,10 @@ version of PostgreSQL, we might not be able to help you. | Version | Release date | End of life | |---------|--------------|-------------| -| 1.26.0 | ~ Mar, 2025 | ~ Dec, 2025 | -| 1.27.0 | ~ Jun, 2025 | ~ Mar, 2026 | -| 1.28.0 | ~ Sep, 2025 | ~ Jun, 2026 | -| 1.29.0 | ~ Dec, 2025 | ~ Sep, 2026 | +| 1.26.0 | ~ May, 2025 | ~ Nov, 2025 | +| 1.27.0 | ~ Aug, 2025 | ~ Feb, 2026 | +| 1.28.0 | ~ Nov, 2025 | ~ May, 2026 | +| 1.29.0 | ~ Feb, 2025 | ~ Aug, 2026 | !!! Note Feature freeze occurs 1-2 weeks before the release, at which point a diff --git a/internal/controller/cluster_image.go b/internal/controller/cluster_image.go index 2c9fcf5eb5..21cbba1dae 100644 --- a/internal/controller/cluster_image.go +++ b/internal/controller/cluster_image.go @@ -42,8 +42,7 @@ import ( // reconcileImage processes the image request, executes it, and stores // the result in the .status.image field. If the user requested a // major version upgrade, the current image is saved in the -// .status.majorVersionUpgradeFromImage field. This allows for -// reverting the upgrade if it doesn't complete successfully. +// .status.pgDataImageInfo field. func (r *ClusterReconciler) reconcileImage(ctx context.Context, cluster *apiv1.Cluster) (*ctrl.Result, error) { contextLogger := log.FromContext(ctx) From f3995f0767b5f9e4ebeb993f669079b67b8303dc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 13 May 2025 15:16:40 +0200 Subject: [PATCH 580/836] Version tag to 1.26.0-rc3 (#7560) Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- docs/src/installation_upgrade.md | 4 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.26.0-rc3.yaml | 18021 +++++++++++++++++++++++++++++ 4 files changed, 18041 insertions(+), 20 deletions(-) create mode 100644 releases/cnpg-1.26.0-rc3.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index a9ab979354..9a8bd66381 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -8,12 +8,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. 
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc2.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc3.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc2.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc3.yaml ``` You can verify that with: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 50208b8651..d8e3d55a1b 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -31,11 +31,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.26.0-rc2 release of the plugin, for an Intel based +For example, let's install the 1.26.0-rc3 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc2/kubectl-cnpg_1.26.0-rc2_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc3/kubectl-cnpg_1.26.0-rc3_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -46,17 +46,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.26.0-rc2) ... -Setting up cnpg (1.26.0-rc2) ... +Unpacking cnpg (1.26.0-rc3) ... +Setting up cnpg (1.26.0-rc3) ... ``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.26.0-rc2 release for an +As in the example for `.rpm` packages, let's install the 1.26.0-rc3 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc2/kubectl-cnpg_1.26.0-rc2_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc3/kubectl-cnpg_1.26.0-rc3_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -70,7 +70,7 @@ Dependencies resolved. 
Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.26.0-rc2 @commandline 20 M + cnpg x86_64 1.26.0-rc3 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc2 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc3 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc2 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc3 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc2","build":{"Version":"1.26.0-rc2+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc3","build":{"Version":"1.26.0-rc3+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc2","build":{"Version":"1.26.0-rc2+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc3","build":{"Version":"1.26.0-rc3+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index a97567d158..6304f15667 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.26.0-rc2" + Version = "1.26.0-rc3" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.26.0-rc2" + buildVersion = "1.26.0-rc3" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.26.0-rc3.yaml b/releases/cnpg-1.26.0-rc3.yaml new file mode 100644 index 0000000000..0e3ebc55f8 --- /dev/null +++ b/releases/cnpg-1.26.0-rc3.yaml @@ -0,0 +1,18021 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. 
If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is tho role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
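+ # Sketch of the matchLabelKeys semantics described above, assuming an
+ # incoming pod labeled `pod-template-hash: abc123`: listing that key is
+ # merged into labelSelector as `pod-template-hash in (abc123)`, so only
+ # pods from the same template revision are considered. The label value
+ # is hypothetical.
+ #
+ #   matchLabelKeys:
+ #   - pod-template-hash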
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required" could lead to instances remaining pending until new Kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See k8s documentation
+ for more info on that
+ type: string
+ type: object
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ explicitly providing the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to backup the data files.
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2`, and `snappy`.
+ enum:
+ - bzip2
+ - gzip
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder);
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA stores the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
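+ # Hedged example (comment only, not schema content): a minimal
+ # `backup.barmanObjectStore` stanza using the fields in this section.
+ # The bucket path, secret name, and secret keys are hypothetical
+ # placeholders.
+ #
+ #   backup:
+ #     barmanObjectStore:
+ #       destinationPath: s3://my-bucket/backups
+ #       s3Credentials:
+ #         accessKeyId:
+ #           name: aws-creds
+ #           key: ACCESS_KEY_ID
+ #         secretAccessKey:
+ #           name: aws-creds
+ #           key: ACCESS_SECRET_KEY
+ #       data:
+ #         compression: gzip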
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
Available
+ options are empty string (no compression, default), `gzip`, `bzip2`,
+ `lz4`, `snappy`, `xz`, and `zstd`.
+ enum:
+ - bzip2
+ - gzip
+ - lz4
+ - snappy
+ - xz
+ - zstd
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ restoreAdditionalCommandArgs:
+ description: |-
+ Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+ command-line invocation. These arguments provide flexibility to customize
+ the WAL restore process further, according to specific requirements or configurations.
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ type: object
+ required:
+ - destinationPath
+ type: object
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are empty string, which will default to `prefer-standby` policy,
+ `primary` to have backups run always on primary instances, `prefer-standby`
+ to have backups run preferably on the most updated standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations are key-value pairs that will be added
+ to .metadata.annotations of snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels are key-value pairs that will be added
+ to .metadata.labels of snapshot resources.
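+ # Hedged sketch (comment only): an online volume-snapshot backup
+ # configuration using the volumeSnapshot fields of this section; the
+ # class name `csi-snapclass` is a hypothetical placeholder.
+ #
+ #   backup:
+ #     volumeSnapshot:
+ #       className: csi-snapclass
+ #       online: true
+ #       onlineConfiguration:
+ #         immediateCheckpoint: false
+ #         waitForArchive: true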
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
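+ # Hedged illustration (comment only) of an `initdb` bootstrap using the
+ # fields in this section; the database and owner names are placeholders.
+ #
+ #   bootstrap:
+ #     initdb:
+ #       database: app
+ #       owner: app
+ #       encoding: UTF8
+ #       dataChecksums: true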
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ pgDumpExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_dump` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ pgRestoreExtraOptions:
+ description: |-
+ List of custom options to pass to the `pg_restore` command. IMPORTANT:
+ Use these options with caution and at your own risk, as the operator
+ does not validate their content. Be aware that certain options may
+ conflict with the operator's intended functionality or design.
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
+ locale:
+ description: Sets the default collation order and character
+ classification in the new database.
+ type: string
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default:`C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default:`C`)
+ type: string
+ localeProvider:
+ description: |-
+ This option sets the locale provider for databases created in the new cluster.
+ Available from PostgreSQL 16.
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations,
+ please use the explicitly provided parameters instead.
+ If defined, explicit values will be ignored.
+ items:
+ type: string
+ type: array
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ postInitApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitApplicationSQLRefs:
+ description: |-
+ List of references to ConfigMaps or Secrets containing SQL files
+ to be executed as a superuser in the application database right after
+ the cluster has been created. The references are processed in a specific order:
+ first, all Secrets are processed, followed by all ConfigMaps.
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
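+ # Hedged example (comment only) of the `*SQLRefs` pattern described in
+ # this section: Secrets are applied before ConfigMaps, each group in
+ # array order. The names and keys are hypothetical.
+ #
+ #   postInitApplicationSQLRefs:
+ #     secretRefs:
+ #     - name: app-seed-sql
+ #       key: seed.sql
+ #     configMapRefs:
+ #     - name: app-schema-sql
+ #       key: schema.sql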
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
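+ # Hedged point-in-time-recovery sketch (comment only) tied to the
+ # `recoveryTarget` fields above; the external cluster name and the
+ # timestamp are placeholders.
+ #
+ #   bootstrap:
+ #     recovery:
+ #       source: source-cluster
+ #       recoveryTarget:
+ #         targetTime: "2024-09-25T08:00:00Z"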
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
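# Editor's note: a sketch of user-provided certificates wired through the
# four secrets described above (all secret names here are hypothetical; the
# `kubernetes.io/tls` secrets would typically come from an external issuer
# such as cert-manager):
#
#   spec:
#     certificates:
#       serverCASecret: my-server-ca          # ca.crt (+ ca.key if no serverTLSSecret)
#       serverTLSSecret: my-server-tls        # kubernetes.io/tls
#       clientCASecret: my-client-ca          # ca.crt (+ ca.key if no replicationTLSSecret)
#       replicationTLSSecret: my-replication-tls
#       serverAltDNSNames:
#         - db.example.com                    # hypothetical DNS name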
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
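# Editor's note: a sketch of the ephemeral-volume knobs in this part of the
# schema — a PVC template for the ephemeral volume plus the size limits
# (`ephemeralVolumesSizeLimit` is defined immediately below). The storage
# class name is hypothetical:
#
#   spec:
#     ephemeralVolumeSource:
#       volumeClaimTemplate:
#         spec:
#           accessModes: ["ReadWriteOnce"]
#           storageClassName: fast-local      # hypothetical
#           resources:
#             requests:
#               storage: 1Gi
#     ephemeralVolumesSizeLimit:
#       shm: 256Mi
#       temporaryData: 2Gi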
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
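# Editor's note: a sketch of an external cluster backed by a Barman object
# store, combining the `destinationPath`, `s3Credentials`, `data` and `wal`
# fields described above. Bucket, endpoint and secret names are hypothetical:
#
#   spec:
#     externalClusters:
#       - name: cluster-origin                 # hypothetical
#         barmanObjectStore:
#           destinationPath: s3://backups/cluster-origin
#           endpointURL: https://s3.example.com
#           s3Credentials:
#             accessKeyId:
#               name: aws-creds                # hypothetical secret
#               key: ACCESS_KEY_ID
#             secretAccessKey:
#               name: aws-creds
#               key: ACCESS_SECRET_KEY
#           data:
#             compression: gzip
#             jobs: 2
#           wal:
#             compression: gzip
#             maxParallel: 8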
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
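# Editor's note: a sketch of selecting the PostgreSQL image through an image
# catalog instead of `imageName`, per the `imageCatalogRef` schema above and
# its validation rules (only `ImageCatalog`/`ClusterImageCatalog` from the
# `postgresql.cnpg.io` group are accepted). The catalog and pull-secret names
# are hypothetical:
#
#   spec:
#     imageCatalogRef:
#       apiGroup: postgresql.cnpg.io
#       kind: ClusterImageCatalog
#       name: postgresql               # hypothetical catalog
#       major: 16
#     imagePullSecrets:
#       - name: registry-credentials   # hypothetical pull secret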
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
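# Editor's note: a sketch of declarative role management with the
# `managed.roles` fields listed above (the remaining role fields, such as
# `name` and `passwordSecret`, follow just below). Role and secret names are
# hypothetical:
#
#   spec:
#     managed:
#       roles:
#         - name: app_reader              # hypothetical role
#           ensure: present
#           comment: read-only application role
#           login: true
#           inherit: true
#           connectionLimit: 10
#           inRoles:
#             - pg_monitor
#           passwordSecret:
#             name: app-reader-password   # hypothetical secret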
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port. 
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+                                  More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                                type: string
+                            type: object
+                        type: object
+                      updateStrategy:
+                        default: patch
+                        description: UpdateStrategy describes how the service
+                          differences should be reconciled
+                        enum:
+                        - patch
+                        - replace
+                        type: string
+                    required:
+                    - selectorType
+                    - serviceTemplate
+                    type: object
+                  type: array
+                disabledDefaultServices:
+                  description: |-
+                    DisabledDefaultServices is a list of service types that are disabled by default.
+                    Valid values are "r" and "ro", representing the read and read-only services.
+                  items:
+                    description: |-
+                      ServiceSelectorType describes a valid value for generating the service selectors.
+                      It indicates which type of service the selector applies to, such as read-write, read, or read-only
+                    enum:
+                    - rw
+                    - r
+                    - ro
+                    type: string
+                  type: array
+              type: object
+          type: object
+        maxSyncReplicas:
+          default: 0
+          description: |-
+            The target value for the synchronous replication quorum, which can be
+            decreased if the number of ready standbys is lower than this.
+            Undefined or 0 disables synchronous replication.
+          minimum: 0
+          type: integer
+        minSyncReplicas:
+          default: 0
+          description: |-
+            Minimum number of instances required in synchronous replication with the
+            primary. Undefined or 0 allows writes to complete when no standby is
+            available.
+          minimum: 0
+          type: integer
+        monitoring:
+          description: The configuration of the monitoring infrastructure of
+            this cluster
+          properties:
+            customQueriesConfigMap:
+              description: The list of config maps containing the custom queries
+              items:
+                description: |-
+                  ConfigMapKeySelector contains enough information to let you locate
+                  the key of a ConfigMap
+                properties:
+                  key:
+                    description: The key to select
+                    type: string
+                  name:
+                    description: Name of the referent.
+                    type: string
+                required:
+                - key
+                - name
+                type: object
+              type: array
+            customQueriesSecret:
+              description: The list of secrets containing the custom queries
+              items:
+                description: |-
+                  SecretKeySelector contains enough information to let you locate
+                  the key of a Secret
+                properties:
+                  key:
+                    description: The key to select
+                    type: string
+                  name:
+                    description: Name of the referent.
+                    type: string
+                required:
+                - key
+                - name
+                type: object
+              type: array
+            disableDefaultQueries:
+              default: false
+              description: |-
+                Whether the default queries should be injected.
+                Set it to `true` if you don't want to inject default queries into the cluster.
+                Default: false.
+              type: boolean
+            enablePodMonitor:
+              default: false
+              description: Enable or disable the `PodMonitor`
+              type: boolean
+            podMonitorMetricRelabelings:
+              description: The list of metric relabelings for the `PodMonitor`.
+                Applied to samples before ingestion.
+              items:
+                description: |-
+                  RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+                  scraped samples and remote write samples.
+
+                  More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+                properties:
+                  action:
+                    default: replace
+                    description: |-
+                      Action to perform based on the regex matching.
+
+                      `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+                      `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
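+                      # Illustrative sketch, not part of the generated schema: a hypothetical
+                      # `.spec.monitoring` stanza that enables the PodMonitor and drops one
+                      # metric family via a relabeling; the metric name is an assumption.
+                      #
+                      #   monitoring:
+                      #     enablePodMonitor: true
+                      #     podMonitorMetricRelabelings:
+                      #     - sourceLabels: [__name__]
+                      #       regex: cnpg_collector_.*
+                      #       action: drop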
+                      pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+                      type: string
+                    type: array
+                  targetLabel:
+                    description: |-
+                      Label to which the resulting string is written in a replacement.
+
+                      It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+                      `KeepEqual` and `DropEqual` actions.
+
+                      Regex capture groups are available.
+                    type: string
+                type: object
+              type: array
+            tls:
+              description: |-
+                Configure TLS communication for the metrics endpoint.
+                Changing the tls.enabled option will force a rollout of all instances.
+              properties:
+                enabled:
+                  default: false
+                  description: |-
+                    Enable TLS for the monitoring endpoint.
+                    Changing this option will force a rollout of all instances.
+                  type: boolean
+              type: object
+          type: object
+        nodeMaintenanceWindow:
+          description: Define a maintenance window for the Kubernetes nodes
+          properties:
+            inProgress:
+              default: false
+              description: Is there a node maintenance activity in progress?
+              type: boolean
+            reusePVC:
+              default: true
+              description: |-
+                Reuse the existing PVC (wait for the node to come
+                up again) or not (recreate it elsewhere - when `instances` > 1)
+              type: boolean
+          type: object
+        plugins:
+          description: |-
+            The plugins configuration, containing
+            any plugin to be loaded with the corresponding configuration
+          items:
+            description: |-
+              PluginConfiguration specifies a plugin that needs to be loaded for this
+              cluster to be reconciled
+            properties:
+              enabled:
+                default: true
+                description: Enabled is true if this plugin will be used
+                type: boolean
+              isWALArchiver:
+                default: false
+                description: |-
+                  Only one plugin can be declared as WALArchiver.
+                  Cannot be active if ".spec.backup.barmanObjectStore" configuration is present.
+                type: boolean
+              name:
+                description: Name is the plugin name
+                type: string
+              parameters:
+                additionalProperties:
+                  type: string
+                description: Parameters is the configuration of the plugin
+                type: object
+            required:
+            - name
+            type: object
+          type: array
+        postgresGID:
+          default: 26
+          description: The GID of the `postgres` user inside the image, defaults
+            to `26`
+          format: int64
+          type: integer
+        postgresUID:
+          default: 26
+          description: The UID of the `postgres` user inside the image, defaults
+            to `26`
+          format: int64
+          type: integer
+        postgresql:
+          description: Configuration of the PostgreSQL server
+          properties:
+            enableAlterSystem:
+              description: |-
+                If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+                on this CloudNativePG Cluster.
+                This should only be used for debugging and troubleshooting.
+                Defaults to false.
+              type: boolean
+            ldap:
+              description: Options to specify LDAP configuration
+              properties:
+                bindAsAuth:
+                  description: Bind as authentication configuration
+                  properties:
+                    prefix:
+                      description: Prefix for the bind authentication option
+                      type: string
+                    suffix:
+                      description: Suffix for the bind authentication option
+                      type: string
+                  type: object
+                bindSearchAuth:
+                  description: Bind+Search authentication configuration
+                  properties:
+                    baseDN:
+                      description: Root DN to begin the user search
+                      type: string
+                    bindDN:
+                      description: DN of the user to bind to the directory
+                      type: string
+                    bindPassword:
+                      description: Secret with the password for the user to
+                        bind to the directory
+                      properties:
+                        key:
+                          description: The key of the secret to select from. Must
+                            be a valid secret key.
+                          type: string
+                        name:
+                          default: ""
+                          description: |-
+                            Name of the referent.
+                            This field is effectively required, but due to backwards compatibility is
+                            allowed to be empty. Instances of this type with an empty value here are
+                            almost certainly wrong.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                          type: string
+                        optional:
+                          description: Specify whether the Secret or its key
+                            must be defined
+                          type: boolean
+                      required:
+                      - key
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    searchAttribute:
+                      description: Attribute to match against the username
+                      type: string
+                    searchFilter:
+                      description: Search filter to use when doing the search+bind
+                        authentication
+                      type: string
+                  type: object
+                port:
+                  description: LDAP server port
+                  type: integer
+                scheme:
+                  description: LDAP scheme to be used, possible options are
+                    `ldap` and `ldaps`
+                  enum:
+                  - ldap
+                  - ldaps
+                  type: string
+                server:
+                  description: LDAP hostname or IP address
+                  type: string
+                tls:
+                  description: Set to 'true' to enable LDAP over TLS. The default
+                    is 'false'
+                  type: boolean
+              type: object
+            parameters:
+              additionalProperties:
+                type: string
+              description: PostgreSQL configuration options (postgresql.conf)
+              type: object
+            pg_hba:
+              description: |-
+                PostgreSQL Host Based Authentication rules (lines to be appended
+                to the pg_hba.conf file)
+              items:
+                type: string
+              type: array
+            pg_ident:
+              description: |-
+                PostgreSQL User Name Maps rules (lines to be appended
+                to the pg_ident.conf file)
+              items:
+                type: string
+              type: array
+            promotionTimeout:
+              description: |-
+                Specifies the maximum number of seconds to wait when promoting an instance to primary.
+                Default value is 40000000, greater than one year in seconds,
+                big enough to simulate an infinite timeout
+              format: int32
+              type: integer
+            shared_preload_libraries:
+              description: List of shared preload libraries to add to the default
+                ones
+              items:
+                type: string
+              type: array
+            syncReplicaElectionConstraint:
+              description: |-
+                Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+                set up.
+              properties:
+                enabled:
+                  description: This flag enables the constraints for sync replicas
+                  type: boolean
+                nodeLabelsAntiAffinity:
+                  description: A list of node label values to extract and compare
+                    to evaluate if the pods reside in the same topology or not
+                  items:
+                    type: string
+                  type: array
+              required:
+              - enabled
+              type: object
+            synchronous:
+              description: Configuration of the PostgreSQL synchronous replication
+                feature
+              properties:
+                dataDurability:
+                  default: required
+                  description: |-
+                    If set to "required", data durability is strictly enforced. Write operations
+                    with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+                    block if there are insufficient healthy replicas, ensuring data persistence.
+                    If set to "preferred", data durability is maintained when healthy replicas
+                    are available, but the required number of instances will adjust dynamically
+                    if replicas become unavailable. This setting relaxes strict durability enforcement
+                    to allow for operational continuity. This setting is only applicable if both
+                    `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+                  enum:
+                  - required
+                  - preferred
+                  type: string
+                maxStandbyNamesFromCluster:
+                  description: |-
+                    Specifies the maximum number of local cluster pods that can be
+                    automatically included in the `synchronous_standby_names` option in
+                    PostgreSQL.
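+                  # Illustrative sketch, not part of the generated schema: a hypothetical
+                  # `.spec.postgresql.synchronous` stanza for quorum-based synchronous
+                  # replication; the values are assumptions, not defaults.
+                  #
+                  #   postgresql:
+                  #     synchronous:
+                  #       method: any
+                  #       number: 1
+                  #       maxStandbyNamesFromCluster: 2
+                  #       dataDurability: required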
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
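+                    # Illustrative sketch, not part of the generated schema: a hypothetical
+                    # `.spec.probes` override tuning the startup probe for a slow-starting
+                    # instance; the threshold and lag values are assumptions.
+                    #
+                    #   probes:
+                    #     startup:
+                    #       type: streaming
+                    #       maximumLag: 16Mi
+                    #       failureThreshold: 30
+                    #       periodSeconds: 10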
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
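+              # Illustrative sketch, not part of the generated schema: a hypothetical
+              # `.spec.projectedVolumeTemplate` projecting one Secret key under the
+              # `/projected` base folder; the Secret name and key are assumptions.
+              #
+              #   projectedVolumeTemplate:
+              #     sources:
+              #     - secret:
+              #         name: app-extra-config
+              #         items:
+              #         - key: ca.crt
+              #           path: certs/ca.crt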
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
+                      type: string
+                    expirationSeconds:
+                      description: |-
+                        expirationSeconds is the requested duration of validity of the service
+                        account token. As the token approaches expiration, the kubelet volume
+                        plugin will proactively rotate the service account token. The kubelet will
+                        start trying to rotate the token if the token is older than 80 percent of
+                        its time to live or if the token is older than 24 hours. Defaults to 1 hour
+                        and must be at least 10 minutes.
+                      format: int64
+                      type: integer
+                    path:
+                      description: |-
+                        path is the path relative to the mount point of the file to project the
+                        token into.
+                      type: string
+                  required:
+                  - path
+                  type: object
+                type: object
+              type: array
+              x-kubernetes-list-type: atomic
+          type: object
+        replica:
+          description: Replica cluster configuration
+          properties:
+            enabled:
+              description: |-
+                If replica mode is enabled, this cluster will be a replica of an
+                existing cluster. A replica cluster can be created from a recovery
+                object store or via streaming through pg_basebackup.
+                Refer to the Replica clusters page of the documentation for more information.
+              type: boolean
+            minApplyDelay:
+              description: |-
+                When replica mode is enabled, this parameter allows you to replay
+                transactions only when the system time is at least the configured
+                time past the commit time. This provides an opportunity to correct
+                data loss errors. Note that when this parameter is set, a promotion
+                token cannot be used.
+              type: string
+            primary:
+              description: |-
+                Primary defines which Cluster is the primary in the distributed PostgreSQL cluster, based on the
+                topology specified in externalClusters
+              type: string
+            promotionToken:
+              description: |-
+                A demotion token generated by an external cluster used to
+                check if the promotion requirements are met.
+              type: string
+            self:
+              description: |-
+                Self defines the name of this cluster. It is used to determine if this is a primary
+                or a replica cluster, comparing it with `primary`
+              type: string
+            source:
+              description: The name of the external cluster which is the replication
+                origin
+              minLength: 1
+              type: string
+          required:
+          - source
+          type: object
+        replicationSlots:
+          default:
+            highAvailability:
+              enabled: true
+          description: Replication slots management configuration
+          properties:
+            highAvailability:
+              default:
+                enabled: true
+              description: Replication slots for high availability configuration
+              properties:
+                enabled:
+                  default: true
+                  description: |-
+                    If enabled (default), the operator will automatically manage replication slots
+                    on the primary instance and use them in streaming replication
+                    connections with all the standby instances that are part of the HA
+                    cluster. If disabled, the operator will not take advantage
+                    of replication slots in streaming connections with the replicas.
+                    This feature also controls replication slots in replica clusters,
+                    from the designated primary to its cascading replicas.
+                  type: boolean
+                slotPrefix:
+                  default: _cnpg_
+                  description: |-
+                    Prefix for replication slots managed by the operator for HA.
+                    It may only contain lower case letters, numbers, and the underscore character.
+                    This can only be set at creation time. By default set to `_cnpg_`.
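+                  # Illustrative sketch, not part of the generated schema: a hypothetical
+                  # `.spec.replicationSlots` stanza; the exclusion pattern is an assumption.
+                  #
+                  #   replicationSlots:
+                  #     highAvailability:
+                  #       enabled: true
+                  #       slotPrefix: _cnpg_
+                  #     synchronizeReplicas:
+                  #       enabled: true
+                  #       excludePatterns: ["^temp_"]
+                  #     updateInterval: 30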
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+                      If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                      set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                      exists.
+                      More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                      (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+                    type: string
+                  volumeMode:
+                    description: |-
+                      volumeMode defines what type of volume is required by the claim.
+                      Value of Filesystem is implied when not included in claim spec.
+                    type: string
+                  volumeName:
+                    description: volumeName is the binding reference to the PersistentVolume
+                      backing this claim.
+                    type: string
+                type: object
+              resizeInUseVolumes:
+                default: true
+                description: Resize existing PVCs, defaults to true
+                type: boolean
+              size:
+                description: |-
+                  Size of the storage. Required if not already specified in the PVC template.
+                  Changes to this field are automatically reapplied to the created PVCs.
+                  Size cannot be decreased.
+                type: string
+              storageClass:
+                description: |-
+                  StorageClass to use for PVCs. Applied after
+                  evaluating the PVC template, if available.
+                  If not specified, the generated PVCs will use the
+                  default storage class
+                type: string
+            type: object
+          superuserSecret:
+            description: |-
+              The secret containing the superuser password. If not defined a new
+              secret will be created with a randomly generated password
+            properties:
+              name:
+                description: Name of the referent.
+                type: string
+            required:
+            - name
+            type: object
+          switchoverDelay:
+            default: 3600
+            description: |-
+              The time in seconds that is allowed for a primary PostgreSQL instance
+              to gracefully shutdown during a switchover.
+              Default value is 3600 seconds (1 hour).
+            format: int32
+            type: integer
+          tablespaces:
+            description: The tablespaces configuration
+            items:
+              description: |-
+                TablespaceConfiguration is the configuration of a tablespace, and includes
+                the storage specification for the tablespace
+              properties:
+                name:
+                  description: The name of the tablespace
+                  type: string
+                owner:
+                  description: Owner is the PostgreSQL user owning the tablespace
+                  properties:
+                    name:
+                      type: string
+                  type: object
+                storage:
+                  description: The storage configuration for the tablespace
+                  properties:
+                    pvcTemplate:
+                      description: Template to be used to generate the Persistent
+                        Volume Claim
+                      properties:
+                        accessModes:
+                          description: |-
+                            accessModes contains the desired access modes the volume should have.
+                            More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        dataSource:
+                          description: |-
+                            dataSource field can be used to specify either:
+                            * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                            * An existing PVC (PersistentVolumeClaim)
+                            If the provisioner or an external controller can support the specified data source,
+                            it will create a new volume based on the contents of the specified data source.
+                            When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+                            and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+                            If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+                          properties:
+                            apiGroup:
+                              description: |-
+                                APIGroup is the group for the resource being referenced.
+                                If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
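+ # A minimal sketch: spreading a three-instance cluster across zones,
+ # assuming the operator-managed `cnpg.io/cluster` label and a Cluster
+ # named `cluster-example`:
+ #
+ #   topologySpreadConstraints:
+ #     - maxSkew: 1
+ #       topologyKey: topology.kubernetes.io/zone
+ #       whenUnsatisfiable: DoNotSchedule
+ #       labelSelector:
+ #         matchLabels:
+ #           cnpg.io/cluster: cluster-example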
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
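+ # A minimal sketch: most specs set only the walStorage shortcut fields
+ # instead of a full pvcTemplate (storage class name assumed):
+ #
+ #   walStorage:
+ #     size: 1Gi
+ #     storageClass: standard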
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. 
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided,
+ this can be omitted.
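+ # A minimal sketch of a user-provided client CA secret carrying the keys
+ # described above (secret name assumed):
+ #
+ #   apiVersion: v1
+ #   kind: Secret
+ #   metadata:
+ #     name: cluster-example-ca
+ #   data:
+ #     ca.crt: <base64-encoded PEM certificate>
+ #     ca.key: <base64-encoded PEM private key>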
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server SSL certificates; if ServerTLSSecret is provided,
+ this can be omitted.
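+ # A minimal sketch: after extracting `ca.crt` from this secret, a client
+ # can verify the server with it (service and database names assumed):
+ #
+ #   psql "host=cluster-example-rw dbname=app user=app sslmode=verify-full sslrootcert=ca.crt"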
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. 
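+ # A minimal sketch of how this appears in a Cluster status (image tag assumed):
+ #
+ #   status:
+ #     pgDataImageInfo:
+ #       image: ghcr.io/cloudnative-pg/postgresql:17.2
+ #       majorVersion: 17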
+ properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. 
+ type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. + type: boolean + type: object + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance, this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extract. 
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
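+ # A minimal sketch of a Database object: only `cluster`, `name`, and
+ # `owner` are required; the remaining fields map to CREATE/ALTER
+ # DATABASE options (object and role names assumed):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Database
+ #   metadata:
+ #     name: db-one
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     name: one
+ #     owner: app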
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + extensions: + description: Extensions is the status of the managed extensions + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + schemas: + description: Schemas is the status of the managed schemas + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
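+ # A minimal sketch of a catalog satisfying the unique-major rule
+ # (image tags assumed):
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql
+ #   spec:
+ #     images:
+ #       - major: 16
+ #         image: ghcr.io/cloudnative-pg/postgresql:16.4
+ #       - major: 17
+ #         image: ghcr.io/cloudnative-pg/postgresql:17.2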
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
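+ # Illustrative sketch, not emitted by controller-gen: enabling the
+ # PodMonitor and dropping one metric family before ingestion. The
+ # metric name pattern below is a hypothetical example.
+ #
+ #   spec:
+ #     monitoring:
+ #       enablePodMonitor: true
+ #       podMonitorMetricRelabelings:
+ #         - sourceLabels: [__name__]
+ #           regex: cnpg_pgbouncer_stats_.*
+ #           action: drop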
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
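+ # Illustrative sketch, not emitted by controller-gen: pausing PgBouncer
+ # on an existing Pooler through `paused`. The pooler and cluster names
+ # are hypothetical.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Pooler
+ #   metadata:
+ #     name: pooler-example-rw
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     instances: 1
+ #     type: rw
+ #     pgbouncer:
+ #       poolMode: session
+ #       paused: true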
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly. If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local.
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it cannot be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be
+ ignored if the cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
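+ # Illustrative sketch, not emitted by controller-gen: a weighted pod
+ # affinity term inside `template.spec.affinity`, preferring nodes that
+ # already run pods carrying a hypothetical application label.
+ #
+ #   podAffinity:
+ #     preferredDuringSchedulingIgnoredDuringExecution:
+ #       - weight: 50
+ #         podAffinityTerm:
+ #           topologyKey: kubernetes.io/hostname
+ #           labelSelector:
+ #             matchLabels:
+ #               app: my-app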
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+                          The container image's CMD is used if this is not provided.
+                          Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                          cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                          to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                          produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                          of whether the variable exists or not. Cannot be updated.
+                          More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      command:
+                        description: |-
+                          Entrypoint array. Not executed within a shell.
+                          The container image's ENTRYPOINT is used if this is not provided.
+                          Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                          cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                          to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                          produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                          of whether the variable exists or not. Cannot be updated.
+                          More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      env:
+                        description: |-
+                          List of environment variables to set in the container.
+                          Cannot be updated.
+                        items:
+                          description: EnvVar represents an environment variable
+                            present in a Container.
+                          properties:
+                            name:
+                              description: Name of the environment variable.
+                                Must be a C_IDENTIFIER.
+                              type: string
+                            value:
+                              description: |-
+                                Variable references $(VAR_NAME) are expanded
+                                using the previously defined environment variables in the container and
+                                any service environment variables. If a variable cannot be resolved,
+                                the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                Escaped references will never be expanded, regardless of whether the variable
+                                exists or not.
+                                Defaults to "".
+                              type: string
+                            valueFrom:
+                              description: Source for the environment variable's
+                                value. Cannot be used if value is not empty.
+                              properties:
+                                configMapKeyRef:
+                                  description: Selects a key of a ConfigMap.
+                                  properties:
+                                    key:
+                                      description: The key to select.
+                                      type: string
+                                    name:
+                                      default: ""
+                                      description: |-
+                                        Name of the referent.
+                                        This field is effectively required, but due to backwards compatibility is
+                                        allowed to be empty. Instances of this type with an empty value here are
+                                        almost certainly wrong.
+                                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                      type: string
+                                    optional:
+                                      description: Specify whether the ConfigMap
+                                        or its key must be defined
+                                      type: boolean
+                                  required:
+                                  - key
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                fieldRef:
+                                  description: |-
+                                    Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                    spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                  properties:
+                                    apiVersion:
+                                      description: Version of the schema the
+                                        FieldPath is written in terms of, defaults
+                                        to "v1".
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless
+                          of whether the variable exists or not. Cannot be updated.
+                          More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      command:
+                        description: |-
+                          Entrypoint array. Not executed within a shell.
+                          The image's ENTRYPOINT is used if this is not provided.
+                          Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                          cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                          to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                          produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                          of whether the variable exists or not. Cannot be updated.
+                          More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      env:
+                        description: |-
+                          List of environment variables to set in the container.
+                          Cannot be updated.
+                        items:
+                          description: EnvVar represents an environment variable
+                            present in a Container.
+                          properties:
+                            name:
+                              description: Name of the environment variable.
+                                Must be a C_IDENTIFIER.
+                              type: string
+                            value:
+                              description: |-
+                                Variable references $(VAR_NAME) are expanded
+                                using the previously defined environment variables in the container and
+                                any service environment variables. If a variable cannot be resolved,
+                                the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                Escaped references will never be expanded, regardless of whether the variable
+                                exists or not.
+                                Defaults to "".
+                              type: string
+                            valueFrom:
+                              description: Source for the environment variable's
+                                value. Cannot be used if value is not empty.
+                              properties:
+                                configMapKeyRef:
+                                  description: Selects a key of a ConfigMap.
+                                  properties:
+                                    key:
+                                      description: The key to select.
+                                      type: string
+                                    name:
+                                      default: ""
+                                      description: |-
+                                        Name of the referent.
+                                        This field is effectively required, but due to backwards compatibility is
+                                        allowed to be empty. Instances of this type with an empty value here are
+                                        almost certainly wrong.
+                                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                      type: string
+                                    optional:
+                                      description: Specify whether the ConfigMap
+                                        or its key must be defined
+                                      type: boolean
+                                  required:
+                                  - key
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                fieldRef:
+                                  description: |-
+                                    Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                    spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                  properties:
+                                    apiVersion:
+                                      description: Version of the schema the
+                                        FieldPath is written in terms of, defaults
+                                        to "v1".
+                                      type: string
+                                    fieldPath:
+                                      description: Path of the field to select
+                                        in the specified API version.
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ (which defaults to None).
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+ mitigating container breakout vulnerabilities while still allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod.
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max of
+ that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+                        format: int32
+                        type: integer
+                      tcpSocket:
+                        description: TCPSocket specifies a connection to
+                          a TCP port.
+                        properties:
+                          host:
+                            description: 'Optional: Host name to connect
+                              to, defaults to the pod IP.'
+                            type: string
+                          port:
+                            anyOf:
+                            - type: integer
+                            - type: string
+                            description: |-
+                              Number or name of the port to access on the container.
+                              Number must be in the range 1 to 65535.
+                              Name must be an IANA_SVC_NAME.
+                            x-kubernetes-int-or-string: true
+                        required:
+                        - port
+                        type: object
+                      terminationGracePeriodSeconds:
+                        description: |-
+                          Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                          The grace period is the duration in seconds after the processes running in the pod are sent
+                          a termination signal and the time when the processes are forcibly halted with a kill signal.
+                          Set this value longer than the expected cleanup time for your process.
+                          If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                          value overrides the value provided by the pod spec.
+                          Value must be a non-negative integer. The value zero indicates stop immediately via
+                          the kill signal (no opportunity to shut down).
+                          This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+                          Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                        format: int64
+                        type: integer
+                      timeoutSeconds:
+                        description: |-
+                          Number of seconds after which the probe times out.
+                          Defaults to 1 second. Minimum value is 1.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                    type: object
+                  name:
+                    description: |-
+                      Name of the container specified as a DNS_LABEL.
+                      Each container in a pod must have a unique name (DNS_LABEL).
+                      Cannot be updated.
+                    type: string
+                  ports:
+                    description: |-
+                      List of ports to expose from the container. Not specifying a port here
+                      DOES NOT prevent that port from being exposed. Any port which is
+                      listening on the default "0.0.0.0" address inside a container will be
+                      accessible from the network.
+                      Modifying this array with strategic merge patch may corrupt the data.
+                      For more information see https://github.com/kubernetes/kubernetes/issues/108255.
+                      Cannot be updated.
+                    items:
+                      description: ContainerPort represents a network port
+                        in a single container.
+                      properties:
+                        containerPort:
+                          description: |-
+                            Number of the port to expose on the pod's IP address.
+                            This must be a valid port number, 0 < x < 65536.
+                          format: int32
+                          type: integer
+                        hostIP:
+                          description: What host IP to bind the external
+                            port to.
+                          type: string
+                        hostPort:
+                          description: |-
+                            Number of the port to expose on the host.
+                            If specified, this must be a valid port number, 0 < x < 65536.
+                            If HostNetwork is specified, this must match ContainerPort.
+                            Most containers do not need this.
+                          format: int32
+                          type: integer
+                        name:
+                          description: |-
+                            If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+                            named port in a pod must have a unique name. Name for the port that can be
+                            referred to by services.
+                          type: string
+                        protocol:
+                          default: TCP
+                          description: |-
+                            Protocol for port. Must be UDP, TCP, or SCTP.
+                            Defaults to "TCP".
+                          type: string
+                      required:
+                      - containerPort
+                      type: object
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - containerPort
+                    - protocol
+                    x-kubernetes-list-type: map
+                  readinessProbe:
+                    description: |-
+                      Periodic probe of container service readiness.
+                      Container will be removed from service endpoints if the probe fails.
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+                            type: string
+                          port:
+                            anyOf:
+                            - type: integer
+                            - type: string
+                            description: |-
+                              Number or name of the port to access on the container.
+                              Number must be in the range 1 to 65535.
+                              Name must be an IANA_SVC_NAME.
+                            x-kubernetes-int-or-string: true
+                        required:
+                        - port
+                        type: object
+                      terminationGracePeriodSeconds:
+                        description: |-
+                          Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                          The grace period is the duration in seconds after the processes running in the pod are sent
+                          a termination signal and the time when the processes are forcibly halted with a kill signal.
+                          Set this value longer than the expected cleanup time for your process.
+                          If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                          value overrides the value provided by the pod spec.
+                          Value must be a non-negative integer. The value zero indicates stop immediately via
+                          the kill signal (no opportunity to shut down).
+                          This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+                          Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                        format: int64
+                        type: integer
+                      timeoutSeconds:
+                        description: |-
+                          Number of seconds after which the probe times out.
+                          Defaults to 1 second. Minimum value is 1.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                    type: object
+                  resizePolicy:
+                    description: Resources resize policy for the container.
+                    items:
+                      description: ContainerResizePolicy represents resource
+                        resize policy for the container.
+                      properties:
+                        resourceName:
+                          description: |-
+                            Name of the resource to which this resource resize policy applies.
+                            Supported values: cpu, memory.
+                          type: string
+                        restartPolicy:
+                          description: |-
+                            Restart policy to apply when specified resource is resized.
+                            If not specified, it defaults to NotRequired.
+                          type: string
+                      required:
+                      - resourceName
+                      - restartPolicy
+                      type: object
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  resources:
+                    description: |-
+                      Compute Resources required by this container.
+                      Cannot be updated.
+                      More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                    properties:
+                      claims:
+                        description: |-
+                          Claims lists the names of resources, defined in spec.resourceClaims,
+                          that are used by this container.
+
+                          This is an alpha field and requires enabling the
+                          DynamicResourceAllocation feature gate.
+
+                          This field is immutable. It can only be set for containers.
+                        items:
+                          description: ResourceClaim references one entry
+                            in PodSpec.ResourceClaims.
+                          properties:
+                            name:
+                              description: |-
+                                Name must match the name of one entry in pod.spec.resourceClaims of
+                                the Pod where this field is used. It makes that resource available
+                                inside a container.
+                              type: string
+                            request:
+                              description: |-
+                                Request is the name chosen for a request in the referenced claim.
+                                If empty, everything from the claim is made available, otherwise
+                                only the result of this request.
+                              type: string
+                          required:
+                          - name
+                          type: object
+                        type: array
+                        x-kubernetes-list-map-keys:
+                        - name
+                        x-kubernetes-list-type: map
+                      limits:
+                        additionalProperties:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                        description: |-
+                          Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+                          If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                          value overrides the value provided by the pod spec.
+                          Value must be a non-negative integer. The value zero indicates stop immediately via
+                          the kill signal (no opportunity to shut down).
+                          This is a beta field and requires enabling the ProbeTerminationGracePeriod feature gate.
+                          Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                        format: int64
+                        type: integer
+                      timeoutSeconds:
+                        description: |-
+                          Number of seconds after which the probe times out.
+                          Defaults to 1 second. Minimum value is 1.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        format: int32
+                        type: integer
+                    type: object
+                  stdin:
+                    description: |-
+                      Whether this container should allocate a buffer for stdin in the container runtime. If this
+                      is not set, reads from stdin in the container will always result in EOF.
+                      Default is false.
+                    type: boolean
+                  stdinOnce:
+                    description: |-
+                      Whether the container runtime should close the stdin channel after it has been opened by
+                      a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                      sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                      first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                      at which time stdin is closed and remains closed until the container is restarted. If this
+                      flag is false, a container process that reads from stdin will never receive an EOF.
+                      Default is false.
+                    type: boolean
+                  terminationMessagePath:
+                    description: |-
+                      Optional: Path at which the file to which the container's termination message
+                      will be written is mounted into the container's filesystem.
+                      Message written is intended to be brief final status, such as an assertion failure message.
+                      Will be truncated by the node if greater than 4096 bytes. The total message length across
+                      all containers will be limited to 12kb.
+                      Defaults to /dev/termination-log.
+                      Cannot be updated.
+                    type: string
+                  terminationMessagePolicy:
+                    description: |-
+                      Indicates how the termination message should be populated. File will use the contents of
+                      terminationMessagePath to populate the container status message on both success and failure.
+                      FallbackToLogsOnError will use the last chunk of container log output if the termination
+                      message file is empty and the container exited with an error.
+                      The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                      Defaults to File.
+                      Cannot be updated.
+                    type: string
+                  tty:
+                    description: |-
+                      Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                      Default is false.
+                    type: boolean
+                  volumeDevices:
+                    description: volumeDevices is the list of block devices
+                      to be used by the container.
+                    items:
+                      description: volumeDevice describes a mapping of a
+                        raw block device within a container.
+                      properties:
+                        devicePath:
+                          description: devicePath is the path inside of
+                            the container that the device will be mapped
+                            to.
+                          type: string
+                        name:
+                          description: name must match the name of a persistentVolumeClaim
+                            in the pod
+                          type: string
+                      required:
+                      - devicePath
+                      - name
+                      type: object
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - devicePath
+                    x-kubernetes-list-type: map
+                  volumeMounts:
+                    description: |-
+                      Pod volumes to mount into the container's filesystem.
+                      Cannot be updated.
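+                    # Illustrative sketch (comment only, not part of the generated
+                    # schema): a container might mount a volume defined elsewhere in
+                    # the pod spec; the name and path below are assumptions:
+                    #   volumeMounts:
+                    #     - name: cache
+                    #       mountPath: /var/cache
+                    #       readOnly: true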
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+                  More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                type: object
+                x-kubernetes-map-type: atomic
+              os:
+                description: |-
+                  Specifies the OS of the containers in the pod.
+                  Some pod and container fields are restricted if this is set.
+
+                  If the OS field is set to linux, the following fields must be unset:
+                  - securityContext.windowsOptions
+
+                  If the OS field is set to windows, the following fields must be unset:
+                  - spec.hostPID
+                  - spec.hostIPC
+                  - spec.hostUsers
+                  - spec.securityContext.appArmorProfile
+                  - spec.securityContext.seLinuxOptions
+                  - spec.securityContext.seccompProfile
+                  - spec.securityContext.fsGroup
+                  - spec.securityContext.fsGroupChangePolicy
+                  - spec.securityContext.sysctls
+                  - spec.shareProcessNamespace
+                  - spec.securityContext.runAsUser
+                  - spec.securityContext.runAsGroup
+                  - spec.securityContext.supplementalGroups
+                  - spec.securityContext.supplementalGroupsPolicy
+                  - spec.containers[*].securityContext.appArmorProfile
+                  - spec.containers[*].securityContext.seLinuxOptions
+                  - spec.containers[*].securityContext.seccompProfile
+                  - spec.containers[*].securityContext.capabilities
+                  - spec.containers[*].securityContext.readOnlyRootFilesystem
+                  - spec.containers[*].securityContext.privileged
+                  - spec.containers[*].securityContext.allowPrivilegeEscalation
+                  - spec.containers[*].securityContext.procMount
+                  - spec.containers[*].securityContext.runAsUser
+                  - spec.containers[*].securityContext.runAsGroup
+                properties:
+                  name:
+                    description: |-
+                      Name is the name of the operating system. The currently supported values are linux and windows.
+                      Additional values may be defined in the future and can be one of:
+                      https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+                      Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+                    type: string
+                required:
+                - name
+                type: object
+              overhead:
+                additionalProperties:
+                  anyOf:
+                  - type: integer
+                  - type: string
+                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                  x-kubernetes-int-or-string: true
+                description: |-
+                  Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+                  This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+                  the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+                  The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+                  set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+                  defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+                  More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+                type: object
+              preemptionPolicy:
+                description: |-
+                  PreemptionPolicy is the Policy for preempting pods with lower priority.
+                  One of Never, PreemptLowerPriority.
+                  Defaults to PreemptLowerPriority if unset.
+                type: string
+              priority:
+                description: |-
+                  The priority value. Various system components use this field to find the
+                  priority of the pod. When Priority Admission Controller is enabled, it
+                  prevents users from setting this field. The admission controller populates
+                  this field from PriorityClassName.
+                  The higher the value, the higher the priority.
+                format: int32
+                type: integer
+              priorityClassName:
+                description: |-
+                  If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+                      Note that this field cannot be set when spec.os.name is windows.
+                    format: int64
+                    type: integer
+                  seLinuxChangePolicy:
+                    description: |-
+                      seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+                      It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+                      Valid values are "MountOption" and "Recursive".
+
+                      "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+                      This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+
+                      "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+                      This requires all Pods that share the same volume to use the same SELinux label.
+                      It is not possible to share the same volume among privileged and unprivileged Pods.
+                      Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+                      whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+                      CSIDriver instance. Other volumes are always re-labelled recursively.
+                      "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+
+                      If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+                      If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+                      and "Recursive" for all other volumes.
+
+                      This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+
+                      All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+                      Note that this field cannot be set when spec.os.name is windows.
+                    type: string
+                  seLinuxOptions:
+                    description: |-
+                      The SELinux context to be applied to all containers.
+                      If unspecified, the container runtime will allocate a random SELinux context for each
+                      container. May also be set in SecurityContext. If set in
+                      both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+                      takes precedence for that container.
+                      Note that this field cannot be set when spec.os.name is windows.
+                    properties:
+                      level:
+                        description: Level is SELinux level label that applies
+                          to the container.
+                        type: string
+                      role:
+                        description: Role is a SELinux role label that applies
+                          to the container.
+                        type: string
+                      type:
+                        description: Type is a SELinux type label that applies
+                          to the container.
+                        type: string
+                      user:
+                        description: User is a SELinux user label that applies
+                          to the container.
+                        type: string
+                    type: object
+                  seccompProfile:
+                    description: |-
+                      The seccomp options to use by the containers in this pod.
+                      Note that this field cannot be set when spec.os.name is windows.
+                    properties:
+                      localhostProfile:
+                        description: |-
+                          localhostProfile indicates a profile defined in a file on the node should be used.
+                          The profile must be preconfigured on the node to work.
+                          Must be a descending path, relative to the kubelet's configured seccomp profile location.
+                          Must be set if type is "Localhost". Must NOT be set for any other type.
+                        type: string
+                      type:
+                        description: |-
+                          type indicates which kind of seccomp profile will be applied.
+                          Valid options are:
+
+                          Localhost - a profile defined in a file on the node should be used.
+                          RuntimeDefault - the container runtime default profile should be used.
+                          Unconfined - no profile should be applied.
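+                        # Illustrative sketch (comment only, not part of the
+                        # generated schema):
+                        #   securityContext:
+                        #     seccompProfile:
+                        #       type: RuntimeDefault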
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true, the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Defaults to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Defaults to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be a non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
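+ # Illustrative toleration built from the fields above; the taint key and
+ # value are hypothetical.
+ #   tolerations:
+ #   - key: example.com/dedicated
+ #     operator: Equal
+ #     value: postgres
+ #     effect: NoSchedule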
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, a new pod with the same labelSelector cannot be scheduled,
+ because the computed skew would be 3 (3 - 0) if the new Pod were scheduled to any of the three zones,
+ which would violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature enabled by default by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature enabled by default by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put a balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
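+ # Illustrative spread constraint combining the fields above with
+ # whenUnsatisfiable, described just below; the app label is hypothetical.
+ #   topologySpreadConstraints:
+ #   - maxSkew: 1
+ #     topologyKey: topology.kubernetes.io/zone
+ #     whenUnsatisfiable: DoNotSchedule
+ #     labelSelector:
+ #       matchLabels:
+ #         app: example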
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
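+ # Each volumes entry pairs a name with exactly one volume source, and
+ # containers mount it by name. An illustrative sketch using the emptyDir
+ # source described further below (name and size are examples only):
+ #   volumes:
+ #   - name: scratch
+ #     emptyDir:
+ #       sizeLimit: 1Gi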
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specifies whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim, but it's not allowed to reset this field to an empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
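+ # Illustrative sketch of the gitRepo replacement pattern recommended above;
+ # the image name and repository URL are placeholders:
+ #   volumes:
+ #   - name: repo
+ #     emptyDir: {}
+ #   initContainers:
+ #   - name: clone-repo
+ #     image: alpine/git
+ #     args: ["clone", "https://example.com/repo.git", "/work"]
+ #     volumeMounts:
+ #     - name: repo
+ #       mountPath: /work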
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and with non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects.
Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether iSCSI
+ Discovery CHAP authentication is supported
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether iSCSI
+ Session CHAP authentication is supported
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal.
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
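+ # Illustrative use of the persistentVolumeClaim source described above; the
+ # claim name is hypothetical:
+ #   volumes:
+ #   - name: data
+ #     persistentVolumeClaim:
+ #       claimName: my-existing-claim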
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)<br />
+ - self: sets the Scheduled backup object as owner of the backup<br />
+ - cluster: set the cluster as owner of the backup<br />
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to be immediately start after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup. 
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule + format: date-time + type: string + lastScheduleTime: + description: Information when was the last time that backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + 
rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From 5efa4a15de289761d97c56458b97fa7222f0af38 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 13 May 2025 16:21:00 +0200 Subject: [PATCH 581/836] ci: fix permissions on release publish workflow (#7562) Add the missing permission write in the release publish workflow Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/release-publish.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index a57a5559f7..05cf6f1ecf 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -90,7 +90,7 @@ jobs: runs-on: ubuntu-24.04 permissions: packages: write - contents: read + contents: write id-token: write needs: - check-version @@ -239,7 +239,7 @@ jobs: name: Create OLM bundle and catalog runs-on: ubuntu-24.04 permissions: - contents: read + contents: write packages: write needs: - check-version From 839c99c503ad361982312bd458a858c731880f00 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 13 May 2025 16:23:23 +0200 Subject: [PATCH 582/836] Revert "Version tag to 1.26.0-rc3 (#7560)" Due to a failed release, we revert the release commit and trigger a new release This reverts commit f3995f0767b5f9e4ebeb993f669079b67b8303dc. Signed-off-by: Jonathan Gonzalez V. 
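In GitHub Actions, a job's `GITHUB_TOKEN` can create a GitHub release, push tags, or upload release assets only when it is granted `contents: write`; the default `contents: read` is enough to check out the repository but not to publish, which is what the permissions fix in #7562 above addresses and what appears to be behind the failed release reverted here. A minimal sketch of the job-level permissions block involved, assuming a single illustrative job name (the real workflow defines several jobs with more settings):

```yaml
jobs:
  release:
    runs-on: ubuntu-24.04
    permissions:
      contents: write   # create the GitHub release and upload assets
      packages: write   # push container images to GHCR
      id-token: write   # mint an OIDC token, e.g. for keyless image signing
```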
---
 docs/src/installation_upgrade.md |     4 +-
 docs/src/kubectl-plugin.md       |    30 +-
 pkg/versions/versions.go         |     6 +-
 releases/cnpg-1.26.0-rc3.yaml    | 18021 -----------------------------
 4 files changed, 20 insertions(+), 18041 deletions(-)
 delete mode 100644 releases/cnpg-1.26.0-rc3.yaml

diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index 9a8bd66381..a9ab979354 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -8,12 +8,12 @@ The operator can be installed like any other resource in Kubernetes,
 through a YAML manifest applied via `kubectl`.
 
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc3.yaml)
+You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc2.yaml)
 for this minor release as follows:
 
 ```sh
 kubectl apply --server-side -f \
-  https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc3.yaml
+  https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc2.yaml
 ```
 
 You can verify that with:
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index d8e3d55a1b..50208b8651 100644
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -31,11 +31,11 @@ them in your systems.
 
 #### Debian packages
 
-For example, let's install the 1.26.0-rc3 release of the plugin, for an Intel based
+For example, let's install the 1.26.0-rc2 release of the plugin, for an Intel based
 64 bit server. First, we download the right `.deb` file.
 
 ```sh
-wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc3/kubectl-cnpg_1.26.0-rc3_linux_x86_64.deb \
+wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc2/kubectl-cnpg_1.26.0-rc2_linux_x86_64.deb \
   --output-document kube-plugin.deb
 ```
 
Then, install from the local file using `dpkg`:

```console
$ sudo dpkg -i kube-plugin.deb
Selecting previously unselected package cnpg.
(Reading database ... 6688 files and directories currently installed.)
Preparing to unpack kube-plugin.deb ...
-Unpacking cnpg (1.26.0-rc3) ...
-Setting up cnpg (1.26.0-rc3) ...
+Unpacking cnpg (1.26.0-rc2) ...
+Setting up cnpg (1.26.0-rc2) ...
```

#### RPM packages

-As in the example for `.deb` packages, let's install the 1.26.0-rc3 release for an
+As in the example for `.deb` packages, let's install the 1.26.0-rc2 release for an
 Intel 64 bit machine. Note the `--output` flag to provide a file name.

```sh
-curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc3/kubectl-cnpg_1.26.0-rc3_linux_x86_64.rpm \
+curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc2/kubectl-cnpg_1.26.0-rc2_linux_x86_64.rpm \
   --output kube-plugin.rpm
```

Then install with `yum`, and you're ready to use:

```console
$ sudo yum --disablerepo=* localinstall kube-plugin.rpm
...
Dependencies resolved.
Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.26.0-rc3 @commandline 20 M + cnpg x86_64 1.26.0-rc2 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc3 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc2 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc3 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc2 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc3","build":{"Version":"1.26.0-rc3+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc2","build":{"Version":"1.26.0-rc2+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc3","build":{"Version":"1.26.0-rc3+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc2","build":{"Version":"1.26.0-rc2+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 6304f15667..a97567d158 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.26.0-rc3" + Version = "1.26.0-rc2" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.26.0-rc3" + buildVersion = "1.26.0-rc2" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.26.0-rc3.yaml b/releases/cnpg-1.26.0-rc3.yaml deleted file mode 100644 index 0e3ebc55f8..0000000000 --- a/releases/cnpg-1.26.0-rc3.yaml +++ /dev/null @@ -1,18021 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - app.kubernetes.io/name: cloudnative-pg - name: cnpg-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 - name: backups.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .spec.method - name: Method - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.error - name: Error - type: string - name: v1 - schema: - openAPIV3Schema: - description: A Backup resource is a request for a PostgreSQL backup by the - user. 
- properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the backup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - cluster: - description: The cluster to backup - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - method: - default: barmanObjectStore - description: |- - The backup method to be used, possible options are `barmanObjectStore`, - `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. - enum: - - barmanObjectStore - - volumeSnapshot - - plugin - type: string - online: - description: |- - Whether the default type of backup with volume snapshots is - online/hot (`true`, default) or offline/cold (`false`) - Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' - type: boolean - onlineConfiguration: - description: |- - Configuration parameters to control the online/hot backup with volume snapshots - Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza - properties: - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - waitForArchive: - default: true - description: |- - If false, the function will return immediately after the backup is completed, - without waiting for WAL to be archived. - This behavior is only useful with backup software that independently monitors WAL archiving. - Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. - By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is - enabled. - On a standby, this means that it will wait only when archive_mode = always. - If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger - an immediate segment switch. - type: boolean - type: object - pluginConfiguration: - description: Configuration parameters passed to the plugin managing - this backup - properties: - name: - description: Name is the name of the plugin managing this backup - type: string - parameters: - additionalProperties: - type: string - description: |- - Parameters are the configuration parameters passed to the backup - plugin for this backup - type: object - required: - - name - type: object - target: - description: |- - The policy to decide which instance should perform this backup. 
If empty, - it defaults to `cluster.spec.backup.target`. - Available options are empty string, `primary` and `prefer-standby`. - `primary` to have backups run always on primary instances, - `prefer-standby` to have backups run preferably on the most updated - standby, if available. - enum: - - primary - - prefer-standby - type: string - required: - - cluster - type: object - status: - description: |- - Most recently observed status of the backup. This data may not be up to - date. Populated by the system. Read-only. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - azureCredentials: - description: The credentials to use to upload data to Azure Blob Storage - properties: - connectionString: - description: The connection string to be used - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromAzureAD: - description: Use the Azure AD based authentication without providing - explicitly the keys. - type: boolean - storageAccount: - description: The storage account where to upload data - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageKey: - description: |- - The storage account key to be used in conjunction - with the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageSasToken: - description: |- - A shared-access-signature to be used in conjunction with - the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - backupId: - description: The ID of the Barman backup - type: string - backupLabelFile: - description: Backup label file content as returned by Postgres in - case of online (hot) backups - format: byte - type: string - backupName: - description: The Name of the Barman backup - type: string - beginLSN: - description: The starting xlog - type: string - beginWal: - description: The starting WAL - type: string - commandError: - description: The backup command output in case of error - type: string - commandOutput: - description: Unused. Retained for compatibility with old versions. - type: string - destinationPath: - description: |- - The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data. This may not be populated in case of errors. - type: string - encryption: - description: Encryption method required to S3 API - type: string - endLSN: - description: The ending xlog - type: string - endWal: - description: The ending WAL - type: string - endpointCA: - description: |- - EndpointCA store the CA bundle of the barman endpoint. - Useful when using self-signed certificates to avoid - errors with certificate issuer and barman-cloud-wal-archive. - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
- type: string - required: - - key - - name - type: object - endpointURL: - description: |- - Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - error: - description: The detected error - type: string - googleCredentials: - description: The credentials to use to upload data to Google Cloud - Storage - properties: - applicationCredentials: - description: The secret containing the Google Cloud Storage JSON - file with the credentials - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - gkeEnvironment: - description: |- - If set to true, will presume that it's running inside a GKE environment, - default to false. - type: boolean - type: object - instanceID: - description: Information to identify the instance where the backup - has been taken from - properties: - ContainerID: - description: The container ID - type: string - podName: - description: The pod name - type: string - type: object - method: - description: The backup method being used - type: string - online: - description: Whether the backup was online/hot (`true`) or offline/cold - (`false`) - type: boolean - phase: - description: The last backup status - type: string - pluginMetadata: - additionalProperties: - type: string - description: A map containing the plugin metadata - type: object - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromIAMRole: - description: Use the role based authentication without providing - explicitly the keys. - type: boolean - region: - description: The reference to the secret containing the region - name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - sessionToken: - description: The references to the session key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - serverName: - description: |- - The server name on S3, the cluster name is used if this - parameter is omitted - type: string - snapshotBackupStatus: - description: Status of the volumeSnapshot backup - properties: - elements: - description: The elements list, populated with the gathered volume - snapshots - items: - description: BackupSnapshotElementStatus is a volume snapshot - that is part of a volume snapshot method backup - properties: - name: - description: Name is the snapshot resource name - type: string - tablespaceName: - description: |- - TablespaceName is the name of the snapshotted tablespace. 
Only set
-                          when type is PG_TABLESPACE
-                        type: string
-                      type:
-                        description: Type is the role of the snapshot in the cluster,
-                          such as PG_DATA, PG_WAL and PG_TABLESPACE
-                        type: string
-                    required:
-                    - name
-                    - type
-                    type: object
-                  type: array
-              type: object
-            startedAt:
-              description: When the backup was started
-              format: date-time
-              type: string
-            stoppedAt:
-              description: When the backup was terminated
-              format: date-time
-              type: string
-            tablespaceMapFile:
-              description: Tablespace map file content as returned by Postgres in
-                case of online (hot) backups
-              format: byte
-              type: string
-          type: object
-      required:
-      - metadata
-      - spec
-      type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.17.3
-  name: clusterimagecatalogs.postgresql.cnpg.io
-spec:
-  group: postgresql.cnpg.io
-  names:
-    kind: ClusterImageCatalog
-    listKind: ClusterImageCatalogList
-    plural: clusterimagecatalogs
-    singular: clusterimagecatalog
-  scope: Cluster
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .metadata.creationTimestamp
-      name: Age
-      type: date
-    name: v1
-    schema:
-      openAPIV3Schema:
-        description: ClusterImageCatalog is the Schema for the clusterimagecatalogs
-          API
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: |-
-              Specification of the desired behavior of the ClusterImageCatalog.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-            properties:
-              images:
-                description: List of CatalogImages available in the catalog
-                items:
-                  description: CatalogImage defines the image and major version
-                  properties:
-                    image:
-                      description: The image reference
-                      type: string
-                    major:
-                      description: The PostgreSQL major version of the image. Must
-                        be unique within the catalog.
- minimum: 10 - type: integer - required: - - image - - major - type: object - maxItems: 8 - minItems: 1 - type: array - x-kubernetes-validations: - - message: Images must have unique major versions - rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) - required: - - images - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 - name: clusters.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Cluster - listKind: ClusterList - plural: clusters - singular: cluster - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - description: Number of instances - jsonPath: .status.instances - name: Instances - type: integer - - description: Number of ready instances - jsonPath: .status.readyInstances - name: Ready - type: integer - - description: Cluster current status - jsonPath: .status.phase - name: Status - type: string - - description: Primary pod - jsonPath: .status.currentPrimary - name: Primary - type: string - name: v1 - schema: - openAPIV3Schema: - description: Cluster is the Schema for the PostgreSQL API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the cluster. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - affinity: - description: Affinity/Anti-affinity rules for Pods - properties: - additionalPodAffinity: - description: AdditionalPodAffinity allows to specify pod affinity - terms to be passed to all the cluster's pods. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. 
- properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". 
- An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - additionalPodAntiAffinity: - description: |- - AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated - by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. 
- for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. 
- The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. 
due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. 
- The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - enablePodAntiAffinity: - description: |- - Activates anti-affinity for the pods. The operator will define pods - anti-affinity unless this field is explicitly set to false - type: boolean - nodeAffinity: - description: |- - NodeAffinity describes node affinity scheduling rules for the pod. 
- More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. 
- format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-type: atomic - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - nodeSelector: - additionalProperties: - type: string - description: |- - NodeSelector is map of key-value pairs used to define the nodes on which - the pods can run. 
- More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - type: object - podAntiAffinityType: - description: |- - PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be - considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or - "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are - added if all the existing nodes don't match the required pod anti-affinity rule. - More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - type: string - tolerations: - description: |- - Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run - on tainted nodes. - More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologyKey: - description: |- - TopologyKey to use for anti-affinity configuration. See k8s documentation - for more info on that - type: string - type: object - backup: - description: The configuration to be used for backups - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - azureCredentials: - description: The credentials to use to upload data to Azure - Blob Storage - properties: - connectionString: - description: The connection string to be used - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromAzureAD: - description: Use the Azure AD based authentication without - providing explicitly the keys. - type: boolean - storageAccount: - description: The storage account where to upload data - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
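The scheduling knobs documented above (`enablePodAntiAffinity`, `podAntiAffinityType`, `topologyKey`, `nodeSelector`, and `tolerations`) come together in the `affinity` stanza of a `Cluster` manifest. A minimal sketch, using a hypothetical cluster name, node label, and taint:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example            # hypothetical name
spec:
  instances: 3
  affinity:
    enablePodAntiAffinity: true
    podAntiAffinityType: required  # pods stay Pending if no node satisfies the rule
    topologyKey: kubernetes.io/hostname
    nodeSelector:
      workload/postgres: "true"    # hypothetical node label
    tolerations:
      - key: dedicated             # hypothetical taint
        operator: Equal
        value: postgres
        effect: NoSchedule
  storage:
    size: 1Gi
```

With `podAntiAffinityType: required`, instances that cannot be spread across distinct values of `topologyKey` remain Pending until matching nodes are added, as the field description above warns.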
- type: string - required: - - key - - name - type: object - storageKey: - description: |- - The storage account key to be used in conjunction - with the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageSasToken: - description: |- - A shared-access-signature to be used in conjunction with - the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - data: - description: |- - The configuration to be used to backup the data files - When not defined, base backups files will be stored uncompressed and may - be unencrypted in the object store, according to the bucket default - policy. - properties: - additionalCommandArgs: - description: |- - AdditionalCommandArgs represents additional arguments that can be appended - to the 'barman-cloud-backup' command-line invocation. These arguments - provide flexibility to customize the backup process further according to - specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-backup' command, to avoid potential errors or unintended - behavior during execution. - items: - type: string - type: array - compression: - description: |- - Compress a backup file (a tar file per tablespace) while streaming it - to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2`, and `snappy`. - enum: - - bzip2 - - gzip - - snappy - type: string - encryption: - description: |- - Whenever to force the encryption of files (if the bucket is - not already configured for that). - Allowed options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - enum: - - AES256 - - aws:kms - type: string - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - jobs: - description: |- - The number of parallel jobs to be used to upload the backup, defaults - to 2 - format: int32 - minimum: 1 - type: integer - type: object - destinationPath: - description: |- - The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - minLength: 1 - type: string - endpointCA: - description: |- - EndpointCA store the CA bundle of the barman endpoint. - Useful when using self-signed certificates to avoid - errors with certificate issuer and barman-cloud-wal-archive - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
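As a concrete illustration of the `azureCredentials` selectors just listed, the sketch below wires `storageAccount` and `storageKey` to keys of a hypothetical `azure-creds` Secret; the destination path format and all names are assumptions, not values from this patch:

```yaml
spec:
  backup:
    barmanObjectStore:
      destinationPath: "https://ACCOUNT.blob.core.windows.net/CONTAINER/folder"  # hypothetical
      azureCredentials:
        storageAccount:
          name: azure-creds          # hypothetical Secret
          key: AZURE_STORAGE_ACCOUNT
        storageKey:
          name: azure-creds
          key: AZURE_STORAGE_KEY
      data:
        compression: gzip            # one of gzip, bzip2, snappy
        jobs: 2
```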
- type: string - required: - - key - - name - type: object - endpointURL: - description: |- - Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - googleCredentials: - description: The credentials to use to upload data to Google - Cloud Storage - properties: - applicationCredentials: - description: The secret containing the Google Cloud Storage - JSON file with the credentials - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - gkeEnvironment: - description: |- - If set to true, will presume that it's running inside a GKE environment, - default to false. - type: boolean - type: object - historyTags: - additionalProperties: - type: string - description: |- - HistoryTags is a list of key value pairs that will be passed to the - Barman --history-tags option. - type: object - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromIAMRole: - description: Use the role based authentication without - providing explicitly the keys. - type: boolean - region: - description: The reference to the secret containing the - region name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - sessionToken: - description: The references to the session key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - serverName: - description: |- - The server name on S3, the cluster name is used if this - parameter is omitted - type: string - tags: - additionalProperties: - type: string - description: |- - Tags is a list of key value pairs that will be passed to the - Barman --tags option. - type: object - wal: - description: |- - The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed and may be - unencrypted in the object store, according to the bucket default policy. - properties: - archiveAdditionalCommandArgs: - description: |- - Additional arguments that can be appended to the 'barman-cloud-wal-archive' - command-line invocation. These arguments provide flexibility to customize - the WAL archive process further, according to specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended - behavior during execution. - items: - type: string - type: array - compression: - description: |- - Compress a WAL file before sending it to the object store. 
Available - options are empty string (no compression, default), `gzip`, `bzip2`, - `lz4`, `snappy`, `xz`, and `zstd`. - enum: - - bzip2 - - gzip - - lz4 - - snappy - - xz - - zstd - type: string - encryption: - description: |- - Whenever to force the encryption of files (if the bucket is - not already configured for that). - Allowed options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - enum: - - AES256 - - aws:kms - type: string - maxParallel: - description: |- - Number of WAL files to be either archived in parallel (when the - PostgreSQL instance is archiving to a backup object store) or - restored in parallel (when a PostgreSQL standby is fetching WAL - files from a recovery object store). If not specified, WAL files - will be processed one at a time. It accepts a positive integer as a - value - with 1 being the minimum accepted value. - minimum: 1 - type: integer - restoreAdditionalCommandArgs: - description: |- - Additional arguments that can be appended to the 'barman-cloud-wal-restore' - command-line invocation. These arguments provide flexibility to customize - the WAL restore process further, according to specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended - behavior during execution. - items: - type: string - type: array - type: object - required: - - destinationPath - type: object - retentionPolicy: - description: |- - RetentionPolicy is the retention policy to be used for backups - and WALs (i.e. '60d'). The retention policy is expressed in the form - of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - - days, weeks, months. - It's currently only applicable when using the BarmanObjectStore method. - pattern: ^[1-9][0-9]*[dwm]$ - type: string - target: - default: prefer-standby - description: |- - The policy to decide which instance should perform backups. Available - options are empty string, which will default to `prefer-standby` policy, - `primary` to have backups run always on primary instances, `prefer-standby` - to have backups run preferably on the most updated standby, if available. - enum: - - primary - - prefer-standby - type: string - volumeSnapshot: - description: VolumeSnapshot provides the configuration for the - execution of volume snapshot backups. - properties: - annotations: - additionalProperties: - type: string - description: Annotations key-value pairs that will be added - to .metadata.annotations snapshot resources. - type: object - className: - description: |- - ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. - It is the default class for the other types if no specific class is present - type: string - labels: - additionalProperties: - type: string - description: Labels are key-value pairs that will be added - to .metadata.labels snapshot resources. 
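Putting the preceding object-store fields together, here is a hedged `spec.backup` sketch for an S3-compatible endpoint; the bucket, endpoint, and Secret names are hypothetical:

```yaml
spec:
  backup:
    retentionPolicy: "30d"           # XXu, with u in [dwm]
    target: prefer-standby
    barmanObjectStore:
      destinationPath: "s3://backups/cluster-example"   # hypothetical bucket
      endpointURL: "https://s3.example.com"             # hypothetical endpoint
      s3Credentials:
        accessKeyId:
          name: aws-creds            # hypothetical Secret
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: aws-creds
          key: ACCESS_SECRET_KEY
      wal:
        compression: zstd            # one of gzip, bzip2, lz4, snappy, xz, zstd
        maxParallel: 4
```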
- type: object - online: - default: true - description: |- - Whether the default type of backup with volume snapshots is - online/hot (`true`, default) or offline/cold (`false`) - type: boolean - onlineConfiguration: - default: - immediateCheckpoint: false - waitForArchive: true - description: Configuration parameters to control the online/hot - backup with volume snapshots - properties: - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - waitForArchive: - default: true - description: |- - If false, the function will return immediately after the backup is completed, - without waiting for WAL to be archived. - This behavior is only useful with backup software that independently monitors WAL archiving. - Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. - By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is - enabled. - On a standby, this means that it will wait only when archive_mode = always. - If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger - an immediate segment switch. - type: boolean - type: object - snapshotOwnerReference: - default: none - description: SnapshotOwnerReference indicates the type of - owner reference the snapshot should have - enum: - - none - - cluster - - backup - type: string - tablespaceClassName: - additionalProperties: - type: string - description: |- - TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. - defaults to the PGDATA Snapshot Class, if set - type: object - walClassName: - description: WalClassName specifies the Snapshot Class to - be used for the PG_WAL PersistentVolumeClaim. - type: string - type: object - type: object - bootstrap: - description: Instructions to bootstrap this cluster - properties: - initdb: - description: Bootstrap the cluster via initdb - properties: - builtinLocale: - description: |- - Specifies the locale name when the builtin provider is used. - This option requires `localeProvider` to be set to `builtin`. - Available from PostgreSQL 17. - type: string - dataChecksums: - description: |- - Whether the `-k` option should be passed to initdb, - enabling checksums on data pages (default: `false`) - type: boolean - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - encoding: - description: The value to be passed as option `--encoding` - for initdb (default:`UTF8`) - type: string - icuLocale: - description: |- - Specifies the ICU locale when the ICU provider is used. - This option requires `localeProvider` to be set to `icu`. - Available from PostgreSQL 15. - type: string - icuRules: - description: |- - Specifies additional collation rules to customize the behavior of the default collation. - This option requires `localeProvider` to be set to `icu`. - Available from PostgreSQL 16. 
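The `volumeSnapshot` fields described above translate into a short stanza; the class name is a placeholder for whatever `VolumeSnapshotClass` exists in the target cluster:

```yaml
spec:
  backup:
    volumeSnapshot:
      className: csi-snapclass       # hypothetical VolumeSnapshotClass
      online: true
      onlineConfiguration:
        immediateCheckpoint: true
        waitForArchive: true
      snapshotOwnerReference: backup
```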
- type: string - import: - description: |- - Bootstraps the new cluster by importing data from an existing PostgreSQL - instance using logical backup (`pg_dump` and `pg_restore`) - properties: - databases: - description: The databases to import - items: - type: string - type: array - pgDumpExtraOptions: - description: |- - List of custom options to pass to the `pg_dump` command. IMPORTANT: - Use these options with caution and at your own risk, as the operator - does not validate their content. Be aware that certain options may - conflict with the operator's intended functionality or design. - items: - type: string - type: array - pgRestoreExtraOptions: - description: |- - List of custom options to pass to the `pg_restore` command. IMPORTANT: - Use these options with caution and at your own risk, as the operator - does not validate their content. Be aware that certain options may - conflict with the operator's intended functionality or design. - items: - type: string - type: array - postImportApplicationSQL: - description: |- - List of SQL queries to be executed as a superuser in the application - database right after is imported - to be used with extreme care - (by default empty). Only available in microservice type. - items: - type: string - type: array - roles: - description: The roles to import - items: - type: string - type: array - schemaOnly: - description: |- - When set to true, only the `pre-data` and `post-data` sections of - `pg_restore` are invoked, avoiding data import. Default: `false`. - type: boolean - source: - description: The source of the import - properties: - externalCluster: - description: The name of the externalCluster used - for import - type: string - required: - - externalCluster - type: object - type: - description: The import type. Can be `microservice` or - `monolith`. - enum: - - microservice - - monolith - type: string - required: - - databases - - source - - type - type: object - locale: - description: Sets the default collation order and character - classification in the new database. - type: string - localeCType: - description: The value to be passed as option `--lc-ctype` - for initdb (default:`C`) - type: string - localeCollate: - description: The value to be passed as option `--lc-collate` - for initdb (default:`C`) - type: string - localeProvider: - description: |- - This option sets the locale provider for databases created in the new cluster. - Available from PostgreSQL 16. - type: string - options: - description: |- - The list of options that must be passed to initdb when creating the cluster. - Deprecated: This could lead to inconsistent configurations, - please use the explicit provided parameters instead. - If defined, explicit values will be ignored. - items: - type: string - type: array - owner: - description: |- - Name of the owner of the database in the instance to be used - by applications. Defaults to the value of the `database` key. - type: string - postInitApplicationSQL: - description: |- - List of SQL queries to be executed as a superuser in the application - database right after the cluster has been created - to be used with extreme care - (by default empty) - items: - type: string - type: array - postInitApplicationSQLRefs: - description: |- - List of references to ConfigMaps or Secrets containing SQL files - to be executed as a superuser in the application database right after - the cluster has been created. The references are processed in a specific order: - first, all Secrets are processed, followed by all ConfigMaps. 
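A sketch of the `initdb` import bootstrap described above, in `microservice` mode. The database, cluster, and connection details are illustrative assumptions (note the `externalClusters` entry, whose full schema appears later in this CRD):

```yaml
spec:
  bootstrap:
    initdb:
      database: app
      owner: app
      dataChecksums: true
      import:
        type: microservice
        databases:
          - sourcedb                 # hypothetical source database
        source:
          externalCluster: source-cluster
  externalClusters:
    - name: source-cluster           # hypothetical source definition
      connectionParameters:
        host: source.example.com
        user: postgres
        dbname: postgres
      password:
        name: source-superuser       # hypothetical Secret
        key: password
```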
- Within each group, the processing order follows the sequence specified - in their respective arrays. - (by default empty) - properties: - configMapRefs: - description: ConfigMapRefs holds a list of references - to ConfigMaps - items: - description: |- - ConfigMapKeySelector contains enough information to let you locate - the key of a ConfigMap - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - secretRefs: - description: SecretRefs holds a list of references to - Secrets - items: - description: |- - SecretKeySelector contains enough information to let you locate - the key of a Secret - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - type: object - postInitSQL: - description: |- - List of SQL queries to be executed as a superuser in the `postgres` - database right after the cluster has been created - to be used with extreme care - (by default empty) - items: - type: string - type: array - postInitSQLRefs: - description: |- - List of references to ConfigMaps or Secrets containing SQL files - to be executed as a superuser in the `postgres` database right after - the cluster has been created. The references are processed in a specific order: - first, all Secrets are processed, followed by all ConfigMaps. - Within each group, the processing order follows the sequence specified - in their respective arrays. - (by default empty) - properties: - configMapRefs: - description: ConfigMapRefs holds a list of references - to ConfigMaps - items: - description: |- - ConfigMapKeySelector contains enough information to let you locate - the key of a ConfigMap - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - secretRefs: - description: SecretRefs holds a list of references to - Secrets - items: - description: |- - SecretKeySelector contains enough information to let you locate - the key of a Secret - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - type: object - postInitTemplateSQL: - description: |- - List of SQL queries to be executed as a superuser in the `template1` - database right after the cluster has been created - to be used with extreme care - (by default empty) - items: - type: string - type: array - postInitTemplateSQLRefs: - description: |- - List of references to ConfigMaps or Secrets containing SQL files - to be executed as a superuser in the `template1` database right after - the cluster has been created. The references are processed in a specific order: - first, all Secrets are processed, followed by all ConfigMaps. - Within each group, the processing order follows the sequence specified - in their respective arrays. - (by default empty) - properties: - configMapRefs: - description: ConfigMapRefs holds a list of references - to ConfigMaps - items: - description: |- - ConfigMapKeySelector contains enough information to let you locate - the key of a ConfigMap - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
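The ordering rule for SQL references (all Secrets first, then all ConfigMaps, each group in array order) is easy to see in a sketch; the Secret and ConfigMap names are hypothetical:

```yaml
spec:
  bootstrap:
    initdb:
      database: app
      owner: app
      postInitApplicationSQLRefs:
        secretRefs:                  # processed first, in array order
          - name: seed-sql           # hypothetical Secret
            key: 01-schema.sql
        configMapRefs:               # processed after all Secrets
          - name: seed-sql-cm        # hypothetical ConfigMap
            key: 02-data.sql
```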
- type: string - required: - - key - - name - type: object - type: array - secretRefs: - description: SecretRefs holds a list of references to - Secrets - items: - description: |- - SecretKeySelector contains enough information to let you locate - the key of a Secret - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - type: object - secret: - description: |- - Name of the secret containing the initial credentials for the - owner of the user database. If empty a new secret will be - created from scratch - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - walSegmentSize: - description: |- - The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` - option for initdb (default: empty, resulting in PostgreSQL default: 16MB) - maximum: 1024 - minimum: 1 - type: integer - type: object - x-kubernetes-validations: - - message: builtinLocale is only available when localeProvider - is set to `builtin` - rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' - - message: icuLocale is only available when localeProvider is - set to `icu` - rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' - - message: icuRules is only available when localeProvider is set - to `icu` - rule: '!has(self.icuRules) || self.localeProvider == ''icu''' - pg_basebackup: - description: |- - Bootstrap the cluster taking a physical backup of another compatible - PostgreSQL instance - properties: - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - owner: - description: |- - Name of the owner of the database in the instance to be used - by applications. Defaults to the value of the `database` key. - type: string - secret: - description: |- - Name of the secret containing the initial credentials for the - owner of the user database. If empty a new secret will be - created from scratch - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - source: - description: The name of the server of which we need to take - a physical backup - minLength: 1 - type: string - required: - - source - type: object - recovery: - description: Bootstrap the cluster from a backup - properties: - backup: - description: |- - The backup object containing the physical base backup from which to - initiate the recovery procedure. - Mutually exclusive with `source` and `volumeSnapshots`. - properties: - endpointCA: - description: |- - EndpointCA store the CA bundle of the barman endpoint. - Useful when using self-signed certificates to avoid - errors with certificate issuer and barman-cloud-wal-archive. - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - name: - description: Name of the referent. - type: string - required: - - name - type: object - database: - description: 'Name of the database used by the application. - Default: `app`.' - type: string - owner: - description: |- - Name of the owner of the database in the instance to be used - by applications. Defaults to the value of the `database` key. - type: string - recoveryTarget: - description: |- - By default, the recovery process applies all the available - WAL files in the archive (full recovery). 
However, you can also - end the recovery as soon as a consistent state is reached or - recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, - as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). - More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET - properties: - backupID: - description: |- - The ID of the backup from which to start the recovery process. - If empty (default) the operator will automatically detect the backup - based on targetTime or targetLSN if specified. Otherwise use the - latest available backup in chronological order. - type: string - exclusive: - description: |- - Set the target to be exclusive. If omitted, defaults to false, so that - in Postgres, `recovery_target_inclusive` will be true - type: boolean - targetImmediate: - description: End recovery as soon as a consistent state - is reached - type: boolean - targetLSN: - description: The target LSN (Log Sequence Number) - type: string - targetName: - description: |- - The target name (to be previously created - with `pg_create_restore_point`) - type: string - targetTLI: - description: The target timeline ("latest" or a positive - integer) - type: string - targetTime: - description: The target time as a timestamp in the RFC3339 - standard - type: string - targetXID: - description: The target transaction ID - type: string - type: object - secret: - description: |- - Name of the secret containing the initial credentials for the - owner of the user database. If empty a new secret will be - created from scratch - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - source: - description: |- - The external cluster whose backup we will restore. This is also - used as the name of the folder under which the backup is stored, - so it must be set to the name of the source cluster - Mutually exclusive with `backup`. - type: string - volumeSnapshots: - description: |- - The static PVC data source(s) from which to initiate the - recovery procedure. Currently supporting `VolumeSnapshot` - and `PersistentVolumeClaim` resources that map an existing - PVC group, compatible with CloudNativePG, and taken with - a cold backup copy on a fenced Postgres instance (limitation - which will be removed in the future when online backup - will be implemented). - Mutually exclusive with `backup`. - properties: - storage: - description: Configuration of the storage of the instances - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - tablespaceStorage: - additionalProperties: - description: |- - TypedLocalObjectReference contains enough information to let you locate the - typed referenced object inside the same namespace. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. 
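For the recovery bootstrap just described, a point-in-time recovery sketch from an object store; the `origin` external cluster, bucket, and target time are illustrative assumptions:

```yaml
spec:
  bootstrap:
    recovery:
      source: origin                 # must match an externalClusters entry
      recoveryTarget:
        targetTime: "2024-09-25T08:00:00Z"   # RFC3339, hypothetical
  externalClusters:
    - name: origin
      barmanObjectStore:
        destinationPath: "s3://backups/origin"   # hypothetical
        s3Credentials:
          accessKeyId:
            name: aws-creds          # hypothetical Secret
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: aws-creds
            key: ACCESS_SECRET_KEY
```

Omitting `recoveryTarget` yields a full recovery; setting `backupID` pins the base backup instead of letting the operator detect it from `targetTime` or `targetLSN`.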
- type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - description: Configuration of the storage for PostgreSQL - tablespaces - type: object - walStorage: - description: Configuration of the storage for PostgreSQL - WAL (Write-Ahead Log) - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - required: - - storage - type: object - type: object - type: object - certificates: - description: The configuration for the CA and related certificates - properties: - clientCASecret: - description: |- - The secret containing the Client CA certificate. If not defined, a new secret will be created - with a self-signed CA and will be used to generate all the client certificates.
-
- Contains:
-
- - `ca.crt`: CA that should be used to validate the client certificates,
- used as `ssl_ca_file` of all the instances.
- - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
- this can be omitted.
- type: string
- replicationTLSSecret:
- description: |-
- The secret of type kubernetes.io/tls containing the client certificate to authenticate as
- the `streaming_replica` user.
- If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
- created using the provided CA.
- type: string
- serverAltDNSNames:
- description: The list of the server alternative DNS names to be
- added to the generated server TLS certificates, when required.
- items:
- type: string
- type: array
- serverCASecret:
- description: |-
- The secret containing the Server CA certificate. If not defined, a new secret will be created
- with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
-
- Contains:
-
- - `ca.crt`: CA that should be used to validate the server certificate,
- used as `sslrootcert` in client connection strings.
- - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
- this can be omitted.
- type: string - serverTLSSecret: - description: |- - The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as - `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. - If not defined, ServerCASecret must provide also `ca.key` and a new secret will be - created using the provided CA. - type: string - type: object - description: - description: Description of this PostgreSQL cluster - type: string - enablePDB: - default: true - description: |- - Manage the `PodDisruptionBudget` resources within the cluster. When - configured as `true` (default setting), the pod disruption budgets - will safeguard the primary node from being terminated. Conversely, - setting it to `false` will result in the absence of any - `PodDisruptionBudget` resource, permitting the shutdown of all nodes - hosting the PostgreSQL cluster. This latter configuration is - advisable for any PostgreSQL cluster employed for - development/staging purposes. - type: boolean - enableSuperuserAccess: - default: false - description: |- - When this option is enabled, the operator will use the `SuperuserSecret` - to update the `postgres` user password (if the secret is - not present, the operator will automatically create one). When this - option is disabled, the operator will ignore the `SuperuserSecret` content, delete - it when automatically created, and then blank the password of the `postgres` - user by setting it to `NULL`. Disabled by default. - type: boolean - env: - description: |- - Env follows the Env format to pass environment variables - to the pods created in the cluster - items: - description: EnvVar represents an environment variable present in - a Container. - properties: - name: - description: Name of the environment variable. Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's value. Cannot - be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath is - written in terms of, defaults to "v1". 
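Tying together the certificate secrets and environment-variable fields in this part of the spec, a hedged sketch; all secret names and the DNS name are placeholders:

```yaml
spec:
  certificates:
    serverCASecret: server-ca        # hypothetical; holds ca.crt (and ca.key when no serverTLSSecret is given)
    serverTLSSecret: server-tls      # kubernetes.io/tls Secret
    clientCASecret: client-ca
    replicationTLSSecret: streaming-replica-tls
    serverAltDNSNames:
      - db.example.com               # hypothetical DNS name
  env:
    - name: TZ
      value: Europe/Rome
  envFrom:
    - secretRef:
        name: extra-env              # hypothetical Secret
```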
- type: string - fieldPath: - description: Path of the field to select in the specified - API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the exposed - resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - envFrom: - description: |- - EnvFrom follows the EnvFrom format to pass environment variables - sources to the pods to be used by Env - items: - description: EnvFromSource represents the source of a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend to each key in - the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - ephemeralVolumeSource: - description: EphemeralVolumeSource allows the user to configure the - source of ephemeral volumes. - properties: - volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. 
- The pod in which this EphemeralVolumeSource is embedded will be the - owner of the PVC, i.e. the PVC will be deleted together with the - pod. The name of the PVC will be `-` where - `` is the name from the `PodSpec.Volumes` array - entry. Pod validation will reject the pod if the concatenated name - is not valid for a PVC (for example, too long). - - An existing PVC with that name that is not owned by the pod - will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC is - meant to be used by the pod, the PVC has to updated with an - owner reference to the pod once the pod exists. Normally - this should not be necessary, but it may be useful when - manually reconstructing a broken cluster. - - This field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. - - Required, must not be nil. - properties: - metadata: - description: |- - May contain labels and annotations that will be copied into the PVC - when creating it. No other fields are allowed and will be rejected during - validation. - type: object - spec: - description: |- - The specification for the PersistentVolumeClaim. The entire content is - copied unchanged into the PVC that gets created from this - template. The same fields as in a PersistentVolumeClaim - are also valid here. - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. 
For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes to - consider for binding. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to the - PersistentVolume backing this claim. 
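The `volumeClaimTemplate` described above accepts the usual PersistentVolumeClaim spec; a minimal sketch with a hypothetical storage class:

```yaml
spec:
  ephemeralVolumeSource:
    volumeClaimTemplate:
      metadata:
        labels:
          app.kubernetes.io/part-of: cluster-example   # copied into the generated PVC
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: fast-local                   # hypothetical StorageClass
        resources:
          requests:
            storage: 1Gi
```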
- type: string - type: object - required: - - spec - type: object - type: object - ephemeralVolumesSizeLimit: - description: |- - EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral - volumes - properties: - shm: - anyOf: - - type: integer - - type: string - description: Shm is the size limit of the shared memory volume - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - temporaryData: - anyOf: - - type: integer - - type: string - description: TemporaryData is the size limit of the temporary - data volume - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - externalClusters: - description: The list of external clusters which are used in the configuration - items: - description: |- - ExternalCluster represents the connection parameters to an - external cluster which is used in the other sections of the configuration - properties: - barmanObjectStore: - description: The configuration for the barman-cloud tool suite - properties: - azureCredentials: - description: The credentials to use to upload data to Azure - Blob Storage - properties: - connectionString: - description: The connection string to be used - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromAzureAD: - description: Use the Azure AD based authentication without - providing explicitly the keys. - type: boolean - storageAccount: - description: The storage account where to upload data - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageKey: - description: |- - The storage account key to be used in conjunction - with the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - storageSasToken: - description: |- - A shared-access-signature to be used in conjunction with - the storage account name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - data: - description: |- - The configuration to be used to backup the data files - When not defined, base backups files will be stored uncompressed and may - be unencrypted in the object store, according to the bucket default - policy. - properties: - additionalCommandArgs: - description: |- - AdditionalCommandArgs represents additional arguments that can be appended - to the 'barman-cloud-backup' command-line invocation. These arguments - provide flexibility to customize the backup process further according to - specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-backup' command, to avoid potential errors or unintended - behavior during execution. 
- items: - type: string - type: array - compression: - description: |- - Compress a backup file (a tar file per tablespace) while streaming it - to the object store. Available options are empty string (no - compression, default), `gzip`, `bzip2`, and `snappy`. - enum: - - bzip2 - - gzip - - snappy - type: string - encryption: - description: |- - Whenever to force the encryption of files (if the bucket is - not already configured for that). - Allowed options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - enum: - - AES256 - - aws:kms - type: string - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - jobs: - description: |- - The number of parallel jobs to be used to upload the backup, defaults - to 2 - format: int32 - minimum: 1 - type: integer - type: object - destinationPath: - description: |- - The path where to store the backup (i.e. s3://bucket/path/to/folder) - this path, with different destination folders, will be used for WALs - and for data - minLength: 1 - type: string - endpointCA: - description: |- - EndpointCA store the CA bundle of the barman endpoint. - Useful when using self-signed certificates to avoid - errors with certificate issuer and barman-cloud-wal-archive - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - endpointURL: - description: |- - Endpoint to be used to upload data to the cloud, - overriding the automatic endpoint discovery - type: string - googleCredentials: - description: The credentials to use to upload data to Google - Cloud Storage - properties: - applicationCredentials: - description: The secret containing the Google Cloud - Storage JSON file with the credentials - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - gkeEnvironment: - description: |- - If set to true, will presume that it's running inside a GKE environment, - default to false. - type: boolean - type: object - historyTags: - additionalProperties: - type: string - description: |- - HistoryTags is a list of key value pairs that will be passed to the - Barman --history-tags option. - type: object - s3Credentials: - description: The credentials to use to upload data to S3 - properties: - accessKeyId: - description: The reference to the access key id - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - inheritFromIAMRole: - description: Use the role based authentication without - providing explicitly the keys. - type: boolean - region: - description: The reference to the secret containing - the region name - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - secretAccessKey: - description: The reference to the secret access key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. 
- type: string - required: - - key - - name - type: object - sessionToken: - description: The references to the session key - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: object - serverName: - description: |- - The server name on S3, the cluster name is used if this - parameter is omitted - type: string - tags: - additionalProperties: - type: string - description: |- - Tags is a list of key value pairs that will be passed to the - Barman --tags option. - type: object - wal: - description: |- - The configuration for the backup of the WAL stream. - When not defined, WAL files will be stored uncompressed and may be - unencrypted in the object store, according to the bucket default policy. - properties: - archiveAdditionalCommandArgs: - description: |- - Additional arguments that can be appended to the 'barman-cloud-wal-archive' - command-line invocation. These arguments provide flexibility to customize - the WAL archive process further, according to specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended - behavior during execution. - items: - type: string - type: array - compression: - description: |- - Compress a WAL file before sending it to the object store. Available - options are empty string (no compression, default), `gzip`, `bzip2`, - `lz4`, `snappy`, `xz`, and `zstd`. - enum: - - bzip2 - - gzip - - lz4 - - snappy - - xz - - zstd - type: string - encryption: - description: |- - Whenever to force the encryption of files (if the bucket is - not already configured for that). - Allowed options are empty string (use the bucket policy, default), - `AES256` and `aws:kms` - enum: - - AES256 - - aws:kms - type: string - maxParallel: - description: |- - Number of WAL files to be either archived in parallel (when the - PostgreSQL instance is archiving to a backup object store) or - restored in parallel (when a PostgreSQL standby is fetching WAL - files from a recovery object store). If not specified, WAL files - will be processed one at a time. It accepts a positive integer as a - value - with 1 being the minimum accepted value. - minimum: 1 - type: integer - restoreAdditionalCommandArgs: - description: |- - Additional arguments that can be appended to the 'barman-cloud-wal-restore' - command-line invocation. These arguments provide flexibility to customize - the WAL restore process further, according to specific requirements or configurations. - - Example: - In a scenario where specialized backup options are required, such as setting - a specific timeout or defining custom behavior, users can use this field - to specify additional command arguments. - - Note: - It's essential to ensure that the provided arguments are valid and supported - by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended - behavior during execution. 
- items: - type: string - type: array - type: object - required: - - destinationPath - type: object - connectionParameters: - additionalProperties: - type: string - description: The list of connection parameters, such as dbname, - host, username, etc - type: object - name: - description: The server name, required - type: string - password: - description: |- - The reference to the password to be used to connect to the server. - If a password is provided, CloudNativePG creates a PostgreSQL - passfile at `/controller/external/NAME/pass` (where "NAME" is the - cluster's name). This passfile is automatically referenced in the - connection string when establishing a connection to the remote - PostgreSQL server from the current PostgreSQL `Cluster`. This ensures - secure and efficient password management for external clusters. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - plugin: - description: |- - The configuration of the plugin that is taking care - of WAL archiving and backups for this external cluster - properties: - enabled: - default: true - description: Enabled is true if this plugin will be used - type: boolean - isWALArchiver: - default: false - description: |- - Only one plugin can be declared as WALArchiver. - Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. - type: boolean - name: - description: Name is the plugin name - type: string - parameters: - additionalProperties: - type: string - description: Parameters is the configuration of the plugin - type: object - required: - - name - type: object - sslCert: - description: |- - The reference to an SSL certificate to be used to connect to this - instance - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - sslKey: - description: |- - The reference to an SSL private key to be used to connect to this - instance - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - sslRootCert: - description: |- - The reference to an SSL CA public key to be used to connect to this - instance - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - required: - - name - type: object - type: array - failoverDelay: - default: 0 - description: |- - The amount of time (in seconds) to wait before triggering a failover - after the primary PostgreSQL instance in the cluster was detected - to be unhealthy - format: int32 - type: integer - imageCatalogRef: - description: Defines the major PostgreSQL version we want to use within - an ImageCatalog - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - major: - description: The major version of PostgreSQL we want to use from - the ImageCatalog - type: integer - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - major - - name - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: Only image catalogs are supported - rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' - - message: Only image catalogs are supported - rule: self.apiGroup == 'postgresql.cnpg.io' - imageName: - description: |- - Name of the container image, supporting both tags (`:`) - and digests for deterministic and repeatable deployments - (`:@sha256:`) - type: string - imagePullPolicy: - description: |- - Image pull policy. - One of `Always`, `Never` or `IfNotPresent`. - If not defined, it defaults to `IfNotPresent`. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - imagePullSecrets: - description: The list of pull secrets to be used to pull the images - items: - description: |- - LocalObjectReference contains enough information to let you locate a - local object with a known type inside the same namespace - properties: - name: - description: Name of the referent. 
- type: string - required: - - name - type: object - type: array - inheritedMetadata: - description: Metadata that will be inherited by all objects related - to the Cluster - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - type: object - instances: - default: 1 - description: Number of instances required in the cluster - minimum: 1 - type: integer - livenessProbeTimeout: - description: |- - LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance - to successfully respond to the liveness probe (default 30). - The Liveness probe failure threshold is derived from this value using the formula: - ceiling(livenessProbe / 10). - format: int32 - type: integer - logLevel: - default: info - description: 'The instances'' log level, one of the following values: - error, warning, info (default), debug, trace' - enum: - - error - - warning - - info - - debug - - trace - type: string - managed: - description: The configuration that is used by the portions of PostgreSQL - that are managed by the instance manager - properties: - roles: - description: Database roles managed by the `Cluster` - items: - description: |- - RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role - with the additional field Ensure specifying whether to ensure the presence or - absence of the role in the database - - The defaults of the CREATE ROLE command are applied - Reference: https://www.postgresql.org/docs/current/sql-createrole.html - properties: - bypassrls: - description: |- - Whether a role bypasses every row-level security (RLS) policy. - Default is `false`. - type: boolean - comment: - description: Description of the role - type: string - connectionLimit: - default: -1 - description: |- - If the role can log in, this specifies how many concurrent - connections the role can make. `-1` (the default) means no limit. - format: int64 - type: integer - createdb: - description: |- - When set to `true`, the role being defined will be allowed to create - new databases. Specifying `false` (default) will deny a role the - ability to create databases. - type: boolean - createrole: - description: |- - Whether the role will be permitted to create, alter, drop, comment - on, change the security label for, and grant or revoke membership in - other roles. Default is `false`. - type: boolean - disablePassword: - description: DisablePassword indicates that a role's password - should be set to NULL in Postgres - type: boolean - ensure: - default: present - description: Ensure the role is `present` or `absent` - - defaults to "present" - enum: - - present - - absent - type: string - inRoles: - description: |- - List of one or more existing roles to which this role will be - immediately added as a new member. Default empty. - items: - type: string - type: array - inherit: - default: true - description: |- - Whether a role "inherits" the privileges of roles it is a member of. - Defaults is `true`. - type: boolean - login: - description: |- - Whether the role is allowed to log in. A role having the `login` - attribute can be thought of as a user. Roles without this attribute - are useful for managing database privileges, but are not users in - the usual sense of the word. Default is `false`. 
- type: boolean - name: - description: Name of the role - type: string - passwordSecret: - description: |- - Secret containing the password of the role (if present) - If null, the password will be ignored unless DisablePassword is set - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - replication: - description: |- - Whether a role is a replication role. A role must have this - attribute (or be a superuser) in order to be able to connect to the - server in replication mode (physical or logical replication) and in - order to be able to create or drop replication slots. A role having - the `replication` attribute is a very highly privileged role, and - should only be used on roles actually used for replication. Default - is `false`. - type: boolean - superuser: - description: |- - Whether the role is a `superuser` who can override all access - restrictions within the database - superuser status is dangerous and - should be used only when really needed. You must yourself be a - superuser to create a new superuser. Defaults is `false`. - type: boolean - validUntil: - description: |- - Date and time after which the role's password is no longer valid. - When omitted, the password will never expire (default). - format: date-time - type: string - required: - - name - type: object - type: array - services: - description: Services roles managed by the `Cluster` - properties: - additional: - description: Additional is a list of additional managed services - specified by the user. - items: - description: |- - ManagedService represents a specific service managed by the cluster. - It includes the type of service and its associated template specification. - properties: - selectorType: - description: |- - SelectorType specifies the type of selectors that the service will have. - Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. - enum: - - rw - - r - - ro - type: string - serviceTemplate: - description: ServiceTemplate is the template specification - for the service. - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - name: - description: The name of the resource. Only - supported for certain types - type: string - type: object - spec: - description: |- - Specification of the desired behavior of the service. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - allocateLoadBalancerNodePorts: - description: |- - allocateLoadBalancerNodePorts defines if NodePorts will be automatically - allocated for services with type LoadBalancer. Default is "true". 
It - may be set to "false" if the cluster load-balancer does not rely on - NodePorts. If the caller requests specific NodePorts (by specifying a - value), those requests will be respected, regardless of this field. - This field may only be set for services with type LoadBalancer and will - be cleared if the type is changed to any other type. - type: boolean - clusterIP: - description: |- - clusterIP is the IP address of the service and is usually assigned - randomly. If an address is specified manually, is in-range (as per - system configuration), and is not in use, it will be allocated to the - service; otherwise creation of the service will fail. This field may not - be changed through updates unless the type field is also being changed - to ExternalName (which requires this field to be blank) or the type - field is being changed from ExternalName (in which case this field may - optionally be specified, as describe above). Valid values are "None", - empty string (""), or a valid IP address. Setting this to "None" makes a - "headless service" (no virtual IP), which is useful when direct endpoint - connections are preferred and proxying is not required. Only applies to - types ClusterIP, NodePort, and LoadBalancer. If this field is specified - when creating a Service of type ExternalName, creation will fail. This - field will be wiped when updating a Service to type ExternalName. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - type: string - clusterIPs: - description: |- - ClusterIPs is a list of IP addresses assigned to this service, and are - usually assigned randomly. If an address is specified manually, is - in-range (as per system configuration), and is not in use, it will be - allocated to the service; otherwise creation of the service will fail. - This field may not be changed through updates unless the type field is - also being changed to ExternalName (which requires this field to be - empty) or the type field is being changed from ExternalName (in which - case this field may optionally be specified, as describe above). Valid - values are "None", empty string (""), or a valid IP address. Setting - this to "None" makes a "headless service" (no virtual IP), which is - useful when direct endpoint connections are preferred and proxying is - not required. Only applies to types ClusterIP, NodePort, and - LoadBalancer. If this field is specified when creating a Service of type - ExternalName, creation will fail. This field will be wiped when updating - a Service to type ExternalName. If this field is not specified, it will - be initialized from the clusterIP field. If this field is specified, - clients must ensure that clusterIPs[0] and clusterIP have the same - value. - - This field may hold a maximum of two entries (dual-stack IPs, in either order). - These IPs must correspond to the values of the ipFamilies field. Both - clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - items: - type: string - type: array - x-kubernetes-list-type: atomic - externalIPs: - description: |- - externalIPs is a list of IP addresses for which nodes in the cluster - will also accept traffic for this service. These IPs are not managed by - Kubernetes. The user is responsible for ensuring that traffic arrives - at a node with this IP. 
A common example is external load-balancers - that are not part of the Kubernetes system. - items: - type: string - type: array - x-kubernetes-list-type: atomic - externalName: - description: |- - externalName is the external reference that discovery mechanisms will - return as an alias for this service (e.g. a DNS CNAME record). No - proxying will be involved. Must be a lowercase RFC-1123 hostname - (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". - type: string - externalTrafficPolicy: - description: |- - externalTrafficPolicy describes how nodes distribute service traffic they - receive on one of the Service's "externally-facing" addresses (NodePorts, - ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure - the service in a way that assumes that external load balancers will take care - of balancing the service traffic between nodes, and so each node will deliver - traffic only to the node-local endpoints of the service, without masquerading - the client source IP. (Traffic mistakenly sent to a node with no endpoints will - be dropped.) The default value, "Cluster", uses the standard behavior of - routing to all endpoints evenly (possibly modified by topology and other - features). Note that traffic sent to an External IP or LoadBalancer IP from - within the cluster will always get "Cluster" semantics, but clients sending to - a NodePort from within the cluster may need to take traffic policy into account - when picking a node. - type: string - healthCheckNodePort: - description: |- - healthCheckNodePort specifies the healthcheck nodePort for the service. - This only applies when type is set to LoadBalancer and - externalTrafficPolicy is set to Local. If a value is specified, is - in-range, and is not in use, it will be used. If not specified, a value - will be automatically allocated. External systems (e.g. load-balancers) - can use this port to determine if a given node holds endpoints for this - service or not. If this field is specified when creating a Service - which does not need it, creation will fail. This field will be wiped - when updating a Service to no longer need it (e.g. changing type). - This field cannot be updated once set. - format: int32 - type: integer - internalTrafficPolicy: - description: |- - InternalTrafficPolicy describes how nodes distribute service traffic they - receive on the ClusterIP. If set to "Local", the proxy will assume that pods - only want to talk to endpoints of the service on the same node as the pod, - dropping the traffic if there are no local endpoints. The default value, - "Cluster", uses the standard behavior of routing to all endpoints evenly - (possibly modified by topology and other features). - type: string - ipFamilies: - description: |- - IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - service. This field is usually assigned automatically based on cluster - configuration and the ipFamilyPolicy field. If this field is specified - manually, the requested family is available in the cluster, - and ipFamilyPolicy allows it, it will be used; otherwise creation of - the service will fail. This field is conditionally mutable: it allows - for adding or removing a secondary IP family, but it does not allow - changing the primary IP family of the Service. Valid values are "IPv4" - and "IPv6". This field only applies to Services of types ClusterIP, - NodePort, and LoadBalancer, and does apply to "headless" services. 
- This field will be wiped when updating a Service to type ExternalName. - - This field may hold a maximum of two entries (dual-stack families, in - either order). These families must correspond to the values of the - clusterIPs field, if specified. Both clusterIPs and ipFamilies are - governed by the ipFamilyPolicy field. - items: - description: |- - IPFamily represents the IP Family (IPv4 or IPv6). This type is used - to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). - type: string - type: array - x-kubernetes-list-type: atomic - ipFamilyPolicy: - description: |- - IPFamilyPolicy represents the dual-stack-ness requested or required by - this Service. If there is no value provided, then this field will be set - to SingleStack. Services can be "SingleStack" (a single IP family), - "PreferDualStack" (two IP families on dual-stack configured clusters or - a single IP family on single-stack clusters), or "RequireDualStack" - (two IP families on dual-stack configured clusters, otherwise fail). The - ipFamilies and clusterIPs fields depend on the value of this field. This - field will be wiped when updating a service to type ExternalName. - type: string - loadBalancerClass: - description: |- - loadBalancerClass is the class of the load balancer implementation this Service belongs to. - If specified, the value of this field must be a label-style identifier, with an optional prefix, - e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. - This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load - balancer implementation is used, today this is typically done through the cloud provider integration, - but should apply for any default implementation. If set, it is assumed that a load balancer - implementation is watching for Services with a matching class. Any default load balancer - implementation (e.g. cloud providers) should ignore Services that set this field. - This field can only be set when creating or updating a Service to type 'LoadBalancer'. - Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. - type: string - loadBalancerIP: - description: |- - Only applies to Service Type: LoadBalancer. - This feature depends on whether the underlying cloud-provider supports specifying - the loadBalancerIP when a load balancer is created. - This field will be ignored if the cloud-provider does not support the feature. - Deprecated: This field was under-specified and its meaning varies across implementations. - Using it is non-portable and it may not support dual-stack. - Users are encouraged to use implementation-specific annotations when available. - type: string - loadBalancerSourceRanges: - description: |- - If specified and supported by the platform, this will restrict traffic through the cloud-provider - load-balancer will be restricted to the specified client IPs. This field will be ignored if the - cloud-provider does not support the feature." - More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ - items: - type: string - type: array - x-kubernetes-list-type: atomic - ports: - description: |- - The list of ports that are exposed by this service. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - items: - description: ServicePort contains information - on service's port. 
- properties: - appProtocol: - description: |- - The application protocol for this port. - This is used as a hint for implementations to offer richer behavior for protocols that they understand. - This field follows standard Kubernetes label syntax. - Valid values are either: - - * Un-prefixed protocol names - reserved for IANA standard service names (as per - RFC-6335 and https://www.iana.org/assignments/service-names). - - * Kubernetes-defined prefixed names: - * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- - * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 - * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 - - * Other protocols should use implementation-defined prefixed names such as - mycompany.com/my-custom-protocol. - type: string - name: - description: |- - The name of this port within the service. This must be a DNS_LABEL. - All ports within a ServiceSpec must have unique names. When considering - the endpoints for a Service, this must match the 'name' field in the - EndpointPort. - Optional if only one ServicePort is defined on this service. - type: string - nodePort: - description: |- - The port on each node on which this service is exposed when type is - NodePort or LoadBalancer. Usually assigned by the system. If a value is - specified, in-range, and not in use it will be used, otherwise the - operation will fail. If not specified, a port will be allocated if this - Service requires one. If this field is specified when creating a - Service which does not need it, creation will fail. This field will be - wiped when updating a Service to no longer need it (e.g. changing type - from NodePort to ClusterIP). - More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - format: int32 - type: integer - port: - description: The port that will be exposed - by this service. - format: int32 - type: integer - protocol: - default: TCP - description: |- - The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". - Default is TCP. - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the pods targeted by the service. - Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - If this is a string, it will be looked up as a named port in the - target Pod's container ports. If this is not specified, the value - of the 'port' field is used (an identity map). - This field is ignored for services with clusterIP=None, and should be - omitted or set equal to the 'port' field. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service - x-kubernetes-int-or-string: true - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - - protocol - x-kubernetes-list-type: map - publishNotReadyAddresses: - description: |- - publishNotReadyAddresses indicates that any agent which deals with endpoints for this - Service should disregard any indications of ready/not-ready. - The primary use case for setting this field is for a StatefulSet's Headless Service to - propagate SRV DNS records for its Pods for the purpose of peer discovery. 
- The Kubernetes controllers that generate Endpoints and EndpointSlice resources for - Services interpret this to mean that all endpoints are considered "ready" even if the - Pods themselves are not. Agents which consume only Kubernetes generated endpoints - through the Endpoints or EndpointSlice resources can safely assume this behavior. - type: boolean - selector: - additionalProperties: - type: string - description: |- - Route service traffic to pods with label keys and values matching this - selector. If empty or not present, the service is assumed to have an - external process managing its endpoints, which Kubernetes will not - modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - Ignored if type is ExternalName. - More info: https://kubernetes.io/docs/concepts/services-networking/service/ - type: object - x-kubernetes-map-type: atomic - sessionAffinity: - description: |- - Supports "ClientIP" and "None". Used to maintain session affinity. - Enable client IP based session affinity. - Must be ClientIP or None. - Defaults to None. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - type: string - sessionAffinityConfig: - description: sessionAffinityConfig contains - the configurations of session affinity. - properties: - clientIP: - description: clientIP contains the configurations - of Client IP based session affinity. - properties: - timeoutSeconds: - description: |- - timeoutSeconds specifies the seconds of ClientIP type session sticky time. - The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - Default value is 10800(for 3 hours). - format: int32 - type: integer - type: object - type: object - trafficDistribution: - description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is a beta field and requires enabling ServiceTrafficDistribution feature. - type: string - type: - description: |- - type determines how the Service is exposed. Defaults to ClusterIP. Valid - options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - "ClusterIP" allocates a cluster-internal IP address for load-balancing - to endpoints. Endpoints are determined by the selector or if that is not - specified, by manual construction of an Endpoints object or - EndpointSlice objects. If clusterIP is "None", no virtual IP is - allocated and the endpoints are published as a set of endpoints rather - than a virtual IP. - "NodePort" builds on ClusterIP and allocates a port on every node which - routes to the same endpoints as the clusterIP. - "LoadBalancer" builds on NodePort and creates an external load-balancer - (if supported in the current cloud) which routes to the same endpoints - as the clusterIP. - "ExternalName" aliases this service to the specified externalName. - Several other fields do not apply to ExternalName services. 
- More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types - type: string - type: object - type: object - updateStrategy: - default: patch - description: UpdateStrategy describes how the service - differences should be reconciled - enum: - - patch - - replace - type: string - required: - - selectorType - - serviceTemplate - type: object - type: array - disabledDefaultServices: - description: |- - DisabledDefaultServices is a list of service types that are disabled by default. - Valid values are "r", and "ro", representing read, and read-only services. - items: - description: |- - ServiceSelectorType describes a valid value for generating the service selectors. - It indicates which type of service the selector applies to, such as read-write, read, or read-only - enum: - - rw - - r - - ro - type: string - type: array - type: object - type: object - maxSyncReplicas: - default: 0 - description: |- - The target value for the synchronous replication quorum, that can be - decreased if the number of ready standbys is lower than this. - Undefined or 0 disable synchronous replication. - minimum: 0 - type: integer - minSyncReplicas: - default: 0 - description: |- - Minimum number of instances required in synchronous replication with the - primary. Undefined or 0 allow writes to complete when no standby is - available. - minimum: 0 - type: integer - monitoring: - description: The configuration of the monitoring infrastructure of - this cluster - properties: - customQueriesConfigMap: - description: The list of config maps containing the custom queries - items: - description: |- - ConfigMapKeySelector contains enough information to let you locate - the key of a ConfigMap - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - customQueriesSecret: - description: The list of secrets containing the custom queries - items: - description: |- - SecretKeySelector contains enough information to let you locate - the key of a Secret - properties: - key: - description: The key to select - type: string - name: - description: Name of the referent. - type: string - required: - - key - - name - type: object - type: array - disableDefaultQueries: - default: false - description: |- - Whether the default queries should be injected. - Set it to `true` if you don't want to inject default queries into the cluster. - Default: false. - type: boolean - enablePodMonitor: - default: false - description: Enable or disable the `PodMonitor` - type: boolean - podMonitorMetricRelabelings: - description: The list of metric relabelings for the `PodMonitor`. - Applied to samples before ingestion. - items: - description: |- - RelabelConfig allows dynamic rewriting of the label set for targets, alerts, - scraped samples and remote write samples. - - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - properties: - action: - default: replace - description: |- - Action to perform based on the regex matching. - - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. - `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
- - Default: "Replace" - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: |- - Modulus to take of the hash of the source label values. - - Only applicable when the action is `HashMod`. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. - type: string - replacement: - description: |- - Replacement value against which a Replace action is performed if the - regular expression matches. - - Regex capture groups are available. - type: string - separator: - description: Separator is the string between concatenated - SourceLabels. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. Their content is - concatenated using the configured Separator and matched against the - configured regular expression. - items: - description: |- - LabelName is a valid Prometheus label name which may only contain ASCII - letters, numbers, as well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting string is written in a replacement. - - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, - `KeepEqual` and `DropEqual` actions. - - Regex capture groups are available. - type: string - type: object - type: array - podMonitorRelabelings: - description: The list of relabelings for the `PodMonitor`. Applied - to samples before scraping. - items: - description: |- - RelabelConfig allows dynamic rewriting of the label set for targets, alerts, - scraped samples and remote write samples. - - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - properties: - action: - default: replace - description: |- - Action to perform based on the regex matching. - - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. - `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - - Default: "Replace" - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: |- - Modulus to take of the hash of the source label values. - - Only applicable when the action is `HashMod`. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. - type: string - replacement: - description: |- - Replacement value against which a Replace action is performed if the - regular expression matches. - - Regex capture groups are available. - type: string - separator: - description: Separator is the string between concatenated - SourceLabels. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. Their content is - concatenated using the configured Separator and matched against the - configured regular expression. - items: - description: |- - LabelName is a valid Prometheus label name which may only contain ASCII - letters, numbers, as well as underscores. 
- pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting string is written in a replacement. - - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, - `KeepEqual` and `DropEqual` actions. - - Regex capture groups are available. - type: string - type: object - type: array - tls: - description: |- - Configure TLS communication for the metrics endpoint. - Changing tls.enabled option will force a rollout of all instances. - properties: - enabled: - default: false - description: |- - Enable TLS for the monitoring endpoint. - Changing this option will force a rollout of all instances. - type: boolean - type: object - type: object - nodeMaintenanceWindow: - description: Define a maintenance window for the Kubernetes nodes - properties: - inProgress: - default: false - description: Is there a node maintenance activity in progress? - type: boolean - reusePVC: - default: true - description: |- - Reuse the existing PVC (wait for the node to come - up again) or not (recreate it elsewhere - when `instances` >1) - type: boolean - type: object - plugins: - description: |- - The plugins configuration, containing - any plugin to be loaded with the corresponding configuration - items: - description: |- - PluginConfiguration specifies a plugin that need to be loaded for this - cluster to be reconciled - properties: - enabled: - default: true - description: Enabled is true if this plugin will be used - type: boolean - isWALArchiver: - default: false - description: |- - Only one plugin can be declared as WALArchiver. - Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. - type: boolean - name: - description: Name is the plugin name - type: string - parameters: - additionalProperties: - type: string - description: Parameters is the configuration of the plugin - type: object - required: - - name - type: object - type: array - postgresGID: - default: 26 - description: The GID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresUID: - default: 26 - description: The UID of the `postgres` user inside the image, defaults - to `26` - format: int64 - type: integer - postgresql: - description: Configuration of the PostgreSQL server - properties: - enableAlterSystem: - description: |- - If this parameter is true, the user will be able to invoke `ALTER SYSTEM` - on this CloudNativePG Cluster. - This should only be used for debugging and troubleshooting. - Defaults to false. - type: boolean - ldap: - description: Options to specify LDAP configuration - properties: - bindAsAuth: - description: Bind as authentication configuration - properties: - prefix: - description: Prefix for the bind authentication option - type: string - suffix: - description: Suffix for the bind authentication option - type: string - type: object - bindSearchAuth: - description: Bind+Search authentication configuration - properties: - baseDN: - description: Root DN to begin the user search - type: string - bindDN: - description: DN of the user to bind to the directory - type: string - bindPassword: - description: Secret with the password for the user to - bind to the directory - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. 
Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - searchAttribute: - description: Attribute to match against the username - type: string - searchFilter: - description: Search filter to use when doing the search+bind - authentication - type: string - type: object - port: - description: LDAP server port - type: integer - scheme: - description: LDAP schema to be used, possible options are - `ldap` and `ldaps` - enum: - - ldap - - ldaps - type: string - server: - description: LDAP hostname or IP address - type: string - tls: - description: Set to 'true' to enable LDAP over TLS. 'false' - is default - type: boolean - type: object - parameters: - additionalProperties: - type: string - description: PostgreSQL configuration options (postgresql.conf) - type: object - pg_hba: - description: |- - PostgreSQL Host Based Authentication rules (lines to be appended - to the pg_hba.conf file) - items: - type: string - type: array - pg_ident: - description: |- - PostgreSQL User Name Maps rules (lines to be appended - to the pg_ident.conf file) - items: - type: string - type: array - promotionTimeout: - description: |- - Specifies the maximum number of seconds to wait when promoting an instance to primary. - Default value is 40000000, greater than one year in seconds, - big enough to simulate an infinite timeout - format: int32 - type: integer - shared_preload_libraries: - description: Lists of shared preload libraries to add to the default - ones - items: - type: string - type: array - syncReplicaElectionConstraint: - description: |- - Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be - set up. - properties: - enabled: - description: This flag enables the constraints for sync replicas - type: boolean - nodeLabelsAntiAffinity: - description: A list of node labels values to extract and compare - to evaluate if the pods reside in the same topology or not - items: - type: string - type: array - required: - - enabled - type: object - synchronous: - description: Configuration of the PostgreSQL synchronous replication - feature - properties: - dataDurability: - default: required - description: |- - If set to "required", data durability is strictly enforced. Write operations - with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will - block if there are insufficient healthy replicas, ensuring data persistence. - If set to "preferred", data durability is maintained when healthy replicas - are available, but the required number of instances will adjust dynamically - if replicas become unavailable. This setting relaxes strict durability enforcement - to allow for operational continuity. This setting is only applicable if both - `standbyNamesPre` and `standbyNamesPost` are unset (empty). - enum: - - required - - preferred - type: string - maxStandbyNamesFromCluster: - description: |- - Specifies the maximum number of local cluster pods that can be - automatically included in the `synchronous_standby_names` option in - PostgreSQL. 
- type: integer - method: - description: |- - Method to select synchronous replication standbys from the listed - servers, accepting 'any' (quorum-based synchronous replication) or - 'first' (priority-based synchronous replication) as values. - enum: - - any - - first - type: string - number: - description: |- - Specifies the number of synchronous standby servers that - transactions must wait for responses from. - type: integer - x-kubernetes-validations: - - message: The number of synchronous replicas should be greater - than zero - rule: self > 0 - standbyNamesPost: - description: |- - A user-defined list of application names to be added to - `synchronous_standby_names` after local cluster pods (the order is - only useful for priority-based synchronous replication). - items: - type: string - type: array - standbyNamesPre: - description: |- - A user-defined list of application names to be added to - `synchronous_standby_names` before local cluster pods (the order is - only useful for priority-based synchronous replication). - items: - type: string - type: array - required: - - method - - number - type: object - x-kubernetes-validations: - - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' - and empty 'standbyNamesPost' - rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) - || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) - || self.standbyNamesPost.size()==0)) - type: object - primaryUpdateMethod: - default: restart - description: |- - Method to follow to upgrade the primary server during a rolling - update procedure, after all replicas have been successfully updated: - it can be with a switchover (`switchover`) or in-place (`restart` - default) - enum: - - switchover - - restart - type: string - primaryUpdateStrategy: - default: unsupervised - description: |- - Deployment strategy to follow to upgrade the primary server during a rolling - update procedure, after all replicas have been successfully updated: - it can be automated (`unsupervised` - default) or manual (`supervised`) - enum: - - unsupervised - - supervised - type: string - priorityClassName: - description: |- - Name of the priority class which will be used in every generated Pod, if the PriorityClass - specified does not exist, the pod will not be able to schedule. Please refer to - https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass - for more information - type: string - probes: - description: |- - The configuration of the probes to be injected - in the PostgreSQL Pods. - properties: - liveness: - description: The liveness probe configuration - properties: - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
- format: int32 - type: integer - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - readiness: - description: The readiness probe configuration - properties: - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - maximumLag: - anyOf: - - type: integer - - type: string - description: Lag limit. Used only for `streaming` strategy - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. 
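# Illustrative sketch (lag budget assumed): a readiness probe using the
# `streaming` strategy together with the `maximumLag` field described above;
# the probe `type` enum continues just below.
#
#   spec:
#     probes:
#       readiness:
#         type: streaming    # gate readiness on replication status
#         maximumLag: 32Mi   # assumed budget; only used by `streaming`
#         periodSeconds: 10
#         failureThreshold: 3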
- More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: - description: The probe strategy - enum: - - pg_isready - - streaming - - query - type: string - type: object - startup: - description: The startup probe configuration - properties: - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - maximumLag: - anyOf: - - type: integer - - type: string - description: Lag limit. Used only for `streaming` strategy - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: - description: The probe strategy - enum: - - pg_isready - - streaming - - query - type: string - type: object - type: object - projectedVolumeTemplate: - description: |- - Template to be used to define projected volumes, projected volumes will be mounted - under `/projected` base folder - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: |- - sources is the list of volume projections. Each entry in this list - handles one source. 
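# Illustrative sketch (ConfigMap name assumed): a single projected source,
# which the operator mounts under the `/projected` base folder described
# above.
#
#   spec:
#     projectedVolumeTemplate:
#       sources:
#         - configMap:
#             name: app-settings
#             items:
#               - key: app.properties
#                 path: app.properties   # becomes /projected/app.properties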
- items: - description: |- - Projection that may be projected along with other supported volume types. - Exactly one of these fields must be set. - properties: - clusterTrustBundle: - description: |- - ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - of ClusterTrustBundle objects in an auto-updating file. - - Alpha, gated by the ClusterTrustBundleProjection feature gate. - - ClusterTrustBundle objects can either be selected by name, or by the - combination of signer name and a label selector. - - Kubelet performs aggressive normalization of the PEM contents written - into the pod filesystem. Esoteric PEM features such as inter-block - comments and block headers are stripped. Certificates are deduplicated. - The ordering of certificates within the file is arbitrary, and Kubelet - may change the order over time. - properties: - labelSelector: - description: |- - Select all ClusterTrustBundles that match this label selector. Only has - effect if signerName is set. Mutually-exclusive with name. If unset, - interpreted as "match nothing". If set but empty, interpreted as "match - everything". - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - name: - description: |- - Select a single ClusterTrustBundle by object name. Mutually-exclusive - with signerName and labelSelector. - type: string - optional: - description: |- - If true, don't block pod startup if the referenced ClusterTrustBundle(s) - aren't available. If using name, then the named ClusterTrustBundle is - allowed not to exist. If using signerName, then the combination of - signerName and labelSelector is allowed to match zero - ClusterTrustBundles. - type: boolean - path: - description: Relative path from the volume root to write - the bundle. - type: string - signerName: - description: |- - Select all ClusterTrustBundles that match this signer name. - Mutually-exclusive with name. The contents of all selected - ClusterTrustBundles will be unified and deduplicated. 
- type: string - required: - - path - type: object - configMap: - description: configMap information about the configMap data - to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about the downwardAPI - data to project - properties: - items: - description: Items is a list of DownwardAPIVolume file - items: - description: DownwardAPIVolumeFile represents information - to create the file containing the pod field - properties: - fieldRef: - description: 'Required: Selects a field of the - pod: only annotations, labels, name, namespace - and uid are supported.' - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in - the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must not - be absolute or contain the ''..'' path. Must - be utf-8 encoded. 
The first item of the relative - path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - properties: - containerName: - description: 'Container name: required for - volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of - the exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - x-kubernetes-list-type: atomic - type: object - secret: - description: secret information about the secret data to - project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional field specify whether the Secret - or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information about the - serviceAccountToken data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. 
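# Illustrative sketch (audience and path assumed): a serviceAccountToken
# projection using the fields documented in this subsection.
#
#   spec:
#     projectedVolumeTemplate:
#       sources:
#         - serviceAccountToken:
#             audience: https://vault.example.internal   # assumed recipient
#             expirationSeconds: 3600
#             path: vault-token    # projected under /projected/vault-token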
-             type: string
-           expirationSeconds:
-             description: |-
-               expirationSeconds is the requested duration of validity of the service
-               account token. As the token approaches expiration, the kubelet volume
-               plugin will proactively rotate the service account token. The kubelet will
-               start trying to rotate the token if the token is older than 80 percent of
-               its time to live or if the token is older than 24 hours. Defaults to 1 hour
-               and must be at least 10 minutes.
-             format: int64
-             type: integer
-           path:
-             description: |-
-               path is the path relative to the mount point of the file to project the
-               token into.
-             type: string
-         required:
-         - path
-         type: object
-       type: object
-     type: array
-     x-kubernetes-list-type: atomic
-   type: object
- replica:
-   description: Replica cluster configuration
-   properties:
-     enabled:
-       description: |-
-         If replica mode is enabled, this cluster will be a replica of an
-         existing cluster. A replica cluster can be created from a recovery
-         object store or via streaming through pg_basebackup.
-         Refer to the Replica clusters page of the documentation for more information.
-       type: boolean
-     minApplyDelay:
-       description: |-
-         When replica mode is enabled, this parameter allows you to replay
-         transactions only when the system time is at least the configured
-         time past the commit time. This provides an opportunity to correct
-         data loss errors. Note that when this parameter is set, a promotion
-         token cannot be used.
-       type: string
-     primary:
-       description: |-
-         Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
-         topology specified in externalClusters
-       type: string
-     promotionToken:
-       description: |-
-         A demotion token generated by an external cluster used to
-         check if the promotion requirements are met.
-       type: string
-     self:
-       description: |-
-         Self defines the name of this cluster. It is used to determine if this is a primary
-         or a replica cluster, comparing it with `primary`
-       type: string
-     source:
-       description: The name of the external cluster which is the replication
-         origin
-       minLength: 1
-       type: string
-   required:
-   - source
-   type: object
- replicationSlots:
-   default:
-     highAvailability:
-       enabled: true
-   description: Replication slots management configuration
-   properties:
-     highAvailability:
-       default:
-         enabled: true
-       description: Replication slots for high availability configuration
-       properties:
-         enabled:
-           default: true
-           description: |-
-             If enabled (default), the operator will automatically manage replication slots
-             on the primary instance and use them in streaming replication
-             connections with all the standby instances that are part of the HA
-             cluster. If disabled, the operator will not take advantage
-             of replication slots in streaming connections with the replicas.
-             This feature also controls replication slots in replica clusters,
-             from the designated primary to its cascading replicas.
-           type: boolean
-         slotPrefix:
-           default: _cnpg_
-           description: |-
-             Prefix for replication slots managed by the operator for HA.
-             It may only contain lower case letters, numbers, and the underscore character.
-             This can only be set at creation time. By default set to `_cnpg_`.
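# Illustrative sketch (exclusion pattern assumed): HA slot management plus
# user-defined slot synchronization; the `synchronizeReplicas` and
# `updateInterval` fields are the ones whose schema continues below.
#
#   spec:
#     replicationSlots:
#       updateInterval: 30
#       highAvailability:
#         enabled: true
#         slotPrefix: _cnpg_
#       synchronizeReplicas:
#         enabled: true
#         excludePatterns:
#           - "^_scratch_"   # assumed regexp: skip throwaway slots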
- pattern: ^[0-9a-z_]*$ - type: string - type: object - synchronizeReplicas: - description: Configures the synchronization of the user defined - physical replication slots - properties: - enabled: - default: true - description: When set to true, every replication slot that - is on the primary is synchronized on each standby - type: boolean - excludePatterns: - description: List of regular expression patterns to match - the names of replication slots to be excluded (by default - empty) - items: - type: string - type: array - required: - - enabled - type: object - updateInterval: - default: 30 - description: |- - Standby will update the status of the local replication slots - every `updateInterval` seconds (default 30). - minimum: 1 - type: integer - type: object - resources: - description: |- - Resources requirements of every generated Pod. Please refer to - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - for more information. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - schedulerName: - description: |- - If specified, the pod will be dispatched by specified Kubernetes - scheduler. If not specified, the pod will be dispatched by the default - scheduler. More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ - type: string - seccompProfile: - description: |- - The SeccompProfile applied to every Pod and Container. - Defaults to: `RuntimeDefault` - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. 
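# Illustrative sketch (sizes assumed) for the `resources` section documented
# here: equal requests and limits give the instance Pods the Guaranteed QoS
# class, a common choice for PostgreSQL workloads.
#
#   spec:
#     resources:
#       requests:
#         cpu: "1"
#         memory: 2Gi
#       limits:
#         cpu: "1"
#         memory: 2Gi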
- The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - serviceAccountTemplate: - description: Configure the generation of the service account - properties: - metadata: - description: |- - Metadata are the metadata to be used for the generated - service account - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - name: - description: The name of the resource. Only supported for - certain types - type: string - type: object - required: - - metadata - type: object - smartShutdownTimeout: - default: 180 - description: |- - The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. - Make sure you reserve enough time for the operator to request a fast shutdown of Postgres - (that is: `stopDelay` - `smartShutdownTimeout`). - format: int32 - type: integer - startDelay: - default: 3600 - description: |- - The time in seconds that is allowed for a PostgreSQL instance to - successfully start up (default 3600). - The startup probe failure threshold is derived from this value using the formula: - ceiling(startDelay / 10). - format: int32 - type: integer - stopDelay: - default: 1800 - description: |- - The time in seconds that is allowed for a PostgreSQL instance to - gracefully shutdown (default 1800) - format: int32 - type: integer - storage: - description: Configuration of the storage of the instances - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
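# Illustrative sketch (values assumed) tying together the three timing fields
# above: the startup probe failureThreshold is derived as
# ceiling(startDelay / 10), and stopDelay - smartShutdownTimeout is the time
# left for the fast shutdown path.
#
#   spec:
#     startDelay: 300            # failureThreshold = ceiling(300 / 10) = 30
#     stopDelay: 1800
#     smartShutdownTimeout: 180  # leaves 1800 - 180 = 1620s for fast shutdown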
- If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. 
- More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes to consider - for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. 
- If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - default: true - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: |- - Size of the storage. Required if not already specified in the PVC template. - Changes to this field are automatically reapplied to the created PVCs. - Size cannot be decreased. - type: string - storageClass: - description: |- - StorageClass to use for PVCs. Applied after - evaluating the PVC template, if available. - If not specified, the generated PVCs will use the - default storage class - type: string - type: object - superuserSecret: - description: |- - The secret containing the superuser password. If not defined a new - secret will be created with a randomly generated password - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - switchoverDelay: - default: 3600 - description: |- - The time in seconds that is allowed for a primary PostgreSQL instance - to gracefully shutdown during a switchover. - Default value is 3600 seconds (1 hour). - format: int32 - type: integer - tablespaces: - description: The tablespaces configuration - items: - description: |- - TablespaceConfiguration is the configuration of a tablespace, and includes - the storage specification for the tablespace - properties: - name: - description: The name of the tablespace - type: string - owner: - description: Owner is the PostgreSQL user owning the tablespace - properties: - name: - type: string - type: object - storage: - description: The storage configuration for the tablespace - properties: - pvcTemplate: - description: Template to be used to generate the Persistent - Volume Claim - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. 
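# Illustrative sketch (names and sizes assumed): two tablespaces, one of them
# temporary, using the `name`, `owner`, `storage` and `temporary` fields
# documented in this section.
#
#   spec:
#     tablespaces:
#       - name: analytics
#         owner:
#           name: app
#         storage:
#           size: 20Gi
#       - name: scratch
#         temporary: true   # appended to temp_tablespaces
#         storage:
#           size: 5Gi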
- For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being - referenced - type: string - name: - description: Name is the name of resource being - referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. 
- More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes - to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. 
- If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to - the PersistentVolume backing this claim. - type: string - type: object - resizeInUseVolumes: - default: true - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: |- - Size of the storage. Required if not already specified in the PVC template. - Changes to this field are automatically reapplied to the created PVCs. - Size cannot be decreased. - type: string - storageClass: - description: |- - StorageClass to use for PVCs. Applied after - evaluating the PVC template, if available. - If not specified, the generated PVCs will use the - default storage class - type: string - type: object - temporary: - default: false - description: |- - When set to true, the tablespace will be added as a `temp_tablespaces` - entry in PostgreSQL, and will be available to automatically house temp - database objects, or other temporary files. Please refer to PostgreSQL - documentation for more information on the `temp_tablespaces` GUC. - type: boolean - required: - - name - - storage - type: object - type: array - topologySpreadConstraints: - description: |- - TopologySpreadConstraints specifies how to spread matching pods among the given topology. - More info: - https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ - items: - description: TopologySpreadConstraint specifies how to spread matching - pods among the given topology. - properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. - When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. - format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. - As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. 
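# Illustrative sketch (label value assumed): spread the instance Pods evenly
# across availability zones using the constraint fields documented here.
#
#   spec:
#     topologySpreadConstraints:
#       - maxSkew: 1
#         topologyKey: topology.kubernetes.io/zone
#         whenUnsatisfiable: DoNotSchedule
#         labelSelector:
#           matchLabels:
#             cnpg.io/cluster: cluster-example   # assumed cluster name label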
- format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field. - type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - walStorage: - description: Configuration of the storage for PostgreSQL WAL (Write-Ahead - Log) - properties: - pvcTemplate: - description: Template to be used to generate the Persistent Volume - Claim - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. 
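# Illustrative sketch (sizes and class assumed): a dedicated WAL volume, often
# placed on faster storage than the main data volume configured above.
#
#   spec:
#     storage:
#       size: 50Gi
#     walStorage:
#       size: 10Gi
#       storageClass: fast-ssd   # hypothetical low-latency StorageClass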
- More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. 
- type: string - kind: - description: Kind is the type of resource being referenced - type: string - name: - description: Name is the name of resource being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over volumes to consider - for binding. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". 
The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference to the PersistentVolume - backing this claim. - type: string - type: object - resizeInUseVolumes: - default: true - description: Resize existent PVCs, defaults to true - type: boolean - size: - description: |- - Size of the storage. Required if not already specified in the PVC template. - Changes to this field are automatically reapplied to the created PVCs. - Size cannot be decreased. - type: string - storageClass: - description: |- - StorageClass to use for PVCs. Applied after - evaluating the PVC template, if available. - If not specified, the generated PVCs will use the - default storage class - type: string - type: object - required: - - instances - type: object - x-kubernetes-validations: - - message: imageName and imageCatalogRef are mutually exclusive - rule: '!(has(self.imageCatalogRef) && has(self.imageName))' - status: - description: |- - Most recently observed status of the cluster. This data may not be up - to date. Populated by the system. Read-only. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - availableArchitectures: - description: AvailableArchitectures reports the available architectures - of a cluster - items: - description: AvailableArchitecture represents the state of a cluster's - architecture - properties: - goArch: - description: GoArch is the name of the executable architecture - type: string - hash: - description: Hash is the hash of the executable - type: string - required: - - goArch - - hash - type: object - type: array - certificates: - description: The configuration for the CA and related certificates, - initialized with defaults. - properties: - clientCASecret: - description: |- - The secret containing the Client CA certificate.
If not defined, a new secret will be created - with a self-signed CA and will be used to generate all the client certificates.
-
- Contains:
-
- - `ca.crt`: CA that should be used to validate the client certificates, - used as `ssl_ca_file` of all the instances.
- - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, - this can be omitted.
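# NOTE: a hedged sketch, assuming the same certificate fields are accepted
# under `.spec.certificates` when bringing your own client CA; the secret
# names below are hypothetical.
#
#   spec:
#     certificates:
#       clientCASecret: my-client-ca             # must contain ca.crt (and ca.key, see above)
#       replicationTLSSecret: my-replication-tls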
- type: string - expirations: - additionalProperties: - type: string - description: Expiration dates for all certificates. - type: object - replicationTLSSecret: - description: |- - The secret of type kubernetes.io/tls containing the client certificate to authenticate as - the `streaming_replica` user. - If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be - created using the provided CA. - type: string - serverAltDNSNames: - description: The list of the server alternative DNS names to be - added to the generated server TLS certificates, when required. - items: - type: string - type: array - serverCASecret: - description: |- - The secret containing the Server CA certificate. If not defined, a new secret will be created - with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
-
- Contains:
-
- - `ca.crt`: CA that should be used to validate the server certificate, - used as `sslrootcert` in client connection strings.
- - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, - this can be omitted.
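# NOTE: likewise for the server-side pair, a sketch assuming these fields are
# mirrored under `.spec.certificates`; the secret names are hypothetical.
#
#   spec:
#     certificates:
#       serverCASecret: my-server-ca    # ca.crt is used as sslrootcert by clients
#       serverTLSSecret: my-server-tls  # kubernetes.io/tls secret served by the instances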
- type: string - serverTLSSecret: - description: |- - The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as - `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. - If not defined, ServerCASecret must provide also `ca.key` and a new secret will be - created using the provided CA. - type: string - type: object - cloudNativePGCommitHash: - description: The commit hash of the operator that is currently running - type: string - cloudNativePGOperatorHash: - description: The hash of the binary of the operator - type: string - conditions: - description: Conditions for cluster object - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - configMapResourceVersion: - description: |- - The list of resource versions of the configmaps, - managed by the operator. Every change here is done in the - interest of the instance manager, which will refresh the - configmap data - properties: - metrics: - additionalProperties: - type: string - description: |- - A map with the versions of all the config maps used to pass metrics.
- Map keys are the config map names, map values are the versions - type: object - type: object - currentPrimary: - description: Current primary instance - type: string - currentPrimaryFailingSinceTimestamp: - description: |- - The timestamp when the primary was detected to be unhealthy - This field is reported when `.spec.failoverDelay` is populated or during online upgrades - type: string - currentPrimaryTimestamp: - description: The timestamp when the last actual promotion to primary - has occurred - type: string - danglingPVC: - description: |- - List of all the PVCs created by this cluster and still available - which are not attached to a Pod - items: - type: string - type: array - demotionToken: - description: |- - DemotionToken is a JSON token containing the information - from pg_controldata such as Database system identifier, Latest checkpoint's - TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO - WAL file, and Time of latest checkpoint - type: string - firstRecoverabilityPoint: - description: |- - The first recoverability point, stored as a date in RFC3339 format. - This field is calculated from the content of FirstRecoverabilityPointByMethod - type: string - firstRecoverabilityPointByMethod: - additionalProperties: - format: date-time - type: string - description: The first recoverability point, stored as a date in RFC3339 - format, per backup method type - type: object - healthyPVC: - description: List of all the PVCs not dangling nor initializing - items: - type: string - type: array - image: - description: Image contains the image name used by the pods - type: string - initializingPVC: - description: List of all the PVCs that are being initialized by this - cluster - items: - type: string - type: array - instanceNames: - description: List of instance names in the cluster - items: - type: string - type: array - instances: - description: The total number of PVC Groups detected in the cluster. - It may differ from the number of existing instance pods. 
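# NOTE: an illustrative rendering of the bookkeeping fields above on a healthy
# three-instance cluster; every value below is hypothetical.
#
#   status:
#     currentPrimary: cluster-example-1
#     image: ghcr.io/cloudnative-pg/postgresql:17.0
#     instances: 3
#     instanceNames:
#       - cluster-example-1
#       - cluster-example-2
#       - cluster-example-3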
- type: integer - instancesReportedState: - additionalProperties: - description: InstanceReportedState describes the last reported state - of an instance during a reconciliation loop - properties: - ip: - description: IP address of the instance - type: string - isPrimary: - description: indicates if an instance is the primary one - type: boolean - timeLineID: - description: indicates which TimelineId the instance is on - type: integer - required: - - isPrimary - type: object - description: The reported state of the instances during the last reconciliation - loop - type: object - instancesStatus: - additionalProperties: - items: - type: string - type: array - description: InstancesStatus indicates in which status the instances - are - type: object - jobCount: - description: How many Jobs have been created by this cluster - format: int32 - type: integer - lastFailedBackup: - description: Stored as a date in RFC3339 format - type: string - lastPromotionToken: - description: |- - LastPromotionToken is the last verified promotion token that - was used to promote a replica cluster - type: string - lastSuccessfulBackup: - description: |- - Last successful backup, stored as a date in RFC3339 format - This field is calculated from the content of LastSuccessfulBackupByMethod - type: string - lastSuccessfulBackupByMethod: - additionalProperties: - format: date-time - type: string - description: Last successful backup, stored as a date in RFC3339 format, - per backup method type - type: object - latestGeneratedNode: - description: ID of the latest generated node (used to avoid node name - clashing) - type: integer - managedRolesStatus: - description: ManagedRolesStatus reports the state of the managed roles - in the cluster - properties: - byStatus: - additionalProperties: - items: - type: string - type: array - description: ByStatus gives the list of roles in each state - type: object - cannotReconcile: - additionalProperties: - items: - type: string - type: array - description: |- - CannotReconcile lists roles that cannot be reconciled in PostgreSQL, - with an explanation of the cause - type: object - passwordStatus: - additionalProperties: - description: PasswordState represents the state of the password - of a managed RoleConfiguration - properties: - resourceVersion: - description: the resource version of the password secret - type: string - transactionID: - description: the last transaction ID to affect the role - definition in PostgreSQL - format: int64 - type: integer - type: object - description: PasswordStatus gives the last transaction id and - password secret version for each managed role - type: object - type: object - onlineUpdateEnabled: - description: OnlineUpdateEnabled shows if the online upgrade is enabled - inside the cluster - type: boolean - pgDataImageInfo: - description: PGDataImageInfo contains the details of the latest image - that has run on the current data directory.
- properties: - image: - description: Image is the image name - type: string - majorVersion: - description: MajorVersion is the major version of the image - type: integer - required: - - image - - majorVersion - type: object - phase: - description: Current phase of the cluster - type: string - phaseReason: - description: Reason for the current phase - type: string - pluginStatus: - description: PluginStatus is the status of the loaded plugins - items: - description: PluginStatus is the status of a loaded plugin - properties: - backupCapabilities: - description: |- - BackupCapabilities are the list of capabilities of the - plugin regarding the Backup management - items: - type: string - type: array - capabilities: - description: |- - Capabilities are the list of capabilities of the - plugin - items: - type: string - type: array - name: - description: Name is the name of the plugin - type: string - operatorCapabilities: - description: |- - OperatorCapabilities are the list of capabilities of the - plugin regarding the reconciler - items: - type: string - type: array - restoreJobHookCapabilities: - description: |- - RestoreJobHookCapabilities are the list of capabilities of the - plugin regarding the RestoreJobHook management - items: - type: string - type: array - status: - description: Status contains the status reported by the plugin - through the SetStatusInCluster interface - type: string - version: - description: |- - Version is the version of the plugin loaded by the - latest reconciliation loop - type: string - walCapabilities: - description: |- - WALCapabilities are the list of capabilities of the - plugin regarding the WAL management - items: - type: string - type: array - required: - - name - - version - type: object - type: array - poolerIntegrations: - description: The integration needed by poolers referencing the cluster - properties: - pgBouncerIntegration: - description: PgBouncerIntegrationStatus encapsulates the needed - integration for the pgbouncer poolers referencing the cluster - properties: - secrets: - items: - type: string - type: array - type: object - type: object - pvcCount: - description: How many PVCs have been created by this cluster - format: int32 - type: integer - readService: - description: Current list of read pods - type: string - readyInstances: - description: The total number of ready instances in the cluster. It - is equal to the number of ready instance pods. - type: integer - resizingPVC: - description: List of all the PVCs that have ResizingPVC condition. - items: - type: string - type: array - secretsResourceVersion: - description: |- - The list of resource versions of the secrets - managed by the operator. Every change here is done in the - interest of the instance manager, which will refresh the - secret data - properties: - applicationSecretVersion: - description: The resource version of the "app" user secret - type: string - barmanEndpointCA: - description: The resource version of the Barman Endpoint CA if - provided - type: string - caSecretVersion: - description: Unused. Retained for compatibility with old versions.
- type: string - clientCaSecretVersion: - description: The resource version of the PostgreSQL client-side - CA secret version - type: string - externalClusterSecretVersion: - additionalProperties: - type: string - description: The resource versions of the external cluster secrets - type: object - managedRoleSecretVersion: - additionalProperties: - type: string - description: The resource versions of the managed roles secrets - type: object - metrics: - additionalProperties: - type: string - description: |- - A map with the versions of all the secrets used to pass metrics. - Map keys are the secret names, map values are the versions - type: object - replicationSecretVersion: - description: The resource version of the "streaming_replica" user - secret - type: string - serverCaSecretVersion: - description: The resource version of the PostgreSQL server-side - CA secret version - type: string - serverSecretVersion: - description: The resource version of the PostgreSQL server-side - secret version - type: string - superuserSecretVersion: - description: The resource version of the "postgres" user secret - type: string - type: object - switchReplicaClusterStatus: - description: SwitchReplicaClusterStatus is the status of the switch - to replica cluster - properties: - inProgress: - description: InProgress indicates if there is an ongoing procedure - of switching a cluster to a replica cluster. - type: boolean - type: object - tablespacesStatus: - description: TablespacesStatus reports the state of the declarative - tablespaces in the cluster - items: - description: TablespaceState represents the state of a tablespace - in a cluster - properties: - error: - description: Error is the reconciliation error, if any - type: string - name: - description: Name is the name of the tablespace - type: string - owner: - description: Owner is the PostgreSQL user owning the tablespace - type: string - state: - description: State is the latest reconciliation state - type: string - required: - - name - - state - type: object - type: array - targetPrimary: - description: |- - Target primary instance, this is different from the previous one - during a switchover or a failover - type: string - targetPrimaryTimestamp: - description: The timestamp when the last request for a new primary - has occurred - type: string - timelineID: - description: The timeline of the Postgres cluster - type: integer - topology: - description: Instances topology. - properties: - instances: - additionalProperties: - additionalProperties: - type: string - description: PodTopologyLabels represent the topology of a Pod. - map[labelName]labelValue - type: object - description: Instances contains the pod topology of the instances - type: object - nodesUsed: - description: |- - NodesUsed represents the count of distinct nodes accommodating the instances. - A value of '1' suggests that all instances are hosted on a single node, - implying the absence of High Availability (HA). Ideally, this value should - be the same as the number of instances in the Postgres HA cluster, implying - shared nothing architecture on the compute side. - format: int32 - type: integer - successfullyExtracted: - description: |- - SuccessfullyExtracted indicates if the topology data was extracted.
It is useful to enact fallback behaviors - in synchronous replica election in case of failures - type: boolean - type: object - unusablePVC: - description: List of all the PVCs that are unusable because another - PVC is missing - items: - type: string - type: array - writeService: - description: Current write pod - type: string - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 - name: databases.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Database - listKind: DatabaseList - plural: databases - singular: database - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .spec.name - name: PG Name - type: string - - jsonPath: .status.applied - name: Applied - type: boolean - - description: Latest reconciliation message - jsonPath: .status.message - name: Message - type: string - name: v1 - schema: - openAPIV3Schema: - description: Database is the Schema for the databases API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired Database. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - allowConnections: - description: |- - Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and - `ALTER DATABASE`. If false then no one can connect to this database. - type: boolean - builtinLocale: - description: |- - Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This - setting cannot be changed. Specifies the locale name when the - builtin provider is used. This option requires `localeProvider` to - be set to `builtin`. Available from PostgreSQL 17. - type: string - x-kubernetes-validations: - - message: builtinLocale is immutable - rule: self == oldSelf - cluster: - description: The name of the PostgreSQL cluster hosting the database. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - collationVersion: - description: |- - Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This - setting cannot be changed. 
- type: string - x-kubernetes-validations: - - message: collationVersion is immutable - rule: self == oldSelf - connectionLimit: - description: |- - Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and - `ALTER DATABASE`. How many concurrent connections can be made to - this database. -1 (the default) means no limit. - type: integer - databaseReclaimPolicy: - default: retain - description: The policy for end-of-life maintenance of this database. - enum: - - delete - - retain - type: string - encoding: - description: |- - Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting - cannot be changed. Character set encoding to use in the database. - type: string - x-kubernetes-validations: - - message: encoding is immutable - rule: self == oldSelf - ensure: - default: present - description: Ensure the PostgreSQL database is `present` or `absent` - - defaults to "present". - enum: - - present - - absent - type: string - extensions: - description: The list of extensions to be managed in the database - items: - description: ExtensionSpec configures an extension in a database - properties: - ensure: - default: present - description: |- - Specifies whether an extension/schema should be present or absent in - the database. If set to `present`, the extension/schema will be - created if it does not exist. If set to `absent`, the - extension/schema will be removed if it exists. - enum: - - present - - absent - type: string - name: - description: Name of the extension/schema - type: string - schema: - description: |- - The name of the schema in which to install the extension's objects, - in case the extension allows its contents to be relocated. If not - specified (default), and the extension's control file does not - specify a schema either, the current default object creation schema - is used. - type: string - version: - description: |- - The version of the extension to install. If empty, the operator will - install the default version (whatever is specified in the - extension's control file) - type: string - required: - - name - type: object - type: array - icuLocale: - description: |- - Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This - setting cannot be changed. Specifies the ICU locale when the ICU - provider is used. This option requires `localeProvider` to be set to - `icu`. Available from PostgreSQL 15. - type: string - x-kubernetes-validations: - - message: icuLocale is immutable - rule: self == oldSelf - icuRules: - description: |- - Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting - cannot be changed. Specifies additional collation rules to customize - the behavior of the default collation. This option requires - `localeProvider` to be set to `icu`. Available from PostgreSQL 16. - type: string - x-kubernetes-validations: - - message: icuRules is immutable - rule: self == oldSelf - isTemplate: - description: |- - Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER - DATABASE`. If true, this database is considered a template and can - be cloned by any user with `CREATEDB` privileges. - type: boolean - locale: - description: |- - Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting - cannot be changed. Sets the default collation order and character - classification in the new database. - type: string - x-kubernetes-validations: - - message: locale is immutable - rule: self == oldSelf - localeCType: - description: |- - Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting - cannot be changed. 
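# NOTE: a minimal sketch of a Database manifest exercising the immutable locale
# fields above; icuLocale is accepted only because localeProvider is `icu`
# (see the CEL rules below). All names are hypothetical.
#
#   apiVersion: postgresql.cnpg.io/v1
#   kind: Database
#   metadata:
#     name: app-database
#   spec:
#     cluster:
#       name: cluster-example
#     name: app
#     owner: app
#     encoding: UTF8
#     localeProvider: icu
#     icuLocale: en-US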
- type: string - x-kubernetes-validations: - - message: localeCType is immutable - rule: self == oldSelf - localeCollate: - description: |- - Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This - setting cannot be changed. - type: string - x-kubernetes-validations: - - message: localeCollate is immutable - rule: self == oldSelf - localeProvider: - description: |- - Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This - setting cannot be changed. This option sets the locale provider for - databases created in the new cluster. Available from PostgreSQL 16. - type: string - x-kubernetes-validations: - - message: localeProvider is immutable - rule: self == oldSelf - name: - description: The name of the database to create inside PostgreSQL. - This setting cannot be changed. - type: string - x-kubernetes-validations: - - message: name is immutable - rule: self == oldSelf - - message: the name postgres is reserved - rule: self != 'postgres' - - message: the name template0 is reserved - rule: self != 'template0' - - message: the name template1 is reserved - rule: self != 'template1' - owner: - description: |- - Maps to the `OWNER` parameter of `CREATE DATABASE`. - Maps to the `OWNER TO` command of `ALTER DATABASE`. - The role name of the user who owns the database inside PostgreSQL. - type: string - schemas: - description: The list of schemas to be managed in the database - items: - description: SchemaSpec configures a schema in a database - properties: - ensure: - default: present - description: |- - Specifies whether an extension/schema should be present or absent in - the database. If set to `present`, the extension/schema will be - created if it does not exist. If set to `absent`, the - extension/schema will be removed if it exists. - enum: - - present - - absent - type: string - name: - description: Name of the extension/schema - type: string - owner: - description: |- - The role name of the user who owns the schema inside PostgreSQL. - It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the - `OWNER TO` command of `ALTER SCHEMA`. - type: string - required: - - name - type: object - type: array - tablespace: - description: |- - Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. - Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. - The name of the tablespace (in PostgreSQL) that will be associated - with the new database. This tablespace will be the default - tablespace used for objects created in this database. - type: string - template: - description: |- - Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting - cannot be changed. The name of the template from which to create - this database. - type: string - x-kubernetes-validations: - - message: template is immutable - rule: self == oldSelf - required: - - cluster - - name - - owner - type: object - x-kubernetes-validations: - - message: builtinLocale is only available when localeProvider is set - to `builtin` - rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' - - message: icuLocale is only available when localeProvider is set to `icu` - rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' - - message: icuRules is only available when localeProvider is set to `icu` - rule: '!has(self.icuRules) || self.localeProvider == ''icu''' - status: - description: |- - Most recently observed status of the Database. This data may not be up to - date. Populated by the system. Read-only. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - applied: - description: Applied is true if the database was reconciled correctly - type: boolean - extensions: - description: Extensions is the status of the managed extensions - items: - description: DatabaseObjectStatus is the status of the managed database - objects - properties: - applied: - description: |- - True if the object has been installed successfully in - the database - type: boolean - message: - description: Message is the object reconciliation message - type: string - name: - description: The name of the object - type: string - required: - - applied - - name - type: object - type: array - message: - description: Message is the reconciliation output message - type: string - observedGeneration: - description: |- - A sequence number representing the latest - desired state that was synchronized - format: int64 - type: integer - schemas: - description: Schemas is the status of the managed schemas - items: - description: DatabaseObjectStatus is the status of the managed database - objects - properties: - applied: - description: |- - True if the object has been installed successfully in - the database - type: boolean - message: - description: Message is the object reconciliation message - type: string - name: - description: The name of the object - type: string - required: - - applied - - name - type: object - type: array - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 - name: imagecatalogs.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: ImageCatalog - listKind: ImageCatalogList - plural: imagecatalogs - singular: imagecatalog - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1 - schema: - openAPIV3Schema: - description: ImageCatalog is the Schema for the imagecatalogs API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the ImageCatalog. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - images: - description: List of CatalogImages available in the catalog - items: - description: CatalogImage defines the image and major version - properties: - image: - description: The image reference - type: string - major: - description: The PostgreSQL major version of the image. Must - be unique within the catalog.
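# NOTE: a minimal ImageCatalog sketch satisfying the constraints described
# above (at least one entry, unique major versions); the image tags are
# hypothetical.
#
#   apiVersion: postgresql.cnpg.io/v1
#   kind: ImageCatalog
#   metadata:
#     name: postgresql
#   spec:
#     images:
#       - major: 16
#         image: ghcr.io/cloudnative-pg/postgresql:16.4
#       - major: 17
#         image: ghcr.io/cloudnative-pg/postgresql:17.0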
- minimum: 10 - type: integer - required: - - image - - major - type: object - maxItems: 8 - minItems: 1 - type: array - x-kubernetes-validations: - - message: Images must have unique major versions - rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) - required: - - images - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 - name: poolers.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Pooler - listKind: PoolerList - plural: poolers - singular: pooler - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .spec.type - name: Type - type: string - name: v1 - schema: - openAPIV3Schema: - description: Pooler is the Schema for the poolers API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the Pooler. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - cluster: - description: |- - This is the cluster reference on which the Pooler will work. - Pooler name should never match with any cluster name within the same namespace. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - deploymentStrategy: - description: The deployment strategy to use for pgbouncer to replace - existing pods with new ones - properties: - rollingUpdate: - description: |- - Rolling update config params. Present only if DeploymentStrategyType = - RollingUpdate. - properties: - maxSurge: - anyOf: - - type: integer - - type: string - description: |- - The maximum number of pods that can be scheduled above the desired number of - pods. - Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - This can not be 0 if MaxUnavailable is 0. - Absolute number is calculated from percentage by rounding up. - Defaults to 25%. - Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when - the rolling update starts, such that the total number of old and new pods do not exceed - 130% of desired pods. Once old pods have been killed, - new ReplicaSet can be scaled up further, ensuring that total number of pods running - at any time during the update is at most 130% of desired pods. - x-kubernetes-int-or-string: true - maxUnavailable: - anyOf: - - type: integer - - type: string - description: |- - The maximum number of pods that can be unavailable during the update. 
- Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - Absolute number is calculated from percentage by rounding down. - This can not be 0 if MaxSurge is 0. - Defaults to 25%. - Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods - immediately when the rolling update starts. Once new pods are ready, old ReplicaSet - can be scaled down further, followed by scaling up the new ReplicaSet, ensuring - that the total number of pods available at all times during the update is at - least 70% of desired pods. - x-kubernetes-int-or-string: true - type: object - type: - description: Type of deployment. Can be "Recreate" or "RollingUpdate". - Default is RollingUpdate. - type: string - type: object - instances: - default: 1 - description: 'The number of replicas we want. Default: 1.' - format: int32 - type: integer - monitoring: - description: The configuration of the monitoring infrastructure of - this pooler. - properties: - enablePodMonitor: - default: false - description: Enable or disable the `PodMonitor` - type: boolean - podMonitorMetricRelabelings: - description: The list of metric relabelings for the `PodMonitor`. - Applied to samples before ingestion. - items: - description: |- - RelabelConfig allows dynamic rewriting of the label set for targets, alerts, - scraped samples and remote write samples. - - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - properties: - action: - default: replace - description: |- - Action to perform based on the regex matching. - - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. - `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - - Default: "Replace" - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: |- - Modulus to take of the hash of the source label values. - - Only applicable when the action is `HashMod`. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. - type: string - replacement: - description: |- - Replacement value against which a Replace action is performed if the - regular expression matches. - - Regex capture groups are available. - type: string - separator: - description: Separator is the string between concatenated - SourceLabels. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. Their content is - concatenated using the configured Separator and matched against the - configured regular expression. - items: - description: |- - LabelName is a valid Prometheus label name which may only contain ASCII - letters, numbers, as well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting string is written in a replacement. - - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, - `KeepEqual` and `DropEqual` actions. - - Regex capture groups are available. - type: string - type: object - type: array - podMonitorRelabelings: - description: The list of relabelings for the `PodMonitor`. Applied - to samples before scraping. 
- items: - description: |- - RelabelConfig allows dynamic rewriting of the label set for targets, alerts, - scraped samples and remote write samples. - - More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - properties: - action: - default: replace - description: |- - Action to perform based on the regex matching. - - `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. - `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. - - Default: "Replace" - enum: - - replace - - Replace - - keep - - Keep - - drop - - Drop - - hashmod - - HashMod - - labelmap - - LabelMap - - labeldrop - - LabelDrop - - labelkeep - - LabelKeep - - lowercase - - Lowercase - - uppercase - - Uppercase - - keepequal - - KeepEqual - - dropequal - - DropEqual - type: string - modulus: - description: |- - Modulus to take of the hash of the source label values. - - Only applicable when the action is `HashMod`. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. - type: string - replacement: - description: |- - Replacement value against which a Replace action is performed if the - regular expression matches. - - Regex capture groups are available. - type: string - separator: - description: Separator is the string between concatenated - SourceLabels. - type: string - sourceLabels: - description: |- - The source labels select values from existing labels. Their content is - concatenated using the configured Separator and matched against the - configured regular expression. - items: - description: |- - LabelName is a valid Prometheus label name which may only contain ASCII - letters, numbers, as well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: |- - Label to which the resulting string is written in a replacement. - - It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, - `KeepEqual` and `DropEqual` actions. - - Regex capture groups are available. - type: string - type: object - type: array - type: object - pgbouncer: - description: The PgBouncer configuration - properties: - authQuery: - description: |- - The query that will be used to download the hash of the password - of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". - In case it is specified, also an AuthQuerySecret has to be specified and - no automatic CNPG Cluster integration will be triggered. - type: string - authQuerySecret: - description: |- - The credentials of the user that need to be used for the authentication - query. In case it is specified, also an AuthQuery - (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") - has to be specified and no automatic CNPG Cluster integration will be triggered. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - parameters: - additionalProperties: - type: string - description: |- - Additional parameters to be passed to PgBouncer - please check - the CNPG documentation for a list of options you can configure - type: object - paused: - default: false - description: |- - When set to `true`, PgBouncer will disconnect from the PostgreSQL - server, first waiting for all queries to complete, and pause all new - client connections until this value is set to `false` (default). Internally, - the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
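# NOTE: a hedged sketch of the pgbouncer stanza documented above; the parameter
# names are ordinary PgBouncer settings passed through verbatim, and the values
# are hypothetical.
#
#   spec:
#     pgbouncer:
#       poolMode: transaction
#       paused: false
#       parameters:
#         max_client_conn: "1000"
#         default_pool_size: "10"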
- type: boolean - pg_hba: - description: |- - PostgreSQL Host Based Authentication rules (lines to be appended - to the pg_hba.conf file) - items: - type: string - type: array - poolMode: - default: session - description: 'The pool mode. Default: `session`.' - enum: - - session - - transaction - type: string - type: object - serviceTemplate: - description: Template for the Service to be created - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - name: - description: The name of the resource. Only supported for - certain types - type: string - type: object - spec: - description: |- - Specification of the desired behavior of the service. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - allocateLoadBalancerNodePorts: - description: |- - allocateLoadBalancerNodePorts defines if NodePorts will be automatically - allocated for services with type LoadBalancer. Default is "true". It - may be set to "false" if the cluster load-balancer does not rely on - NodePorts. If the caller requests specific NodePorts (by specifying a - value), those requests will be respected, regardless of this field. - This field may only be set for services with type LoadBalancer and will - be cleared if the type is changed to any other type. - type: boolean - clusterIP: - description: |- - clusterIP is the IP address of the service and is usually assigned - randomly. If an address is specified manually, is in-range (as per - system configuration), and is not in use, it will be allocated to the - service; otherwise creation of the service will fail. This field may not - be changed through updates unless the type field is also being changed - to ExternalName (which requires this field to be blank) or the type - field is being changed from ExternalName (in which case this field may - optionally be specified, as described above). Valid values are "None", - empty string (""), or a valid IP address. Setting this to "None" makes a - "headless service" (no virtual IP), which is useful when direct endpoint - connections are preferred and proxying is not required. Only applies to - types ClusterIP, NodePort, and LoadBalancer. If this field is specified - when creating a Service of type ExternalName, creation will fail. This - field will be wiped when updating a Service to type ExternalName. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - type: string - clusterIPs: - description: |- - ClusterIPs is a list of IP addresses assigned to this service, and are - usually assigned randomly.
If an address is specified manually, is - in-range (as per system configuration), and is not in use, it will be - allocated to the service; otherwise creation of the service will fail. - This field may not be changed through updates unless the type field is - also being changed to ExternalName (which requires this field to be - empty) or the type field is being changed from ExternalName (in which - case this field may optionally be specified, as described above). Valid - values are "None", empty string (""), or a valid IP address. Setting - this to "None" makes a "headless service" (no virtual IP), which is - useful when direct endpoint connections are preferred and proxying is - not required. Only applies to types ClusterIP, NodePort, and - LoadBalancer. If this field is specified when creating a Service of type - ExternalName, creation will fail. This field will be wiped when updating - a Service to type ExternalName. If this field is not specified, it will - be initialized from the clusterIP field. If this field is specified, - clients must ensure that clusterIPs[0] and clusterIP have the same - value. - - This field may hold a maximum of two entries (dual-stack IPs, in either order). - These IPs must correspond to the values of the ipFamilies field. Both - clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - items: - type: string - type: array - x-kubernetes-list-type: atomic - externalIPs: - description: |- - externalIPs is a list of IP addresses for which nodes in the cluster - will also accept traffic for this service. These IPs are not managed by - Kubernetes. The user is responsible for ensuring that traffic arrives - at a node with this IP. A common example is external load-balancers - that are not part of the Kubernetes system. - items: - type: string - type: array - x-kubernetes-list-type: atomic - externalName: - description: |- - externalName is the external reference that discovery mechanisms will - return as an alias for this service (e.g. a DNS CNAME record). No - proxying will be involved. Must be a lowercase RFC-1123 hostname - (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". - type: string - externalTrafficPolicy: - description: |- - externalTrafficPolicy describes how nodes distribute service traffic they - receive on one of the Service's "externally-facing" addresses (NodePorts, - ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure - the service in a way that assumes that external load balancers will take care - of balancing the service traffic between nodes, and so each node will deliver - traffic only to the node-local endpoints of the service, without masquerading - the client source IP. (Traffic mistakenly sent to a node with no endpoints will - be dropped.) The default value, "Cluster", uses the standard behavior of - routing to all endpoints evenly (possibly modified by topology and other - features). Note that traffic sent to an External IP or LoadBalancer IP from - within the cluster will always get "Cluster" semantics, but clients sending to - a NodePort from within the cluster may need to take traffic policy into account - when picking a node. - type: string - healthCheckNodePort: - description: |- - healthCheckNodePort specifies the healthcheck nodePort for the service. - This only applies when type is set to LoadBalancer and - externalTrafficPolicy is set to Local.
If a value is specified, is - in-range, and is not in use, it will be used. If not specified, a value - will be automatically allocated. External systems (e.g. load-balancers) - can use this port to determine if a given node holds endpoints for this - service or not. If this field is specified when creating a Service - which does not need it, creation will fail. This field will be wiped - when updating a Service to no longer need it (e.g. changing type). - This field cannot be updated once set. - format: int32 - type: integer - internalTrafficPolicy: - description: |- - InternalTrafficPolicy describes how nodes distribute service traffic they - receive on the ClusterIP. If set to "Local", the proxy will assume that pods - only want to talk to endpoints of the service on the same node as the pod, - dropping the traffic if there are no local endpoints. The default value, - "Cluster", uses the standard behavior of routing to all endpoints evenly - (possibly modified by topology and other features). - type: string - ipFamilies: - description: |- - IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this - service. This field is usually assigned automatically based on cluster - configuration and the ipFamilyPolicy field. If this field is specified - manually, the requested family is available in the cluster, - and ipFamilyPolicy allows it, it will be used; otherwise creation of - the service will fail. This field is conditionally mutable: it allows - for adding or removing a secondary IP family, but it does not allow - changing the primary IP family of the Service. Valid values are "IPv4" - and "IPv6". This field only applies to Services of types ClusterIP, - NodePort, and LoadBalancer, and does apply to "headless" services. - This field will be wiped when updating a Service to type ExternalName. - - This field may hold a maximum of two entries (dual-stack families, in - either order). These families must correspond to the values of the - clusterIPs field, if specified. Both clusterIPs and ipFamilies are - governed by the ipFamilyPolicy field. - items: - description: |- - IPFamily represents the IP Family (IPv4 or IPv6). This type is used - to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). - type: string - type: array - x-kubernetes-list-type: atomic - ipFamilyPolicy: - description: |- - IPFamilyPolicy represents the dual-stack-ness requested or required by - this Service. If there is no value provided, then this field will be set - to SingleStack. Services can be "SingleStack" (a single IP family), - "PreferDualStack" (two IP families on dual-stack configured clusters or - a single IP family on single-stack clusters), or "RequireDualStack" - (two IP families on dual-stack configured clusters, otherwise fail). The - ipFamilies and clusterIPs fields depend on the value of this field. This - field will be wiped when updating a service to type ExternalName. - type: string - loadBalancerClass: - description: |- - loadBalancerClass is the class of the load balancer implementation this Service belongs to. - If specified, the value of this field must be a label-style identifier, with an optional prefix, - e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. - This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load - balancer implementation is used, today this is typically done through the cloud provider integration, - but should apply for any default implementation. 
If set, it is assumed that a load balancer
- implementation is watching for Services with a matching class. Any default load balancer
- implementation (e.g. cloud providers) should ignore Services that set this field.
- This field can only be set when creating or updating a Service to type 'LoadBalancer'.
- Once set, it cannot be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
- type: string
- loadBalancerIP:
- description: |-
- Only applies to Service Type: LoadBalancer.
- This feature depends on whether the underlying cloud-provider supports specifying
- the loadBalancerIP when a load balancer is created.
- This field will be ignored if the cloud-provider does not support the feature.
- Deprecated: This field was under-specified and its meaning varies across implementations.
- Using it is non-portable and it may not support dual-stack.
- Users are encouraged to use implementation-specific annotations when available.
- type: string
- loadBalancerSourceRanges:
- description: |-
- If specified and supported by the platform, traffic through the cloud-provider
- load-balancer will be restricted to the specified client IPs. This field will be ignored if the
- cloud-provider does not support the feature.
- More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- ports:
- description: |-
- The list of ports that are exposed by this service.
- More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
- items:
- description: ServicePort contains information on service's
- port.
- properties:
- appProtocol:
- description: |-
- The application protocol for this port.
- This is used as a hint for implementations to offer richer behavior for protocols that they understand.
- This field follows standard Kubernetes label syntax.
- Valid values are either:
-
- * Un-prefixed protocol names - reserved for IANA standard service names (as per
- RFC-6335 and https://www.iana.org/assignments/service-names).
-
- * Kubernetes-defined prefixed names:
- * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
- * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
- * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
-
- * Other protocols should use implementation-defined prefixed names such as
- mycompany.com/my-custom-protocol.
- type: string
- name:
- description: |-
- The name of this port within the service. This must be a DNS_LABEL.
- All ports within a ServiceSpec must have unique names. When considering
- the endpoints for a Service, this must match the 'name' field in the
- EndpointPort.
- Optional if only one ServicePort is defined on this service.
- type: string
- nodePort:
- description: |-
- The port on each node on which this service is exposed when type is
- NodePort or LoadBalancer. Usually assigned by the system. If a value is
- specified, in-range, and not in use, it will be used; otherwise the
- operation will fail. If not specified, a port will be allocated if this
- Service requires one. If this field is specified when creating a
- Service which does not need it, creation will fail. This field will be
- wiped when updating a Service to no longer need it (e.g.
changing type - from NodePort to ClusterIP). - More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - format: int32 - type: integer - port: - description: The port that will be exposed by this service. - format: int32 - type: integer - protocol: - default: TCP - description: |- - The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". - Default is TCP. - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the pods targeted by the service. - Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - If this is a string, it will be looked up as a named port in the - target Pod's container ports. If this is not specified, the value - of the 'port' field is used (an identity map). - This field is ignored for services with clusterIP=None, and should be - omitted or set equal to the 'port' field. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service - x-kubernetes-int-or-string: true - required: - - port - type: object - type: array - x-kubernetes-list-map-keys: - - port - - protocol - x-kubernetes-list-type: map - publishNotReadyAddresses: - description: |- - publishNotReadyAddresses indicates that any agent which deals with endpoints for this - Service should disregard any indications of ready/not-ready. - The primary use case for setting this field is for a StatefulSet's Headless Service to - propagate SRV DNS records for its Pods for the purpose of peer discovery. - The Kubernetes controllers that generate Endpoints and EndpointSlice resources for - Services interpret this to mean that all endpoints are considered "ready" even if the - Pods themselves are not. Agents which consume only Kubernetes generated endpoints - through the Endpoints or EndpointSlice resources can safely assume this behavior. - type: boolean - selector: - additionalProperties: - type: string - description: |- - Route service traffic to pods with label keys and values matching this - selector. If empty or not present, the service is assumed to have an - external process managing its endpoints, which Kubernetes will not - modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - Ignored if type is ExternalName. - More info: https://kubernetes.io/docs/concepts/services-networking/service/ - type: object - x-kubernetes-map-type: atomic - sessionAffinity: - description: |- - Supports "ClientIP" and "None". Used to maintain session affinity. - Enable client IP based session affinity. - Must be ClientIP or None. - Defaults to None. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - type: string - sessionAffinityConfig: - description: sessionAffinityConfig contains the configurations - of session affinity. - properties: - clientIP: - description: clientIP contains the configurations of Client - IP based session affinity. - properties: - timeoutSeconds: - description: |- - timeoutSeconds specifies the seconds of ClientIP type session sticky time. - The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - Default value is 10800(for 3 hours). - format: int32 - type: integer - type: object - type: object - trafficDistribution: - description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. 
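The session-affinity fields just described combine as in this minimal sketch; the 10800-second timeout simply restates the documented default:

spec:
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800        # default: 3 hours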
If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is a beta field and requires enabling ServiceTrafficDistribution feature. - type: string - type: - description: |- - type determines how the Service is exposed. Defaults to ClusterIP. Valid - options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - "ClusterIP" allocates a cluster-internal IP address for load-balancing - to endpoints. Endpoints are determined by the selector or if that is not - specified, by manual construction of an Endpoints object or - EndpointSlice objects. If clusterIP is "None", no virtual IP is - allocated and the endpoints are published as a set of endpoints rather - than a virtual IP. - "NodePort" builds on ClusterIP and allocates a port on every node which - routes to the same endpoints as the clusterIP. - "LoadBalancer" builds on NodePort and creates an external load-balancer - (if supported in the current cloud) which routes to the same endpoints - as the clusterIP. - "ExternalName" aliases this service to the specified externalName. - Several other fields do not apply to ExternalName services. - More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types - type: string - type: object - type: object - template: - description: The template of the Pod to be created - properties: - metadata: - description: |- - Standard object's metadata. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is an unstructured key value map stored with a resource that may be - set by external tools to store and retrieve arbitrary metadata. They are not - queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations - type: object - labels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: http://kubernetes.io/docs/user-guide/labels - type: object - name: - description: The name of the resource. Only supported for - certain types - type: string - type: object - spec: - description: |- - Specification of the desired behavior of the pod. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - activeDeadlineSeconds: - description: |- - Optional duration in seconds the pod may be active on the node relative to - StartTime before the system will actively try to mark it failed and kill associated containers. - Value must be a positive integer. - format: int64 - type: integer - affinity: - description: If specified, the pod's scheduling constraints - properties: - nodeAffinity: - description: Describes node affinity scheduling rules - for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. 
- for each node that meets all of the scheduling requirements (resource
- request, requiredDuringScheduling affinity expressions, etc.),
- compute a sum by iterating through the elements of this field and adding
- "weight" to the sum if the node matches the corresponding matchExpressions; the
- node(s) with the highest sum are the most preferred.
- items:
- description: |-
- An empty preferred scheduling term matches all objects with implicit weight 0
- (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
- properties:
- preference:
- description: A node selector term, associated
- with the corresponding weight.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: |-
- A node selector requirement is a selector that contains values, a key, and an operator
- that relates the key and values.
- properties:
- key:
- description: The label key that the
- selector applies to.
- type: string
- operator:
- description: |-
- Represents a key's relationship to a set of values.
- Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
- type: string
- values:
- description: |-
- An array of string values. If the operator is In or NotIn,
- the values array must be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. If the operator is Gt or Lt, the values
- array must have a single element, which will be interpreted as an integer.
- This array is replaced during a strategic merge patch.
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- required:
- - key
- - operator
- type: object
- type: array
- x-kubernetes-list-type: atomic
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: |-
- A node selector requirement is a selector that contains values, a key, and an operator
- that relates the key and values.
- properties:
- key:
- description: The label key that the
- selector applies to.
- type: string
- operator:
- description: |-
- Represents a key's relationship to a set of values.
- Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
- type: string
- values:
- description: |-
- An array of string values. If the operator is In or NotIn,
- the values array must be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. If the operator is Gt or Lt, the values
- array must have a single element, which will be interpreted as an integer.
- This array is replaced during a strategic merge patch.
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- required:
- - key
- - operator
- type: object
- type: array
- x-kubernetes-list-type: atomic
- type: object
- x-kubernetes-map-type: atomic
- weight:
- description: Weight associated with matching
- the corresponding nodeSelectorTerm, in the
- range 1-100.
- format: int32
- type: integer
- required:
- - preference
- - weight
- type: object
- type: array
- x-kubernetes-list-type: atomic
- requiredDuringSchedulingIgnoredDuringExecution:
- description: |-
- If the affinity requirements specified by this field are not met at
- scheduling time, the pod will not be scheduled onto the node.
- If the affinity requirements specified by this field cease to be met
- at some point during pod execution (e.g. due to an update), the system
- may or may not try to eventually evict the pod from its node.
- properties:
- nodeSelectorTerms:
- description: Required.
A list of node selector
- terms. The terms are ORed.
- items:
- description: |-
- A null or empty node selector term matches no objects. Their
- requirements are ANDed.
- The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
- properties:
- matchExpressions:
- description: A list of node selector requirements
- by node's labels.
- items:
- description: |-
- A node selector requirement is a selector that contains values, a key, and an operator
- that relates the key and values.
- properties:
- key:
- description: The label key that the
- selector applies to.
- type: string
- operator:
- description: |-
- Represents a key's relationship to a set of values.
- Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
- type: string
- values:
- description: |-
- An array of string values. If the operator is In or NotIn,
- the values array must be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. If the operator is Gt or Lt, the values
- array must have a single element, which will be interpreted as an integer.
- This array is replaced during a strategic merge patch.
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- required:
- - key
- - operator
- type: object
- type: array
- x-kubernetes-list-type: atomic
- matchFields:
- description: A list of node selector requirements
- by node's fields.
- items:
- description: |-
- A node selector requirement is a selector that contains values, a key, and an operator
- that relates the key and values.
- properties:
- key:
- description: The label key that the
- selector applies to.
- type: string
- operator:
- description: |-
- Represents a key's relationship to a set of values.
- Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
- type: string
- values:
- description: |-
- An array of string values. If the operator is In or NotIn,
- the values array must be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. If the operator is Gt or Lt, the values
- array must have a single element, which will be interpreted as an integer.
- This array is replaced during a strategic merge patch.
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- required:
- - key
- - operator
- type: object
- type: array
- x-kubernetes-list-type: atomic
- type: object
- x-kubernetes-map-type: atomic
- type: array
- x-kubernetes-list-type: atomic
- required:
- - nodeSelectorTerms
- type: object
- x-kubernetes-map-type: atomic
- type: object
- podAffinity:
- description: Describes pod affinity scheduling rules (e.g.
- co-locate this pod in the same node, zone, etc. as some
- other pod(s)).
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- description: |-
- The scheduler will prefer to schedule pods to nodes that satisfy
- the affinity expressions specified by this field, but it may choose
- a node that violates one or more of the expressions. The node that is
- most preferred is the one with the greatest sum of weights, i.e.
- for each node that meets all of the scheduling requirements (resource
- request, requiredDuringScheduling affinity expressions, etc.),
- compute a sum by iterating through the elements of this field and adding
- "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
- node(s) with the highest sum are the most preferred.
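A minimal sketch of the node-affinity shapes defined above (label values such as the zone name are hypothetical):

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:           # terms are ORed
        - matchExpressions:        # requirements within a term are ANDed
            - key: kubernetes.io/arch
              operator: In
              values: ["amd64"]
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50                 # 1-100; summed per node to rank candidates
        preference:
          matchExpressions:
            - key: topology.kubernetes.io/zone
              operator: In
              values: ["zone-a"]   # hypothetical zone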
- items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, - associated with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
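A minimal sketch of one hard pod-affinity term as just described (the app: example label is hypothetical); it co-locates the pod in the same zone as pods carrying that label:

affinity:
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: topology.kubernetes.io/zone
        labelSelector:
          matchLabels:
            app: example           # hypothetical label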
- items:
- description: |-
- Defines a set of pods (namely those matching the labelSelector
- relative to the given namespace(s)) that this pod should be
- co-located (affinity) or not co-located (anti-affinity) with,
- where co-located is defined as running on a node whose value of
- the label with key <topologyKey> matches that of any node on which
- a pod of the set of pods is running
- properties:
- labelSelector:
- description: |-
- A label query over a set of resources, in this case pods.
- If it's null, this PodAffinityTerm matches with no Pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: |-
- A label selector requirement is a selector that contains values, a key, and an operator that
- relates the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: |-
- operator represents a key's relationship to a set of values.
- Valid operators are In, NotIn, Exists and DoesNotExist.
- type: string
- values:
- description: |-
- values is an array of string values. If the operator is In or NotIn,
- the values array must be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- required:
- - key
- - operator
- type: object
- type: array
- x-kubernetes-list-type: atomic
- matchLabels:
- additionalProperties:
- type: string
- description: |-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions, whose key field is "key", the
- operator is "In", and the values array contains only "value". The requirements are ANDed.
- type: object
- type: object
- x-kubernetes-map-type: atomic
- matchLabelKeys:
- description: |-
- MatchLabelKeys is a set of pod label keys to select which pods will
- be taken into consideration. The keys are used to lookup values from the
- incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
- to select the group of existing pods which pods will be taken into consideration
- for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
- pod labels will be ignored. The default value is empty.
- The same key is forbidden to exist in both matchLabelKeys and labelSelector.
- Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- mismatchLabelKeys:
- description: |-
- MismatchLabelKeys is a set of pod label keys to select which pods will
- be taken into consideration. The keys are used to lookup values from the
- incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
- to select the group of existing pods which pods will be taken into consideration
- for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
- pod labels will be ignored. The default value is empty.
- The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
- Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, - etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. 
- for each node that meets all of the scheduling requirements (resource
- request, requiredDuringScheduling anti-affinity expressions, etc.),
- compute a sum by iterating through the elements of this field and adding
- "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
- node(s) with the highest sum are the most preferred.
- items:
- description: The weights of all of the matched WeightedPodAffinityTerm
- fields are added per-node to find the most preferred
- node(s)
- properties:
- podAffinityTerm:
- description: Required. A pod affinity term,
- associated with the corresponding weight.
- properties:
- labelSelector:
- description: |-
- A label query over a set of resources, in this case pods.
- If it's null, this PodAffinityTerm matches with no Pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The
- requirements are ANDed.
- items:
- description: |-
- A label selector requirement is a selector that contains values, a key, and an operator that
- relates the key and values.
- properties:
- key:
- description: key is the label
- key that the selector applies
- to.
- type: string
- operator:
- description: |-
- operator represents a key's relationship to a set of values.
- Valid operators are In, NotIn, Exists and DoesNotExist.
- type: string
- values:
- description: |-
- values is an array of string values. If the operator is In or NotIn,
- the values array must be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- required:
- - key
- - operator
- type: object
- type: array
- x-kubernetes-list-type: atomic
- matchLabels:
- additionalProperties:
- type: string
- description: |-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions, whose key field is "key", the
- operator is "In", and the values array contains only "value". The requirements are ANDed.
- type: object
- type: object
- x-kubernetes-map-type: atomic
- matchLabelKeys:
- description: |-
- MatchLabelKeys is a set of pod label keys to select which pods will
- be taken into consideration. The keys are used to lookup values from the
- incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
- to select the group of existing pods which pods will be taken into consideration
- for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
- pod labels will be ignored. The default value is empty.
- The same key is forbidden to exist in both matchLabelKeys and labelSelector.
- Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- mismatchLabelKeys:
- description: |-
- MismatchLabelKeys is a set of pod label keys to select which pods will
- be taken into consideration. The keys are used to lookup values from the
- incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
- to select the group of existing pods which pods will be taken into consideration
- for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
- pod labels will be ignored. The default value is empty.
- The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. 
due to a pod label update), the
- system may or may not try to eventually evict the pod from its node.
- When there are multiple elements, the lists of nodes corresponding to each
- podAffinityTerm are intersected, i.e. all terms must be satisfied.
- items:
- description: |-
- Defines a set of pods (namely those matching the labelSelector
- relative to the given namespace(s)) that this pod should be
- co-located (affinity) or not co-located (anti-affinity) with,
- where co-located is defined as running on a node whose value of
- the label with key <topologyKey> matches that of any node on which
- a pod of the set of pods is running
- properties:
- labelSelector:
- description: |-
- A label query over a set of resources, in this case pods.
- If it's null, this PodAffinityTerm matches with no Pods.
- properties:
- matchExpressions:
- description: matchExpressions is a list
- of label selector requirements. The requirements
- are ANDed.
- items:
- description: |-
- A label selector requirement is a selector that contains values, a key, and an operator that
- relates the key and values.
- properties:
- key:
- description: key is the label key
- that the selector applies to.
- type: string
- operator:
- description: |-
- operator represents a key's relationship to a set of values.
- Valid operators are In, NotIn, Exists and DoesNotExist.
- type: string
- values:
- description: |-
- values is an array of string values. If the operator is In or NotIn,
- the values array must be non-empty. If the operator is Exists or DoesNotExist,
- the values array must be empty. This array is replaced during a strategic
- merge patch.
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- required:
- - key
- - operator
- type: object
- type: array
- x-kubernetes-list-type: atomic
- matchLabels:
- additionalProperties:
- type: string
- description: |-
- matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions, whose key field is "key", the
- operator is "In", and the values array contains only "value". The requirements are ANDed.
- type: object
- type: object
- x-kubernetes-map-type: atomic
- matchLabelKeys:
- description: |-
- MatchLabelKeys is a set of pod label keys to select which pods will
- be taken into consideration. The keys are used to lookup values from the
- incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
- to select the group of existing pods which pods will be taken into consideration
- for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
- pod labels will be ignored. The default value is empty.
- The same key is forbidden to exist in both matchLabelKeys and labelSelector.
- Also, matchLabelKeys cannot be set when labelSelector isn't set.
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- mismatchLabelKeys:
- description: |-
- MismatchLabelKeys is a set of pod label keys to select which pods will
- be taken into consideration. The keys are used to lookup values from the
- incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
- to select the group of existing pods which pods will be taken into consideration
- for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
- pod labels will be ignored. The default value is empty.
- The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - type: object - automountServiceAccountToken: - description: AutomountServiceAccountToken indicates whether - a service account token should be automatically mounted. - type: boolean - containers: - description: |- - List of containers belonging to the pod. - Containers cannot currently be added or removed. - There must be at least one container in a Pod. - Cannot be updated. - items: - description: A single application container that you want - to run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. 
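The affinity stanza closes above; as a brief aside, a minimal sketch (hypothetical label) of the anti-affinity preference it describes, which spreads matching pods across nodes:

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname   # at most one matching pod per node, when possible
          labelSelector:
            matchLabels:
              app: example         # hypothetical label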
- The container image's CMD is used if this is not provided.
- Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
- to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
- produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
- of whether the variable exists or not. Cannot be updated.
- More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- command:
- description: |-
- Entrypoint array. Not executed within a shell.
- The container image's ENTRYPOINT is used if this is not provided.
- Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
- to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
- produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
- of whether the variable exists or not. Cannot be updated.
- More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
- items:
- type: string
- type: array
- x-kubernetes-list-type: atomic
- env:
- description: |-
- List of environment variables to set in the container.
- Cannot be updated.
- items:
- description: EnvVar represents an environment variable
- present in a Container.
- properties:
- name:
- description: Name of the environment variable.
- Must be a C_IDENTIFIER.
- type: string
- value:
- description: |-
- Variable references $(VAR_NAME) are expanded
- using the previously defined environment variables in the container and
- any service environment variables. If a variable cannot be resolved,
- the reference in the input string will be unchanged. Double $$ are reduced
- to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
- "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
- Escaped references will never be expanded, regardless of whether the variable
- exists or not.
- Defaults to "".
- type: string
- valueFrom:
- description: Source for the environment variable's
- value. Cannot be used if value is not empty.
- properties:
- configMapKeyRef:
- description: Selects a key of a ConfigMap.
- properties:
- key:
- description: The key to select.
- type: string
- name:
- default: ""
- description: |-
- Name of the referent.
- This field is effectively required, but due to backwards compatibility is
- allowed to be empty. Instances of this type with an empty value here are
- almost certainly wrong.
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
- type: string
- optional:
- description: Specify whether the ConfigMap
- or its key must be defined
- type: boolean
- required:
- - key
- type: object
- x-kubernetes-map-type: atomic
- fieldRef:
- description: |-
- Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
- spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
- properties:
- apiVersion:
- description: Version of the schema the
- FieldPath is written in terms of, defaults
- to "v1".
- type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of - a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
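A minimal sketch combining the env and envFrom sources described above (the Secret and ConfigMap names are hypothetical):

env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name   # downward API
  - name: DB_PASSWORD              # hypothetical variable
    valueFrom:
      secretKeyRef:
        name: example-secret       # hypothetical Secret
        key: password
envFrom:
  - configMapRef:
      name: example-config         # hypothetical ConfigMap; keys must be C_IDENTIFIERs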
[elided generated CRD schema (deleted lines, flattened in extraction): the tail of the container envFrom secretRef selector (name, optional), followed by the container fields image, imagePullPolicy, lifecycle (postStart and preStop handlers, each offering exec, httpGet, sleep, and the deprecated tcpSocket), and the full livenessProbe block (exec, failureThreshold, grpc, httpGet, initialDelaySeconds, periodSeconds, successThreshold, tcpSocket, terminationGracePeriodSeconds, timeoutSeconds).]
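The block summarized above is the standard corev1 container schema embedded in the pod template. As a reader aid, a minimal fragment exercising the lifecycle and livenessProbe fields might look like the following sketch; the container name, image, paths, and port are hypothetical, and only fields named in the deleted schema are used.

    # Hypothetical pod-template fragment (names and image are placeholders).
    containers:
      - name: app                       # DNS_LABEL, unique within the pod
        image: registry.example.com/app:1.0
        imagePullPolicy: IfNotPresent   # default when the tag is not :latest
        lifecycle:
          postStart:
            exec:
              # exec'd directly, not run inside a shell
              command: ["/bin/touch", "/tmp/started"]
          preStop:
            httpGet:
              path: /shutdown
              port: 8080                # int-or-string: number or IANA_SVC_NAME
              scheme: HTTP              # defaults to HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 10       # delay before the first probe
          periodSeconds: 10             # default 10, minimum 1
          failureThreshold: 3           # default 3, minimum 1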
[elided generated CRD schema (deleted lines): the container name and ports list (containerPort, hostIP, hostPort, name, protocol), the full readinessProbe block (same handler and timing fields as livenessProbe), resizePolicy (resourceName, restartPolicy), and the opening of resources (claims).]
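The ports and readinessProbe fields deleted here have the same corev1 shapes; a short sketch under the same assumptions (hypothetical names):

    containers:
      - name: app
        ports:
          - name: http                  # IANA_SVC_NAME, unique within the pod
            containerPort: 8080         # must satisfy 0 < x < 65536
            protocol: TCP               # UDP, TCP, or SCTP; defaults to TCP
        readinessProbe:
          tcpSocket:
            port: http                  # a named port is accepted (int-or-string)
          periodSeconds: 5
          successThreshold: 1           # must stay 1 for liveness and startup probes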
[elided generated CRD schema (deleted lines): the rest of resources (claims.request, limits, requests with the quantity pattern), the container restartPolicy field (settable only on init containers, where the sole allowed value "Always" turns the init container into a continually restarted "sidecar"), and the opening of securityContext (allowPrivilegeEscalation, appArmorProfile).]
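The restartPolicy wording above describes the "sidecar" pattern: an init container with restartPolicy: Always keeps running alongside the regular containers. A sketch combining it with the resources schema (image and names hypothetical):

    initContainers:
      - name: log-shipper               # hypothetical sidecar container
        image: registry.example.com/shipper:1.0
        restartPolicy: Always           # only allowed value; init container becomes a sidecar
        resources:
          requests:
            cpu: 100m                   # quantities follow the pattern shown above
            memory: 128Mi
          limits:
            memory: 256Mi               # requests cannot exceed limits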
[elided generated CRD schema (deleted lines): the remaining securityContext fields, namely appArmorProfile (localhostProfile, type), capabilities (add, drop), privileged, procMount, readOnlyRootFilesystem, runAsGroup, runAsNonRoot, runAsUser, seLinuxOptions (level, role, type, user), seccompProfile (localhostProfile, type), and windowsOptions (gmsaCredentialSpec, gmsaCredentialSpecName, hostProcess, runAsUserName), followed by the opening of startupProbe.]
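Since the deleted securityContext schema enumerates the container-level knobs, a hardened-container sketch may help; the values are illustrative only:

    securityContext:                    # container level; overrides pod-level fields
      allowPrivilegeEscalation: false   # sets no_new_privs on the container process
      readOnlyRootFilesystem: true
      runAsNonRoot: true
      runAsUser: 10001                  # UID; cannot be set when spec.os.name is windows
      capabilities:
        drop: ["ALL"]                   # POSIX capabilities to remove
      seccompProfile:
        type: RuntimeDefault            # Localhost | RuntimeDefault | Unconfined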
[elided generated CRD schema (deleted lines): the rest of startupProbe (recommended for slow-starting workloads; same handler and timing fields as the other probes), plus stdin, stdinOnce, terminationMessagePath, and terminationMessagePolicy.]
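A startupProbe gates the other probes until it succeeds, which the schema above recommends for workloads that warm a cache or load data at boot; a sketch (endpoint and thresholds hypothetical):

    startupProbe:                       # liveness/readiness wait until this succeeds
      httpGet:
        path: /healthz
        port: 8080
      failureThreshold: 30              # 30 * periodSeconds allows a slow warm-up
      periodSeconds: 10
    terminationMessagePath: /dev/termination-log    # the documented default
    terminationMessagePolicy: FallbackToLogsOnError # use last log chunk on error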
[elided generated CRD schema (deleted lines): tty, volumeDevices (devicePath, name), volumeMounts (mountPath, mountPropagation, name, readOnly, recursiveReadOnly, subPath, subPathExpr), and workingDir.]
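The volumeMounts fields removed here map one-to-one onto entries like the following; the volume and path names are hypothetical:

    volumeMounts:
      - name: data                      # must match the name of a pod volume
        mountPath: /var/lib/app         # must not contain ':'
        readOnly: true
        subPath: current                # defaults to "" (the volume's root)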
[elided generated CRD schema (deleted lines): the end of the containers list (name is the only required field), followed by the pod-level fields dnsConfig (nameservers, options, searches), dnsPolicy, enableServiceLinks, and the opening of ephemeralContainers (args).]
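The dnsPolicy/dnsConfig interplay described above can be summarized in a sketch; the resolver IP and search domain are placeholders:

    dnsPolicy: "None"                   # ClusterFirstWithHostNet | ClusterFirst | Default | None
    dnsConfig:                          # merged with the policy-generated configuration
      nameservers:
        - 10.96.0.10                    # hypothetical cluster resolver
      searches:
        - svc.cluster.local
      options:
        - name: ndots
          value: "2"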
[elided generated CRD schema (deleted lines): the ephemeral container's command and env fields (EnvVar with name, value, and valueFrom sources: configMapKeyRef, fieldRef, resourceFieldRef, secretKeyRef), plus envFrom (configMapRef, prefix, secretRef).]
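The valueFrom sources enumerated above resolve an environment variable from pod metadata, resource fields, ConfigMaps, or Secrets; a sketch with hypothetical object names:

    env:
      - name: POD_NAME                  # must be a C_IDENTIFIER
        valueFrom:
          fieldRef:
            fieldPath: metadata.name    # one of the supported pod fields
      - name: DB_PASSWORD
        valueFrom:
          secretKeyRef:
            name: app-credentials       # hypothetical Secret
            key: password
    envFrom:
      - prefix: CFG_                    # prepended to every imported key
        configMapRef:
          name: app-config              # hypothetical ConfigMap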
[elided generated CRD schema (deleted lines): the tail of the ephemeral container's envFrom secretRef selector, its image and imagePullPolicy, a lifecycle block marked "not allowed for ephemeral containers" (repeating the postStart/preStop handler schema of regular containers), and a livenessProbe block likewise marked as not allowed.]
- format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies a connection to - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the ephemeral container specified as a DNS_LABEL. - This name must be unique among all containers, init containers and ephemeral containers. - type: string - ports: - description: Ports are not allowed for ephemeral containers. - items: - description: ContainerPort represents a network port - in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external - port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies a command to execute - in the container. 
- properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies a GRPC HealthCheckRequest. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies an HTTP GET request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies a connection to - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
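A sketch of a readiness probe using the `grpc` handler described above, assuming the container serves the standard gRPC health-checking protocol; the port is hypothetical.

    # illustrative fragment of a container spec
    readinessProbe:
      grpc:
        port: 9090
        service: ""                # empty string: the server's default health service is used
      periodSeconds: 5
      timeoutSeconds: 1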
- x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources - already allocated to the pod. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
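The `resizePolicy` list above admits one entry per resource name. A plausible combination is to resize CPU in place while restarting on memory changes; the two values shown are the two supported restart policies.

    # illustrative fragment of a container spec
    resizePolicy:
      - resourceName: cpu
        restartPolicy: NotRequired       # resize in place (the default)
      - resourceName: memory
        restartPolicy: RestartContainer  # restart the container to apply the new limit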
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - Restart policy for the container to manage the restart behavior of each - container within a pod. - This may only be set for init containers. You cannot set this field on - ephemeral containers. - type: string - securityContext: - description: |- - Optional: SecurityContext defines the security options the ephemeral container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by this container. If set, this profile - overrides the pod's appArmorProfile. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. 
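A common hardening baseline expressed against the `securityContext` fields above; every field shown maps one-to-one onto a property in this schema (the read-only root filesystem field is described just below).

    # illustrative fragment of a container spec
    securityContext:
      allowPrivilegeEscalation: false
      privileged: false
      readOnlyRootFilesystem: true
      capabilities:
        drop: ["ALL"]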
- The default value is Default which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that - applies to the container. - type: string - role: - description: Role is a SELinux role label that - applies to the container. - type: string - type: - description: Type is a SELinux type label that - applies to the container. - type: string - user: - description: User is a SELinux user label that - applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. 
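Continuing the same stanza, the user/group and seccomp fields above combine as follows; the numeric IDs are hypothetical.

    # illustrative fragment of a container spec
    securityContext:
      runAsNonRoot: true
      runAsUser: 10001             # hypothetical non-root UID
      runAsGroup: 10001
      seccompProfile:
        type: RuntimeDefault       # one of Localhost, RuntimeDefault, Unconfined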
- type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: Probes are not allowed for ephemeral containers. - properties: - exec: - description: Exec specifies a command to execute - in the container. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies a GRPC HealthCheckRequest. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies an HTTP GET request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. 
- This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies a connection to - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. 
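A startup probe is typically tuned for a slow first boot: the sketch below tolerates up to 30 × 10 s = 300 s before the kubelet gives up, after which the liveness probe takes over. The command and marker file are hypothetical.

    # illustrative fragment of a container spec
    startupProbe:
      exec:
        command: ["cat", "/tmp/ready"]   # hypothetical readiness marker file
      failureThreshold: 30
      periodSeconds: 10
      successThreshold: 1                # must be 1 for startup probes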
If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - targetContainerName: - description: |- - If set, the name of the container from PodSpec that this ephemeral container targets. - The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - If not set then the ephemeral container uses the namespaces configured in the Pod spec. - - The container runtime must implement support for this feature. If the runtime does not - support namespace targeting then the result of setting this field is undefined. - type: string - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a - raw block device within a container. - properties: - devicePath: - description: devicePath is the path inside of - the container that the device will be mapped - to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - devicePath - x-kubernetes-list-type: map - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. - Cannot be updated. - items: - description: VolumeMount describes a mounting of a - Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified - (which defaults to None). - type: string - name: - description: This must match the Name of a Volume. 
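A `volumeMounts` entry matching the schema above: `name` must reference a volume declared at pod level, and the list is keyed on `mountPath`. The names and paths are hypothetical.

    # illustrative fragment of a container spec
    volumeMounts:
      - name: data                 # must match a pod-level volume name
        mountPath: /var/lib/app
        readOnly: true
        subPath: current           # mount only this sub-directory of the volume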
- type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - recursiveReadOnly: - description: |- - RecursiveReadOnly specifies whether read-only mounts should be handled - recursively. - - If ReadOnly is false, this field has no meaning and must be unspecified. - - If ReadOnly is true, and this field is set to Disabled, the mount is not made - recursively read-only. If this field is set to IfPossible, the mount is made - recursively read-only, if it is supported by the container runtime. If this - field is set to Enabled, the mount is made recursively read-only if it is - supported by the container runtime, otherwise the pod will not be started and - an error will be generated to indicate the reason. - - If this field is set to IfPossible or Enabled, MountPropagation must be set to - None (or be unspecified, which defaults to None). - - If this field is not specified, it is treated as an equivalent of Disabled. - type: string - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - mountPath - x-kubernetes-list-type: map - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - hostAliases: - description: |- - HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - file if specified. - items: - description: |- - HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the - pod's hosts file. - properties: - hostnames: - description: Hostnames for the above IP address. - items: - type: string - type: array - x-kubernetes-list-type: atomic - ip: - description: IP address of the host file entry. - type: string - required: - - ip - type: object - type: array - x-kubernetes-list-map-keys: - - ip - x-kubernetes-list-type: map - hostIPC: - description: |- - Use the host's ipc namespace. - Optional: Default to false. - type: boolean - hostNetwork: - description: |- - Host networking requested for this pod. Use the host's network namespace. - If this option is set, the ports that will be used must be specified. - Default to false. - type: boolean - hostPID: - description: |- - Use the host's pid namespace. - Optional: Default to false. - type: boolean - hostUsers: - description: |- - Use the host's user namespace. - Optional: Default to true. - If set to true or not present, the pod will be run in the host user namespace, useful - for when the pod needs a feature only available to the host user namespace, such as - loading a kernel module with CAP_SYS_MODULE. - When set to false, a new userns is created for the pod. 
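At pod level, the `hostAliases` and host-namespace fields above look like this; the IP address and hostnames are hypothetical.

    # illustrative fragment of a pod spec
    hostAliases:
      - ip: "10.0.0.10"
        hostnames:
          - db.internal
          - db
    hostNetwork: false
    hostUsers: true                # default: run in the host user namespace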
Setting false is useful for - mitigating container breakout vulnerabilities even allowing users to run their - containers as root without actually having root privileges on the host. - This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. - type: boolean - hostname: - description: |- - Specifies the hostname of the Pod - If not specified, the pod's hostname will be set to a system-defined value. - type: string - imagePullSecrets: - description: |- - ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - If specified, these secrets will be passed to individual puller implementations for them to use. - More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - initContainers: - description: |- - List of initialization containers belonging to the pod. - Init containers are executed in order prior to containers being started. If any - init container fails, the pod is considered to have failed and is handled according - to its restartPolicy. The name for an init container or normal container must be - unique among all containers. - Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. - The resourceRequirements of an init container are taken into account during scheduling - by finding the highest request/limit for each resource type, and then using the max of - of that value or the sum of the normal containers. Limits are applied to init containers - in a similar fashion. - Init containers cannot currently be added or removed. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - items: - description: A single application container that you want - to run within a pod. - properties: - args: - description: |- - Arguments to the entrypoint. - The container image's CMD is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - x-kubernetes-list-type: atomic - command: - description: |- - Entrypoint array. Not executed within a shell. - The container image's ENTRYPOINT is used if this is not provided. - Variable references $(VAR_NAME) are expanded using the container's environment. 
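The `$(VAR_NAME)` expansion rules above are easiest to see in an example: the first argument below is expanded from the container's environment, while the `$$(...)` form is de-escaped and passed through literally. The names and image are hypothetical; the command is exec'd directly, so no shell is involved.

    # illustrative fragment of a pod spec
    initContainers:
      - name: init-config                # hypothetical init container
        image: busybox:1.36              # hypothetical image
        command: ["echo"]
        args:
          - "expanded: $(POD_NAME)"      # replaced from the container's env
          - "literal: $$(POD_NAME)"      # $$ escapes: echoed as $(POD_NAME)
        env:
          - name: POD_NAME
            value: example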
If a variable - cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - of whether the variable exists or not. Cannot be updated. - More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - items: - type: string - type: array - x-kubernetes-list-type: atomic - env: - description: |- - List of environment variables to set in the container. - Cannot be updated. - items: - description: EnvVar represents an environment variable - present in a Container. - properties: - name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's - value. Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
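The `valueFrom` sources above, combined in one `env` list; the secret name and key are hypothetical, the field and resource paths are among those the schema says are supported.

    # illustrative fragment of a container spec
    env:
      - name: POD_NAMESPACE
        valueFrom:
          fieldRef:
            fieldPath: metadata.namespace
      - name: CPU_LIMIT
        valueFrom:
          resourceFieldRef:
            resource: limits.cpu
            divisor: "1"           # report the limit in whole cores
      - name: DB_PASSWORD
        valueFrom:
          secretKeyRef:
            name: app-secrets      # hypothetical Secret
            key: password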
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in - the pod's namespace - properties: - key: - description: The key of the secret to - select from. Must be a valid secret - key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret - or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - envFrom: - description: |- - List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple - sources, the value associated with the last source will take precedence. - Values defined by an Env with a duplicate key will take precedence. - Cannot be updated. - items: - description: EnvFromSource represents the source of - a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap - must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select from - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret must - be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - type: object - type: array - x-kubernetes-list-type: atomic - image: - description: |- - Container image name. - More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. 
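An `envFrom` stanza per the schema above: when a key exists in multiple sources the later source wins, and an explicit `env` entry beats both. The ConfigMap and Secret names are hypothetical.

    # illustrative fragment of a container spec
    envFrom:
      - configMapRef:
          name: app-config         # hypothetical ConfigMap
      - prefix: DB_                # must be a C_IDENTIFIER prefix
        secretRef:
          name: db-credentials     # hypothetical Secret
          optional: true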
- type: string - imagePullPolicy: - description: |- - Image pull policy. - One of Always, Never, IfNotPresent. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - type: string - lifecycle: - description: |- - Actions that the management system should take in response to container lifecycle events. - Cannot be updated. - properties: - postStart: - description: |- - PostStart is called immediately after a container is created. If the handler fails, - the container is terminated and restarted according to its restart policy. - Other management of the container blocks until the hook completes. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies a command to execute - in the container. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - httpGet: - description: HTTPGet specifies an HTTP GET request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - sleep: - description: Sleep represents a duration that - the container should sleep. - properties: - seconds: - description: Seconds is the number of seconds - to sleep. - format: int64 - type: integer - required: - - seconds - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for backward compatibility. There is no validation of this field and - lifecycle hooks will fail at runtime when it is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: |- - PreStop is called immediately before a container is terminated due to an - API request or management event such as liveness/startup probe failure, - preemption, resource contention, etc. The handler is not called if the - container crashes or exits. The Pod's termination grace period countdown begins before the - PreStop hook is executed. Regardless of the outcome of the handler, the - container will eventually terminate within the Pod's termination grace - period (unless delayed by finalizers). Other management of the container blocks until the hook completes - or until the termination grace period is reached. - More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - properties: - exec: - description: Exec specifies a command to execute - in the container. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - httpGet: - description: HTTPGet specifies an HTTP GET request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the - request. HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP - server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - sleep: - description: Sleep represents a duration that - the container should sleep. - properties: - seconds: - description: Seconds is the number of seconds - to sleep. - format: int64 - type: integer - required: - - seconds - type: object - tcpSocket: - description: |- - Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - for backward compatibility. There is no validation of this field and - lifecycle hooks will fail at runtime when it is specified. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. 
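Putting the `postStart` and `preStop` handlers above together: the exec command is hypothetical, and `sleep` is the handler that simply delays termination, which is a common way to let load balancers drain before the container receives SIGTERM.

    # illustrative fragment of a container spec
    lifecycle:
      postStart:
        exec:
          command: ["/bin/register.sh"]  # hypothetical warm-up script
      preStop:
        sleep:
          seconds: 10                    # drain window before SIGTERM handling begins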
- x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: |- - Periodic probe of container liveness. - Container will be restarted if the probe fails. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies a command to execute - in the container. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies a GRPC HealthCheckRequest. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies an HTTP GET request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
- format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies a connection to - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - name: - description: |- - Name of the container specified as a DNS_LABEL. - Each container in a pod must have a unique name (DNS_LABEL). - Cannot be updated. - type: string - ports: - description: |- - List of ports to expose from the container. Not specifying a port here - DOES NOT prevent that port from being exposed. Any port which is - listening on the default "0.0.0.0" address inside a container will be - accessible from the network. - Modifying this array with strategic merge patch may corrupt the data. - For more information See https://github.com/kubernetes/kubernetes/issues/108255. - Cannot be updated. - items: - description: ContainerPort represents a network port - in a single container. - properties: - containerPort: - description: |- - Number of port to expose on the pod's IP address. - This must be a valid port number, 0 < x < 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind the external - port to. - type: string - hostPort: - description: |- - Number of port to expose on the host. - If specified, this must be a valid port number, 0 < x < 65536. - If HostNetwork is specified, this must match ContainerPort. - Most containers do not need this. - format: int32 - type: integer - name: - description: |- - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - named port in a pod must have a unique name. Name for the port that can be - referred to by services. - type: string - protocol: - default: TCP - description: |- - Protocol for port. Must be UDP, TCP, or SCTP. - Defaults to "TCP". - type: string - required: - - containerPort - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: |- - Periodic probe of container service readiness. - Container will be removed from service endpoints if the probe fails. 
- Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies a command to execute - in the container. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies a GRPC HealthCheckRequest. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies an HTTP GET request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies a connection to - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' 
- type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - resizePolicy: - description: Resources resize policy for the container. - items: - description: ContainerResizePolicy represents resource - resize policy for the container. - properties: - resourceName: - description: |- - Name of the resource to which this resource resize policy applies. - Supported values: cpu, memory. - type: string - restartPolicy: - description: |- - Restart policy to apply when specified resource is resized. - If not specified, it defaults to NotRequired. - type: string - required: - - resourceName - - restartPolicy - type: object - type: array - x-kubernetes-list-type: atomic - resources: - description: |- - Compute Resources required by this container. - Cannot be updated. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
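A `resources` stanza exercising both the quantity fields and the alpha `claims` list above; the claim name must match an entry in `pod.spec.resourceClaims`, and everything named here is hypothetical. The quantity strings all match the pattern given in the schema.

    # illustrative fragment of a container spec
    resources:
      requests:
        cpu: 250m
        memory: 256Mi
      limits:
        cpu: "1"
        memory: 512Mi
      claims:
        - name: gpu                # must match pod.spec.resourceClaims[].name
          request: single-gpu      # optional: a single request within the claim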
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, - the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: - this init container will be continually restarted on - exit until all regular containers have terminated. Once all regular - containers have completed, all init containers with restartPolicy "Always" - will be shut down. This lifecycle differs from normal init containers and - is often referred to as a "sidecar" container. Although this init - container still starts in the init container sequence, it does not wait - for the container to complete before proceeding to the next init - container. Instead, the next init container starts immediately after this - init container is started, or after any startupProbe has successfully - completed. - type: string - securityContext: - description: |- - SecurityContext defines the security options the container should be run with. - If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by this container. If set, this profile - overrides the pod's appArmorProfile. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - capabilities: - description: |- - The capabilities to add/drop when running containers. 
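The requests and limits maps above accept Kubernetes quantity strings matching the pattern shown: plain integers, decimal SI suffixes such as "m", or binary suffixes such as "Mi". A minimal sketch with assumed values:

    # Illustrative container resources; requests cannot exceed limits
    resources:
      requests:
        cpu: 500m        # half a CPU core
        memory: 256Mi    # binary ("Mi") suffix form
      limits:
        cpu: "1"
        memory: 512Mi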
- Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default value is Default which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that - applies to the container. - type: string - role: - description: Role is a SELinux role label that - applies to the container. - type: string - type: - description: Type is a SELinux type label that - applies to the container. - type: string - user: - description: User is a SELinux user label that - applies to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. 
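The container-level securityContext fields above override their pod-level counterparts where both are set. A minimal hardened sketch; the UID and the choice to drop all capabilities are assumptions, not defaults of this schema:

    # Illustrative container securityContext
    securityContext:
      allowPrivilegeEscalation: false  # sets no_new_privs on the process
      readOnlyRootFilesystem: true
      runAsNonRoot: true
      runAsUser: 10001                 # assumed non-root UID
      capabilities:
        drop:
          - ALL                        # drop every runtime-default capability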
If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name - of the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - startupProbe: - description: |- - StartupProbe indicates that the Pod has successfully initialized. - If specified, no other probes are executed until this completes successfully. - If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - properties: - exec: - description: Exec specifies a command to execute - in the container. - properties: - command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - format: int32 - type: integer - grpc: - description: GRPC specifies a GRPC HealthCheckRequest. - properties: - port: - description: Port number of the gRPC service. - Number must be in the range 1 to 65535. - format: int32 - type: integer - service: - default: "" - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - If this is not specified, the default behavior is defined by gRPC. - type: string - required: - - port - type: object - httpGet: - description: HTTPGet specifies an HTTP GET request - to perform. - properties: - host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set in the request. - HTTP allows repeated headers. - items: - description: HTTPHeader describes a custom - header to be used in HTTP probes - properties: - name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. - type: string - value: - description: The header field value - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - path: - description: Path to access on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: TCPSocket specifies a connection to - a TCP port. - properties: - host: - description: 'Optional: Host name to connect - to, defaults to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. 
- If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - format: int32 - type: integer - type: object - stdin: - description: |- - Whether this container should allocate a buffer for stdin in the container runtime. If this - is not set, reads from stdin in the container will always result in EOF. - Default is false. - type: boolean - stdinOnce: - description: |- - Whether the container runtime should close the stdin channel after it has been opened by - a single attach. When stdin is true the stdin stream will remain open across multiple attach - sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - first client attaches to stdin, and then remains open and accepts data until the client disconnects, - at which time stdin is closed and remains closed until the container is restarted. If this - flag is false, a container processes that reads from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: |- - Optional: Path at which the file to which the container's termination message - will be written is mounted into the container's filesystem. - Message written is intended to be brief final status, such as an assertion failure message. - Will be truncated by the node if greater than 4096 bytes. The total message length across - all containers will be limited to 12kb. - Defaults to /dev/termination-log. - Cannot be updated. - type: string - terminationMessagePolicy: - description: |- - Indicate how the termination message should be populated. File will use the contents of - terminationMessagePath to populate the container status message on both success and failure. - FallbackToLogsOnError will use the last chunk of container log output if the termination - message file is empty and the container exited with an error. - The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. - Cannot be updated. - type: string - tty: - description: |- - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of block devices - to be used by the container. - items: - description: volumeDevice describes a mapping of a - raw block device within a container. - properties: - devicePath: - description: devicePath is the path inside of - the container that the device will be mapped - to. - type: string - name: - description: name must match the name of a persistentVolumeClaim - in the pod - type: string - required: - - devicePath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - devicePath - x-kubernetes-list-type: map - volumeMounts: - description: |- - Pod volumes to mount into the container's filesystem. - Cannot be updated. 
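As the startupProbe description notes, the startup budget works out to failureThreshold × periodSeconds, and no other probe runs until it succeeds. A sketch for a slow-starting instance; the port and timings are assumptions:

    # Illustrative startupProbe: up to 30 * 10 = 300s to come up
    # before liveness checks (and their restarts) take over.
    startupProbe:
      tcpSocket:
        port: 5432           # assumed PostgreSQL port
      periodSeconds: 10
      failureThreshold: 30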
- items: - description: VolumeMount describes a mounting of a - Volume within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified - (which defaults to None). - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - recursiveReadOnly: - description: |- - RecursiveReadOnly specifies whether read-only mounts should be handled - recursively. - - If ReadOnly is false, this field has no meaning and must be unspecified. - - If ReadOnly is true, and this field is set to Disabled, the mount is not made - recursively read-only. If this field is set to IfPossible, the mount is made - recursively read-only, if it is supported by the container runtime. If this - field is set to Enabled, the mount is made recursively read-only if it is - supported by the container runtime, otherwise the pod will not be started and - an error will be generated to indicate the reason. - - If this field is set to IfPossible or Enabled, MountPropagation must be set to - None (or be unspecified, which defaults to None). - - If this field is not specified, it is treated as an equivalent of Disabled. - type: string - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - x-kubernetes-list-map-keys: - - mountPath - x-kubernetes-list-type: map - workingDir: - description: |- - Container's working directory. - If not specified, the container runtime's default will be used, which - might be configured in the container image. - Cannot be updated. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - nodeName: - description: |- - NodeName indicates in which node this pod is scheduled. - If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. - Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. - This field should not be used to express a desire for the pod to be scheduled on a specific node. - https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename - type: string - nodeSelector: - additionalProperties: - type: string - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. - Selector which must match a node's labels for the pod to be scheduled on that node. 
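A volumeMount, per the fields above, pairs a pod-level volume name with a mount path; subPath narrows the mount to one directory of the volume. The names here are assumptions:

    # Illustrative volumeMounts; each name must match a pod-level volume
    volumeMounts:
      - name: config
        mountPath: /etc/app
        readOnly: true
      - name: data
        mountPath: /var/lib/data
        subPath: instance-1       # mount only this subdirectory of the volume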
- More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - type: object - x-kubernetes-map-type: atomic - os: - description: |- - Specifies the OS of the containers in the pod. - Some pod and container fields are restricted if this is set. - - If the OS field is set to linux, the following fields must be unset: - -securityContext.windowsOptions - - If the OS field is set to windows, following fields must be unset: - - spec.hostPID - - spec.hostIPC - - spec.hostUsers - - spec.securityContext.appArmorProfile - - spec.securityContext.seLinuxOptions - - spec.securityContext.seccompProfile - - spec.securityContext.fsGroup - - spec.securityContext.fsGroupChangePolicy - - spec.securityContext.sysctls - - spec.shareProcessNamespace - - spec.securityContext.runAsUser - - spec.securityContext.runAsGroup - - spec.securityContext.supplementalGroups - - spec.securityContext.supplementalGroupsPolicy - - spec.containers[*].securityContext.appArmorProfile - - spec.containers[*].securityContext.seLinuxOptions - - spec.containers[*].securityContext.seccompProfile - - spec.containers[*].securityContext.capabilities - - spec.containers[*].securityContext.readOnlyRootFilesystem - - spec.containers[*].securityContext.privileged - - spec.containers[*].securityContext.allowPrivilegeEscalation - - spec.containers[*].securityContext.procMount - - spec.containers[*].securityContext.runAsUser - - spec.containers[*].securityContext.runAsGroup - properties: - name: - description: |- - Name is the name of the operating system. The currently supported values are linux and windows. - Additional value may be defined in future and can be one of: - https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration - Clients should expect to handle additional values and treat unrecognized values in this field as os: null - type: string - required: - - name - type: object - overhead: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - This field will be autopopulated at admission time by the RuntimeClass admission controller. If - the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - The RuntimeClass admission controller will reject Pod create requests which have the overhead already - set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - type: object - preemptionPolicy: - description: |- - PreemptionPolicy is the Policy for preempting pods with lower priority. - One of Never, PreemptLowerPriority. - Defaults to PreemptLowerPriority if unset. - type: string - priority: - description: |- - The priority value. Various system components use this field to find the - priority of the pod. When Priority Admission Controller is enabled, it - prevents users from setting this field. The admission controller populates - this field from PriorityClassName. - The higher the value, the higher the priority. - format: int32 - type: integer - priorityClassName: - description: |- - If specified, indicates the pod's priority. 
"system-node-critical" and - "system-cluster-critical" are two special keywords which indicate the - highest priorities with the former being the highest priority. Any other - name must be defined by creating a PriorityClass object with that name. - If not specified, the pod priority will be default or zero if there is no - default. - type: string - readinessGates: - description: |- - If specified, all readiness gates will be evaluated for pod readiness. - A pod is ready when all its containers are ready AND - all conditions specified in the readiness gates have status equal to "True" - More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates - items: - description: PodReadinessGate contains the reference to - a pod condition - properties: - conditionType: - description: ConditionType refers to a condition in - the pod's condition list with matching type. - type: string - required: - - conditionType - type: object - type: array - x-kubernetes-list-type: atomic - resourceClaims: - description: |- - ResourceClaims defines which ResourceClaims must be allocated - and reserved before the Pod is allowed to start. The resources - will be made available to those containers which consume them - by name. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. - items: - description: |- - PodResourceClaim references exactly one ResourceClaim, either directly - or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim - for the pod. - - It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. - Containers that need access to the ResourceClaim reference it with this name. - properties: - name: - description: |- - Name uniquely identifies this resource claim inside the pod. - This must be a DNS_LABEL. - type: string - resourceClaimName: - description: |- - ResourceClaimName is the name of a ResourceClaim object in the same - namespace as this pod. - - Exactly one of ResourceClaimName and ResourceClaimTemplateName must - be set. - type: string - resourceClaimTemplateName: - description: |- - ResourceClaimTemplateName is the name of a ResourceClaimTemplate - object in the same namespace as this pod. - - The template will be used to create a new ResourceClaim, which will - be bound to this pod. When this pod is deleted, the ResourceClaim - will also be deleted. The pod name and resource name, along with a - generated component, will be used to form a unique name for the - ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - - This field is immutable and no changes will be made to the - corresponding ResourceClaim by the control plane after creating the - ResourceClaim. - - Exactly one of ResourceClaimName and ResourceClaimTemplateName must - be set. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - resources: - description: |- - Resources is the total amount of CPU and Memory resources required by all - containers in the pod. It supports specifying Requests and Limits for - "cpu" and "memory" resource names only. ResourceClaims are not supported. - - This field enables fine-grained control over resource allocation for the - entire pod, allowing resource sharing among containers in a pod. - - This is an alpha field and requires enabling the PodLevelResources feature - gate. 
- properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - restartPolicy: - description: |- - Restart policy for all containers within the pod. - One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. - Default to Always. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - type: string - runtimeClassName: - description: |- - RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - empty definition that uses the default runtime handler. - More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - type: string - schedulerName: - description: |- - If specified, the pod will be dispatched by specified scheduler. - If not specified, the pod will be dispatched by default scheduler. - type: string - schedulingGates: - description: |- - SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the - scheduler will not attempt to schedule the pod. - - SchedulingGates can only be set at pod creation time, and be removed only afterwards. - items: - description: PodSchedulingGate is associated to a Pod to - guard its scheduling. - properties: - name: - description: |- - Name of the scheduling gate. 
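SchedulingGates act as an explicit hold: while any gate is listed the pod stays SchedulingGated and the scheduler ignores it; removing the last gate releases it for scheduling. A sketch with a hypothetical gate name:

    # Illustrative scheduling gate, removed by an external controller
    # once its (assumed) precondition is satisfied.
    schedulingGates:
      - name: example.com/wait-for-capacity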
- Each scheduling gate must have a unique name field. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - securityContext: - description: |- - SecurityContext holds pod-level security attributes and common container settings. - Optional: Defaults to empty. See type description for default values of each field. - properties: - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - fsGroup: - description: |- - A special supplemental group that applies to all containers in a pod. - Some volume types allow the Kubelet to change the ownership of that volume - to be owned by the pod: - - 1. The owning GID will be the FSGroup - 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - fsGroupChangePolicy: - description: |- - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - before being exposed inside Pod. This field will only apply to - volume types which support fsGroup based ownership(and permissions). - It will have no effect on ephemeral volume types such as: secret, configmaps - and emptydir. - Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name is windows. - type: string - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. 
- Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxChangePolicy: - description: |- - seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. - It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. - Valid values are "MountOption" and "Recursive". - - "Recursive" means relabeling of all files on all Pod volumes by the container runtime. - This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. - - "MountOption" mounts all eligible Pod volumes with `-o context` mount option. - This requires all Pods that share the same volume to use the same SELinux label. - It is not possible to share the same volume among privileged and unprivileged Pods. - Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes - whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. - "MountOption" value is allowed only when SELinuxMount feature gate is enabled. - - If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. - If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes - and "Recursive" for all other volumes. - - This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. - - All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. - Note that this field cannot be set when spec.os.name is windows. - type: string - seLinuxOptions: - description: |- - The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in SecurityContext. If set in - both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. 
- type: string - required: - - type - type: object - supplementalGroups: - description: |- - A list of groups applied to the first process run in each container, in - addition to the container's primary GID and fsGroup (if specified). If - the SupplementalGroupsPolicy feature is enabled, the - supplementalGroupsPolicy field determines whether these are in addition - to or instead of any group memberships defined in the container image. - If unspecified, no additional groups are added, though group memberships - defined in the container image may still be used, depending on the - supplementalGroupsPolicy field. - Note that this field cannot be set when spec.os.name is windows. - items: - format: int64 - type: integer - type: array - x-kubernetes-list-type: atomic - supplementalGroupsPolicy: - description: |- - Defines how supplemental groups of the first container processes are calculated. - Valid values are "Merge" and "Strict". If not specified, "Merge" is used. - (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled - and the container runtime must implement support for this feature. - Note that this field cannot be set when spec.os.name is windows. - type: string - sysctls: - description: |- - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - sysctls (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name is windows. - items: - description: Sysctl defines a kernel parameter to be - set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of - the GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - serviceAccount: - description: |- - DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. - Deprecated: Use serviceAccountName instead. 
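Pulling the pod-level fields above together, a minimal sketch (all values are assumptions): fsGroup makes supported volumes group-owned by the given GID, and container-level SecurityContext fields still take precedence per container.

    # Illustrative pod-level securityContext applying to all containers
    securityContext:
      runAsNonRoot: true
      fsGroup: 2000                        # volume files owned by GID 2000
      fsGroupChangePolicy: OnRootMismatch  # relabel only when ownership differs
      seccompProfile:
        type: RuntimeDefault               # container runtime's default profile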
- type: string - serviceAccountName: - description: |- - ServiceAccountName is the name of the ServiceAccount to use to run this pod. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - type: string - setHostnameAsFQDN: - description: |- - If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). - In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). - In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. - If a pod does not have FQDN, this has no effect. - Default to false. - type: boolean - shareProcessNamespace: - description: |- - Share a single process namespace between all of the containers in a pod. - When this is set containers will be able to view and signal processes from other containers - in the same pod, and the first process in each container will not be assigned PID 1. - HostPID and ShareProcessNamespace cannot both be set. - Optional: Default to false. - type: boolean - subdomain: - description: |- - If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". - If not specified, the pod will not have a domainname at all. - type: string - terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - If this value is nil, the default grace period will be used instead. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - Defaults to 30 seconds. - format: int64 - type: integer - tolerations: - description: If specified, the pod's tolerations. - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple <key,value,effect> using the matching operator <operator>. - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string.
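The toleration fields above combine as in this sketch (the taint key and value are assumptions): the first entry tolerates one specific NoSchedule taint indefinitely; the second tolerates any NoExecute taint for one hour before eviction.

    # Illustrative tolerations
    tolerations:
      - key: dedicated
        operator: Equal
        value: database
        effect: NoSchedule
      - operator: Exists         # empty key + Exists matches any taint
        effect: NoExecute
        tolerationSeconds: 3600  # evicted after 1h instead of immediately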
- type: string - type: object - type: array - x-kubernetes-list-type: atomic - topologySpreadConstraints: - description: |- - TopologySpreadConstraints describes how a group of pods ought to spread across topology - domains. Scheduler will schedule pods in a way which abides by the constraints. - All topologySpreadConstraints are ANDed. - items: - description: TopologySpreadConstraint specifies how to spread - matching pods among the given topology. - properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. - When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. 
- | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. - format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. - As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. - format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each <key, value> as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field.
- type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - x-kubernetes-list-map-keys: - - topologyKey - - whenUnsatisfiable - x-kubernetes-list-type: map - volumes: - description: |- - List of volumes that can be mounted by containers belonging to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes - items: - description: Volume represents a named volume in a pod that - may be accessed by any container in the pod. - properties: - awsElasticBlockStore: - description: |- - awsElasticBlockStore represents an AWS Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree - awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - format: int32 - type: integer - readOnly: - description: |- - readOnly value true will force the readOnly setting in VolumeMounts. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: boolean - volumeID: - description: |- - volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - type: string - required: - - volumeID - type: object - azureDisk: - description: |- - azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type - are redirected to the disk.csi.azure.com CSI driver. - properties: - cachingMode: - description: 'cachingMode is the Host Caching mode: - None, Read Only, Read Write.' 
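Combining the required fields above, the sketch below spreads matching pods across zones with at most one pod of skew, but still schedules when the constraint cannot be met. The pod label is an assumption:

    # Illustrative topology spread constraint
    topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: ScheduleAnyway   # soft: prefer, don't block scheduling
        labelSelector:
          matchLabels:
            app: example                    # assumed pod label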
- type: string - diskName: - description: diskName is the Name of the data disk - in the blob storage - type: string - diskURI: - description: diskURI is the URI of data disk in - the blob storage - type: string - fsType: - default: ext4 - description: |- - fsType is Filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - kind: - description: 'kind expected values are Shared: multiple - blob disks per storage account Dedicated: single - blob disk per storage account Managed: azure - managed data disk (only in managed availability - set). defaults to shared' - type: string - readOnly: - default: false - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - description: |- - azureFile represents an Azure File Service mount on the host and bind mount to the pod. - Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type - are redirected to the file.csi.azure.com CSI driver. - properties: - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretName: - description: secretName is the name of secret that - contains Azure Storage Account Name and Key - type: string - shareName: - description: shareName is the azure share Name - type: string - required: - - secretName - - shareName - type: object - cephfs: - description: |- - cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. - Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. - properties: - monitors: - description: |- - monitors is Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - items: - type: string - type: array - x-kubernetes-list-type: atomic - path: - description: 'path is Optional: Used as the mounted - root, rather than the full Ceph tree, default - is /' - type: string - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: boolean - secretFile: - description: |- - secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - secretRef: - description: |- - secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - user: - description: |- - user is optional: User is the rados user name, default is admin - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it - type: string - required: - - monitors - type: object - cinder: - description: |- - cinder represents a cinder volume attached and mounted on kubelets host machine. - Deprecated: Cinder is deprecated. All operations for the in-tree cinder type - are redirected to the cinder.csi.openstack.org CSI driver. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: boolean - secretRef: - description: |- - secretRef is optional: points to a secret object containing parameters used to connect - to OpenStack. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - volumeID: - description: |- - volumeID used to identify the volume in cinder. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md - type: string - required: - - volumeID - type: object - configMap: - description: configMap represents a configMap that should - populate this volume - properties: - defaultMode: - description: |- - defaultMode is optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
- YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: csi (Container Storage Interface) represents - ephemeral storage that is handled by certain external - CSI drivers. - properties: - driver: - description: |- - driver is the name of the CSI driver that handles this volume. - Consult with your admin for the correct name as registered in the cluster. - type: string - fsType: - description: |- - fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated CSI driver - which will determine the default filesystem to apply. - type: string - nodePublishSecretRef: - description: |- - nodePublishSecretRef is a reference to the secret object containing - sensitive information to pass to the CSI driver to complete the CSI - NodePublishVolume and NodeUnpublishVolume calls. - This field is optional, and may be empty if no secret is required. If the - secret object contains more than one secret, all secret references are passed. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: |- - readOnly specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: |- - volumeAttributes stores driver-specific properties that are passed to the CSI - driver. Consult your driver's documentation for supported values. - type: object - required: - - driver - type: object - downwardAPI: - description: downwardAPI represents downward API about - the pod that should populate this volume - properties: - defaultMode: - description: |- - Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting.
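As a sketch of the configMap volume source documented above (ConfigMap name, key, and path are placeholders):

    volumes:
      - name: app-config
        configMap:
          name: pgbouncer-extra      # placeholder ConfigMap name
          defaultMode: 420           # decimal form of octal 0644
          items:
            - key: settings.ini
              path: conf/settings.ini
          optional: true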
- This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: Items is a list of downward API volume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing the - pod field - properties: - fieldRef: - description: 'Required: Selects a field of - the pod: only annotations, labels, name, - namespace and uid are supported.' - properties: - apiVersion: - description: Version of the schema the - FieldPath is written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field to select - in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the relative - path name of the file to be created. Must - not be absolute or contain the ''..'' path. - Must be utf-8 encoded. The first item of - the relative path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - properties: - containerName: - description: 'Container name: required - for volumes, optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format - of the exposed resources, defaults to - "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - x-kubernetes-list-type: atomic - type: object - emptyDir: - description: |- - emptyDir represents a temporary directory that shares a pod's lifetime. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - properties: - medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. - The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. 
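A downwardAPI volume following the items schema above might look like this sketch (the container name is a placeholder):

    volumes:
      - name: podinfo
        downwardAPI:
          items:
            - path: labels
              fieldRef:
                fieldPath: metadata.labels
            - path: cpu_limit
              resourceFieldRef:
                containerName: pgbouncer   # placeholder container name
                resource: limits.cpu
                divisor: 1m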
- More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - description: |- - ephemeral represents a volume that is handled by a cluster storage driver. - The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, - and deleted when the pod is removed. - - Use this if: - a) the volume is only needed while the pod runs, - b) features of normal volumes like restoring from snapshot or capacity - tracking are needed, - c) the storage driver is specified through a storage class, and - d) the storage driver supports dynamic volume provisioning through - a PersistentVolumeClaim (see EphemeralVolumeSource for more - information on the connection between this volume type - and PersistentVolumeClaim). - - Use PersistentVolumeClaim or one of the vendor-specific - APIs for volumes that persist for longer than the lifecycle - of an individual pod. - - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to - be used that way - see the documentation of the driver for - more information. - - A pod can use both types of ephemeral volumes and - persistent volumes at the same time. - properties: - volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. - The pod in which this EphemeralVolumeSource is embedded will be the - owner of the PVC, i.e. the PVC will be deleted together with the - pod. The name of the PVC will be `-` where - `` is the name from the `PodSpec.Volumes` array - entry. Pod validation will reject the pod if the concatenated name - is not valid for a PVC (for example, too long). - - An existing PVC with that name that is not owned by the pod - will *not* be used for the pod to avoid using an unrelated - volume by mistake. Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC is - meant to be used by the pod, the PVC has to updated with an - owner reference to the pod once the pod exists. Normally - this should not be necessary, but it may be useful when - manually reconstructing a broken cluster. - - This field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. - - Required, must not be nil. - properties: - metadata: - description: |- - May contain labels and annotations that will be copied into the PVC - when creating it. No other fields are allowed and will be rejected during - validation. - type: object - spec: - description: |- - The specification for the PersistentVolumeClaim. The entire content is - copied unchanged into the PVC that gets created from this - template. The same fields as in a PersistentVolumeClaim - are also valid here. - properties: - accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - items: - type: string - type: array - x-kubernetes-list-type: atomic - dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. 
- When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. - * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - properties: - apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. - type: string - kind: - description: Kind is the type of resource - being referenced - type: string - name: - description: Name is the name of resource - being referenced - type: string - namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. - type: string - required: - - kind - - name - type: object - resources: - description: |- - resources represents the minimum resources the volume should have. 
- If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - selector: - description: selector is a label query over - volumes to consider for binding. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). - type: string - volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. - type: string - volumeName: - description: volumeName is the binding reference - to the PersistentVolume backing this claim. - type: string - type: object - required: - - spec - type: object - type: object - fc: - description: fc represents a Fibre Channel resource - that is attached to a kubelet's host machine and then - exposed to the pod. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - lun: - description: 'lun is Optional: FC target lun number' - format: int32 - type: integer - readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - targetWWNs: - description: 'targetWWNs is Optional: FC target - worldwide names (WWNs)' - items: - type: string - type: array - x-kubernetes-list-type: atomic - wwids: - description: |- - wwids Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. - items: - type: string - type: array - x-kubernetes-list-type: atomic - type: object - flexVolume: - description: |- - flexVolume represents a generic volume resource that is - provisioned/attached using an exec based plugin. - Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. - properties: - driver: - description: driver is the name of the driver to - use for this volume. - type: string - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - type: string - options: - additionalProperties: - type: string - description: 'options is Optional: this field holds - extra command options if any.' - type: object - readOnly: - description: |- - readOnly is Optional: defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef is Optional: secretRef is reference to the secret object containing - sensitive information to pass to the plugin scripts. This may be - empty if no secret object is specified. If the secret object - contains more than one secret, all secrets are passed to the plugin - scripts. - properties: - name: - default: "" - description: |- - Name of the referent.
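Tying the ephemeral fields above together, a generic ephemeral volume could be requested as follows (the storage class name is a placeholder):

    volumes:
      - name: scratch
        ephemeral:
          volumeClaimTemplate:
            spec:
              accessModes: ["ReadWriteOnce"]
              storageClassName: standard   # placeholder class name
              resources:
                requests:
                  storage: 1Gi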
- This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - required: - - driver - type: object - flocker: - description: |- - flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. - Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. - properties: - datasetName: - description: |- - datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker - should be considered as deprecated - type: string - datasetUUID: - description: datasetUUID is the UUID of the dataset. - This is unique identifier of a Flocker dataset - type: string - type: object - gcePersistentDisk: - description: |- - gcePersistentDisk represents a GCE Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree - gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - properties: - fsType: - description: |- - fsType is filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: string - partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - format: int32 - type: integer - pdName: - description: |- - pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - type: boolean - required: - - pdName - type: object - gitRepo: - description: |- - gitRepo represents a git repository at a particular revision. - Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an - EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - into the Pod's container. - properties: - directory: - description: |- - directory is the target directory name. - Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - git repository. Otherwise, if specified, the volume will contain the git repository in - the subdirectory with the given name. - type: string - repository: - description: repository is the URL - type: string - revision: - description: revision is the commit hash for the - specified revision. 
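The gitRepo deprecation note above recommends cloning into an emptyDir from an init container instead; a minimal sketch of that replacement pattern, with a placeholder image and repository URL:

    initContainers:
      - name: clone-repo
        image: alpine/git              # placeholder image
        args: ["clone", "https://example.com/repo.git", "/work"]
        volumeMounts:
          - name: repo
            mountPath: /work
    volumes:
      - name: repo
        emptyDir: {}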
- type: string - required: - - repository - type: object - glusterfs: - description: |- - glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md - properties: - endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - path: - description: |- - path is the Glusterfs volume path. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: string - readOnly: - description: |- - readOnly here will force the Glusterfs volume to be mounted with read-only permissions. - Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - type: boolean - required: - - endpoints - - path - type: object - hostPath: - description: |- - hostPath represents a pre-existing file or directory on the host - machine that is directly exposed to the container. This is generally - used for system agents or other privileged things that are allowed - to see the host machine. Most containers will NOT need this. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - properties: - path: - description: |- - path of the directory on the host. - If the path is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - type: - description: |- - type for HostPath Volume - Defaults to "" - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - required: - - path - type: object - image: - description: |- - image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. - The volume is resolved at pod startup depending on which PullPolicy value is provided: - - - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. - - The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. - A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. - The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. - The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. - The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). - The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. - properties: - pullPolicy: - description: |- - Policy for pulling OCI objects. 
Possible values are: - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. - Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - type: string - reference: - description: |- - Required: Image or artifact reference to be used. - Behaves in the same way as pod.spec.containers[*].image. - Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. - More info: https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher level config management to default or override - container images in workload controllers like Deployments and StatefulSets. - type: string - type: object - iscsi: - description: |- - iscsi represents an ISCSI Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md - properties: - chapAuthDiscovery: - description: chapAuthDiscovery defines whether support - iSCSI Discovery CHAP authentication - type: boolean - chapAuthSession: - description: chapAuthSession defines whether support - iSCSI Session CHAP authentication - type: boolean - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - type: string - initiatorName: - description: |- - initiatorName is the custom iSCSI Initiator Name. - If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - : will be created for the connection. - type: string - iqn: - description: iqn is the target iSCSI Qualified Name. - type: string - iscsiInterface: - default: default - description: |- - iscsiInterface is the interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). - type: string - lun: - description: lun represents iSCSI Target Lun number. - format: int32 - type: integer - portals: - description: |- - portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - items: - type: string - type: array - x-kubernetes-list-type: atomic - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - type: boolean - secretRef: - description: secretRef is the CHAP Secret for iSCSI - target and initiator authentication - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - targetPortal: - description: |- - targetPortal is iSCSI Target Portal. 
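An image volume using the reference and pullPolicy fields described above might be declared as (the artifact reference is a placeholder):

    volumes:
      - name: artifacts
        image:
          reference: registry.example.com/configs:1.0   # placeholder reference
          pullPolicy: IfNotPresent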
The Portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - description: |- - name of the volume. - Must be a DNS_LABEL and unique within the pod. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - nfs: - description: |- - nfs represents an NFS mount on the host that shares a pod's lifetime - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - properties: - path: - description: |- - path that is exported by the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - readOnly: - description: |- - readOnly here will force the NFS export to be mounted with read-only permissions. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: boolean - server: - description: |- - server is the hostname or IP address of the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - description: |- - persistentVolumeClaimVolumeSource represents a reference to a - PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - properties: - claimName: - description: |- - claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - type: string - readOnly: - description: |- - readOnly Will force the ReadOnly setting in VolumeMounts. - Default false. - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - description: |- - photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. - Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - pdID: - description: pdID is the ID that identifies Photon - Controller persistent disk - type: string - required: - - pdID - type: object - portworxVolume: - description: |- - portworxVolume represents a portworx volume attached and mounted on kubelets host machine. - Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type - are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate - is on. - properties: - fsType: - description: |- - fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. 
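A persistentVolumeClaim volume source per the schema above, with a placeholder claim name:

    volumes:
      - name: data
        persistentVolumeClaim:
          claimName: existing-claim    # placeholder PVC name
          readOnly: true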
- type: boolean - volumeID: - description: volumeID uniquely identifies a Portworx - volume - type: string - required: - - volumeID - type: object - projected: - description: projected items for all in one resources - secrets, configmaps, and downward API - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: |- - sources is the list of volume projections. Each entry in this list - handles one source. - items: - description: |- - Projection that may be projected along with other supported volume types. - Exactly one of these fields must be set. - properties: - clusterTrustBundle: - description: |- - ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - of ClusterTrustBundle objects in an auto-updating file. - - Alpha, gated by the ClusterTrustBundleProjection feature gate. - - ClusterTrustBundle objects can either be selected by name, or by the - combination of signer name and a label selector. - - Kubelet performs aggressive normalization of the PEM contents written - into the pod filesystem. Esoteric PEM features such as inter-block - comments and block headers are stripped. Certificates are deduplicated. - The ordering of certificates within the file is arbitrary, and Kubelet - may change the order over time. - properties: - labelSelector: - description: |- - Select all ClusterTrustBundles that match this label selector. Only has - effect if signerName is set. Mutually-exclusive with name. If unset, - interpreted as "match nothing". If set but empty, interpreted as "match - everything". - properties: - matchExpressions: - description: matchExpressions is a - list of label selector requirements. - The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label - key that the selector applies - to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - name: - description: |- - Select a single ClusterTrustBundle by object name. 
Mutually-exclusive - with signerName and labelSelector. - type: string - optional: - description: |- - If true, don't block pod startup if the referenced ClusterTrustBundle(s) - aren't available. If using name, then the named ClusterTrustBundle is - allowed not to exist. If using signerName, then the combination of - signerName and labelSelector is allowed to match zero - ClusterTrustBundles. - type: boolean - path: - description: Relative path from the volume - root to write the bundle. - type: string - signerName: - description: |- - Select all ClusterTrustBundles that match this signer name. - Mutually-exclusive with name. The contents of all selected - ClusterTrustBundles will be unified and deduplicated. - type: string - required: - - path - type: object - configMap: - description: configMap information about the - configMap data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a - path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether - the ConfigMap or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about - the downwardAPI data to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects - a field of the pod: only annotations, - labels, name, namespace and uid - are supported.' - properties: - apiVersion: - description: Version of the - schema the FieldPath is written - in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field - to select in the specified - API version. 
- type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the - relative path name of the file - to be created. Must not be absolute - or contain the ''..'' path. Must - be utf-8 encoded. The first item - of the relative path must not - start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - properties: - containerName: - description: 'Container name: - required for volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - x-kubernetes-list-type: atomic - type: object - secret: - description: secret information about the - secret data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a - path within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional field specify whether - the Secret or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information - about the serviceAccountToken data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. - type: string - expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours. Defaults to 1 hour - and must be at least 10 minutes. - format: int64 - type: integer - path: - description: |- - path is the path relative to the mount point of the file to project the - token into. - type: string - required: - - path - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - quobyte: - description: |- - quobyte represents a Quobyte mount on the host that shares a pod's lifetime. - Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. - properties: - group: - description: |- - group to map volume access to - Default is no group - type: string - readOnly: - description: |- - readOnly here will force the Quobyte volume to be mounted with read-only permissions. - Defaults to false. - type: boolean - registry: - description: |- - registry represents a single or multiple Quobyte Registry services - specified as a string as host:port pair (multiple entries are separated with commas) - which acts as the central registry for volumes - type: string - tenant: - description: |- - tenant owning the given Quobyte volume in the Backend - Used with dynamically provisioned Quobyte volumes, value is set by the plugin - type: string - user: - description: |- - user to map volume access to - Defaults to serviceaccount user - type: string - volume: - description: volume is a string that references - an already created Quobyte volume by name. - type: string - required: - - registry - - volume - type: object - rbd: - description: |- - rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md - properties: - fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - type: string - image: - description: |- - image is the rados image name. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - keyring: - default: /etc/ceph/keyring - description: |- - keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring.
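Combining the projection sources documented above, a projected volume might be assembled as follows (audience and ConfigMap name are placeholders):

    volumes:
      - name: combined
        projected:
          defaultMode: 420             # decimal form of octal 0644
          sources:
            - serviceAccountToken:
                audience: my-audience        # placeholder audience
                expirationSeconds: 3600
                path: token
            - configMap:
                name: trusted-settings       # placeholder ConfigMap name
                optional: true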
- More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - monitors: - description: |- - monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - items: - type: string - type: array - x-kubernetes-list-type: atomic - pool: - default: rbd - description: |- - pool is the rados pool name. - Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: boolean - secretRef: - description: |- - secretRef is name of the authentication secret for RBDUser. If provided - overrides keyring. - Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - user: - default: admin - description: |- - user is the rados user name. - Default is admin. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - type: string - required: - - image - - monitors - type: object - scaleIO: - description: |- - scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. - properties: - fsType: - default: xfs - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". - Default is "xfs". - type: string - gateway: - description: gateway is the host address of the - ScaleIO API Gateway. - type: string - protectionDomain: - description: protectionDomain is the name of the - ScaleIO Protection Domain for the configured storage. - type: string - readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef references to the secret for ScaleIO user and other - sensitive information. If this is not provided, Login operation will fail. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - sslEnabled: - description: sslEnabled Flag enable/disable SSL - communication with Gateway, default false - type: boolean - storageMode: - default: ThinProvisioned - description: |- - storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. - type: string - storagePool: - description: storagePool is the ScaleIO Storage - Pool associated with the protection domain. - type: string - system: - description: system is the name of the storage system - as configured in ScaleIO. 
- type: string - volumeName: - description: |- - volumeName is the name of a volume already created in the ScaleIO system - that is associated with this volume source. - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - description: |- - secret represents a secret that should populate this volume. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - properties: - defaultMode: - description: |- - defaultMode is Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values - for mode bits. Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items If unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within - a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - optional: - description: optional field specify whether the - Secret or its keys must be defined - type: boolean - secretName: - description: |- - secretName is the name of the secret in the pod's namespace to use. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - type: string - type: object - storageos: - description: |- - storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. - Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. - properties: - fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - type: boolean - secretRef: - description: |- - secretRef specifies the secret to use for obtaining the StorageOS API - credentials. If not specified, default values will be attempted. 
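A secret volume source per the schema above, with placeholder names:

    volumes:
      - name: credentials
        secret:
          secretName: pooler-auth      # placeholder Secret name
          defaultMode: 256             # decimal form of octal 0400
          items:
            - key: password
              path: pg/password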
- properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - volumeName: - description: |- - volumeName is the human-readable name of the StorageOS volume. Volume - names are only unique within a namespace. - type: string - volumeNamespace: - description: |- - volumeNamespace specifies the scope of the volume within StorageOS. If no - namespace is specified then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - Set VolumeName to any name to override the default behaviour. - Set to "default" if you are not using namespaces within StorageOS. - Namespaces that do not pre-exist within StorageOS will be created. - type: string - type: object - vsphereVolume: - description: |- - vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. - Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type - are redirected to the csi.vsphere.vmware.com CSI driver. - properties: - fsType: - description: |- - fsType is filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - type: string - storagePolicyID: - description: storagePolicyID is the storage Policy - Based Management (SPBM) profile ID associated - with the StoragePolicyName. - type: string - storagePolicyName: - description: storagePolicyName is the storage Policy - Based Management (SPBM) profile name. - type: string - volumePath: - description: volumePath is the path that identifies - vSphere volume vmdk - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - required: - - containers - type: object - type: object - type: - default: rw - description: 'Type of service to forward traffic to. Default: `rw`.' - enum: - - rw - - ro - - r - type: string - required: - - cluster - - pgbouncer - type: object - status: - description: |- - Most recently observed status of the Pooler. This data may not be up to - date. Populated by the system. Read-only. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - instances: - description: The number of pods trying to be scheduled - format: int32 - type: integer - secrets: - description: The resource version of the config object - properties: - clientCA: - description: The client CA secret version - properties: - name: - description: The name of the secret - type: string - version: - description: The ResourceVersion of the secret - type: string - type: object - pgBouncerSecrets: - description: The version of the secrets used by PgBouncer - properties: - authQuery: - description: The auth query secret version - properties: - name: - description: The name of the secret - type: string - version: - description: The ResourceVersion of the secret - type: string - type: object - type: object - serverCA: - description: The server CA secret version - properties: - name: - description: The name of the secret - type: string - version: - description: The ResourceVersion of the secret - type: string - type: object - serverTLS: - description: The server TLS secret version - properties: - name: - description: The name of the secret - type: string - version: - description: The ResourceVersion of the secret - type: string - type: object - type: object - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - scale: - specReplicasPath: .spec.instances - statusReplicasPath: .status.instances - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 - name: publications.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Publication - listKind: PublicationList - plural: publications - singular: publication - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .spec.name - name: PG Name - type: string - - jsonPath: .status.applied - name: Applied - type: boolean - - description: Latest reconciliation message - jsonPath: .status.message - name: Message - type: string - name: v1 - schema: - openAPIV3Schema: - description: Publication is the Schema for the publications API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: PublicationSpec defines the desired state of Publication - properties: - cluster: - description: The name of the PostgreSQL cluster that identifies the - "publisher" - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - dbname: - description: |- - The name of the database where the publication will be installed in - the "publisher" cluster - type: string - x-kubernetes-validations: - - message: dbname is immutable - rule: self == oldSelf - name: - description: The name of the publication inside PostgreSQL - type: string - x-kubernetes-validations: - - message: name is immutable - rule: self == oldSelf - parameters: - additionalProperties: - type: string - description: |- - Publication parameters part of the `WITH` clause as expected by - PostgreSQL `CREATE PUBLICATION` command - type: object - publicationReclaimPolicy: - default: retain - description: The policy for end-of-life maintenance of this publication - enum: - - delete - - retain - type: string - target: - description: Target of the publication as expected by PostgreSQL `CREATE - PUBLICATION` command - properties: - allTables: - description: |- - Marks the publication as one that replicates changes for all tables - in the database, including tables created in the future. - Corresponding to `FOR ALL TABLES` in PostgreSQL. - type: boolean - x-kubernetes-validations: - - message: allTables is immutable - rule: self == oldSelf - objects: - description: Just the following schema objects - items: - description: PublicationTargetObject is an object to publish - properties: - table: - description: |- - Specifies a list of tables to add to the publication. Corresponding - to `FOR TABLE` in PostgreSQL. - properties: - columns: - description: The columns to publish - items: - type: string - type: array - name: - description: The table name - type: string - only: - description: Whether to limit to the table only or include - all its descendants - type: boolean - schema: - description: The schema name - type: string - required: - - name - type: object - tablesInSchema: - description: |- - Marks the publication as one that replicates changes for all tables - in the specified list of schemas, including tables created in the - future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
- type: string - type: object - x-kubernetes-validations: - - message: tablesInSchema and table are mutually exclusive - rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) - && has(self.table)) - maxItems: 100000 - type: array - x-kubernetes-validations: - - message: specifying a column list when the publication also - publishes tablesInSchema is not supported - rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) - && self.exists(o, has(o.tablesInSchema)))' - type: object - x-kubernetes-validations: - - message: allTables and objects are mutually exclusive - rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) - && has(self.objects)) - required: - - cluster - - dbname - - name - - target - type: object - status: - description: PublicationStatus defines the observed state of Publication - properties: - applied: - description: Applied is true if the publication was reconciled correctly - type: boolean - message: - description: Message is the reconciliation output message - type: string - observedGeneration: - description: |- - A sequence number representing the latest - desired state that was synchronized - format: int64 - type: integer - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 - name: scheduledbackups.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: ScheduledBackup - listKind: ScheduledBackupList - plural: scheduledbackups - singular: scheduledbackup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .status.lastScheduleTime - name: Last Backup - type: date - name: v1 - schema: - openAPIV3Schema: - description: ScheduledBackup is the Schema for the scheduledbackups API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the desired behavior of the ScheduledBackup. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - backupOwnerReference: - default: none - description: |- - Indicates which ownerReference should be put inside the created backup resources.
- - none: no owner reference for created backup objects (same behavior as before the field was introduced)
- - self: sets the Scheduled backup object as owner of the backup
- - cluster: set the cluster as owner of the backup
- enum: - - none - - self - - cluster - type: string - cluster: - description: The cluster to backup - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - immediate: - description: If the first backup has to be immediately start after - creation or not - type: boolean - method: - default: barmanObjectStore - description: |- - The backup method to be used, possible options are `barmanObjectStore`, - `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. - enum: - - barmanObjectStore - - volumeSnapshot - - plugin - type: string - online: - description: |- - Whether the default type of backup with volume snapshots is - online/hot (`true`, default) or offline/cold (`false`) - Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' - type: boolean - onlineConfiguration: - description: |- - Configuration parameters to control the online/hot backup with volume snapshots - Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza - properties: - immediateCheckpoint: - description: |- - Control whether the I/O workload for the backup initial checkpoint will - be limited, according to the `checkpoint_completion_target` setting on - the PostgreSQL server. If set to true, an immediate checkpoint will be - used, meaning PostgreSQL will complete the checkpoint as soon as - possible. `false` by default. - type: boolean - waitForArchive: - default: true - description: |- - If false, the function will return immediately after the backup is completed, - without waiting for WAL to be archived. - This behavior is only useful with backup software that independently monitors WAL archiving. - Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. - By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is - enabled. - On a standby, this means that it will wait only when archive_mode = always. - If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger - an immediate segment switch. - type: boolean - type: object - pluginConfiguration: - description: Configuration parameters passed to the plugin managing - this backup - properties: - name: - description: Name is the name of the plugin managing this backup - type: string - parameters: - additionalProperties: - type: string - description: |- - Parameters are the configuration parameters passed to the backup - plugin for this backup - type: object - required: - - name - type: object - schedule: - description: |- - The schedule does not follow the same format used in Kubernetes CronJobs - as it includes an additional seconds specifier, - see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format - type: string - suspend: - description: If this backup is suspended or not - type: boolean - target: - description: |- - The policy to decide which instance should perform this backup. If empty, - it defaults to `cluster.spec.backup.target`. - Available options are empty string, `primary` and `prefer-standby`. - `primary` to have backups run always on primary instances, - `prefer-standby` to have backups run preferably on the most updated - standby, if available. - enum: - - primary - - prefer-standby - type: string - required: - - cluster - - schedule - type: object - status: - description: |- - Most recently observed status of the ScheduledBackup. 
This data may not be up - to date. Populated by the system. Read-only. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - properties: - lastCheckTime: - description: The latest time the schedule - format: date-time - type: string - lastScheduleTime: - description: Information when was the last time that backup was successfully - scheduled. - format: date-time - type: string - nextScheduleTime: - description: Next time we will run a backup - format: date-time - type: string - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.17.3 - name: subscriptions.postgresql.cnpg.io -spec: - group: postgresql.cnpg.io - names: - kind: Subscription - listKind: SubscriptionList - plural: subscriptions - singular: subscription - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - - jsonPath: .spec.cluster.name - name: Cluster - type: string - - jsonPath: .spec.name - name: PG Name - type: string - - jsonPath: .status.applied - name: Applied - type: boolean - - description: Latest reconciliation message - jsonPath: .status.message - name: Message - type: string - name: v1 - schema: - openAPIV3Schema: - description: Subscription is the Schema for the subscriptions API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SubscriptionSpec defines the desired state of Subscription - properties: - cluster: - description: The name of the PostgreSQL cluster that identifies the - "subscriber" - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - dbname: - description: |- - The name of the database where the publication will be installed in - the "subscriber" cluster - type: string - x-kubernetes-validations: - - message: dbname is immutable - rule: self == oldSelf - externalClusterName: - description: The name of the external cluster with the publication - ("publisher") - type: string - name: - description: The name of the subscription inside PostgreSQL - type: string - x-kubernetes-validations: - - message: name is immutable - rule: self == oldSelf - parameters: - additionalProperties: - type: string - description: |- - Subscription parameters part of the `WITH` clause as expected by - PostgreSQL `CREATE SUBSCRIPTION` command - type: object - publicationDBName: - description: |- - The name of the database containing the publication on the external - cluster. Defaults to the one in the external cluster definition. - type: string - publicationName: - description: |- - The name of the publication inside the PostgreSQL database in the - "publisher" - type: string - subscriptionReclaimPolicy: - default: retain - description: The policy for end-of-life maintenance of this subscription - enum: - - delete - - retain - type: string - required: - - cluster - - dbname - - externalClusterName - - name - - publicationName - type: object - status: - description: SubscriptionStatus defines the observed state of Subscription - properties: - applied: - description: Applied is true if the subscription was reconciled correctly - type: boolean - message: - description: Message is the reconciliation output message - type: string - observedGeneration: - description: |- - A sequence number representing the latest - desired state that was synchronized - format: int64 - type: integer - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cnpg-manager - namespace: cnpg-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 - name: cnpg-database-editor-role -rules: -- apiGroups: - - postgresql.cnpg.io - resources: - - databases - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - databases/status - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 - name: cnpg-database-viewer-role -rules: -- apiGroups: - - postgresql.cnpg.io - resources: - - databases - verbs: - - get - - list - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - databases/status - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cnpg-manager -rules: -- apiGroups: - - "" - resources: - - configmaps - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps/status - - secrets/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - "" - 
resources: - - persistentvolumeclaims - - pods - - pods/exec - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - pods/status - verbs: - - get -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - get - - list - - patch - - update - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - get - - patch -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - update -- apiGroups: - - monitoring.coreos.com - resources: - - podmonitors - verbs: - - create - - delete - - get - - list - - patch - - watch -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - backups - - clusters - - databases - - poolers - - publications - - scheduledbackups - - subscriptions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - backups/status - - databases/status - - publications/status - - scheduledbackups/status - - subscriptions/status - verbs: - - get - - patch - - update -- apiGroups: - - postgresql.cnpg.io - resources: - - clusterimagecatalogs - - imagecatalogs - verbs: - - get - - list - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - clusters/finalizers - - poolers/finalizers - verbs: - - update -- apiGroups: - - postgresql.cnpg.io - resources: - - clusters/status - - poolers/status - verbs: - - get - - patch - - update - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - - roles - verbs: - - create - - get - - list - - patch - - update - - watch -- apiGroups: - - snapshot.storage.k8s.io - resources: - - volumesnapshots - verbs: - - create - - get - - list - - patch - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 - name: cnpg-publication-editor-role -rules: -- apiGroups: - - postgresql.cnpg.io - resources: - - publications - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - publications/status - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 - name: cnpg-publication-viewer-role -rules: -- apiGroups: - - postgresql.cnpg.io - resources: - - publications - verbs: - - get - - list - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - publications/status - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 - name: cnpg-subscription-editor-role -rules: -- apiGroups: - - postgresql.cnpg.io - resources: - - subscriptions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - subscriptions/status - 
verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 - name: cnpg-subscription-viewer-role -rules: -- apiGroups: - - postgresql.cnpg.io - resources: - - subscriptions - verbs: - - get - - list - - watch -- apiGroups: - - postgresql.cnpg.io - resources: - - subscriptions/status - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cnpg-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cnpg-manager -subjects: -- kind: ServiceAccount - name: cnpg-manager - namespace: cnpg-system ---- -apiVersion: v1 -data: - queries: | - backends: - query: | - SELECT sa.datname - , sa.usename - , sa.application_name - , states.state - , COALESCE(sa.count, 0) AS total - , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds - FROM ( VALUES ('active') - , ('idle') - , ('idle in transaction') - , ('idle in transaction (aborted)') - , ('fastpath function call') - , ('disabled') - ) AS states(state) - LEFT JOIN ( - SELECT datname - , state - , usename - , COALESCE(application_name, '') AS application_name - , COUNT(*) - , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs - FROM pg_catalog.pg_stat_activity - GROUP BY datname, state, usename, application_name - ) sa ON states.state = sa.state - WHERE sa.usename IS NOT NULL - metrics: - - datname: - usage: "LABEL" - description: "Name of the database" - - usename: - usage: "LABEL" - description: "Name of the user" - - application_name: - usage: "LABEL" - description: "Name of the application" - - state: - usage: "LABEL" - description: "State of the backend" - - total: - usage: "GAUGE" - description: "Number of backends" - - max_tx_duration_seconds: - usage: "GAUGE" - description: "Maximum duration of a transaction in seconds" - - backends_waiting: - query: | - SELECT count(*) AS total - FROM pg_catalog.pg_locks blocked_locks - JOIN pg_catalog.pg_locks blocking_locks - ON blocking_locks.locktype = blocked_locks.locktype - AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database - AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation - AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page - AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple - AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid - AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid - AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid - AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid - AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid - AND blocking_locks.pid != blocked_locks.pid - JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid - WHERE NOT blocked_locks.granted - metrics: - - total: - usage: "GAUGE" - description: "Total number of backends that are currently waiting on other queries" - - pg_database: - query: | - SELECT datname - , pg_catalog.pg_database_size(datname) AS size_bytes - , pg_catalog.age(datfrozenxid) AS xid_age - , pg_catalog.mxid_age(datminmxid) AS mxid_age - FROM pg_catalog.pg_database - WHERE datallowconn - metrics: - - datname: - usage: "LABEL" - description: "Name of the database" - - size_bytes: - usage: "GAUGE" - description: "Disk space used by the database" - - xid_age: - usage: "GAUGE" - 
description: "Number of transactions from the frozen XID to the current one" - - mxid_age: - usage: "GAUGE" - description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" - - pg_postmaster: - query: | - SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time - FROM pg_catalog.pg_postmaster_start_time() - metrics: - - start_time: - usage: "GAUGE" - description: "Time at which postgres started (based on epoch)" - - pg_replication: - query: "SELECT CASE WHEN ( - NOT pg_catalog.pg_is_in_recovery() - OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) - THEN 0 - ELSE GREATEST (0, - EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) - END AS lag, - pg_catalog.pg_is_in_recovery() AS in_recovery, - EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, - (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" - metrics: - - lag: - usage: "GAUGE" - description: "Replication lag behind primary in seconds" - - in_recovery: - usage: "GAUGE" - description: "Whether the instance is in recovery" - - is_wal_receiver_up: - usage: "GAUGE" - description: "Whether the instance wal_receiver is up" - - streaming_replicas: - usage: "GAUGE" - description: "Number of streaming replicas connected to the instance" - - pg_replication_slots: - query: | - SELECT slot_name, - slot_type, - database, - active, - (CASE pg_catalog.pg_is_in_recovery() - WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) - ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) - END) as pg_wal_lsn_diff - FROM pg_catalog.pg_replication_slots - WHERE NOT temporary - metrics: - - slot_name: - usage: "LABEL" - description: "Name of the replication slot" - - slot_type: - usage: "LABEL" - description: "Type of the replication slot" - - database: - usage: "LABEL" - description: "Name of the database" - - active: - usage: "GAUGE" - description: "Flag indicating whether the slot is active" - - pg_wal_lsn_diff: - usage: "GAUGE" - description: "Replication lag in bytes" - - pg_stat_archiver: - query: | - SELECT archived_count - , failed_count - , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival - , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure - , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time - , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time - , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn - , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn - , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time - FROM pg_catalog.pg_stat_archiver - metrics: - - archived_count: - usage: "COUNTER" - description: "Number of WAL files that have been successfully archived" - - failed_count: - usage: "COUNTER" - description: "Number of failed attempts for archiving WAL files" - - seconds_since_last_archival: - usage: "GAUGE" - description: "Seconds since the last successful archival operation" - - seconds_since_last_failure: - usage: "GAUGE" - description: "Seconds since the last failed archival operation" - - last_archived_time: - usage: "GAUGE" - description: "Epoch of the last time WAL archiving 
succeeded" - - last_failed_time: - usage: "GAUGE" - description: "Epoch of the last time WAL archiving failed" - - last_archived_wal_start_lsn: - usage: "GAUGE" - description: "Archived WAL start LSN" - - last_failed_wal_start_lsn: - usage: "GAUGE" - description: "Last failed WAL LSN" - - stats_reset_time: - usage: "GAUGE" - description: "Time at which these statistics were last reset" - - pg_stat_bgwriter: - runonserver: "<17.0.0" - query: | - SELECT checkpoints_timed - , checkpoints_req - , checkpoint_write_time - , checkpoint_sync_time - , buffers_checkpoint - , buffers_clean - , maxwritten_clean - , buffers_backend - , buffers_backend_fsync - , buffers_alloc - FROM pg_catalog.pg_stat_bgwriter - metrics: - - checkpoints_timed: - usage: "COUNTER" - description: "Number of scheduled checkpoints that have been performed" - - checkpoints_req: - usage: "COUNTER" - description: "Number of requested checkpoints that have been performed" - - checkpoint_write_time: - usage: "COUNTER" - description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" - - checkpoint_sync_time: - usage: "COUNTER" - description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" - - buffers_checkpoint: - usage: "COUNTER" - description: "Number of buffers written during checkpoints" - - buffers_clean: - usage: "COUNTER" - description: "Number of buffers written by the background writer" - - maxwritten_clean: - usage: "COUNTER" - description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" - - buffers_backend: - usage: "COUNTER" - description: "Number of buffers written directly by a backend" - - buffers_backend_fsync: - usage: "COUNTER" - description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" - - buffers_alloc: - usage: "COUNTER" - description: "Number of buffers allocated" - - pg_stat_bgwriter_17: - runonserver: ">=17.0.0" - name: pg_stat_bgwriter - query: | - SELECT buffers_clean - , maxwritten_clean - , buffers_alloc - , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time - FROM pg_catalog.pg_stat_bgwriter - metrics: - - buffers_clean: - usage: "COUNTER" - description: "Number of buffers written by the background writer" - - maxwritten_clean: - usage: "COUNTER" - description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" - - buffers_alloc: - usage: "COUNTER" - description: "Number of buffers allocated" - - stats_reset_time: - usage: "GAUGE" - description: "Time at which these statistics were last reset" - - pg_stat_checkpointer: - runonserver: ">=17.0.0" - query: | - SELECT num_timed AS checkpoints_timed - , num_requested AS checkpoints_req - , restartpoints_timed - , restartpoints_req - , restartpoints_done - , write_time - , sync_time - , buffers_written - , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time - FROM pg_catalog.pg_stat_checkpointer - metrics: - - checkpoints_timed: - usage: "COUNTER" - description: "Number of scheduled checkpoints that have been performed" - - checkpoints_req: - usage: "COUNTER" - description: "Number of requested checkpoints that have been performed" - - restartpoints_timed: - usage: "COUNTER" - description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
- - restartpoints_req: - usage: "COUNTER" - description: "Number of requested restartpoints that have been performed" - - restartpoints_done: - usage: "COUNTER" - description: "Number of restartpoints that have been performed" - - write_time: - usage: "COUNTER" - description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" - - sync_time: - usage: "COUNTER" - description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" - - buffers_written: - usage: "COUNTER" - description: "Number of buffers written during checkpoints and restartpoints" - - stats_reset_time: - usage: "GAUGE" - description: "Time at which these statistics were last reset" - - pg_stat_database: - query: | - SELECT datname - , xact_commit - , xact_rollback - , blks_read - , blks_hit - , tup_returned - , tup_fetched - , tup_inserted - , tup_updated - , tup_deleted - , conflicts - , temp_files - , temp_bytes - , deadlocks - , blk_read_time - , blk_write_time - FROM pg_catalog.pg_stat_database - metrics: - - datname: - usage: "LABEL" - description: "Name of this database" - - xact_commit: - usage: "COUNTER" - description: "Number of transactions in this database that have been committed" - - xact_rollback: - usage: "COUNTER" - description: "Number of transactions in this database that have been rolled back" - - blks_read: - usage: "COUNTER" - description: "Number of disk blocks read in this database" - - blks_hit: - usage: "COUNTER" - description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" - - tup_returned: - usage: "COUNTER" - description: "Number of rows returned by queries in this database" - - tup_fetched: - usage: "COUNTER" - description: "Number of rows fetched by queries in this database" - - tup_inserted: - usage: "COUNTER" - description: "Number of rows inserted by queries in this database" - - tup_updated: - usage: "COUNTER" - description: "Number of rows updated by queries in this database" - - tup_deleted: - usage: "COUNTER" - description: "Number of rows deleted by queries in this database" - - conflicts: - usage: "COUNTER" - description: "Number of queries canceled due to conflicts with recovery in this database" - - temp_files: - usage: "COUNTER" - description: "Number of temporary files created by queries in this database" - - temp_bytes: - usage: "COUNTER" - description: "Total amount of data written to temporary files by queries in this database" - - deadlocks: - usage: "COUNTER" - description: "Number of deadlocks detected in this database" - - blk_read_time: - usage: "COUNTER" - description: "Time spent reading data file blocks by backends in this database, in milliseconds" - - blk_write_time: - usage: "COUNTER" - description: "Time spent writing data file blocks by backends in this database, in milliseconds" - - pg_stat_replication: - primary: true - query: | - SELECT usename - , COALESCE(application_name, '') AS application_name - , COALESCE(client_addr::text, '') AS client_addr - , COALESCE(client_port::text, '') AS client_port - , EXTRACT(EPOCH FROM backend_start) AS backend_start - , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age - , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes - 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes - , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes - , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes - , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds - , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds - , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds - FROM pg_catalog.pg_stat_replication - metrics: - - usename: - usage: "LABEL" - description: "Name of the replication user" - - application_name: - usage: "LABEL" - description: "Name of the application" - - client_addr: - usage: "LABEL" - description: "Client IP address" - - client_port: - usage: "LABEL" - description: "Client TCP port" - - backend_start: - usage: "COUNTER" - description: "Time when this process was started" - - backend_xmin_age: - usage: "COUNTER" - description: "The age of this standby's xmin horizon" - - sent_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location sent on this connection" - - write_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" - - flush_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" - - replay_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" - - write_lag_seconds: - usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" - - flush_lag_seconds: - usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" - - replay_lag_seconds: - usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" - - pg_settings: - query: | - SELECT name, - CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting - FROM pg_catalog.pg_settings - WHERE vartype IN ('integer', 'real', 'bool') - ORDER BY 1 - metrics: - - name: - usage: "LABEL" - description: "Name of the setting" - - setting: - usage: "GAUGE" - description: "Setting value" - - pg_extensions: - query: | - SELECT - current_database() as datname, - name as extname, - default_version, - installed_version, - CASE - WHEN default_version = installed_version THEN 0 - ELSE 1 - END AS update_available - FROM pg_catalog.pg_available_extensions - WHERE installed_version IS NOT NULL - metrics: - - datname: - usage: "LABEL" - description: "Name of the database" - - extname: - usage: "LABEL" - description: "Extension name" - - default_version: - usage: "LABEL" - description: "Default version" - - installed_version: - usage: "LABEL" - description: "Installed version" - - update_available: - usage: "GAUGE" - description: "An update is available" - target_databases: - - '*' -kind: ConfigMap -metadata: - labels: - cnpg.io/reload: "" - name: cnpg-default-monitoring - namespace: cnpg-system ---- -apiVersion: v1 -kind: Service -metadata: - name: cnpg-webhook-service - namespace: cnpg-system -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - 
app.kubernetes.io/name: cloudnative-pg ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/name: cloudnative-pg - name: cnpg-controller-manager - namespace: cnpg-system -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: cloudnative-pg - template: - metadata: - labels: - app.kubernetes.io/name: cloudnative-pg - spec: - containers: - - args: - - controller - - --leader-elect - - --max-concurrent-reconciles=10 - - --config-map-name=cnpg-controller-manager-config - - --secret-name=cnpg-controller-manager-config - - --webhook-port=9443 - command: - - /manager - env: - - name: OPERATOR_IMAGE_NAME - value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3 - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: MONITORING_QUERIES_CONFIGMAP - value: cnpg-default-monitoring - image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3 - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /readyz - port: 9443 - scheme: HTTPS - name: manager - ports: - - containerPort: 8080 - name: metrics - protocol: TCP - - containerPort: 9443 - name: webhook-server - protocol: TCP - readinessProbe: - httpGet: - path: /readyz - port: 9443 - scheme: HTTPS - resources: - limits: - cpu: 100m - memory: 200Mi - requests: - cpu: 100m - memory: 100Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsGroup: 10001 - runAsUser: 10001 - seccompProfile: - type: RuntimeDefault - startupProbe: - failureThreshold: 6 - httpGet: - path: /readyz - port: 9443 - scheme: HTTPS - periodSeconds: 5 - volumeMounts: - - mountPath: /controller - name: scratch-data - - mountPath: /run/secrets/cnpg.io/webhook - name: webhook-certificates - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: cnpg-manager - terminationGracePeriodSeconds: 10 - volumes: - - emptyDir: {} - name: scratch-data - - name: webhook-certificates - secret: - defaultMode: 420 - optional: true - secretName: cnpg-webhook-cert ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - name: cnpg-mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /mutate-postgresql-cnpg-io-v1-backup - failurePolicy: Fail - name: mbackup.cnpg.io - rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /mutate-postgresql-cnpg-io-v1-cluster - failurePolicy: Fail - name: mcluster.cnpg.io - rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /mutate-postgresql-cnpg-io-v1-database - failurePolicy: Fail - name: mdatabase.cnpg.io - rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - databases - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /mutate-postgresql-cnpg-io-v1-scheduledbackup - failurePolicy: Fail - name: mscheduledbackup.cnpg.io - 
rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: cnpg-validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /validate-postgresql-cnpg-io-v1-backup - failurePolicy: Fail - name: vbackup.cnpg.io - rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - backups - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /validate-postgresql-cnpg-io-v1-cluster - failurePolicy: Fail - name: vcluster.cnpg.io - rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - clusters - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /validate-postgresql-cnpg-io-v1-database - failurePolicy: Fail - name: vdatabase.cnpg.io - rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - databases - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /validate-postgresql-cnpg-io-v1-pooler - failurePolicy: Fail - name: vpooler.cnpg.io - rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - poolers - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: cnpg-webhook-service - namespace: cnpg-system - path: /validate-postgresql-cnpg-io-v1-scheduledbackup - failurePolicy: Fail - name: vscheduledbackup.cnpg.io - rules: - - apiGroups: - - postgresql.cnpg.io - apiVersions: - - v1 - operations: - - CREATE - - UPDATE - resources: - - scheduledbackups - sideEffects: None From 13c22edbd8caa3146f577293638273c4dd4589e9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 13 May 2025 16:29:44 +0200 Subject: [PATCH 583/836] Version tag to 1.26.0-rc3 (#7563) Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. --- docs/src/installation_upgrade.md | 4 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.26.0-rc3.yaml | 18021 +++++++++++++++++++++++++++++ 4 files changed, 18041 insertions(+), 20 deletions(-) create mode 100644 releases/cnpg-1.26.0-rc3.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index a9ab979354..9a8bd66381 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -8,12 +8,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. 
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc2.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc3.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc2.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc3.yaml ``` You can verify that with: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 50208b8651..d8e3d55a1b 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -31,11 +31,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.26.0-rc2 release of the plugin, for an Intel based +For example, let's install the 1.26.0-rc3 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc2/kubectl-cnpg_1.26.0-rc2_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc3/kubectl-cnpg_1.26.0-rc3_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -46,17 +46,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.26.0-rc2) ... -Setting up cnpg (1.26.0-rc2) ... +Unpacking cnpg (1.26.0-rc3) ... +Setting up cnpg (1.26.0-rc3) ... ``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.26.0-rc2 release for an +As in the example for `.rpm` packages, let's install the 1.26.0-rc3 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc2/kubectl-cnpg_1.26.0-rc2_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc3/kubectl-cnpg_1.26.0-rc3_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -70,7 +70,7 @@ Dependencies resolved. 
Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.26.0-rc2 @commandline 20 M + cnpg x86_64 1.26.0-rc3 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc2 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc3 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc2 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc2 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc3 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc2","build":{"Version":"1.26.0-rc2+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc3","build":{"Version":"1.26.0-rc3+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc2","build":{"Version":"1.26.0-rc2+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc3","build":{"Version":"1.26.0-rc3+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index a97567d158..6304f15667 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.26.0-rc2" + Version = "1.26.0-rc3" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc2" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.26.0-rc2" + buildVersion = "1.26.0-rc3" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.26.0-rc3.yaml b/releases/cnpg-1.26.0-rc3.yaml new file mode 100644 index 0000000000..0e3ebc55f8 --- /dev/null +++ b/releases/cnpg-1.26.0-rc3.yaml @@ -0,0 +1,18021 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. 
If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is tho role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
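+ # Hedged usage sketch: a Cluster could pass such a required term through
+ # .spec.affinity.additionalPodAffinity to co-locate its instances with
+ # pods carrying an illustrative "app: pgbouncer" label on the same node.
+ #
+ #   affinity:
+ #     additionalPodAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         - topologyKey: kubernetes.io/hostname
+ #           labelSelector:
+ #             matchExpressions:
+ #               - key: app
+ #                 operator: In
+ #                 values:
+ #                   - pgbouncer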
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
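+ # Hedged sketch of a weighted preferred term under
+ # .spec.affinity.nodeAffinity; the "workload" node label is an
+ # illustrative assumption, not part of the generated schema.
+ #
+ #   nodeAffinity:
+ #     preferredDuringSchedulingIgnoredDuringExecution:
+ #       - weight: 50
+ #         preference:
+ #           matchExpressions:
+ #             - key: workload
+ #               operator: In
+ #               values:
+ #                 - postgres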
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required" could lead to instances remaining pending until new kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backup files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whether to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent.
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. 
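+                    # Example (illustrative sketch, not part of the generated schema):
+                    # WAL archiving and retention tuning using the `wal`,
+                    # `retentionPolicy`, and `target` fields described above; the
+                    # bucket path is a hypothetical placeholder.
+                    #
+                    #   spec:
+                    #     backup:
+                    #       retentionPolicy: "30d"
+                    #       target: prefer-standby
+                    #       barmanObjectStore:
+                    #         destinationPath: s3://my-bucket/cluster-example
+                    #         wal:
+                    #           compression: zstd
+                    #           maxParallel: 4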
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
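+                    # Example (illustrative sketch, not part of the generated schema):
+                    # a volume snapshot backup using the `volumeSnapshot` fields above;
+                    # `csi-snapclass` is a hypothetical VolumeSnapshotClass name.
+                    #
+                    #   spec:
+                    #     backup:
+                    #       volumeSnapshot:
+                    #         className: csi-snapclass
+                    #         online: true
+                    #         onlineConfiguration:
+                    #           immediateCheckpoint: false
+                    #           waitForArchive: true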
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. 
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
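+                    # Example (illustrative sketch, not part of the generated schema):
+                    # an `initdb` bootstrap using the fields described above; the
+                    # Secret name `bootstrap-sql` is a hypothetical placeholder.
+                    #
+                    #   spec:
+                    #     bootstrap:
+                    #       initdb:
+                    #         database: app
+                    #         owner: app
+                    #         dataChecksums: true
+                    #         postInitApplicationSQLRefs:
+                    #           secretRefs:
+                    #           - name: bootstrap-sql
+                    #             key: schema.sql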
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
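+                    # Example (illustrative sketch, not part of the generated schema):
+                    # point-in-time recovery driven by the `recoveryTarget` fields
+                    # above; the source name and timestamp are hypothetical.
+                    #
+                    #   spec:
+                    #     bootstrap:
+                    #       recovery:
+                    #         source: cluster-origin
+                    #         recoveryTarget:
+                    #           targetTime: "2024-09-25T08:00:00Z"
+                    #           exclusive: false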
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+                      - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided,
+                        this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+                      - `ca.key`: key used to generate Server SSL certs; if ServerTLSSecret is provided,
+                        this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
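+                    # Example (illustrative sketch, not part of the generated schema):
+                    # user-provided certificates via the `certificates` section above;
+                    # all Secret names are hypothetical placeholders.
+                    #
+                    #   spec:
+                    #     certificates:
+                    #       serverCASecret: my-server-ca
+                    #       serverTLSSecret: my-server-tls
+                    #       clientCASecret: my-client-ca
+                    #       replicationTLSSecret: my-replication-tls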
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+                    The pod in which this EphemeralVolumeSource is embedded will be the
+                    owner of the PVC, i.e. the PVC will be deleted together with the
+                    pod. The name of the PVC will be `<pod name>-<volume name>` where
+                    `<volume name>` is the name from the `PodSpec.Volumes` array
+                    entry. Pod validation will reject the pod if the concatenated name
+                    is not valid for a PVC (for example, too long).
+
+                    An existing PVC with that name that is not owned by the pod
+                    will *not* be used for the pod to avoid using an unrelated
+                    volume by mistake. Starting the pod is then blocked until
+                    the unrelated PVC is removed. If such a pre-created PVC is
+                    meant to be used by the pod, the PVC has to be updated with an
+                    owner reference to the pod once the pod exists. Normally
+                    this should not be necessary, but it may be useful when
+                    manually reconstructing a broken cluster.
+
+                    This field is read-only and no changes will be made by Kubernetes
+                    to the PVC after it has been created.
+
+                    Required, must not be nil.
+                  properties:
+                    metadata:
+                      description: |-
+                        May contain labels and annotations that will be copied into the PVC
+                        when creating it. No other fields are allowed and will be rejected during
+                        validation.
+                      type: object
+                    spec:
+                      description: |-
+                        The specification for the PersistentVolumeClaim. The entire content is
+                        copied unchanged into the PVC that gets created from this
+                        template. The same fields as in a PersistentVolumeClaim
+                        are also valid here.
+                      properties:
+                        accessModes:
+                          description: |-
+                            accessModes contains the desired access modes the volume should have.
+                            More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        dataSource:
+                          description: |-
+                            dataSource field can be used to specify either:
+                            * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                            * An existing PVC (PersistentVolumeClaim)
+                            If the provisioner or an external controller can support the specified data source,
+                            it will create a new volume based on the contents of the specified data source.
+                            When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+                            and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+                            If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+                          properties:
+                            apiGroup:
+                              description: |-
+                                APIGroup is the group for the resource being referenced.
+                                If APIGroup is not specified, the specified Kind must be in the core API group.
+                                For any other third-party types, APIGroup is required.
+                              type: string
+                            kind:
+                              description: Kind is the type of resource being referenced
+                              type: string
+                            name:
+                              description: Name is the name of resource being referenced
+                              type: string
+                          required:
+                          - kind
+                          - name
+                          type: object
+                          x-kubernetes-map-type: atomic
+                        dataSourceRef:
+                          description: |-
+                            dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+                            volume is desired. This may be any object from a non-empty API group (non
+                            core object) or a PersistentVolumeClaim object.
+                            When this field is specified, volume binding will only succeed if the type of
+                            the specified object matches some installed volume populator or dynamic
+                            provisioner.
+                            This field will replace the functionality of the dataSource field and as such
+                            if both fields are non-empty, they must have the same value.
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+                          properties:
+                            matchExpressions:
+                              description: matchExpressions is a list of label selector
+                                requirements. The requirements are ANDed.
+                              items:
+                                description: |-
+                                  A label selector requirement is a selector that contains values, a key, and an operator that
+                                  relates the key and values.
+                                properties:
+                                  key:
+                                    description: key is the label key that the selector
+                                      applies to.
+                                    type: string
+                                  operator:
+                                    description: |-
+                                      operator represents a key's relationship to a set of values.
+                                      Valid operators are In, NotIn, Exists and DoesNotExist.
+                                    type: string
+                                  values:
+                                    description: |-
+                                      values is an array of string values. If the operator is In or NotIn,
+                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                      the values array must be empty. This array is replaced during a strategic
+                                      merge patch.
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                required:
+                                - key
+                                - operator
+                                type: object
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            matchLabels:
+                              additionalProperties:
+                                type: string
+                              description: |-
+                                matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                operator is "In", and the values array contains only "value". The requirements are ANDed.
+                              type: object
+                          type: object
+                          x-kubernetes-map-type: atomic
+                        storageClassName:
+                          description: |-
+                            storageClassName is the name of the StorageClass required by the claim.
+                            More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+                          type: string
+                        volumeAttributesClassName:
+                          description: |-
+                            volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+                            If specified, the CSI driver will create or update the volume with the attributes defined
+                            in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName;
+                            it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+                            will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+                            If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+                            will be set by the persistentvolume controller if it exists.
+                            If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                            set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                            exists.
+                            More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                            (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+                          type: string
+                        volumeMode:
+                          description: |-
+                            volumeMode defines what type of volume is required by the claim.
+                            Value of Filesystem is implied when not included in claim spec.
+                          type: string
+                        volumeName:
+                          description: volumeName is the binding reference to the
+                            PersistentVolume backing this claim.
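+                    # Example (illustrative sketch, not part of the generated schema):
+                    # an ephemeral volume claim template using the PVC spec fields
+                    # above; the storage class name is a hypothetical placeholder.
+                    #
+                    #   spec:
+                    #     ephemeralVolumeSource:
+                    #       volumeClaimTemplate:
+                    #         spec:
+                    #           accessModes: ["ReadWriteOnce"]
+                    #           storageClassName: standard
+                    #           resources:
+                    #             requests:
+                    #               storage: 1Gi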
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+                          More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                        type: string
+                      optional:
+                        description: Specify whether the Secret or its key must
+                          be defined
+                        type: boolean
+                    required:
+                    - key
+                    type: object
+                    x-kubernetes-map-type: atomic
+                  sslRootCert:
+                    description: |-
+                      The reference to an SSL CA public key to be used to connect to this
+                      instance
+                    properties:
+                      key:
+                        description: The key of the secret to select from. Must
+                          be a valid secret key.
+                        type: string
+                      name:
+                        default: ""
+                        description: |-
+                          Name of the referent.
+                          This field is effectively required, but due to backwards compatibility is
+                          allowed to be empty. Instances of this type with an empty value here are
+                          almost certainly wrong.
+                          More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                        type: string
+                      optional:
+                        description: Specify whether the Secret or its key must
+                          be defined
+                        type: boolean
+                    required:
+                    - key
+                    type: object
+                    x-kubernetes-map-type: atomic
+                required:
+                - name
+                type: object
+              type: array
+            failoverDelay:
+              default: 0
+              description: |-
+                The amount of time (in seconds) to wait before triggering a failover
+                after the primary PostgreSQL instance in the cluster was detected
+                to be unhealthy
+              format: int32
+              type: integer
+            imageCatalogRef:
+              description: Defines the major PostgreSQL version we want to use within
+                an ImageCatalog
+              properties:
+                apiGroup:
+                  description: |-
+                    APIGroup is the group for the resource being referenced.
+                    If APIGroup is not specified, the specified Kind must be in the core API group.
+                    For any other third-party types, APIGroup is required.
+                  type: string
+                kind:
+                  description: Kind is the type of resource being referenced
+                  type: string
+                major:
+                  description: The major version of PostgreSQL we want to use from
+                    the ImageCatalog
+                  type: integer
+                name:
+                  description: Name is the name of resource being referenced
+                  type: string
+              required:
+              - kind
+              - major
+              - name
+              type: object
+              x-kubernetes-map-type: atomic
+              x-kubernetes-validations:
+              - message: Only image catalogs are supported
+                rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog'
+              - message: Only image catalogs are supported
+                rule: self.apiGroup == 'postgresql.cnpg.io'
+            imageName:
+              description: |-
+                Name of the container image, supporting both tags (`<image>:<tag>`)
+                and digests for deterministic and repeatable deployments
+                (`<image>:<tag>@sha256:<digestValue>`)
+              type: string
+            imagePullPolicy:
+              description: |-
+                Image pull policy.
+                One of `Always`, `Never` or `IfNotPresent`.
+                If not defined, it defaults to `IfNotPresent`.
+                Cannot be updated.
+                More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+              type: string
+            imagePullSecrets:
+              description: The list of pull secrets to be used to pull the images
+              items:
+                description: |-
+                  LocalObjectReference contains enough information to let you locate a
+                  local object with a known type inside the same namespace
+                properties:
+                  name:
+                    description: Name of the referent.
+                    type: string
+                required:
+                - name
+                type: object
+              type: array
+            inheritedMetadata:
+              description: Metadata that will be inherited by all objects related
+                to the Cluster
+              properties:
+                annotations:
+                  additionalProperties:
+                    type: string
+                  type: object
+                labels:
+                  additionalProperties:
+                    type: string
+                  type: object
+              type: object
+            instances:
+              default: 1
+              description: Number of instances required in the cluster
+              minimum: 1
+              type: integer
+            livenessProbeTimeout:
+              description: |-
+                LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+                to successfully respond to the liveness probe (default 30).
+                The Liveness probe failure threshold is derived from this value using the formula:
+                ceiling(livenessProbeTimeout / 10).
+              format: int32
+              type: integer
+            logLevel:
+              default: info
+              description: 'The instances'' log level, one of the following values:
+                error, warning, info (default), debug, trace'
+              enum:
+              - error
+              - warning
+              - info
+              - debug
+              - trace
+              type: string
+            managed:
+              description: The configuration that is used by the portions of PostgreSQL
+                that are managed by the instance manager
+              properties:
+                roles:
+                  description: Database roles managed by the `Cluster`
+                  items:
+                    description: |-
+                      RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+                      with the additional field Ensure specifying whether to ensure the presence or
+                      absence of the role in the database
+
+                      The defaults of the CREATE ROLE command are applied
+                      Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+                    properties:
+                      bypassrls:
+                        description: |-
+                          Whether a role bypasses every row-level security (RLS) policy.
+                          Default is `false`.
+                        type: boolean
+                      comment:
+                        description: Description of the role
+                        type: string
+                      connectionLimit:
+                        default: -1
+                        description: |-
+                          If the role can log in, this specifies how many concurrent
+                          connections the role can make. `-1` (the default) means no limit.
+                        format: int64
+                        type: integer
+                      createdb:
+                        description: |-
+                          When set to `true`, the role being defined will be allowed to create
+                          new databases. Specifying `false` (default) will deny a role the
+                          ability to create databases.
+                        type: boolean
+                      createrole:
+                        description: |-
+                          Whether the role will be permitted to create, alter, drop, comment
+                          on, change the security label for, and grant or revoke membership in
+                          other roles. Default is `false`.
+                        type: boolean
+                      disablePassword:
+                        description: DisablePassword indicates that a role's password
+                          should be set to NULL in Postgres
+                        type: boolean
+                      ensure:
+                        default: present
+                        description: Ensure the role is `present` or `absent` -
+                          defaults to "present"
+                        enum:
+                        - present
+                        - absent
+                        type: string
+                      inRoles:
+                        description: |-
+                          List of one or more existing roles to which this role will be
+                          immediately added as a new member. Default empty.
+                        items:
+                          type: string
+                        type: array
+                      inherit:
+                        default: true
+                        description: |-
+                          Whether a role "inherits" the privileges of roles it is a member of.
+                          Default is `true`.
+                        type: boolean
+                      login:
+                        description: |-
+                          Whether the role is allowed to log in. A role having the `login`
+                          attribute can be thought of as a user. Roles without this attribute
+                          are useful for managing database privileges, but are not users in
+                          the usual sense of the word. Default is `false`.
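
A minimal sketch of the declarative role management described above, assuming a
hypothetical `app_reader` role and password Secret:

    spec:
      managed:
        roles:
        - name: app_reader              # hypothetical role
          ensure: present
          login: true
          inherit: true
          connectionLimit: 10
          passwordSecret:
            name: app-reader-password   # hypothetical Secret
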
+                        type: boolean
+                      name:
+                        description: Name of the role
+                        type: string
+                      passwordSecret:
+                        description: |-
+                          Secret containing the password of the role (if present)
+                          If null, the password will be ignored unless DisablePassword is set
+                        properties:
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - name
+                        type: object
+                      replication:
+                        description: |-
+                          Whether a role is a replication role. A role must have this
+                          attribute (or be a superuser) in order to be able to connect to the
+                          server in replication mode (physical or logical replication) and in
+                          order to be able to create or drop replication slots. A role having
+                          the `replication` attribute is a very highly privileged role, and
+                          should only be used on roles actually used for replication. Default
+                          is `false`.
+                        type: boolean
+                      superuser:
+                        description: |-
+                          Whether the role is a `superuser` who can override all access
+                          restrictions within the database - superuser status is dangerous and
+                          should be used only when really needed. You must yourself be a
+                          superuser to create a new superuser. Default is `false`.
+                        type: boolean
+                      validUntil:
+                        description: |-
+                          Date and time after which the role's password is no longer valid.
+                          When omitted, the password will never expire (default).
+                        format: date-time
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  type: array
+                services:
+                  description: Services managed by the `Cluster`
+                  properties:
+                    additional:
+                      description: Additional is a list of additional managed services
+                        specified by the user.
+                      items:
+                        description: |-
+                          ManagedService represents a specific service managed by the cluster.
+                          It includes the type of service and its associated template specification.
+                        properties:
+                          selectorType:
+                            description: |-
+                              SelectorType specifies the type of selectors that the service will have.
+                              Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+                            enum:
+                            - rw
+                            - r
+                            - ro
+                            type: string
+                          serviceTemplate:
+                            description: ServiceTemplate is the template specification
+                              for the service.
+                            properties:
+                              metadata:
+                                description: |-
+                                  Standard object's metadata.
+                                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                                properties:
+                                  annotations:
+                                    additionalProperties:
+                                      type: string
+                                    description: |-
+                                      Annotations is an unstructured key value map stored with a resource that may be
+                                      set by external tools to store and retrieve arbitrary metadata. They are not
+                                      queryable and should be preserved when modifying objects.
+                                      More info: http://kubernetes.io/docs/user-guide/annotations
+                                    type: object
+                                  labels:
+                                    additionalProperties:
+                                      type: string
+                                    description: |-
+                                      Map of string keys and values that can be used to organize and categorize
+                                      (scope and select) objects. May match selectors of replication controllers
+                                      and services.
+                                      More info: http://kubernetes.io/docs/user-guide/labels
+                                    type: object
+                                  name:
+                                    description: The name of the resource. Only
+                                      supported for certain types
+                                    type: string
+                                type: object
+                              spec:
+                                description: |-
+                                  Specification of the desired behavior of the service.
+                                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                                properties:
+                                  allocateLoadBalancerNodePorts:
+                                    description: |-
+                                      allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                                      allocated for services with type LoadBalancer. Default is "true". 
It
+                                      may be set to "false" if the cluster load-balancer does not rely on
+                                      NodePorts. If the caller requests specific NodePorts (by specifying a
+                                      value), those requests will be respected, regardless of this field.
+                                      This field may only be set for services with type LoadBalancer and will
+                                      be cleared if the type is changed to any other type.
+                                    type: boolean
+                                  clusterIP:
+                                    description: |-
+                                      clusterIP is the IP address of the service and is usually assigned
+                                      randomly. If an address is specified manually, is in-range (as per
+                                      system configuration), and is not in use, it will be allocated to the
+                                      service; otherwise creation of the service will fail. This field may not
+                                      be changed through updates unless the type field is also being changed
+                                      to ExternalName (which requires this field to be blank) or the type
+                                      field is being changed from ExternalName (in which case this field may
+                                      optionally be specified, as described above). Valid values are "None",
+                                      empty string (""), or a valid IP address. Setting this to "None" makes a
+                                      "headless service" (no virtual IP), which is useful when direct endpoint
+                                      connections are preferred and proxying is not required. Only applies to
+                                      types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                                      when creating a Service of type ExternalName, creation will fail. This
+                                      field will be wiped when updating a Service to type ExternalName.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    type: string
+                                  clusterIPs:
+                                    description: |-
+                                      ClusterIPs is a list of IP addresses assigned to this service, and are
+                                      usually assigned randomly. If an address is specified manually, is
+                                      in-range (as per system configuration), and is not in use, it will be
+                                      allocated to the service; otherwise creation of the service will fail.
+                                      This field may not be changed through updates unless the type field is
+                                      also being changed to ExternalName (which requires this field to be
+                                      empty) or the type field is being changed from ExternalName (in which
+                                      case this field may optionally be specified, as described above). Valid
+                                      values are "None", empty string (""), or a valid IP address. Setting
+                                      this to "None" makes a "headless service" (no virtual IP), which is
+                                      useful when direct endpoint connections are preferred and proxying is
+                                      not required. Only applies to types ClusterIP, NodePort, and
+                                      LoadBalancer. If this field is specified when creating a Service of type
+                                      ExternalName, creation will fail. This field will be wiped when updating
+                                      a Service to type ExternalName. If this field is not specified, it will
+                                      be initialized from the clusterIP field. If this field is specified,
+                                      clients must ensure that clusterIPs[0] and clusterIP have the same
+                                      value.
+
+                                      This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                                      These IPs must correspond to the values of the ipFamilies field. Both
+                                      clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  externalIPs:
+                                    description: |-
+                                      externalIPs is a list of IP addresses for which nodes in the cluster
+                                      will also accept traffic for this service. These IPs are not managed by
+                                      Kubernetes. The user is responsible for ensuring that traffic arrives
+                                      at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+                                      This field will be wiped when updating a Service to type ExternalName.
+
+                                      This field may hold a maximum of two entries (dual-stack families, in
+                                      either order). These families must correspond to the values of the
+                                      clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                      governed by the ipFamilyPolicy field.
+                                    items:
+                                      description: |-
+                                        IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                        to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  ipFamilyPolicy:
+                                    description: |-
+                                      IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                      this Service. If there is no value provided, then this field will be set
+                                      to SingleStack. Services can be "SingleStack" (a single IP family),
+                                      "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                      a single IP family on single-stack clusters), or "RequireDualStack"
+                                      (two IP families on dual-stack configured clusters, otherwise fail). The
+                                      ipFamilies and clusterIPs fields depend on the value of this field. This
+                                      field will be wiped when updating a service to type ExternalName.
+                                    type: string
+                                  loadBalancerClass:
+                                    description: |-
+                                      loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                      If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                      e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                      This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                      balancer implementation is used, today this is typically done through the cloud provider integration,
+                                      but should apply for any default implementation. If set, it is assumed that a load balancer
+                                      implementation is watching for Services with a matching class. Any default load balancer
+                                      implementation (e.g. cloud providers) should ignore Services that set this field.
+                                      This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                      Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                    type: string
+                                  loadBalancerIP:
+                                    description: |-
+                                      Only applies to Service Type: LoadBalancer.
+                                      This feature depends on whether the underlying cloud-provider supports specifying
+                                      the loadBalancerIP when a load balancer is created.
+                                      This field will be ignored if the cloud-provider does not support the feature.
+                                      Deprecated: This field was under-specified and its meaning varies across implementations.
+                                      Using it is non-portable and it may not support dual-stack.
+                                      Users are encouraged to use implementation-specific annotations when available.
+                                    type: string
+                                  loadBalancerSourceRanges:
+                                    description: |-
+                                      If specified and supported by the platform, traffic through the
+                                      cloud-provider load-balancer will be restricted to the specified client
+                                      IPs. This field will be ignored if the cloud-provider does not support
+                                      the feature.
+                                      More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  ports:
+                                    description: |-
+                                      The list of ports that are exposed by this service.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    items:
+                                      description: ServicePort contains information
+                                        on service's port. 
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
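
To make the managed services schema concrete, here is a sketch that disables
the default read-only service and exposes the read-write endpoint through a
LoadBalancer; the service name is hypothetical:

    spec:
      managed:
        services:
          disabledDefaultServices:
          - ro
          additional:
          - selectorType: rw
            serviceTemplate:
              metadata:
                name: cluster-example-rw-lb  # hypothetical name
              spec:
                type: LoadBalancer
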
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
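
As a sketch of the relabeling hooks described here, a `PodMonitor` metric
relabeling that drops a (hypothetical) family of metrics before ingestion:

    spec:
      monitoring:
        enablePodMonitor: true
        podMonitorMetricRelabelings:
        - action: drop
          sourceLabels:
          - __name__
          regex: cnpg_example_.*  # hypothetical metric name pattern
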
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + default: required + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. 
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
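
For example (a sketch with illustrative values), a readiness probe using the
CloudNativePG-specific `streaming` strategy and a lag budget might be declared
as:

    spec:
      probes:
        readiness:
          type: streaming     # standby must be streaming to be considered ready
          maximumLag: 16Mi    # hypothetical lag limit; streaming strategy only
          failureThreshold: 5
          periodSeconds: 10
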
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
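
A sketch of a single-source `projectedVolumeTemplate`, projecting a
hypothetical ConfigMap under the `/projected` base folder mentioned above:

    spec:
      projectedVolumeTemplate:
        sources:
        - configMap:
            name: app-settings          # hypothetical ConfigMap
            items:
            - key: app.properties
              path: app.properties      # ends up as /projected/app.properties
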
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
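To make the remaining projection sources concrete, here is a hedged sketch combining a `secret` source with a `downwardAPI` source in the same projected volume; the secret name and keys are hypothetical:

    sources:
      - secret:
          name: app-extra-config      # hypothetical Secret
          items:
            - key: config.json
              path: app/config.json   # relative path, must not contain '..'
              mode: 0440              # octal in YAML, decimal in JSON
      - downwardAPI:
          items:
            - path: labels
              fieldRef:
                fieldPath: metadata.labels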
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. 
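The `replica` stanza is easiest to read through an example. A minimal sketch of a replica cluster streaming from an origin declared under `externalClusters` (covered earlier in this schema); all names are invented:

    spec:
      replica:
        enabled: true
        source: cluster-eu-west   # must match an externalClusters entry
      externalClusters:
        - name: cluster-eu-west
          connectionParameters:
            host: cluster-eu-west-rw.example.com
            user: streaming_replica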
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
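Since `claims` is alpha and rarely needed, a typical `resources` block only sets requests and limits. A sketch with illustrative sizing; setting requests equal to limits gives the pods the Guaranteed QoS class, which is usually what you want for PostgreSQL:

    spec:
      resources:
        requests:
          cpu: "2"
          memory: 4Gi
        limits:
          cpu: "2"
          memory: 4Gi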
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
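The startup and shutdown knobs above interact: `stopDelay` bounds the whole graceful shutdown, while `smartShutdownTimeout` reserves the first part of that window, leaving `stopDelay - smartShutdownTimeout` seconds for the subsequent fast shutdown. A sketch with the defaults spelled out:

    spec:
      startDelay: 3600            # startup probe failureThreshold = ceiling(3600 / 10)
      stopDelay: 1800             # total graceful shutdown window, in seconds
      smartShutdownTimeout: 180   # leaves 1800 - 180 = 1620 s for the fast shutdown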
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
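Most clusters only need `size` and optionally `storageClass`, since the operator applies these after evaluating any `pvcTemplate`. A minimal sketch; the storage class name is hypothetical:

    spec:
      storage:
        size: 100Gi               # required unless the pvcTemplate sets it
        storageClass: fast-ssd    # omit to fall back to the default class
        resizeInUseVolumes: true  # default; re-applies size changes to live PVCs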
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
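Tying the `tablespaces` entries above together, a sketch declaring one owned tablespace and one temporary tablespace; names, owner, and sizes are illustrative:

    spec:
      tablespaces:
        - name: analytics
          owner:
            name: app             # PostgreSQL role owning the tablespace
          storage:
            size: 50Gi
        - name: scratch
          temporary: true         # added to temp_tablespaces in PostgreSQL
          storage:
            size: 10Gi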
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
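Putting the topology-spread fields together: the sketch below keeps the cluster's pods evenly spread across zones with a maximum skew of one pod. The `cnpg.io/cluster` pod label and the cluster name are assumptions used for illustration:

    spec:
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: DoNotSchedule
          labelSelector:
            matchLabels:
              cnpg.io/cluster: cluster-example   # assumed pod label and name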
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. 
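`walStorage` mirrors the `storage` stanza but provisions a separate volume for `pg_wal`, isolating WAL I/O from data I/O. A minimal sketch with invented sizes and class:

    spec:
      storage:
        size: 100Gi
      walStorage:
        size: 20Gi
        storageClass: fast-ssd    # hypothetical low-latency class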
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; + can be omitted if ReplicationTLSSecret is provided.
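In practice the `Contains` list above maps to an ordinary Opaque Secret holding `ca.crt` and `ca.key` entries. A hedged sketch of a user-provided client CA secret; the name and data values are placeholders:

    apiVersion: v1
    kind: Secret
    metadata:
      name: cluster-example-ca        # placeholder, reported in clientCASecret
    type: Opaque
    data:
      ca.crt: <base64 CA certificate>
      ca.key: <base64 CA key, optional when replicationTLSSecret is provided>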
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server SSL certificates; + can be omitted if ServerTLSSecret is provided.
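Read back from the cluster, these fields might look like the snippet below; the `<cluster>-ca`, `<cluster>-server`, and `<cluster>-replication` naming is the usual default but should be treated as an assumption here, as should the expiration format:

    status:
      certificates:
        clientCASecret: cluster-example-ca
        serverCASecret: cluster-example-ca
        serverTLSSecret: cluster-example-server
        replicationTLSSecret: cluster-example-replication
        expirations:
          cluster-example-ca: "2026-01-01 00:00:00 +0000 UTC"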
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
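A few of the instance-related status fields together, for a hypothetical three-instance cluster:

    status:
      currentPrimary: cluster-example-1
      instances: 3                 # PVC groups; may briefly differ from pod count
      instanceNames:
        - cluster-example-1
        - cluster-example-2
        - cluster-example-3
      healthyPVC:
        - cluster-example-1
        - cluster-example-2
        - cluster-example-3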
+                type: integer
+              instancesReportedState:
+                additionalProperties:
+                  description: InstanceReportedState describes the last reported state
+                    of an instance during a reconciliation loop
+                  properties:
+                    ip:
+                      description: IP address of the instance
+                      type: string
+                    isPrimary:
+                      description: Indicates if an instance is the primary one
+                      type: boolean
+                    timeLineID:
+                      description: Indicates the TimelineID on which the instance is
+                      type: integer
+                  required:
+                  - isPrimary
+                  type: object
+                description: The reported state of the instances during the last reconciliation
+                  loop
+                type: object
+              instancesStatus:
+                additionalProperties:
+                  items:
+                    type: string
+                  type: array
+                description: InstancesStatus indicates the current status of each
+                  instance
+                type: object
+              jobCount:
+                description: How many Jobs have been created by this cluster
+                format: int32
+                type: integer
+              lastFailedBackup:
+                description: Last failed backup, stored as a date in RFC3339 format
+                type: string
+              lastPromotionToken:
+                description: |-
+                  LastPromotionToken is the last verified promotion token that
+                  was used to promote a replica cluster
+                type: string
+              lastSuccessfulBackup:
+                description: |-
+                  Last successful backup, stored as a date in RFC3339 format
+                  This field is calculated from the content of LastSuccessfulBackupByMethod
+                type: string
+              lastSuccessfulBackupByMethod:
+                additionalProperties:
+                  format: date-time
+                  type: string
+                description: Last successful backup, stored as a date in RFC3339 format,
+                  per backup method type
+                type: object
+              latestGeneratedNode:
+                description: ID of the latest generated node (used to avoid node name
+                  clashing)
+                type: integer
+              managedRolesStatus:
+                description: ManagedRolesStatus reports the state of the managed roles
+                  in the cluster
+                properties:
+                  byStatus:
+                    additionalProperties:
+                      items:
+                        type: string
+                      type: array
+                    description: ByStatus gives the list of roles in each state
+                    type: object
+                  cannotReconcile:
+                    additionalProperties:
+                      items:
+                        type: string
+                      type: array
+                    description: |-
+                      CannotReconcile lists roles that cannot be reconciled in PostgreSQL,
+                      with an explanation of the cause
+                    type: object
+                  passwordStatus:
+                    additionalProperties:
+                      description: PasswordState represents the state of the password
+                        of a managed RoleConfiguration
+                      properties:
+                        resourceVersion:
+                          description: the resource version of the password secret
+                          type: string
+                        transactionID:
+                          description: the last transaction ID to affect the role
+                            definition in PostgreSQL
+                          format: int64
+                          type: integer
+                      type: object
+                    description: PasswordStatus gives the last transaction ID and
+                      password secret version for each managed role
+                    type: object
+                type: object
+              onlineUpdateEnabled:
+                description: OnlineUpdateEnabled shows if the online upgrade is enabled
+                  inside the cluster
+                type: boolean
+              pgDataImageInfo:
+                description: PGDataImageInfo contains the details of the latest image
+                  that has run on the current data directory.
+                properties:
+                  image:
+                    description: Image is the image name
+                    type: string
+                  majorVersion:
+                    description: MajorVersion is the major version of the image
+                    type: integer
+                required:
+                - image
+                - majorVersion
+                type: object
+              phase:
+                description: Current phase of the cluster
+                type: string
+              phaseReason:
+                description: Reason for the current phase
+                type: string
+              pluginStatus:
+                description: PluginStatus is the status of the loaded plugins
+                items:
+                  description: PluginStatus is the status of a loaded plugin
+                  properties:
+                    backupCapabilities:
+                      description: |-
+                        BackupCapabilities are the list of capabilities of the
+                        plugin regarding the Backup management
+                      items:
+                        type: string
+                      type: array
+                    capabilities:
+                      description: |-
+                        Capabilities are the list of capabilities of the
+                        plugin
+                      items:
+                        type: string
+                      type: array
+                    name:
+                      description: Name is the name of the plugin
+                      type: string
+                    operatorCapabilities:
+                      description: |-
+                        OperatorCapabilities are the list of capabilities of the
+                        plugin regarding the reconciler
+                      items:
+                        type: string
+                      type: array
+                    restoreJobHookCapabilities:
+                      description: |-
+                        RestoreJobHookCapabilities are the list of capabilities of the
+                        plugin regarding the RestoreJobHook management
+                      items:
+                        type: string
+                      type: array
+                    status:
+                      description: Status contains the status reported by the plugin
+                        through the SetStatusInCluster interface
+                      type: string
+                    version:
+                      description: |-
+                        Version is the version of the plugin loaded by the
+                        latest reconciliation loop
+                      type: string
+                    walCapabilities:
+                      description: |-
+                        WALCapabilities are the list of capabilities of the
+                        plugin regarding the WAL management
+                      items:
+                        type: string
+                      type: array
+                  required:
+                  - name
+                  - version
+                  type: object
+                type: array
+              poolerIntegrations:
+                description: The integrations needed by poolers referencing the cluster
+                properties:
+                  pgBouncerIntegration:
+                    description: PgBouncerIntegrationStatus encapsulates the needed
+                      integration for the pgbouncer poolers referencing the cluster
+                    properties:
+                      secrets:
+                        items:
+                          type: string
+                        type: array
+                    type: object
+                type: object
+              pvcCount:
+                description: How many PVCs have been created by this cluster
+                format: int32
+                type: integer
+              readService:
+                description: Current list of read pods
+                type: string
+              readyInstances:
+                description: The total number of ready instances in the cluster. It
+                  is equal to the number of ready instance pods.
+                type: integer
+              resizingPVC:
+                description: List of all the PVCs that have the ResizingPVC condition.
+                items:
+                  type: string
+                type: array
+              secretsResourceVersion:
+                description: |-
+                  The list of resource versions of the secrets
+                  managed by the operator. Every change here is done in the
+                  interest of the instance manager, which will refresh the
+                  secret data
+                properties:
+                  applicationSecretVersion:
+                    description: The resource version of the "app" user secret
+                    type: string
+                  barmanEndpointCA:
+                    description: The resource version of the Barman Endpoint CA if
+                      provided
+                    type: string
+                  caSecretVersion:
+                    description: Unused. Retained for compatibility with old versions.
+                    type: string
+                  clientCaSecretVersion:
+                    description: The resource version of the PostgreSQL client-side
+                      CA secret
+                    type: string
+                  externalClusterSecretVersion:
+                    additionalProperties:
+                      type: string
+                    description: The resource versions of the external cluster secrets
+                    type: object
+                  managedRoleSecretVersion:
+                    additionalProperties:
+                      type: string
+                    description: The resource versions of the managed roles secrets
+                    type: object
+                  metrics:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      A map with the versions of all the secrets used to pass metrics.
+                      Map keys are the secret names, map values are the versions
+                    type: object
+                  replicationSecretVersion:
+                    description: The resource version of the "streaming_replica" user
+                      secret
+                    type: string
+                  serverCaSecretVersion:
+                    description: The resource version of the PostgreSQL server-side
+                      CA secret
+                    type: string
+                  serverSecretVersion:
+                    description: The resource version of the PostgreSQL server-side
+                      secret
+                    type: string
+                  superuserSecretVersion:
+                    description: The resource version of the "postgres" user secret
+                    type: string
+                type: object
+              switchReplicaClusterStatus:
+                description: SwitchReplicaClusterStatus is the status of the switch
+                  to replica cluster
+                properties:
+                  inProgress:
+                    description: InProgress indicates if there is an ongoing procedure
+                      of switching a cluster to a replica cluster.
+                    type: boolean
+                type: object
+              tablespacesStatus:
+                description: TablespacesStatus reports the state of the declarative
+                  tablespaces in the cluster
+                items:
+                  description: TablespaceState represents the state of a tablespace
+                    in a cluster
+                  properties:
+                    error:
+                      description: Error is the reconciliation error, if any
+                      type: string
+                    name:
+                      description: Name is the name of the tablespace
+                      type: string
+                    owner:
+                      description: Owner is the PostgreSQL user owning the tablespace
+                      type: string
+                    state:
+                      description: State is the latest reconciliation state
+                      type: string
+                  required:
+                  - name
+                  - state
+                  type: object
+                type: array
+              targetPrimary:
+                description: |-
+                  Target primary instance; this is different from the previous one
+                  during a switchover or a failover
+                type: string
+              targetPrimaryTimestamp:
+                description: The timestamp when the last request for a new primary
+                  has occurred
+                type: string
+              timelineID:
+                description: The timeline of the Postgres cluster
+                type: integer
+              topology:
+                description: Instances topology.
+                properties:
+                  instances:
+                    additionalProperties:
+                      additionalProperties:
+                        type: string
+                      description: PodTopologyLabels represent the topology of a Pod.
+                        map[labelName]labelValue
+                      type: object
+                    description: Instances contains the pod topology of the instances
+                    type: object
+                  nodesUsed:
+                    description: |-
+                      NodesUsed represents the count of distinct nodes accommodating the instances.
+                      A value of '1' suggests that all instances are hosted on a single node,
+                      implying the absence of High Availability (HA). Ideally, this value should
+                      be the same as the number of instances in the Postgres HA cluster, implying
+                      a shared-nothing architecture on the compute side.
+                    format: int32
+                    type: integer
+                  successfullyExtracted:
+                    description: |-
+                      SuccessfullyExtracted indicates if the topology data was extracted.
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
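+# NOTE (illustrative only): fields documented above as "cannot be changed"
+# are enforced through CEL transition rules like the generated block that
+# follows; because `self == oldSelf` compares the updated value of the field
+# against the stored one, the API server rejects any update that modifies it:
+#
+#   x-kubernetes-validations:
+#   - message: collationVersion is immutable
+#     rule: self == oldSelf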
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              applied:
+                description: Applied is true if the database was reconciled correctly
+                type: boolean
+              extensions:
+                description: Extensions is the status of the managed extensions
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+              message:
+                description: Message is the reconciliation output message
+                type: string
+              observedGeneration:
+                description: |-
+                  A sequence number representing the latest
+                  desired state that was synchronized
+                format: int64
+                type: integer
+              schemas:
+                description: Schemas is the status of the managed schemas
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.17.3
+  name: imagecatalogs.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: ImageCatalog
+    listKind: ImageCatalogList
+    plural: imagecatalogs
+    singular: imagecatalog
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: ImageCatalog is the Schema for the imagecatalogs API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: |-
+              Specification of the desired behavior of the ImageCatalog.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              images:
+                description: List of CatalogImages available in the catalog
+                items:
+                  description: CatalogImage defines the image and major version
+                  properties:
+                    image:
+                      description: The image reference
+                      type: string
+                    major:
+                      description: The PostgreSQL major version of the image. Must
+                        be unique within the catalog.
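+# NOTE (illustrative only, not part of the generated schema): a minimal
+# ImageCatalog that pairs each PostgreSQL major version with an image,
+# sketched against the spec above; the image references are hypothetical.
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: ImageCatalog
+#   metadata:
+#     name: example-catalog
+#   spec:
+#     images:
+#     - major: 16
+#       image: ghcr.io/example/postgresql:16
+#     - major: 17
+#       image: ghcr.io/example/postgresql:17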
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
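+# NOTE (illustrative only, not part of the generated schema): a relabeling
+# entry in the shape described by the RelabelConfig schema that follows,
+# dropping one label before scraping; the label name is hypothetical.
+#
+#   podMonitorRelabelings:
+#   - action: labeldrop
+#     regex: example_noisy_label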
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
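+# NOTE (illustrative only, not part of the generated schema): pausing an
+# existing pooler amounts to flipping the `paused` field described above;
+# the resource names below are hypothetical.
+#
+#   apiVersion: postgresql.cnpg.io/v1
+#   kind: Pooler
+#   metadata:
+#     name: pooler-example
+#   spec:
+#     cluster:
+#       name: cluster-example
+#     pgbouncer:
+#       paused: true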
+ type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. 
If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. 
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+                          implementation is watching for Services with a matching class. Any default load balancer
+                          implementation (e.g. cloud providers) should ignore Services that set this field.
+                          This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                          Once set, it cannot be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                        type: string
+                      loadBalancerIP:
+                        description: |-
+                          Only applies to Service Type: LoadBalancer.
+                          This feature depends on whether the underlying cloud-provider supports specifying
+                          the loadBalancerIP when a load balancer is created.
+                          This field will be ignored if the cloud-provider does not support the feature.
+                          Deprecated: This field was under-specified and its meaning varies across implementations.
+                          Using it is non-portable and it may not support dual-stack.
+                          Users are encouraged to use implementation-specific annotations when available.
+                        type: string
+                      loadBalancerSourceRanges:
+                        description: |-
+                          If specified and supported by the platform, traffic through the cloud-provider
+                          load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                          cloud-provider does not support the feature.
+                          More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      ports:
+                        description: |-
+                          The list of ports that are exposed by this service.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        items:
+                          description: ServicePort contains information on service's
+                            port.
+                          properties:
+                            appProtocol:
+                              description: |-
+                                The application protocol for this port.
+                                This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                                This field follows standard Kubernetes label syntax.
+                                Valid values are either:
+
+                                * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                                RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                                * Kubernetes-defined prefixed names:
+                                  * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                                  * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                                  * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                                * Other protocols should use implementation-defined prefixed names such as
+                                mycompany.com/my-custom-protocol.
+                              type: string
+                            name:
+                              description: |-
+                                The name of this port within the service. This must be a DNS_LABEL.
+                                All ports within a ServiceSpec must have unique names. When considering
+                                the endpoints for a Service, this must match the 'name' field in the
+                                EndpointPort.
+                                Optional if only one ServicePort is defined on this service.
+                              type: string
+                            nodePort:
+                              description: |-
+                                The port on each node on which this service is exposed when type is
+                                NodePort or LoadBalancer. Usually assigned by the system. If a value is
+                                specified, in-range, and not in use it will be used, otherwise the
+                                operation will fail. If not specified, a port will be allocated if this
+                                Service requires one. If this field is specified when creating a
+                                Service which does not need it, creation will fail. This field will be
+                                wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+                                  for each node that meets all of the scheduling requirements (resource
+                                  request, requiredDuringScheduling affinity expressions, etc.),
+                                  compute a sum by iterating through the elements of this field and adding
+                                  "weight" to the sum if the node matches the corresponding matchExpressions; the
+                                  node(s) with the highest sum are the most preferred.
+                                items:
+                                  description: |-
+                                    An empty preferred scheduling term matches all objects with implicit weight 0
+                                    (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                                  properties:
+                                    preference:
+                                      description: A node selector term, associated
+                                        with the corresponding weight.
+                                      properties:
+                                        matchExpressions:
+                                          description: A list of node selector requirements
+                                            by node's labels.
+                                          items:
+                                            description: |-
+                                              A node selector requirement is a selector that contains values, a key, and an operator
+                                              that relates the key and values.
+                                            properties:
+                                              key:
+                                                description: The label key that the
+                                                  selector applies to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  Represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  An array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. If the operator is Gt or Lt, the values
+                                                  array must have a single element, which will be interpreted as an integer.
+                                                  This array is replaced during a strategic merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                        matchFields:
+                                          description: A list of node selector requirements
+                                            by node's fields.
+                                          items:
+                                            description: |-
+                                              A node selector requirement is a selector that contains values, a key, and an operator
+                                              that relates the key and values.
+                                            properties:
+                                              key:
+                                                description: The label key that the
+                                                  selector applies to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  Represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  An array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. If the operator is Gt or Lt, the values
+                                                  array must have a single element, which will be interpreted as an integer.
+                                                  This array is replaced during a strategic merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    weight:
+                                      description: Weight associated with matching
+                                        the corresponding nodeSelectorTerm, in the
+                                        range 1-100.
+                                      format: int32
+                                      type: integer
+                                  required:
+                                  - preference
+                                  - weight
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              requiredDuringSchedulingIgnoredDuringExecution:
+                                description: |-
+                                  If the affinity requirements specified by this field are not met at
+                                  scheduling time, the pod will not be scheduled onto the node.
+                                  If the affinity requirements specified by this field cease to be met
+                                  at some point during pod execution (e.g. due to an update), the system
+                                  may or may not try to eventually evict the pod from its node.
+                                properties:
+                                  nodeSelectorTerms:
+                                    description: Required. A list of node selector
+                                      terms. The terms are ORed.
+                                    items:
+                                      description: |-
+                                        A null or empty node selector term matches no objects. The requirements of
+                                        them are ANDed.
+                                        The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                                      properties:
+                                        matchExpressions:
+                                          description: A list of node selector requirements
+                                            by node's labels.
+                                          items:
+                                            description: |-
+                                              A node selector requirement is a selector that contains values, a key, and an operator
+                                              that relates the key and values.
+                                            properties:
+                                              key:
+                                                description: The label key that the
+                                                  selector applies to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  Represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  An array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. If the operator is Gt or Lt, the values
+                                                  array must have a single element, which will be interpreted as an integer.
+                                                  This array is replaced during a strategic merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                        matchFields:
+                                          description: A list of node selector requirements
+                                            by node's fields.
+                                          items:
+                                            description: |-
+                                              A node selector requirement is a selector that contains values, a key, and an operator
+                                              that relates the key and values.
+                                            properties:
+                                              key:
+                                                description: The label key that the
+                                                  selector applies to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  Represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  An array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. If the operator is Gt or Lt, the values
+                                                  array must have a single element, which will be interpreted as an integer.
+                                                  This array is replaced during a strategic merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                required:
+                                - nodeSelectorTerms
+                                type: object
+                                x-kubernetes-map-type: atomic
+                            type: object
+                          podAffinity:
+                            description: Describes pod affinity scheduling rules (e.g.
+                              co-locate this pod in the same node, zone, etc. as some
+                              other pod(s)).
+                            properties:
+                              preferredDuringSchedulingIgnoredDuringExecution:
+                                description: |-
+                                  The scheduler will prefer to schedule pods to nodes that satisfy
+                                  the affinity expressions specified by this field, but it may choose
+                                  a node that violates one or more of the expressions. The node that is
+                                  most preferred is the one with the greatest sum of weights, i.e.
+                                  for each node that meets all of the scheduling requirements (resource
+                                  request, requiredDuringScheduling affinity expressions, etc.),
+                                  compute a sum by iterating through the elements of this field and adding
+                                  "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+                                  node(s) with the highest sum are the most preferred.
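+                                # Illustrative sketch (comment only, not part of the schema):
+                                # a weighted pod-affinity term preferring co-location with pods
+                                # labeled "app: my-app" in the same zone. The label value and
+                                # weight below are assumptions, not defaults.
+                                #
+                                #   preferredDuringSchedulingIgnoredDuringExecution:
+                                #   - weight: 100
+                                #     podAffinityTerm:
+                                #       labelSelector:
+                                #         matchLabels:
+                                #           app: my-app
+                                #       topologyKey: topology.kubernetes.io/zone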
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
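+                                          # Illustrative note (an assumption, not schema text): with
+                                          # matchLabelKeys: ["pod-template-hash"], an incoming pod
+                                          # carrying pod-template-hash=abc123 is matched as if the
+                                          # labelSelector additionally required
+                                          # "pod-template-hash in (abc123)".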
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
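+                                # Illustrative sketch (comment only, values are hypothetical):
+                                # a hard co-location rule requiring this pod to land on the same
+                                # node as pods labeled "app: cache".
+                                #
+                                #   requiredDuringSchedulingIgnoredDuringExecution:
+                                #   - labelSelector:
+                                #       matchLabels:
+                                #         app: cache
+                                #     topologyKey: kubernetes.io/hostname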
+                                items:
+                                  description: |-
+                                    Defines a set of pods (namely those matching the labelSelector
+                                    relative to the given namespace(s)) that this pod should be
+                                    co-located (affinity) or not co-located (anti-affinity) with,
+                                    where co-located is defined as running on a node whose value of
+                                    the label with key <topologyKey> matches that of any node on which
+                                    a pod of the set of pods is running
+                                  properties:
+                                    labelSelector:
+                                      description: |-
+                                        A label query over a set of resources, in this case pods.
+                                        If it's null, this PodAffinityTerm matches with no Pods.
+                                      properties:
+                                        matchExpressions:
+                                          description: matchExpressions is a list
+                                            of label selector requirements. The requirements
+                                            are ANDed.
+                                          items:
+                                            description: |-
+                                              A label selector requirement is a selector that contains values, a key, and an operator that
+                                              relates the key and values.
+                                            properties:
+                                              key:
+                                                description: key is the label key
+                                                  that the selector applies to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  operator represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  values is an array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. This array is replaced during a strategic
+                                                  merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                        matchLabels:
+                                          additionalProperties:
+                                            type: string
+                                          description: |-
+                                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                            map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                            operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                          type: object
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    matchLabelKeys:
+                                      description: |-
+                                        MatchLabelKeys is a set of pod label keys to select which pods will
+                                        be taken into consideration. The keys are used to lookup values from the
+                                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                        to select the group of existing pods which pods will be taken into consideration
+                                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                        pod labels will be ignored. The default value is empty.
+                                        The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                        Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                        This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    mismatchLabelKeys:
+                                      description: |-
+                                        MismatchLabelKeys is a set of pod label keys to select which pods will
+                                        be taken into consideration. The keys are used to lookup values from the
+                                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                        to select the group of existing pods which pods will be taken into consideration
+                                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                        pod labels will be ignored. The default value is empty.
+                                        The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+                                        Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+                                        This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+                                  for each node that meets all of the scheduling requirements (resource
+                                  request, requiredDuringScheduling anti-affinity expressions, etc.),
+                                  compute a sum by iterating through the elements of this field and adding
+                                  "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+                                  node(s) with the highest sum are the most preferred.
+                                items:
+                                  description: The weights of all of the matched WeightedPodAffinityTerm
+                                    fields are added per-node to find the most preferred
+                                    node(s)
+                                  properties:
+                                    podAffinityTerm:
+                                      description: Required. A pod affinity term,
+                                        associated with the corresponding weight.
+                                      properties:
+                                        labelSelector:
+                                          description: |-
+                                            A label query over a set of resources, in this case pods.
+                                            If it's null, this PodAffinityTerm matches with no Pods.
+                                          properties:
+                                            matchExpressions:
+                                              description: matchExpressions is a list
+                                                of label selector requirements. The
+                                                requirements are ANDed.
+                                              items:
+                                                description: |-
+                                                  A label selector requirement is a selector that contains values, a key, and an operator that
+                                                  relates the key and values.
+                                                properties:
+                                                  key:
+                                                    description: key is the label
+                                                      key that the selector applies
+                                                      to.
+                                                    type: string
+                                                  operator:
+                                                    description: |-
+                                                      operator represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                    type: string
+                                                  values:
+                                                    description: |-
+                                                      values is an array of string values. If the operator is In or NotIn,
+                                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                      the values array must be empty. This array is replaced during a strategic
+                                                      merge patch.
+                                                    items:
+                                                      type: string
+                                                    type: array
+                                                    x-kubernetes-list-type: atomic
+                                                required:
+                                                - key
+                                                - operator
+                                                type: object
+                                              type: array
+                                              x-kubernetes-list-type: atomic
+                                            matchLabels:
+                                              additionalProperties:
+                                                type: string
+                                              description: |-
+                                                matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                                map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                                operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                              type: object
+                                          type: object
+                                          x-kubernetes-map-type: atomic
+                                        matchLabelKeys:
+                                          description: |-
+                                            MatchLabelKeys is a set of pod label keys to select which pods will
+                                            be taken into consideration. The keys are used to lookup values from the
+                                            incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                            to select the group of existing pods which pods will be taken into consideration
+                                            for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                            pod labels will be ignored. The default value is empty.
+                                            The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                            Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                            This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                          items:
+                                            type: string
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                        mismatchLabelKeys:
+                                          description: |-
+                                            MismatchLabelKeys is a set of pod label keys to select which pods will
+                                            be taken into consideration. The keys are used to lookup values from the
+                                            incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                            to select the group of existing pods which pods will be taken into consideration
+                                            for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                            pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the
+                                  system may or may not try to eventually evict the pod from its node.
+                                  When there are multiple elements, the lists of nodes corresponding to each
+                                  podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                                items:
+                                  description: |-
+                                    Defines a set of pods (namely those matching the labelSelector
+                                    relative to the given namespace(s)) that this pod should be
+                                    co-located (affinity) or not co-located (anti-affinity) with,
+                                    where co-located is defined as running on a node whose value of
+                                    the label with key <topologyKey> matches that of any node on which
+                                    a pod of the set of pods is running
+                                  properties:
+                                    labelSelector:
+                                      description: |-
+                                        A label query over a set of resources, in this case pods.
+                                        If it's null, this PodAffinityTerm matches with no Pods.
+                                      properties:
+                                        matchExpressions:
+                                          description: matchExpressions is a list
+                                            of label selector requirements. The requirements
+                                            are ANDed.
+                                          items:
+                                            description: |-
+                                              A label selector requirement is a selector that contains values, a key, and an operator that
+                                              relates the key and values.
+                                            properties:
+                                              key:
+                                                description: key is the label key
+                                                  that the selector applies to.
+                                                type: string
+                                              operator:
+                                                description: |-
+                                                  operator represents a key's relationship to a set of values.
+                                                  Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                type: string
+                                              values:
+                                                description: |-
+                                                  values is an array of string values. If the operator is In or NotIn,
+                                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                  the values array must be empty. This array is replaced during a strategic
+                                                  merge patch.
+                                                items:
+                                                  type: string
+                                                type: array
+                                                x-kubernetes-list-type: atomic
+                                            required:
+                                            - key
+                                            - operator
+                                            type: object
+                                          type: array
+                                          x-kubernetes-list-type: atomic
+                                        matchLabels:
+                                          additionalProperties:
+                                            type: string
+                                          description: |-
+                                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                            map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                            operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                          type: object
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    matchLabelKeys:
+                                      description: |-
+                                        MatchLabelKeys is a set of pod label keys to select which pods will
+                                        be taken into consideration. The keys are used to lookup values from the
+                                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+                                        to select the group of existing pods which pods will be taken into consideration
+                                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                        pod labels will be ignored. The default value is empty.
+                                        The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+                                        Also, matchLabelKeys cannot be set when labelSelector isn't set.
+                                        This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    mismatchLabelKeys:
+                                      description: |-
+                                        MismatchLabelKeys is a set of pod label keys to select which pods will
+                                        be taken into consideration. The keys are used to lookup values from the
+                                        incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+                                        to select the group of existing pods which pods will be taken into consideration
+                                        for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                        pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". 
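+                                            # Illustrative sketch (comment only): exposing the pod
+                                            # name to the container via the downward API. The
+                                            # variable name POD_NAME is a hypothetical choice.
+                                            #
+                                            #   env:
+                                            #   - name: POD_NAME
+                                            #     valueFrom:
+                                            #       fieldRef:
+                                            #         fieldPath: metadata.name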
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
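+                                          # Illustrative sketch (comment only): a postStart hook
+                                          # running a trivial command; the command is hypothetical.
+                                          #
+                                          #   lifecycle:
+                                          #     postStart:
+                                          #       exec:
+                                          #         command: ["/bin/sh", "-c", "echo started"]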
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
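+                                          # Illustrative sketch (comment only): a preStop hook that
+                                          # sleeps briefly so endpoints can drain before shutdown;
+                                          # the 5-second value is an arbitrary assumption.
+                                          #
+                                          #   lifecycle:
+                                          #     preStop:
+                                          #       sleep:
+                                          #         seconds: 5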
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
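+                                      # Illustrative sketch (comment only): an HTTP liveness probe
+                                      # against a hypothetical /healthz endpoint on port 8080.
+                                      #
+                                      #   livenessProbe:
+                                      #     httpGet:
+                                      #       path: /healthz
+                                      #       port: 8080
+                                      #     initialDelaySeconds: 10
+                                      #     periodSeconds: 10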
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
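+                                    # Illustrative sketch (comment only): declaring a named
+                                    # container port; the name and number are hypothetical.
+                                    #
+                                    #   ports:
+                                    #   - name: metrics
+                                    #     containerPort: 9187
+                                    #     protocol: TCP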
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
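+                                        # Illustrative sketch (comment only): a conservative
+                                        # requests/limits block; the quantities are placeholders.
+                                        #
+                                        #   resources:
+                                        #     requests:
+                                        #       cpu: 100m
+                                        #       memory: 128Mi
+                                        #     limits:
+                                        #       memory: 256Mi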
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. 
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                      first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                      at which time stdin is closed and remains closed until the container is restarted. If this
+                      flag is false, a container process that reads from stdin will never receive an EOF.
+                      Default is false.
+                    type: boolean
+                  targetContainerName:
+                    description: |-
+                      If set, the name of the container from PodSpec that this ephemeral container targets.
+                      The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+                      If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+                      The container runtime must implement support for this feature. If the runtime does not
+                      support namespace targeting then the result of setting this field is undefined.
+                    type: string
+                  terminationMessagePath:
+                    description: |-
+                      Optional: Path at which the file to which the container's termination message
+                      will be written is mounted into the container's filesystem.
+                      Message written is intended to be brief final status, such as an assertion failure message.
+                      Will be truncated by the node if greater than 4096 bytes. The total message length across
+                      all containers will be limited to 12kb.
+                      Defaults to /dev/termination-log.
+                      Cannot be updated.
+                    type: string
+                  terminationMessagePolicy:
+                    description: |-
+                      Indicate how the termination message should be populated. File will use the contents of
+                      terminationMessagePath to populate the container status message on both success and failure.
+                      FallbackToLogsOnError will use the last chunk of container log output if the termination
+                      message file is empty and the container exited with an error.
+                      The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                      Defaults to File.
+                      Cannot be updated.
+                    type: string
+                  tty:
+                    description: |-
+                      Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                      Default is false.
+                    type: boolean
+                  volumeDevices:
+                    description: volumeDevices is the list of block devices
+                      to be used by the container.
+                    items:
+                      description: volumeDevice describes a mapping of a
+                        raw block device within a container.
+                      properties:
+                        devicePath:
+                          description: devicePath is the path inside of
+                            the container that the device will be mapped
+                            to.
+                          type: string
+                        name:
+                          description: name must match the name of a persistentVolumeClaim
+                            in the pod
+                          type: string
+                      required:
+                      - devicePath
+                      - name
+                      type: object
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - devicePath
+                    x-kubernetes-list-type: map
+                  volumeMounts:
+                    description: |-
+                      Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+                      Cannot be updated.
+                    items:
+                      description: VolumeMount describes a mounting of a
+                        Volume within a container.
+                      properties:
+                        mountPath:
+                          description: |-
+                            Path within the container at which the volume should be mounted. Must
+                            not contain ':'.
+                          type: string
+                        mountPropagation:
+                          description: |-
+                            mountPropagation determines how mounts are propagated from the host
+                            to container and the other way around.
+                            When not set, MountPropagationNone is used.
+                            This field is beta in 1.10.
+                            When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+                            (which defaults to None).
+                          type: string
+                        name:
+                          description: This must match the Name of a Volume.
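+                        # Illustrative only, assuming a volume named "scratch" exists in
+                        # the pod spec: a read-only mount using the fields in this schema:
+                        #   volumeMounts:
+                        #     - name: scratch
+                        #       mountPath: /tmp/scratch
+                        #       readOnly: true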
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+                mitigating container breakout vulnerabilities while still allowing users to run their
+                containers as root without actually having root privileges on the host.
+                This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+              type: boolean
+            hostname:
+              description: |-
+                Specifies the hostname of the Pod.
+                If not specified, the pod's hostname will be set to a system-defined value.
+              type: string
+            imagePullSecrets:
+              description: |-
+                ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+                If specified, these secrets will be passed to individual puller implementations for them to use.
+                More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+              items:
+                description: |-
+                  LocalObjectReference contains enough information to let you locate the
+                  referenced object inside the same namespace.
+                properties:
+                  name:
+                    default: ""
+                    description: |-
+                      Name of the referent.
+                      This field is effectively required, but due to backwards compatibility is
+                      allowed to be empty. Instances of this type with an empty value here are
+                      almost certainly wrong.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+              type: array
+              x-kubernetes-list-map-keys:
+              - name
+              x-kubernetes-list-type: map
+            initContainers:
+              description: |-
+                List of initialization containers belonging to the pod.
+                Init containers are executed in order prior to containers being started. If any
+                init container fails, the pod is considered to have failed and is handled according
+                to its restartPolicy. The name for an init container or normal container must be
+                unique among all containers.
+                Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+                The resourceRequirements of an init container are taken into account during scheduling
+                by finding the highest request/limit for each resource type, and then using the max
+                of that value or the sum of the normal containers. Limits are applied to init containers
+                in a similar fashion.
+                Init containers cannot currently be added or removed.
+                Cannot be updated.
+                More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+              items:
+                description: A single application container that you want
+                  to run within a pod.
+                properties:
+                  args:
+                    description: |-
+                      Arguments to the entrypoint.
+                      The container image's CMD is used if this is not provided.
+                      Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                      cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                      produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                      of whether the variable exists or not. Cannot be updated.
+                      More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  command:
+                    description: |-
+                      Entrypoint array. Not executed within a shell.
+                      The container image's ENTRYPOINT is used if this is not provided.
+                      Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable
+                      cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                      produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                      of whether the variable exists or not. Cannot be updated.
+                      More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                    items:
+                      type: string
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  env:
+                    description: |-
+                      List of environment variables to set in the container.
+                      Cannot be updated.
+                    items:
+                      description: EnvVar represents an environment variable
+                        present in a Container.
+                      properties:
+                        name:
+                          description: Name of the environment variable.
+                            Must be a C_IDENTIFIER.
+                          type: string
+                        value:
+                          description: |-
+                            Variable references $(VAR_NAME) are expanded
+                            using the previously defined environment variables in the container and
+                            any service environment variables. If a variable cannot be resolved,
+                            the reference in the input string will be unchanged. Double $$ are reduced
+                            to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                            "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                            Escaped references will never be expanded, regardless of whether the variable
+                            exists or not.
+                            Defaults to "".
+                          type: string
+                        valueFrom:
+                          description: Source for the environment variable's
+                            value. Cannot be used if value is not empty.
+                          properties:
+                            configMapKeyRef:
+                              description: Selects a key of a ConfigMap.
+                              properties:
+                                key:
+                                  description: The key to select.
+                                  type: string
+                                name:
+                                  default: ""
+                                  description: |-
+                                    Name of the referent.
+                                    This field is effectively required, but due to backwards compatibility is
+                                    allowed to be empty. Instances of this type with an empty value here are
+                                    almost certainly wrong.
+                                    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                  type: string
+                                optional:
+                                  description: Specify whether the ConfigMap
+                                    or its key must be defined
+                                  type: boolean
+                              required:
+                              - key
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            fieldRef:
+                              description: |-
+                                Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                              properties:
+                                apiVersion:
+                                  description: Version of the schema the
+                                    FieldPath is written in terms of, defaults
+                                    to "v1".
+                                  type: string
+                                fieldPath:
+                                  description: Path of the field to select
+                                    in the specified API version.
+                                  type: string
+                              required:
+                              - fieldPath
+                              type: object
+                              x-kubernetes-map-type: atomic
+                            resourceFieldRef:
+                              description: |-
+                                Selects a resource of the container: only resources limits and requests
+                                (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
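+                              # Sketch, with a hypothetical variable name, of surfacing a
+                              # container's memory limit via the resourceFieldRef described here:
+                              #   env:
+                              #     - name: MEMORY_LIMIT_MI
+                              #       valueFrom:
+                              #         resourceFieldRef:
+                              #           resource: limits.memory
+                              #           divisor: 1Mi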
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
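+                    # Example image reference (registry and tag are hypothetical);
+                    # controllers may default or override this field as noted above:
+                    #   image: registry.example.com/busybox:1.36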
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
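+                        # A sketch (command is hypothetical) of a postStart hook using
+                        # the exec handler from this lifecycle schema:
+                        #   lifecycle:
+                        #     postStart:
+                        #       exec:
+                        #         command: ["/bin/sh", "-c", "echo started"]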
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
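+                        # A sketch (path and port are hypothetical) of a preStop hook
+                        # using the httpGet handler from this lifecycle schema:
+                        #   lifecycle:
+                        #     preStop:
+                        #       httpGet:
+                        #         path: /shutdown
+                        #         port: 8080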
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
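+                    # Putting the probe fields above together (endpoint and timings
+                    # are hypothetical):
+                    #   livenessProbe:
+                    #     httpGet:
+                    #       path: /healthz
+                    #       port: 8080
+                    #     initialDelaySeconds: 10
+                    #     periodSeconds: 10
+                    #     failureThreshold: 3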
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
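+                    # A minimal TCP readiness probe using the fields above (the port
+                    # is hypothetical; 5432 is shown since this CRD manages PostgreSQL):
+                    #   readinessProbe:
+                    #     tcpSocket:
+                    #       port: 5432
+                    #     periodSeconds: 10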
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+                          Defaults to the default set of capabilities granted by the container runtime.
+                          Note that this field cannot be set when spec.os.name is windows.
+                        properties:
+                          add:
+                            description: Added capabilities
+                            items:
+                              description: Capability represents POSIX capabilities
+                                type
+                              type: string
+                            type: array
+                            x-kubernetes-list-type: atomic
+                          drop:
+                            description: Removed capabilities
+                            items:
+                              description: Capability represents POSIX capabilities
+                                type
+                              type: string
+                            type: array
+                            x-kubernetes-list-type: atomic
+                        type: object
+                      privileged:
+                        description: |-
+                          Run container in privileged mode.
+                          Processes in privileged containers are essentially equivalent to root on the host.
+                          Defaults to false.
+                          Note that this field cannot be set when spec.os.name is windows.
+                        type: boolean
+                      procMount:
+                        description: |-
+                          procMount denotes the type of proc mount to use for the containers.
+                          The default value is Default which uses the container runtime defaults for
+                          readonly paths and masked paths.
+                          This requires the ProcMountType feature flag to be enabled.
+                          Note that this field cannot be set when spec.os.name is windows.
+                        type: string
+                      readOnlyRootFilesystem:
+                        description: |-
+                          Whether this container has a read-only root filesystem.
+                          Default is false.
+                          Note that this field cannot be set when spec.os.name is windows.
+                        type: boolean
+                      runAsGroup:
+                        description: |-
+                          The GID to run the entrypoint of the container process.
+                          Uses runtime default if unset.
+                          May also be set in PodSecurityContext. If set in both SecurityContext and
+                          PodSecurityContext, the value specified in SecurityContext takes precedence.
+                          Note that this field cannot be set when spec.os.name is windows.
+                        format: int64
+                        type: integer
+                      runAsNonRoot:
+                        description: |-
+                          Indicates that the container must run as a non-root user.
+                          If true, the Kubelet will validate the image at runtime to ensure that it
+                          does not run as UID 0 (root) and fail to start the container if it does.
+                          If unset or false, no such validation will be performed.
+                          May also be set in PodSecurityContext. If set in both SecurityContext and
+                          PodSecurityContext, the value specified in SecurityContext takes precedence.
+                        type: boolean
+                      runAsUser:
+                        description: |-
+                          The UID to run the entrypoint of the container process.
+                          Defaults to user specified in image metadata if unspecified.
+                          May also be set in PodSecurityContext. If set in both SecurityContext and
+                          PodSecurityContext, the value specified in SecurityContext takes precedence.
+                          Note that this field cannot be set when spec.os.name is windows.
+                        format: int64
+                        type: integer
+                      seLinuxOptions:
+                        description: |-
+                          The SELinux context to be applied to the container.
+                          If unspecified, the container runtime will allocate a random SELinux context for each
+                          container. May also be set in PodSecurityContext. If set in both SecurityContext and
+                          PodSecurityContext, the value specified in SecurityContext takes precedence.
+                          Note that this field cannot be set when spec.os.name is windows.
+                        properties:
+                          level:
+                            description: Level is SELinux level label that
+                              applies to the container.
+                            type: string
+                          role:
+                            description: Role is a SELinux role label that
+                              applies to the container.
+                            type: string
+                          type:
+                            description: Type is a SELinux type label that
+                              applies to the container.
+                            type: string
+                          user:
+                            description: User is a SELinux user label that
+                              applies to the container.
+                            type: string
+                        type: object
+                      seccompProfile:
+                        description: |-
+                          The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
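+                    # A sketch (command and thresholds are hypothetical) giving a slow
+                    # starter up to 30 x 10s = 300s before liveness checks take over:
+                    #   startupProbe:
+                    #     exec:
+                    #       command: ["pg_isready"]
+                    #     failureThreshold: 30
+                    #     periodSeconds: 10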
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - devicePath
+ x-kubernetes-list-type: map
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+ If the OS field is set to linux, the following fields must be unset:
+ - securityContext.windowsOptions
+
+ If the OS field is set to windows, the following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.appArmorProfile
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.securityContext.supplementalGroupsPolicy
+ - spec.containers[*].securityContext.appArmorProfile
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+ Additional values may be defined in the future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxChangePolicy:
+ description: |-
+ seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+ It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+ Valid values are "MountOption" and "Recursive".
+
+ "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+ This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+
+ "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+ This requires all Pods that share the same volume to use the same SELinux label.
+ It is not possible to share the same volume among privileged and unprivileged Pods.
+ Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ CSIDriver instance. Other volumes are always re-labelled recursively.
+ "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+
+ If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+ If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+ and "Recursive" for all other volumes.
+
+ This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+
+ All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5 (MinDomains), so "global minimum" is treated as 0.
+ In this situation, a new pod with the same labelSelector cannot be scheduled,
+ because the computed skew will be 3 (3 - 0) if the new Pod is scheduled to any of the three zones,
+ which would violate MaxSkew.
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put a balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers.
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. 
+ type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. 
Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
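For orientation, a minimal Pooler manifest that exercises the `type` switch defined above might be sketched as follows; every name and value here is illustrative rather than prescriptive:

    apiVersion: postgresql.cnpg.io/v1
    kind: Pooler
    metadata:
      name: pooler-example          # illustrative name
    spec:
      cluster:
        name: cluster-example       # illustrative Cluster to front
      instances: 3
      type: rw                      # one of rw, ro, r; defaults to rw
      pgbouncer:
        poolMode: session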
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
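As a sketch, a Publication that replicates every table in a single schema (the `tablesInSchema` form just described) might look like this; the cluster, database, and publication names are illustrative:

    apiVersion: postgresql.cnpg.io/v1
    kind: Publication
    metadata:
      name: publication-example     # illustrative name
    spec:
      cluster:
        name: cluster-source        # the "publisher" cluster, illustrative
      dbname: app
      name: pub                     # name of the publication inside PostgreSQL
      target:
        objects:
          - tablesInSchema: public  # FOR TABLES IN SCHEMA public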
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
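Putting the options just listed together, a minimal ScheduledBackup might be sketched as follows; the names are illustrative, and note that the schedule is a six-field cron expression with a leading seconds specifier (see the `schedule` field below):

    apiVersion: postgresql.cnpg.io/v1
    kind: ScheduledBackup
    metadata:
      name: backup-example          # illustrative name
    spec:
      schedule: "0 0 0 * * *"       # seconds minutes hours day-of-month month day-of-week
      backupOwnerReference: self    # Backup objects are garbage-collected with this ScheduledBackup
      cluster:
        name: cluster-example       # illustrative Cluster to back up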
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to be immediately start after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup. 
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule + format: date-time + type: string + lastScheduleTime: + description: Information when was the last time that backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE SUBSCRIPTION` command + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + 
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + 
rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From b39f118d36f083a7fc0e4deda1ab17638f3312c7 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 14 May 2025 12:20:29 +0200 Subject: [PATCH 584/836] chore(ci): adding missing permissions to action (#7567) The action xt0rted/pull-request-comment-branch requires the permission `issues: read` to do the work properly, this was missing. Per documentation here: https://github.com/xt0rted/pull-request-comment-branch?tab=readme-ov-file#token-permissions Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/continuous-delivery.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 22fbc92889..1c91224e3e 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -93,6 +93,7 @@ jobs: permissions: pull-requests: write contents: read + issues: read runs-on: ubuntu-24.04 outputs: github_ref: ${{ steps.refs.outputs.head_sha }} From 6c4c489e255736004c17add8236ed34f94fb7b29 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 14 May 2025 13:16:03 +0200 Subject: [PATCH 585/836] chore(ci): add missing permissions to workflow (#7552) The workflow to update PostgreSQL versions was having only read permissions not being able to create PRs Signed-off-by: Jonathan Gonzalez V. 
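
For context, a minimal sketch of the corrected step, assuming a personal
access token stored in the `REPO_GHA_PAT` secret; `peter-evans/create-pull-request`
reads the token from its `token` input rather than from a `GITHUB_TOKEN`
environment variable:

```yaml
# Sketch of the fixed workflow step: the PAT is passed through the action's
# `token` input so it can push the branch and open the pull request.
# Title and branch values are illustrative, mirroring the diff below.
- name: Create PR to update PostgreSQL version
  uses: peter-evans/create-pull-request@v7
  with:
    token: ${{ secrets.REPO_GHA_PAT }}
    title: "feat: update default PostgreSQL version"
    branch: "postgres-versions-update"
```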
--- .github/workflows/latest-postgres-version-check.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index 1d1d0e7f3d..c8a7a53732 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -70,9 +70,8 @@ jobs: - name: Create PR to update PostgreSQL version if: env.LATEST_POSTGRES_VERSION_IMAGE != env.CURRENT_POSTGRES_VERSION_IMAGE uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 - env: - GITHUB_TOKEN: ${{ secrets.REPO_GHA_PAT }} with: + token: ${{ secrets.REPO_GHA_PAT }} title: "feat: update default PostgreSQL version to ${{ env.LATEST_POSTGRES_VERSION }}" body: "Update default PostgreSQL version from ${{ env.CURRENT_POSTGRES_VERSION }} to ${{ env.LATEST_POSTGRES_VERSION }}" branch: "postgres-versions-update" @@ -83,9 +82,8 @@ jobs: - name: Create Pull Request if postgresql versions have been updated if: env.LATEST_POSTGRES_VERSION_IMAGE == env.CURRENT_POSTGRES_VERSION_IMAGE uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 - env: - GITHUB_TOKEN: ${{ secrets.REPO_GHA_PAT }} with: + token: ${{ secrets.REPO_GHA_PAT }} title: "test: Updated Postgres versions used in E2E tests" body: "Update the Postgres versions used in E2E tests" branch: "postgres-versions-update" From 874528a4c98bad0eedfc80a634f4949c4330e5a6 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 14 May 2025 17:01:36 +0200 Subject: [PATCH 586/836] docs: fix release announcement link in preview page (#7571) Signed-off-by: Marco Nenciarini --- docs/src/preview_version.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index 1d2375721e..587280b89a 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -33,13 +33,13 @@ may contain serious bugs. Features in preview versions may change in ways that are not backwards compatible and could be removed entirely. ## Current Preview Version - + The current preview version is **1.26.0-rc3**. For more information on the current preview version and how to test, please view the links below: -- [Announcement](https://cloudnative-pg.io/releases/cloudnative-pg-1-26.0-rc2-released/) +- [Announcement](https://cloudnative-pg.io/releases/cloudnative-pg-1-26.0-rc3-released/) - [Documentation](https://cloudnative-pg.io/documentation/preview/) - From 02f01659e6a0aa879f88d21fd4e37c7c9bd9db58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Mon, 19 May 2025 20:20:04 +0200 Subject: [PATCH 587/836] test: allow configuring a custom image repository in major upgrade E2Es (#7303) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch goes together with https://github.com/cloudnative-pg/postgres-trunk-containers/pull/80. The goal is to allow configuring a custom repository (in this case the `postgresql-trunk`) instead of the default ones (via an env variable), which will be used to fetch the target images required to perform the major upgrade scenarios. This will allow us to test major upgrades to development versions of PG18. 
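
As a hedged illustration, an E2E job would opt in by setting the new
variable in its environment (the registry value below is an assumption,
modeled on the linked postgres-trunk-containers repository):

```yaml
# Hypothetical CI job environment: when MAJOR_UPGRADE_IMAGE_REGISTRY is set,
# the major-upgrade E2Es derive their target images from it, for example
# <registry>:<major>-standard-bookworm, instead of the default repositories.
env:
  MAJOR_UPGRADE_IMAGE_REGISTRY: ghcr.io/cloudnative-pg/postgresql-trunk
```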
Closes #7302 Signed-off-by: Niccolò Fei Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- tests/e2e/cluster_major_upgrade_test.go | 102 +++++++++++++++--------- 1 file changed, 66 insertions(+), 36 deletions(-) diff --git a/tests/e2e/cluster_major_upgrade_test.go b/tests/e2e/cluster_major_upgrade_test.go index d79eb47458..819e574e64 100644 --- a/tests/e2e/cluster_major_upgrade_test.go +++ b/tests/e2e/cluster_major_upgrade_test.go @@ -54,11 +54,12 @@ import ( var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade), func() { const ( - level = tests.Medium - namespacePrefix = "cluster-major-upgrade" - postgisEntry = "postgis" - postgresqlEntry = "postgresql" - postgresqlMinimalEntry = "postgresql-minimal" + level = tests.Medium + namespacePrefix = "cluster-major-upgrade" + postgisEntry = "postgis" + postgresqlEntry = "postgresql" + postgresqlMinimalEntry = "postgresql-minimal" + customImageRegistryEnvVar = "MAJOR_UPGRADE_IMAGE_REGISTRY" ) var namespace string @@ -111,14 +112,10 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade generatePostgreSQLCluster := func(namespace string, storageClass string, majorVersion int) *v1.Cluster { cluster := generateBaseCluster(namespace, storageClass) - cluster.Spec.ImageName = "ghcr.io/cloudnative-pg/postgresql:" + strconv.Itoa(majorVersion) - cluster.Spec.Bootstrap = &v1.BootstrapConfiguration{ - InitDB: &v1.BootstrapInitDB{ - PostInitSQL: []string{ - "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;", - "CREATE EXTENSION IF NOT EXISTS pg_trgm;", - }, - }, + cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%d-standard-bookworm", majorVersion) + cluster.Spec.Bootstrap.InitDB.PostInitSQL = []string{ + "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;", + "CREATE EXTENSION IF NOT EXISTS pg_trgm;", } cluster.Spec.PostgresConfiguration.Parameters["pg_stat_statements.track"] = "top" return cluster @@ -132,26 +129,22 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade generatePostGISCluster := func(namespace string, storageClass string, majorVersion int) *v1.Cluster { cluster := generateBaseCluster(namespace, storageClass) cluster.Spec.ImageName = "ghcr.io/cloudnative-pg/postgis:" + strconv.Itoa(majorVersion) - cluster.Spec.Bootstrap = &v1.BootstrapConfiguration{ - InitDB: &v1.BootstrapInitDB{ - PostInitApplicationSQL: []string{ - "CREATE EXTENSION postgis", - "CREATE EXTENSION postgis_raster", - "CREATE EXTENSION postgis_sfcgal", - "CREATE EXTENSION fuzzystrmatch", - "CREATE EXTENSION address_standardizer", - "CREATE EXTENSION address_standardizer_data_us", - "CREATE EXTENSION postgis_tiger_geocoder", - "CREATE EXTENSION postgis_topology", - "CREATE TABLE geometries (name varchar, geom geometry)", - "INSERT INTO geometries VALUES" + - " ('Point', 'POINT(0 0)')," + - " ('Linestring', 'LINESTRING(0 0, 1 1, 2 1, 2 2)')," + - " ('Polygon', 'POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))')," + - " ('PolygonWithHole', 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1))')," + - " ('Collection', 'GEOMETRYCOLLECTION(POINT(2 0),POLYGON((0 0, 1 0, 1 1, 0 1, 0 0)))');", - }, - }, + cluster.Spec.Bootstrap.InitDB.PostInitApplicationSQL = []string{ + "CREATE EXTENSION postgis", + "CREATE EXTENSION postgis_raster", + "CREATE EXTENSION postgis_sfcgal", + "CREATE EXTENSION fuzzystrmatch", + "CREATE EXTENSION address_standardizer", + "CREATE EXTENSION address_standardizer_data_us", + "CREATE EXTENSION postgis_tiger_geocoder", + "CREATE EXTENSION 
postgis_topology", + "CREATE TABLE geometries (name varchar, geom geometry)", + "INSERT INTO geometries VALUES" + + " ('Point', 'POINT(0 0)')," + + " ('Linestring', 'LINESTRING(0 0, 1 1, 2 1, 2 2)')," + + " ('Polygon', 'POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))')," + + " ('PolygonWithHole', 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1))')," + + " ('Collection', 'GEOMETRYCOLLECTION(POINT(2 0),POLYGON((0 0, 1 0, 1 1, 0 1, 0 0)))');", } return cluster } @@ -177,26 +170,55 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade return currentMajor, targetMajor } + // generateTargetImages, given a targetMajor, generates a target image for each buildScenario. + // MAJOR_UPGRADE_IMAGE_REPO env allows to customize the target image repository. + generateTargetImages := func(targetMajor uint64) map[string]string { + const ( + // ImageRepository is the default repository for Postgres container images + ImageRepository = "ghcr.io/cloudnative-pg/postgresql" + + // PostgisImageRepository is the default repository for Postgis container images + PostgisImageRepository = "ghcr.io/cloudnative-pg/postgis" + ) + + // Default target Images + targetImages := map[string]string{ + postgisEntry: fmt.Sprintf("%v:%v", PostgisImageRepository, targetMajor), + postgresqlEntry: fmt.Sprintf("%v:%v-standard-bookworm", ImageRepository, targetMajor), + postgresqlMinimalEntry: fmt.Sprintf("%v:%v-minimal-bookworm", ImageRepository, targetMajor), + } + // Set custom targets when detecting a given env variable + if envValue := os.Getenv(customImageRegistryEnvVar); envValue != "" { + targetImages[postgisEntry] = fmt.Sprintf("%v:%v-postgis-bookworm", envValue, targetMajor) + targetImages[postgresqlEntry] = fmt.Sprintf("%v:%v-standard-bookworm", envValue, targetMajor) + targetImages[postgresqlMinimalEntry] = fmt.Sprintf("%v:%v-minimal-bookworm", envValue, targetMajor) + } + + return targetImages + } + buildScenarios := func( namespace string, storageClass string, currentMajor, targetMajor uint64, ) map[string]*scenario { + targetImages := generateTargetImages(targetMajor) + return map[string]*scenario{ postgisEntry: { startingCluster: generatePostGISCluster(namespace, storageClass, int(currentMajor)), startingMajor: int(currentMajor), - targetImage: fmt.Sprintf("ghcr.io/cloudnative-pg/postgis:%v", targetMajor), + targetImage: targetImages[postgisEntry], targetMajor: int(targetMajor), }, postgresqlEntry: { startingCluster: generatePostgreSQLCluster(namespace, storageClass, int(currentMajor)), startingMajor: int(currentMajor), - targetImage: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%v", targetMajor), + targetImage: targetImages[postgresqlEntry], targetMajor: int(targetMajor), }, postgresqlMinimalEntry: { startingCluster: generatePostgreSQLMinimalCluster(namespace, storageClass, int(currentMajor)), startingMajor: int(currentMajor), - targetImage: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%v-minimal-bookworm", targetMajor), + targetImage: targetImages[postgresqlMinimalEntry], targetMajor: int(targetMajor), }, } @@ -285,6 +307,14 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade DescribeTable("can upgrade a Cluster to a newer major version", func(scenarioName string) { By("Creating the starting cluster") + // Avoid running Postgis major upgrade tests when a custom registry is being specified, because our + // PostGIS images are still based on Debian bullseye which uses OpenSSL 1.1, thus making them incompatible + // with any other image that uses 
OpenSSL 3.0 or greater. + // TODO: remove once we have PostGIS bookworm images + if scenarioName == postgisEntry && os.Getenv(customImageRegistryEnvVar) != "" { + Skip("Skipping PostGIS major upgrades when a custom registry is specified") + } + scenario := scenarios[scenarioName] cluster := scenario.startingCluster err := env.Client.Create(env.Ctx, cluster) From 2e129276ada268d3bd1b5fa3f37af7152afb78ba Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 21 May 2025 09:09:54 +0200 Subject: [PATCH 588/836] feat: allow customization of in-place upgrades from a CNPG-I plugin (#7588) Enable customization of in-place major upgrades using a CNPG-I plugin by passing optional arguments for `pg_upgrade` and `initdb`, and specify alternative executable paths for `initdb` if needed. Closes #7587 Signed-off-by: Marco Nenciarini --- .../manager/instance/upgrade/execute/cmd.go | 86 +++++++++++++------ 1 file changed, 61 insertions(+), 25 deletions(-) diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index 97376d66b9..9fc44816de 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -61,6 +61,9 @@ func NewCmd() *cobra.Command { var clusterName string var namespace string var pgUpgrade string + var pgUpgradeArgs []string + var initdb string + var initdbArgs []string cmd := &cobra.Command{ Use: "execute [options]", @@ -84,7 +87,15 @@ func NewCmd() *cobra.Command { } oldBinDir := strings.TrimSpace(string(oldBinDirBytes)) - return upgradeSubCommand(ctx, instance, pgData, oldBinDir, pgUpgrade) + info := upgradeInfo{ + pgData: pgData, + oldBinDir: oldBinDir, + pgUpgrade: pgUpgrade, + pgUpgradeArgs: pgUpgradeArgs, + initdb: initdb, + initdbArgs: initdbArgs, + } + return info.upgradeSubCommand(ctx, instance) }, PostRunE: func(cmd *cobra.Command, _ []string) error { if err := istio.TryInvokeQuitEndpoint(cmd.Context()); err != nil { @@ -104,18 +115,29 @@ func NewCmd() *cobra.Command { "the current cluster in k8s, used to download TLS certificates") cmd.Flags().StringVar(&pgUpgrade, "pg-upgrade", env.GetOrDefault("PG_UPGRADE", "pg_upgrade"), `The path of "pg_upgrade" executable. Defaults to "pg_upgrade".`) + cmd.Flags().StringArrayVar(&pgUpgradeArgs, "pg-upgrade-args", nil, + `Additional arguments for "pg_upgrade" invocation. `+ + `Use the --pg-upgrade-args flag multiple times to pass multiple arguments.`) + cmd.Flags().StringVar(&initdb, "initdb", env.GetOrDefault("INITDB", "initdb"), + `The path of "initdb" executable. 
Defaults to "initdb".`) + cmd.Flags().StringArrayVar(&initdbArgs, "initdb-args", nil, + `Additional arguments for "initdb" invocation.`+ + `Use the --initdb-args flag multiple times to pass multiple arguments.`) return cmd } +type upgradeInfo struct { + pgData string + oldBinDir string + pgUpgrade string + pgUpgradeArgs []string + initdb string + initdbArgs []string +} + // nolint:gocognit -func upgradeSubCommand( - ctx context.Context, - instance *postgres.Instance, - pgData string, - oldBinDir string, - pgUpgrade string, -) error { +func (ui upgradeInfo) upgradeSubCommand(ctx context.Context, instance *postgres.Instance) error { contextLogger := log.FromContext(ctx) client, err := management.NewControllerRuntimeClient() @@ -184,7 +206,7 @@ func upgradeSubCommand( } // Extract controldata information from the old data directory - controlData, err := getControlData(oldBinDir, pgData) + controlData, err := getControlData(ui.oldBinDir, ui.pgData) if err != nil { return fmt.Errorf("error while getting old data directory control data: %w", err) } @@ -195,7 +217,7 @@ func upgradeSubCommand( } contextLogger.Info("Creating data directory", "directory", newDataDir) - if err := runInitDB(newDataDir, newWalDir, controlData, targetVersion); err != nil { + if err := runInitDB(newDataDir, newWalDir, controlData, targetVersion, ui.initdb, ui.initdbArgs); err != nil { return fmt.Errorf("error while creating the data directory: %w", err) } @@ -206,7 +228,7 @@ func upgradeSubCommand( contextLogger.Info("Checking if we have anything to update") // Read pg_version from both the old and new data directories - oldVersion, err := postgresutils.GetMajorVersionFromPgData(pgData) + oldVersion, err := postgresutils.GetMajorVersionFromPgData(ui.pgData) if err != nil { return fmt.Errorf("error while reading the old version: %w", err) } @@ -226,17 +248,17 @@ func upgradeSubCommand( // We need to make sure that the permissions are the right ones // in some systems they may be messed up even if we fix them before - _ = fileutils.EnsurePgDataPerms(pgData) + _ = fileutils.EnsurePgDataPerms(ui.pgData) _ = fileutils.EnsurePgDataPerms(newDataDir) contextLogger.Info("Running pg_upgrade") - if err := runPgUpgrade(pgData, pgUpgrade, newDataDir, oldBinDir); err != nil { + if err := ui.runPgUpgrade(newDataDir); err != nil { // TODO: in case of failures we should dump the content of the pg_upgrade logs return fmt.Errorf("error while running pg_upgrade: %w", err) } - err = moveDataInPlace(ctx, pgData, oldVersion, newDataDir, newWalDir) + err = moveDataInPlace(ctx, ui.pgData, oldVersion, newDataDir, newWalDir) if err != nil { contextLogger.Error(err, "Error while moving the data in place, saving the new data directory to avoid data loss") @@ -245,7 +267,7 @@ func upgradeSubCommand( dirToBeSaved := []string{ newDataDir, - pgData + ".old", + ui.pgData + ".old", } if newWalDir != nil { dirToBeSaved = append(dirToBeSaved, @@ -281,7 +303,14 @@ func getControlData(binDir, pgData string) (map[string]string, error) { return utils.ParsePgControldataOutput(string(out)), nil } -func runInitDB(destDir string, walDir *string, pgControlData map[string]string, targetMajorVersion int) error { +func runInitDB( + destDir string, + walDir *string, + pgControlData map[string]string, + targetMajorVersion int, + initdb string, + initdbArgs []string, +) error { // Invoke initdb to generate a data directory options := []string{ "--username", @@ -305,13 +334,15 @@ func runInitDB(destDir string, walDir *string, pgControlData map[string]string, return err } + 
options = append(options, initdbArgs...) + // Certain CSI drivers may add setgid permissions on newly created folders. // A default umask is set to attempt to avoid this, by revoking group/other // permission bits on the PGDATA _ = compatibility.Umask(0o077) - initdbCmd := exec.Command(constants.InitdbName, options...) // #nosec - if err := execlog.RunStreaming(initdbCmd, constants.InitdbName); err != nil { + initdbCmd := exec.Command(initdb, options...) // #nosec + if err := execlog.RunStreaming(initdbCmd, initdb); err != nil { return err } @@ -391,17 +422,22 @@ func prepareConfigurationFiles(ctx context.Context, cluster apiv1.Cluster, destD return nil } -func runPgUpgrade(oldDataDir string, pgUpgrade string, newDataDir string, oldBinDir string) error { - // Run the pg_upgrade command - cmd := exec.Command(pgUpgrade, +func (ui upgradeInfo) runPgUpgrade( + newDataDir string, +) error { + args := []string{ "--link", "--username", "postgres", - "--old-bindir", oldBinDir, - "--old-datadir", oldDataDir, + "--old-bindir", ui.oldBinDir, + "--old-datadir", ui.pgData, "--new-datadir", newDataDir, - ) // #nosec + } + args = append(args, ui.pgUpgradeArgs...) + + // Run the pg_upgrade command + cmd := exec.Command(ui.pgUpgrade, args...) // #nosec cmd.Dir = newDataDir - if err := execlog.RunStreaming(cmd, path.Base(pgUpgrade)); err != nil { + if err := execlog.RunStreaming(cmd, path.Base(ui.pgUpgrade)); err != nil { return fmt.Errorf("error while running %q: %w", cmd, err) } From 975b739c8536030963db1adfd85bb5b29069f4db Mon Sep 17 00:00:00 2001 From: Gustavo Fernandes de Carvalho <17139678+gusfcarvalho@users.noreply.github.com> Date: Wed, 21 May 2025 04:20:53 -0300 Subject: [PATCH 589/836] docs: add OpenBao as OSS alternative to Vault (#7470) Hashicorp Vault, though commonly used, does have a restrictive, BUSL license that most organizations cannot use. Adding a mention to https://openbao.org, which is released under the MPL license. Signed-off-by: Gustavo Fernandes de Carvalho <17139678+gusfcarvalho@users.noreply.github.com> Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 2 ++ docs/src/cncf-projects/external-secrets.md | 8 ++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index cf12432d8a..bccda465d9 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -25,6 +25,7 @@ AzureCredentials AzurePVCUpdateEnabled Azurite BDR +BUSL BackupCapabilities BackupConfiguration BackupFrom @@ -285,6 +286,7 @@ OngoingSnapshotBackups OnlineConfiguration OnlineUpdateEnabled OnlineUpgrading +OpenBao OpenID OpenSSL OpenShift diff --git a/docs/src/cncf-projects/external-secrets.md b/docs/src/cncf-projects/external-secrets.md index 7be49dd3fc..1fd2c840b4 100644 --- a/docs/src/cncf-projects/external-secrets.md +++ b/docs/src/cncf-projects/external-secrets.md @@ -149,8 +149,12 @@ password. ## Example: Integration with an External KMS -A widely used Key Management Service (KMS) provider in the CNCF ecosystem is -[HashiCorp Vault](https://www.vaultproject.io/). +One of the most widely used Key Management Service (KMS) providers in the CNCF +ecosystem is [HashiCorp Vault](https://www.vaultproject.io/). Although Vault is +licensed under the Business Source License (BUSL), a fully compatible and +actively maintained open source alternative is available: [OpenBao](https://openbao.org/). +OpenBao supports all the same interfaces as HashiCorp Vault, making it a true +drop-in replacement. 
In this example, we'll demonstrate how to integrate CloudNativePG, External Secrets Operator, and HashiCorp Vault to automatically rotate From bc479e25524e431aee872207983fee4f458c77ee Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Wed, 21 May 2025 10:30:47 +0200 Subject: [PATCH 590/836] docs: clarify example in distributed topology (#7594) Closes #7593 Signed-off-by: Gabriele Bartolini --- docs/src/replica_cluster.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/docs/src/replica_cluster.md b/docs/src/replica_cluster.md index 9c1262f72f..71fd23a594 100644 --- a/docs/src/replica_cluster.md +++ b/docs/src/replica_cluster.md @@ -171,9 +171,6 @@ continuous recovery are thoroughly explained below. ## Distributed Topology -!!! Important - The Distributed Topology strategy was introduced in CloudNativePG 1.24. - ### Planning for a Distributed PostgreSQL Database As Dwight Eisenhower famously said, "Planning is everything", and this holds @@ -197,9 +194,14 @@ local object store. This object store is also accessible by the PostgreSQL `Cluster` named `cluster-eu-central`, installed in the Central European Kubernetes cluster. Initially, `cluster-eu-central` functions as a replica cluster. Following a symmetric approach, it also has a local object store for -continuous backup, which needs to be read by `cluster-eu-south`. The recovery -in this setup relies solely on WAL shipping, with no streaming connection -between the two clusters. +continuous backup, which needs to be read by `cluster-eu-south`. + +In this example, recovery is performed solely through WAL shipping, without any +streaming replication between the two clusters. However, you can configure the +setup to use streaming replication alone or adopt a hybrid approach—streaming +replication with WAL shipping as a fallback—as described in the +[“Configuring replication”](replica_cluster.md#defining-an-external-cluster) +section. Here’s how you would configure the `externalClusters` section for both `Cluster` resources: From 0ccb770484cff2d116a7dde53de751c0fd7d895b Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Wed, 21 May 2025 18:54:12 +0200 Subject: [PATCH 591/836] docs(major upgrades): clarify image requires same OS (#7604) Closes #7580 Signed-off-by: Gabriele Bartolini --- docs/src/postgres_upgrades.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/src/postgres_upgrades.md b/docs/src/postgres_upgrades.md index 46f211875f..4f8af744fe 100644 --- a/docs/src/postgres_upgrades.md +++ b/docs/src/postgres_upgrades.md @@ -54,6 +54,11 @@ CloudNativePG performs an **offline in-place major upgrade** when a new operand container image with a higher PostgreSQL major version is declaratively requested for a cluster. +!!! Important + Major upgrades are only supported between images based on the same + operating system distribution. For example, if your previous version uses a + `bullseye` image, you cannot upgrade to a `bookworm` image. 
+ You can trigger the upgrade in one of two ways: - By updating the major version in the image tag via the `.spec.imageName` From 6a36fdbb796c6e9eef26515d793cad6b3d3ec5c1 Mon Sep 17 00:00:00 2001 From: Peggie Date: Wed, 21 May 2025 19:07:04 +0200 Subject: [PATCH 592/836] feat: Public Cloud K8S versions update (#7463) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 1 + .github/kind_versions.json | 8 ++++---- .github/openshift_versions.json | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index a332bfe43c..4f53fabd97 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,4 +1,5 @@ [ + "1.33.0", "1.32.3", "1.31.7", "1.30.9" diff --git a/.github/kind_versions.json b/.github/kind_versions.json index 7d090afa05..0f0983321d 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,8 +1,8 @@ [ - "v1.33.0", - "v1.32.3", - "v1.31.6", - "v1.30.10", + "v1.33.1", + "v1.32.5", + "v1.31.9", + "v1.30.13", "v1.29.14", "v1.28.15", "v1.27.16" diff --git a/.github/openshift_versions.json b/.github/openshift_versions.json index e41221f17a..1421d20ce0 100644 --- a/.github/openshift_versions.json +++ b/.github/openshift_versions.json @@ -1,4 +1,5 @@ [ + "4.19", "4.18", "4.17", "4.16", From a111ccde8b1488e5d44fcfdabb7ecf3d31aa9b36 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 21 May 2025 19:14:44 +0200 Subject: [PATCH 593/836] test: enable unit test on 1.33 (#7606) Closes #7605 Signed-off-by: Marco Nenciarini --- .github/k8s_versions_scope.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/k8s_versions_scope.json b/.github/k8s_versions_scope.json index 2bd3e8cbee..5f9b4d6b49 100644 --- a/.github/k8s_versions_scope.json +++ b/.github/k8s_versions_scope.json @@ -6,5 +6,5 @@ "GKE": {"min": "1.29", "max": ""}, "OPENSHIFT": {"min": "4.12", "max": ""} }, - "unit_test": {"min": "1.27", "max": "1.32"} + "unit_test": {"min": "1.27", "max": "1.33"} } From c9004de531fbb8a87f586543ef64bcfb3a3d1f49 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 21 May 2025 19:19:01 +0200 Subject: [PATCH 594/836] chore: remove regexp cache from API (#7582) The Cluster API included some extra fields to cache the compiled regexp and the related errors. Unfortunately, this prevents the API from being used with [controllerutil.CreateOrUpdate](1). This patch removes the cache, compiling the regexp when needed. 
Closes: #6605 Signed-off-by: Leonardo Cecchi [1]: https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.4/pkg/controller/controllerutil#CreateOrUpdate Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- api/v1/cluster_funcs.go | 56 ++++++++++++++++++-------- api/v1/cluster_funcs_test.go | 24 +++-------- api/v1/cluster_types.go | 14 ------- api/v1/zz_api_repo_funcs_to_copy.go | 30 -------------- api/v1/zz_generated.deepcopy.go | 1 - internal/webhook/v1/cluster_webhook.go | 4 +- 6 files changed, 47 insertions(+), 82 deletions(-) delete mode 100644 api/v1/zz_api_repo_funcs_to_copy.go diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 3e1b08c584..7234cbcfe3 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -154,27 +154,47 @@ func (status *ClusterStatus) GetAvailableArchitecture(archName string) *Availabl return nil } -func (r *SynchronizeReplicasConfiguration) compileRegex() []error { - if r == nil { - return nil +type regexErrors struct { + errs []error +} + +func (r regexErrors) Error() string { + if len(r.errs) == 0 { + return "" + } + var sb strings.Builder + sb.WriteString("failed to compile regex patterns: ") + for _, err := range r.errs { + sb.WriteString(err.Error()) + sb.WriteString("; ") } - if r.compiled { - return r.compileErrors + return sb.String() +} + +func (r *SynchronizeReplicasConfiguration) compileRegex() ([]regexp.Regexp, error) { + if r == nil { + return nil, nil } - var errs []error - for _, pattern := range r.ExcludePatterns { + var ( + compiledPatterns = make([]regexp.Regexp, len(r.ExcludePatterns)) + compileErrors []error + ) + + for idx, pattern := range r.ExcludePatterns { re, err := regexp.Compile(pattern) if err != nil { - errs = append(errs, err) + compileErrors = append(compileErrors, err) continue } - r.compiledPatterns = append(r.compiledPatterns, *re) + compiledPatterns[idx] = *re } - r.compiled = true - r.compileErrors = errs - return errs + if len(compileErrors) > 0 { + return nil, regexErrors{errs: compileErrors} + } + + return compiledPatterns, nil } // GetEnabled returns false if synchronized replication slots are disabled, defaults to true @@ -186,8 +206,9 @@ func (r *SynchronizeReplicasConfiguration) GetEnabled() bool { } // ValidateRegex returns all the errors that happened during the regex compilation -func (r *SynchronizeReplicasConfiguration) ValidateRegex() []error { - return r.compileRegex() +func (r *SynchronizeReplicasConfiguration) ValidateRegex() error { + _, err := r.compileRegex() + return err } // IsExcludedByUser returns if a replication slot should not be reconciled on the replicas @@ -196,12 +217,13 @@ func (r *SynchronizeReplicasConfiguration) IsExcludedByUser(slotName string) (bo return false, nil } + compiledPatterns, err := r.compileRegex() // this is an unexpected issue, validation should happen at webhook level - if errs := r.compileRegex(); len(errs) > 0 { - return false, errs[0] + if err != nil { + return false, err } - for _, re := range r.compiledPatterns { + for _, re := range compiledPatterns { if re.MatchString(slotName) { return true, nil } diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index 95fbeba05c..01799bd6e1 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -1189,7 +1189,7 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { Context("CompileRegex", func() { It("should return no errors when SynchronizeReplicasConfiguration is nil", 
func() { synchronizeReplicas = nil - Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty()) + Expect(synchronizeReplicas.ValidateRegex()).ToNot(HaveOccurred()) }) Context("when SynchronizeReplicasConfiguration is not nil", func() { @@ -1198,7 +1198,7 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { }) It("should compile patterns without errors", func() { - Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty()) + Expect(synchronizeReplicas.ValidateRegex()).ToNot(HaveOccurred()) }) Context("when a pattern fails to compile", func() { @@ -1207,16 +1207,11 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { }) It("should return errors for the invalid pattern", func() { - errors := synchronizeReplicas.ValidateRegex() - Expect(errors).To(HaveLen(1)) + err := synchronizeReplicas.ValidateRegex() + Expect(err).To(HaveOccurred()) }) }) }) - - It("should return no errors on subsequent calls when compile is called multiple times", func() { - Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty()) - Expect(synchronizeReplicas.ValidateRegex()).To(BeEmpty()) - }) }) Context("GetEnabled", func() { @@ -1268,19 +1263,12 @@ var _ = Describe("SynchronizeReplicasConfiguration", func() { Expect(isExcludedByUser).To(BeTrue()) }) - It("should compile patterns before checking for exclusion when compile is not called", func() { - Expect(synchronizeReplicas.compiledPatterns).To(BeEmpty()) - isExcludedByUser, err := synchronizeReplicas.IsExcludedByUser("pattern1MatchingSlot") - Expect(err).ToNot(HaveOccurred()) - Expect(isExcludedByUser, err).To(BeTrue()) - Expect(synchronizeReplicas.compiledPatterns).To(HaveLen(2)) - }) - It("should return an error in case of an invalid pattern", func() { synchronizeReplicas.ExcludePatterns = []string{"([a-zA-Z]+"} isExcludedByUser, err := synchronizeReplicas.IsExcludedByUser("test") Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("error parsing regexp: missing closing ): `([a-zA-Z]+`")) + Expect(err.Error()).To(Equal("failed to compile regex patterns: error parsing regexp: " + + "missing closing ): `([a-zA-Z]+`; ")) Expect(isExcludedByUser).To(BeFalse()) }) }) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 9e3b35f9a6..21c43565cf 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -20,8 +20,6 @@ SPDX-License-Identifier: Apache-2.0 package v1 import ( - "regexp" - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -1123,18 +1121,6 @@ type SynchronizeReplicasConfiguration struct { // List of regular expression patterns to match the names of replication slots to be excluded (by default empty) // +optional ExcludePatterns []string `json:"excludePatterns,omitempty"` - - synchronizeReplicasCache `json:"-"` -} - -// synchronizeReplicasCache contains the result of the regex compilation -// +kubebuilder:object:generate:=false -type synchronizeReplicasCache struct { - compiledPatterns []regexp.Regexp `json:"-"` - - compiled bool `json:"-"` - - compileErrors []error `json:"-"` } // ReplicationSlotsConfiguration encapsulates the configuration diff --git a/api/v1/zz_api_repo_funcs_to_copy.go b/api/v1/zz_api_repo_funcs_to_copy.go deleted file mode 100644 index bd55f68dec..0000000000 --- a/api/v1/zz_api_repo_funcs_to_copy.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright © contributors to CloudNativePG, established as -CloudNativePG a Series of LF Projects, LLC. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package v1 - -// IMPORTANT: -// This file contains the functions that need to be copied from the api/v1 package to the cloudnative-pg/api -// repository. This is currently required because the controller-gen tool cannot generate DeepCopyInto for the -// regexp type. This will be removed once the controller-gen tool supports this feature. - -// DeepCopyInto needs to be manually added for the controller-gen compiler to work correctly, given that it cannot -// generate the DeepCopyInto for the regexp type. -// The method is empty because we don't want to transfer the cache when invoking DeepCopyInto. -func (receiver synchronizeReplicasCache) DeepCopyInto(*synchronizeReplicasCache) {} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 6ec7f7fa4e..c8b3917c96 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -3068,7 +3068,6 @@ func (in *SynchronizeReplicasConfiguration) DeepCopyInto(out *SynchronizeReplica *out = make([]string, len(*in)) copy(*out, *in) } - in.synchronizeReplicasCache.DeepCopyInto(&out.synchronizeReplicasCache) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SynchronizeReplicasConfiguration. diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 453de80997..7091457935 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -2059,11 +2059,11 @@ func (v *ClusterCustomValidator) validateReplicationSlots(r *apiv1.Cluster) fiel return nil } - if errs := r.Spec.ReplicationSlots.SynchronizeReplicas.ValidateRegex(); len(errs) > 0 { + if err := r.Spec.ReplicationSlots.SynchronizeReplicas.ValidateRegex(); err != nil { return field.ErrorList{ field.Invalid( field.NewPath("spec", "replicationSlots", "synchronizeReplicas", "excludePatterns"), - errs, + err, "Cannot configure synchronizeReplicas. Invalid regexes were found"), } } From 4970da38868e6d23dd3c8628b743f6f97ee3905c Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Wed, 21 May 2025 19:31:26 +0200 Subject: [PATCH 595/836] chore: stop testing Kubernetes versions older than 1.29 (#7608) Removed support for Kubernetes (and OpenShift) versions older than 1.29. 
Closes #7607 Signed-off-by: Gabriele Bartolini --- .github/k8s_versions_scope.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/k8s_versions_scope.json b/.github/k8s_versions_scope.json index 5f9b4d6b49..03d41581e4 100644 --- a/.github/k8s_versions_scope.json +++ b/.github/k8s_versions_scope.json @@ -1,10 +1,10 @@ { "e2e_test": { - "KIND": {"min": "1.27", "max": ""}, - "AKS": {"min": "1.28", "max": ""}, + "KIND": {"min": "1.29", "max": ""}, + "AKS": {"min": "1.29", "max": ""}, "EKS": {"min": "1.29", "max": ""}, "GKE": {"min": "1.29", "max": ""}, - "OPENSHIFT": {"min": "4.12", "max": ""} + "OPENSHIFT": {"min": "4.16", "max": ""} }, - "unit_test": {"min": "1.27", "max": "1.33"} + "unit_test": {"min": "1.29", "max": "1.33"} } From 3c5df07861ea8dabc1dd5fb2c61406308e7763e5 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Wed, 21 May 2025 19:49:19 +0200 Subject: [PATCH 596/836] docs: restructure backup and recovery section for CNPG-I plugins (#7581) With the introduction of the CNPG-I interface and official support for the Barman Cloud Plugin, the backup and recovery documentation has been reorganised to reflect the new plugin-based architecture. This is the first step toward a more modular and extensible approach. Further work is planned outside the core CloudNativePG repository, including maintaining an inventory of available plugins and migrating volume snapshot support into a dedicated plugin. Closes #6876 Signed-off-by: Gabriele Bartolini Signed-off-by: Francesco Canovai Co-authored-by: Francesco Canovai --- .wordlist-en-custom.txt | 1 + docs/mkdocs.yml | 5 +- .../backup_barmanobjectstore.md | 158 +++++- .../{ => appendixes}/backup_volumesnapshot.md | 57 +- docs/src/appendixes/object_stores.md | 134 +---- docs/src/backup.md | 497 +++++++++++------- docs/src/bootstrap.md | 6 +- docs/src/index.md | 24 +- docs/src/operator_capability_levels.md | 52 +- docs/src/recovery.md | 362 ++++++------- docs/src/replica_cluster.md | 58 +- docs/src/wal_archiving.md | 102 ++-- 12 files changed, 762 insertions(+), 694 deletions(-) rename docs/src/{ => appendixes}/backup_barmanobjectstore.md (57%) rename docs/src/{ => appendixes}/backup_volumesnapshot.md (91%) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index bccda465d9..cc086513df 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -507,6 +507,7 @@ Uncomment Unrealizable UpdateStrategy VLDB +VLDBs VM VMs VOLNAME diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 5cf758e60d..fe3de716f7 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -14,6 +14,7 @@ markdown_extensions: - def_list - attr_list - footnotes + - pymdownx.caret nav: - index.md @@ -34,9 +35,7 @@ nav: - replication.md - logical_replication.md - backup.md - - backup_barmanobjectstore.md - wal_archiving.md - - backup_volumesnapshot.md - recovery.md - service_management.md - postgresql_conf.md @@ -77,4 +76,6 @@ nav: - CNCF Projects Integrations: - cncf-projects/external-secrets.md - Appendixes: + - appendixes/backup_volumesnapshot.md + - appendixes/backup_barmanobjectstore.md - appendixes/object_stores.md diff --git a/docs/src/backup_barmanobjectstore.md b/docs/src/appendixes/backup_barmanobjectstore.md similarity index 57% rename from docs/src/backup_barmanobjectstore.md rename to docs/src/appendixes/backup_barmanobjectstore.md index 8c062f64bf..8573315567 100644 --- a/docs/src/backup_barmanobjectstore.md +++ b/docs/src/appendixes/backup_barmanobjectstore.md @@ -1,11 +1,14 @@ -# Backup on object stores +# Appendix B 
- Backup on object stores + !!! Warning - With the deprecation of native Barman Cloud support in CloudNativePG in - favor of the Barman Cloud Plugin, this page—and the backup and recovery - documentation—may undergo changes before the official release of version - 1.26.0. + As of CloudNativePG 1.26, **native Barman Cloud support is deprecated** in + favor of the **Barman Cloud Plugin**. This page has been moved to the appendix + for reference purposes. While the native integration remains functional for + now, we strongly recommend beginning a gradual migration to the plugin-based + interface after appropriate testing. For guidance, see + [Migrating from Built-in CloudNativePG Backup](https://cloudnative-pg.io/plugin-barman-cloud/docs/migration/). CloudNativePG natively supports **online/hot backup** of PostgreSQL clusters through continuous physical backup and WAL archiving on an object @@ -34,23 +37,90 @@ as it is composed of a community PostgreSQL image and the latest A backup is performed from a primary or a designated primary instance in a `Cluster` (please refer to -[replica clusters](replica_cluster.md) +[replica clusters](../replica_cluster.md) for more information about designated primary instances), or alternatively -on a [standby](backup.md#backup-from-a-standby). +on a [standby](../backup.md#backup-from-a-standby). ## Common object stores If you are looking for a specific object store such as -[AWS S3](appendixes/object_stores.md#aws-s3), -[Microsoft Azure Blob Storage](appendixes/object_stores.md#azure-blob-storage), -[Google Cloud Storage](appendixes/object_stores.md#google-cloud-storage), or -[MinIO Gateway](appendixes/object_stores.md#minio-gateway), or a compatible -provider, please refer to [Appendix A - Common object stores](appendixes/object_stores.md). +[AWS S3](object_stores.md#aws-s3), +[Microsoft Azure Blob Storage](object_stores.md#azure-blob-storage), +[Google Cloud Storage](object_stores.md#google-cloud-storage), or a compatible +provider, please refer to [Appendix C - Common object stores for backups](object_stores.md). -## Retention policies +## WAL archiving + +WAL archiving is the process that feeds a [WAL archive](../backup.md#wal-archive) +in CloudNativePG. + +The WAL archive is defined in the `.spec.backup.barmanObjectStore` stanza of +a `Cluster` resource. + +!!! Info + Please refer to [`BarmanObjectStoreConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration) + in the barman-cloud API for a full list of options. + +If required, you can choose to compress WAL files as soon as they +are uploaded and/or encrypt them: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +[...] +spec: + backup: + barmanObjectStore: + [...] + wal: + compression: gzip + encryption: AES256 +``` + +You can configure the encryption directly in your bucket, and the operator +will use it unless you override it in the cluster configuration. + +PostgreSQL implements a sequential archiving scheme, where the +`archive_command` will be executed sequentially for every WAL +segment to be archived. !!! Important - Retention policies are not currently available on volume snapshots. + By default, CloudNativePG sets `archive_timeout` to `5min`, ensuring + that WAL files, even in case of low workloads, are closed and archived + at least every 5 minutes, providing a deterministic time-based value for + your Recovery Point Objective ([RPO](../before_you_start.md#rpo)). 
Even though you can change the value
+    of the [`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT),
+    our experience suggests that the default value set by the operator is
+    suitable for most use cases.
+
+When the bandwidth between the PostgreSQL instance and the object
+store allows archiving more than one WAL file in parallel, you
+can use the parallel WAL archiving feature of the instance manager
+as in the following example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+[...]
+spec:
+  backup:
+    barmanObjectStore:
+      [...]
+      wal:
+        compression: gzip
+        maxParallel: 8
+        encryption: AES256
+```
+
+In the previous example, the instance manager optimizes the WAL
+archiving process by archiving in parallel at most eight ready
+WALs, including the one requested by PostgreSQL.
+
+When PostgreSQL requests the archiving of a WAL that has
+already been archived by the instance manager as an optimization,
+that archival request is simply dismissed with a positive status.
+
+## Retention policies
 
 CloudNativePG can manage the automated deletion of backup files from
 the backup object store, using **retention policies** based on the recovery
@@ -163,7 +233,7 @@ spec:
 You can append additional options to the `barman-cloud-backup` and `barman-cloud-wal-archive` commands by using
 the `additionalCommandArgs` property in the
 `.spec.backup.barmanObjectStore.data` and `.spec.backup.barmanObjectStore.wal` sections respectively.
-This properties are lists of strings that will be appended to the
+These properties are lists of strings that will be appended to the
 `barman-cloud-backup` and `barman-cloud-wal-archive` commands.
 
 For example, you can use the `--read-timeout=60` to customize the connection
@@ -209,3 +279,61 @@ spec:
         - "--max-concurrency=1"
         - "--read-timeout=60"
 ```
+
+## Recovery from an object store
+
+You can recover from a backup created by Barman Cloud and stored on a supported
+object store. After you define the external cluster, including all the required
+configuration in the `barmanObjectStore` section, you need to reference it in
+the `.spec.recovery.source` option.
+
+This example defines a recovery object store in a blob container in Azure:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-restore
+spec:
+  [...]
+
+  superuserSecret:
+    name: superuser-secret
+
+  bootstrap:
+    recovery:
+      source: clusterBackup
+
+  externalClusters:
+    - name: clusterBackup
+      barmanObjectStore:
+        destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/
+        azureCredentials:
+          storageAccount:
+            name: recovery-object-store-secret
+            key: storage_account_name
+          storageKey:
+            name: recovery-object-store-secret
+            key: storage_account_key
+        wal:
+          maxParallel: 8
+```
+
+The previous example assumes that the application database and its owning user
+are named `app` by default. If the PostgreSQL cluster being restored uses
+different names, you must specify these names before exiting the recovery phase,
+as documented in ["Configure the application database"](../recovery.md#configure-the-application-database).
+
+!!! Important
+    By default, the `recovery` method strictly uses the `name` of the
+    cluster in the `externalClusters` section as the name of the main folder
+    of the backup data within the object store. This name is normally reserved
+    for the name of the server.
You can specify a different folder name
+    using the `barmanObjectStore.serverName` property.
+
+!!! Note
+    This example takes advantage of the parallel WAL restore feature,
+    dedicating up to 8 jobs to concurrently fetch the required WAL files from the
+    archive. This feature can appreciably reduce the recovery time. Make sure that
+    you plan ahead for this scenario and correctly tune the value of this parameter
+    for your environment. It will make a difference when you need it, and you will.

diff --git a/docs/src/backup_volumesnapshot.md b/docs/src/appendixes/backup_volumesnapshot.md
similarity index 91%
rename from docs/src/backup_volumesnapshot.md
rename to docs/src/appendixes/backup_volumesnapshot.md
index 814867cd40..6bf555f49b 100644
--- a/docs/src/backup_volumesnapshot.md
+++ b/docs/src/appendixes/backup_volumesnapshot.md
@@ -1,12 +1,10 @@
-# Backup on volume snapshots
+# Appendix A - Backup on volume snapshots
 
-!!! Warning
-    As noted in the [backup document](backup.md), a cold snapshot explicitly
-    set to target the primary will result in the primary being fenced for
-    the duration of the backup, rendering the cluster read-only during that
-    time. For safety, in a cluster already containing fenced instances, a cold
-    snapshot is rejected.
The `volumeSnapshot.className` option allows you to reference the default @@ -118,6 +117,13 @@ a `ScheduledBackup` resource that requests such backups on a periodic basis. ## Hot and cold backups +!!! Warning + As noted in the [backup document](../backup.md), a cold snapshot explicitly + set to target the primary will result in the primary being fenced for + the duration of the backup, making the cluster read-only during this + period. For safety, in a cluster already containing fenced instances, a cold + snapshot is rejected. + By default, CloudNativePG requests an online/hot backup on volume snapshots, using the [PostgreSQL defaults of the low-level API for base backups](https://www.postgresql.org/docs/current/continuous-archiving.html#BACKUP-LOWLEVEL-BASE-BACKUP): @@ -335,8 +341,6 @@ spec: ## Example of Volume Snapshot Backup - - The following example shows how to configure volume snapshot base backups on an EKS cluster on AWS using the `ebs-sc` storage class and the `csi-aws-vsc` volume snapshot class. @@ -349,7 +353,7 @@ volume snapshot class. The following manifest creates a `Cluster` that is ready to be used for volume snapshots and that stores the WAL archive in a S3 bucket via IAM role for the -Service Account (IRSA, see [AWS S3](appendixes/object_stores.md#aws-s3)): +Service Account (IRSA, see [AWS S3](object_stores.md#aws-s3)): ``` yaml apiVersion: postgresql.cnpg.io/v1 @@ -369,13 +373,12 @@ spec: backup: volumeSnapshot: className: csi-aws-vsc - barmanObjectStore: - destinationPath: s3://@BUCKET_NAME@/ - s3Credentials: - inheritFromIAMRole: true - wal: - compression: gzip - maxParallel: 2 + + plugins: + - name: barman-cloud.cloudnative-pg.io + isWALArchiver: true + parameters: + barmanObjectName: @OBJECTSTORE_NAME@ serviceAccountTemplate: metadata: diff --git a/docs/src/appendixes/object_stores.md b/docs/src/appendixes/object_stores.md index 5975e7ddd7..73e424abb9 100644 --- a/docs/src/appendixes/object_stores.md +++ b/docs/src/appendixes/object_stores.md @@ -1,11 +1,13 @@ -# Appendix A - Common object stores for backups +# Appendix C - Common object stores for backups !!! Warning - With the deprecation of native Barman Cloud support in CloudNativePG in - favor of the Barman Cloud Plugin, this page—and the backup and recovery - documentation—may undergo changes before the official release of version - 1.26.0. + As of CloudNativePG 1.26, **native Barman Cloud support is deprecated** in + favor of the **Barman Cloud Plugin**. While the native integration remains + functional for now, we strongly recommend beginning a gradual migration to + the plugin-based interface after appropriate testing. The Barman Cloud + Plugin documentation describes + [how to use common object stores](https://cloudnative-pg.io/plugin-barman-cloud/docs/object_stores/). You can store the [backup](../backup.md) files in any service that is supported by the Barman Cloud infrastructure. That is: @@ -348,125 +350,3 @@ Now the operator will use the credentials to authenticate against Google Cloud S This way of authentication will create a JSON file inside the container with all the needed information to access your Google Cloud Storage bucket, meaning that if someone gets access to the pod will also have write permissions to the bucket. - -## MinIO Gateway - -Optionally, you can use MinIO Gateway as a common interface which -relays backup objects to other cloud storage solutions, like S3 or GCS. -For more information, please refer to [MinIO official documentation](https://docs.min.io/). 
- -Specifically, the CloudNativePG cluster can directly point to a local -MinIO Gateway as an endpoint, using previously created credentials and service. - -MinIO secrets will be used by both the PostgreSQL cluster and the MinIO instance. -Therefore, you must create them in the same namespace: - -```sh -kubectl create secret generic minio-creds \ - --from-literal=MINIO_ACCESS_KEY= \ - --from-literal=MINIO_SECRET_KEY= -``` - -!!! Note - Cloud Object Storage credentials will be used only by MinIO Gateway in this case. - -!!! Important - In order to allow PostgreSQL to reach MinIO Gateway, it is necessary to create a - `ClusterIP` service on port `9000` bound to the MinIO Gateway instance. - -For example: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: minio-gateway-service -spec: - type: ClusterIP - ports: - - port: 9000 - targetPort: 9000 - protocol: TCP - selector: - app: minio -``` - -!!! Warning - At the time of writing this documentation, the official - [MinIO Operator](https://github.com/minio/minio-operator/issues/71) - for Kubernetes does not support the gateway feature. As such, we will use a - `deployment` instead. - -The MinIO deployment will use cloud storage credentials to upload objects to the -remote bucket and relay backup files to different locations. - -Here is an example using AWS S3 as Cloud Object Storage: - -```yaml -apiVersion: apps/v1 -kind: Deployment -[...] -spec: - containers: - - name: minio - image: minio/minio:RELEASE.2020-06-03T22-13-49Z - args: - - gateway - - s3 - env: - # MinIO access key and secret key - - name: MINIO_ACCESS_KEY - valueFrom: - secretKeyRef: - name: minio-creds - key: MINIO_ACCESS_KEY - - name: MINIO_SECRET_KEY - valueFrom: - secretKeyRef: - name: minio-creds - key: MINIO_SECRET_KEY - # AWS credentials - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: aws-creds - key: ACCESS_KEY_ID - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: aws-creds - key: ACCESS_SECRET_KEY -# Uncomment the below section if session token is required -# - name: AWS_SESSION_TOKEN -# valueFrom: -# secretKeyRef: -# name: aws-creds -# key: ACCESS_SESSION_TOKEN - ports: - - containerPort: 9000 -``` - -Proceed by configuring MinIO Gateway service as the `endpointURL` in the `Cluster` -definition, then choose a bucket name to replace `BUCKET_NAME`: - -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - destinationPath: s3://BUCKET_NAME/ - endpointURL: http://minio-gateway-service:9000 - s3Credentials: - accessKeyId: - name: minio-creds - key: MINIO_ACCESS_KEY - secretAccessKey: - name: minio-creds - key: MINIO_SECRET_KEY - [...] -``` - -Verify on `s3://BUCKET_NAME/` the presence of archived WAL files before -proceeding with a backup. diff --git a/docs/src/backup.md b/docs/src/backup.md index 7a1196c7e9..99ee3f5e08 100644 --- a/docs/src/backup.md +++ b/docs/src/backup.md @@ -1,11 +1,42 @@ # Backup -!!! Warning - With the deprecation of native Barman Cloud support in CloudNativePG in - favor of the Barman Cloud Plugin, this page—and the backup and recovery - documentation—may undergo changes before the official release of version - 1.26.0. +!!! Info + This section covers **physical backups** in PostgreSQL. + While PostgreSQL also supports logical backups using the `pg_dump` utility, + these are **not suitable for business continuity** and are **not managed** by + CloudNativePG. 
If you still wish to use `pg_dump`, refer to the + [*Troubleshooting / Emergency backup* section](troubleshooting.md#emergency-backup) + for guidance. + +!!! Important + Starting with version 1.26, native backup and recovery capabilities are + being **progressively phased out** of the core operator and moved to official + CNPG-I plugins. This transition aligns with CloudNativePG's shift towards a + **backup-agnostic architecture**, enabled by its extensible + interface—**CNPG-I**—which standardizes the management of **WAL archiving**, + **physical base backups**, and corresponding **recovery processes**. + +CloudNativePG currently supports **physical backups of PostgreSQL clusters** in +two main ways: + +- **Via [CNPG-I](https://github.com/cloudnative-pg/cnpg-i/) plugins**: the + CloudNativePG Community officially supports the [**Barman Cloud Plugin**](https://cloudnative-pg.io/plugin-barman-cloud/) + for integration with object storage services. + +- **Natively**, with support for: + + - [Object storage via Barman Cloud](appendixes/backup_barmanobjectstore.md) + *(although deprecated from 1.26 in favor of the Barman Cloud Plugin)* + - [Kubernetes Volume Snapshots](appendixes/backup_volumesnapshot.md), if + supported by the underlying storage class + +Before selecting a backup strategy with CloudNativePG, it's important to +familiarize yourself with the foundational concepts covered in the ["Main Concepts"](#main-concepts) +section. These include WAL archiving, hot and cold backups, performing backups +from a standby, and more. + +## Main Concepts PostgreSQL natively provides first class backup and recovery capabilities based on file system level (physical) copy. These have been successfully used for @@ -13,14 +44,6 @@ more than 15 years in mission critical production databases, helping organizations all over the world achieve their disaster recovery goals with Postgres. -!!! Note - There's another way to backup databases in PostgreSQL, through the - `pg_dump` utility - which relies on logical backups instead of physical ones. - However, logical backups are not suitable for business continuity use cases - and as such are not covered by CloudNativePG (yet, at least). - If you want to use the `pg_dump` utility, let yourself be inspired by the - ["Troubleshooting / Emergency backup" section](troubleshooting.md#emergency-backup). - In CloudNativePG, the backup infrastructure for each PostgreSQL cluster is made up of the following resources: @@ -29,28 +52,11 @@ up of the following resources: - **Physical base backups**: a copy of all the files that PostgreSQL uses to store the data in the database (primarily the `PGDATA` and any tablespace) -The WAL archive can only be stored on object stores at the moment. - -On the other hand, CloudNativePG supports two ways to store physical base backups: - -- on [object stores](backup_barmanobjectstore.md), as tarballs - optionally - compressed: - - Using the Barman Cloud plugin - - Natively via `.spec.backup.barmanObjectStore` (*deprecated, to be removed in CloudNativePG 1.28*) -- on [Kubernetes Volume Snapshots](backup_volumesnapshot.md), if supported by - the underlying storage class - -!!! Important - Before choosing your backup strategy with CloudNativePG, it is important that - you take some time to familiarize with some basic concepts, like WAL archive, - hot and cold backups. - -!!! 
Important - Please refer to the official Kubernetes documentation for a list of all - the supported [Container Storage Interface (CSI) drivers](https://kubernetes-csi.github.io/docs/drivers.html) - that provide snapshotting capabilities. +CNPG-I provides a generic and extensible interface for managing WAL archiving +(both archive and restore operations), as well as the base backup and +corresponding restore processes. -## WAL archive +### WAL archive The WAL archive in PostgreSQL is at the heart of **continuous backup**, and it is fundamental for the following reasons: @@ -58,7 +64,7 @@ is fundamental for the following reasons: - **Hot backups**: the possibility to take physical base backups from any instance in the Postgres cluster (either primary or standby) without shutting down the server; they are also known as online backups -- **Point in Time recovery** (PITR): to possibility to recover at any point in +- **Point in Time recovery** (PITR): the possibility to recover at any point in time from the first available base backup in your system !!! Warning @@ -80,20 +86,20 @@ recovery, even across regions. !!! Important Our recommendation is to always setup the WAL archive in production. - There are known use cases - normally involving staging and development - environments - where none of the above benefits are needed and the WAL + There are known use cases — normally involving staging and development + environments — where none of the above benefits are needed and the WAL archive is not necessary. RPO in this case can be any value, such as 24 hours (daily backups) or infinite (no backup at all). -## Cold and Hot backups +### Cold and Hot backups Hot backups have already been defined in the previous section. They require the -presence of a WAL archive and they are the norm in any modern database management -system. +presence of a WAL archive, and they are the norm in any modern database +management system. **Cold backups**, also known as offline backups, are instead physical base backups taken when the PostgreSQL instance (standby or primary) is shut down. They are -consistent per definition and they represent a snapshot of the database at the +consistent per definition, and they represent a snapshot of the database at the time it was shut down. As a result, PostgreSQL instances can be restarted from a cold backup without @@ -105,78 +111,107 @@ In those situations with a higher RPO (for example, 1 hour or 24 hours), and shorter retention periods, cold backups represent a viable option to be considered for your disaster recovery plans. -## Object stores or volume snapshots: which one to use? +## Comparing Available Backup Options: Object Stores vs Volume Snapshots + +CloudNativePG currently supports two main approaches for physical backups: + +- **Object store–based backups**, via the [**Barman Cloud + Plugin**](https://cloudnative-pg.io/plugin-barman-cloud/) or the + [**deprecated native integration**](appendixes/backup_barmanobjectstore.md) +- [**Volume Snapshots**](appendixes/backup_volumesnapshot.md), using the + Kubernetes CSI interface and supported storage classes + +!!! Important + CNPG-I is designed to enable third parties to build and integrate their own + backup plugins. Over time, we expect the ecosystem of supported backup + solutions to grow. + +### Object Store–Based Backups + +Backups to an object store (e.g. 
AWS S3, Azure Blob, GCS): -In CloudNativePG, object store based backups: +- Always require WAL archiving +- Support hot backups only +- Do not support incremental or differential copies +- Support retention policies -- always require the WAL archive -- support hot backup only -- don't support incremental copy -- don't support differential copy +### Volume Snapshots -VolumeSnapshots instead: +Native volume snapshots: -- don't require the WAL archive, although in production it is always recommended -- support incremental copy, depending on the underlying storage classes -- support differential copy, depending on the underlying storage classes -- also support cold backup +- Do not require WAL archiving, though its use is still strongly + recommended in production +- Support incremental and differential copies, depending on the + capabilities of the underlying storage class +- Support both hot and cold backups +- Do not support retention policies -Which one to use depends on your specific requirements and environment, -including: +### Choosing Between the Two -- availability of a viable object store solution in your Kubernetes cluster -- availability of a trusted storage class that supports volume snapshots -- size of the database: with object stores, the larger your database, the - longer backup and, most importantly, recovery procedures take (the latter - impacts [RTO](before_you_start.md#rto)); in presence of Very Large Databases - (VLDB), the general advice is to rely on Volume Snapshots as, thanks to - copy-on-write, they provide faster recovery -- data mobility and possibility to store or relay backup files on a - secondary location in a different region, or any subsequent one -- other factors, mostly based on the confidence and familiarity with the - underlying storage solutions +The best approach depends on your environment and operational requirements. +Consider the following factors: -The summary table below highlights some of the main differences between the two -available methods for storing physical base backups. +- **Object store availability**: Ensure your Kubernetes cluster can access a + reliable object storage solution, including a stable networking layer. +- **Storage class capabilities**: Confirm that your storage class supports CSI + volume snapshots with incremental/differential features. +- **Database size**: For very large databases (VLDBs), **volume snapshots are + generally preferred** as they enable faster recovery due to copy-on-write + technology—this significantly improves your + [Recovery Time Objective (RTO)](before_you_start.md#rto). +- **Data mobility**: Object store–based backups may offer greater flexibility + for replicating or storing backups across regions or environments. +- **Operational familiarity**: Choose the method that aligns best with your + team's experience and confidence in managing storage. 
-| | Object store | Volume Snapshots | +### Comparison Summary + +| Feature | Object Store | Volume Snapshots | |-----------------------------------|:------------:|:--------------------:| -| **WAL archiving** | Required | Recommended (1) | -| **Cold backup** | ✗ | ✓ | -| **Hot backup** | ✓ | ✓ | -| **Incremental copy** | ✗ | ✓ (2) | -| **Differential copy** | ✗ | ✓ (2) | -| **Backup from a standby** | ✓ | ✓ | -| **Snapshot recovery** | ✗ (3) | ✓ | -| **Point In Time Recovery (PITR)** | ✓ | Requires WAL archive | -| **Underlying technology** | Barman Cloud | Kubernetes API | - - -> See the explanation below for the notes in the above table: +| **WAL archiving** | Required | Recommended^1^ | +| **Cold backup** | ❌ | ✅ | +| **Hot backup** | ✅ | ✅ | +| **Incremental copy** | ❌ | ✅^2^ | +| **Differential copy** | ❌ | ✅^2^ | +| **Backup from a standby** | ✅ | ✅ | +| **Snapshot recovery** | ❌^3^ | ✅ | +| **Retention policies** | ✅ | ❌ | +| **Point-in-Time Recovery (PITR)** | ✅ | Requires WAL archive | +| **Underlying technology** | Barman Cloud | Kubernetes API | + +--- + +> **Notes:** > -> 1. WAL archive must be on an object store at the moment -> 2. If supported by the underlying storage classes of the PostgreSQL volumes -> 3. Snapshot recovery can be emulated using the -> `bootstrap.recovery.recoveryTarget.targetImmediate` option +> 1. WAL archiving must currently use an object store through a plugin (or the +> deprecated native one). +> 2. Availability of incremental and differential copies depends on the +> capabilities of the storage class used for PostgreSQL volumes. +> 3. Snapshot recovery can be emulated by using the +> `bootstrap.recovery.recoveryTarget.targetImmediate` option. -## Scheduled backups +## Scheduled Backups -Scheduled backups are the recommended way to configure your backup strategy in -CloudNativePG. They are managed by the `ScheduledBackup` resource. +Scheduled backups are the recommended way to implement a reliable backup +strategy in CloudNativePG. They are defined using the `ScheduledBackup` custom +resource. !!! Info - Please refer to [`ScheduledBackupSpec`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ScheduledBackupSpec) - in the API reference for a full list of options. + For a complete list of configuration options, refer to the + [`ScheduledBackupSpec`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ScheduledBackupSpec) + in the API reference. + +### Cron Schedule -The `schedule` field allows you to define a *six-term cron schedule* specification, -which includes seconds, as expressed in -the [Go `cron` package format](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format). +The `schedule` field defines **when** the backup should occur, using a +*six-field cron expression* that includes seconds. This format follows the +[Go `cron` package specification](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format). !!! Warning - Beware that this format accepts also the `seconds` field, and it is - different from the `crontab` format in Unix/Linux systems. + This format differs from the traditional Unix/Linux `crontab`—it includes a + **seconds** field as the first entry. 
-This is an example of a scheduled backup: +Example of a daily scheduled backup: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -184,62 +219,73 @@ kind: ScheduledBackup metadata: name: backup-example spec: - schedule: "0 0 0 * * *" + schedule: "0 0 0 * * *" # At midnight every day backupOwnerReference: self cluster: name: pg-backup + # method: plugin, volumeSnapshot, or barmanObjectStore (default) ``` -The above example will schedule a backup every day at midnight because the schedule -specifies zero for the second, minute, and hour, while specifying wildcard, meaning all, -for day of the month, month, and day of the week. +The schedule `"0 0 0 * * *"` triggers a backup every day at midnight +(00:00:00). In Kubernetes CronJobs, the equivalent expression would be `0 0 * * *`, +since seconds are not supported. -In Kubernetes CronJobs, the equivalent expression is `0 0 * * *` because seconds -are not included. +### Backup Frequency and RTO !!! Hint - Backup frequency might impact your recovery time objective ([RTO](before_you_start.md#rto)) after a - disaster which requires a full or Point-In-Time recovery operation. Our - advice is that you regularly test your backups by recovering them, and then - measuring the time it takes to recover from scratch so that you can refine - your RTO predictability. Recovery time is influenced by the size of the - base backup and the amount of WAL files that need to be fetched from the archive - and replayed during recovery (remember that WAL archiving is what enables - continuous backup in PostgreSQL!). - Based on our experience, a weekly base backup is more than enough for most - cases - while it is extremely rare to schedule backups more frequently than once - a day. - -You can choose whether to schedule a backup on a defined object store or a -volume snapshot via the `.spec.method` attribute, by default set to -`barmanObjectStore`. If you have properly defined -[volume snapshots](backup_volumesnapshot.md#how-to-configure-volume-snapshot-backups) -in the `backup` stanza of the cluster, you can set `method: volumeSnapshot` -to start scheduling base backups on volume snapshots. - -ScheduledBackups can be suspended, if needed, by setting `.spec.suspend: true`. -This will stop any new backup from being scheduled until the option is removed -or set back to `false`. - -In case you want to issue a backup as soon as the ScheduledBackup resource is created -you can set `.spec.immediate: true`. + The frequency of your backups directly impacts your **Recovery Time Objective** + ([RTO](before_you_start.md#rto)). -!!! Note - `.spec.backupOwnerReference` indicates which ownerReference should be put inside - the created backup resources. +To optimize your disaster recovery strategy based on continuous backup: + +- Regularly test restoring from your backups. +- Measure the time required for a full recovery. +- Account for the size of base backups and the number of WAL files that must be + retrieved and replayed. + +In most cases, a **weekly base backup** is sufficient. It is rare to schedule +full backups more frequently than once per day. 
+ +### Immediate Backup + +To trigger a backup immediately when the `ScheduledBackup` is created: + +```yaml +spec: + immediate: true +``` + +### Pause Scheduled Backups + +To temporarily stop scheduled backups from running: + +```yaml +spec: + suspend: true +``` + +### Backup Owner Reference (`.spec.backupOwnerReference`) - - *none:* no owner reference for created backup objects (same behavior as before the field was introduced) - - *self:* sets the Scheduled backup object as owner of the backup - - *cluster:* set the cluster as owner of the backup +Controls which Kubernetes object is set as the owner of the backup resource: -## On-demand backups +- `none`: No owner reference (legacy behavior) +- `self`: The `ScheduledBackup` object becomes the owner +- `cluster`: The PostgreSQL cluster becomes the owner + +## On-Demand Backups + +On-demand backups allow you to manually trigger a backup operation at any time +by creating a `Backup` resource. !!! Info - Please refer to [`BackupSpec`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BackupSpec) - in the API reference for a full list of options. + For a full list of available options, see the + [`BackupSpec`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-BackupSpec) in the + API reference. + +### Example: Requesting an On-Demand Backup -To request a new backup, you need to create a new `Backup` resource -like the following one: +To start an on-demand backup, apply a `Backup` request custom resource like the +following: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -252,20 +298,23 @@ spec: name: pg-backup ``` -In this case, the operator will start to orchestrate the cluster to take the -required backup on an object store, using `barman-cloud-backup`. You can check -the backup status using the plain `kubectl describe backup ` command: +In this example, the operator will orchestrate the backup process using the +`barman-cloud-backup` tool and store the backup in the configured object store. + +### Monitoring Backup Progress + +You can check the status of the backup using: + +```bash +kubectl describe backup backup-example +``` + +While the backup is in progress, you'll see output similar to: ```text Name: backup-example Namespace: default -Labels: -Annotations: API Version: postgresql.cnpg.io/v1 -Kind: Backup -Metadata: - Creation Timestamp: 2020-10-26T13:57:40Z - Self Link: /apis/postgresql.cnpg.io/v1/namespaces/default/backups/backup-example - UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 +... Spec: Cluster: Name: pg-backup @@ -275,59 +324,95 @@ Status: Events: ``` -When the backup has been completed, the phase will be `completed` -like in the following example: +Once the backup has successfully completed, the `phase` will be set to +`completed`, and the output will include additional metadata: ```text Name: backup-example Namespace: default -Labels: -Annotations: API Version: postgresql.cnpg.io/v1 -Kind: Backup -Metadata: - Creation Timestamp: 2020-10-26T13:57:40Z - Self Link: /apis/postgresql.cnpg.io/v1/namespaces/default/backups/backup-example - UID: ad5f855c-2ffd-454a-a157-900d5f1f6584 -Spec: - Cluster: - Name: pg-backup +... 
Status: Backup Id: 20201026T135740 Destination Path: s3://backups/ Endpoint URL: http://minio:9000 Phase: completed - s3Credentials: + S3 Credentials: Access Key Id: - Key: ACCESS_KEY_ID Name: minio + Key: ACCESS_KEY_ID Secret Access Key: - Key: ACCESS_SECRET_KEY - Name: minio - Server Name: pg-backup - Started At: 2020-10-26T13:57:40Z - Stopped At: 2020-10-26T13:57:44Z -Events: + Name: minio + Key: ACCESS_SECRET_KEY + Server Name: pg-backup + Started At: 2020-10-26T13:57:40Z + Stopped At: 2020-10-26T13:57:44Z ``` -!!!Important - This feature will not backup the secrets for the superuser and the - application user. The secrets are supposed to be backed up as part of - the standard backup procedures for the Kubernetes cluster. +--- -## Backup from a standby +!!! Important + On-demand backups do **not** include Kubernetes secrets for the PostgreSQL + superuser or application user. You should ensure these secrets are included in + your broader Kubernetes cluster backup strategy. - -Taking a base backup requires to scrape the whole data content of the -PostgreSQL instance on disk, possibly resulting in I/O contention with the -actual workload of the database. +## Backup Methods -For this reason, CloudNativePG allows you to take advantage of a -feature which is directly available in PostgreSQL: **backup from a standby**. +CloudNativePG currently supports the following backup methods for scheduled +and on-demand backups: -By default, backups will run on the most aligned replica of a `Cluster`. If -no replicas are available, backups will run on the primary instance. +- `plugin` – Uses a CNPG-I plugin (requires `.spec.pluginConfiguration`) +- `volumeSnapshot` – Uses native [Kubernetes volume snapshots](appendixes/backup_volumesnapshot.md#how-to-configure-volume-snapshot-backups) +- `barmanObjectStore` – Uses [Barman Cloud for object storage](appendixes/backup_barmanobjectstore.md) + *(deprecated starting with v1.26 in favor of the + [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/), + but still the default for backward compatibility)* -!!! Info +Specify the method using the `.spec.method` field (defaults to +`barmanObjectStore`). + +If your cluster is configured to support volume snapshots, you can enable +scheduled snapshot backups like this: + +```yaml +spec: + method: volumeSnapshot +``` + +To use the Barman Cloud Plugin as the backup method, set `method: plugin` and +configure the plugin accordingly. You can find an example in the +["Performing a Base Backup" section of the plugin documentation](https://cloudnative-pg.io/plugin-barman-cloud/docs/usage/#performing-a-base-backup) + +## Backup from a Standby + +Taking a base backup involves reading the entire on-disk data set of a +PostgreSQL instance, which can introduce I/O contention and impact the +performance of the active workload. + +To reduce this impact, **CloudNativePG supports taking backups from a standby +instance**, leveraging PostgreSQL’s built-in capability to perform backups from +read-only replicas. + +By default, backups are performed on the **most up-to-date replica** in the +cluster. If no replicas are available, the backup will fall back to the +**primary instance**. + +!!! Note + The examples in this section are focused on backup target selection and do not + take the backup method (`spec.method`) into account, as it is not relevant to + the scope being discussed. + +### How It Works + +When `prefer-standby` is the target (the default behavior), CloudNativePG will +attempt to: + +1. 
Identify the most synchronized standby node. +2. Run the backup process on that standby. +3. Fall back to the primary if no standbys are available. + +This strategy minimizes interference with the primary’s workload. + +!!! Warning Although the standby might not always be up to date with the primary, in the time continuum from the first available backup to the last archived WAL this is normally irrelevant. The base backup indeed @@ -338,8 +423,10 @@ no replicas are available, backups will run on the primary instance. primary. This might produce unexpected results in the short term (before `archive_timeout` kicks in) in deployments with low write activity. -If you prefer to always run backups on the primary, you can set the backup -target to `primary` as outlined in the example below: +### Forcing Backup on the Primary + +To always run backups on the primary instance, explicitly set the backup target +to `primary` in the cluster configuration: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -352,21 +439,16 @@ spec: ``` !!! Warning - Beware of setting the target to primary when performing a cold backup - with volume snapshots, as this will shut down the primary for - the time needed to take the snapshot, impacting write operations. - This also applies to taking a cold backup in a single-instance cluster, even - if you did not explicitly set the primary as the target. + Be cautious when using `primary` as the target for **cold backups using + volume snapshots**, as this will require shutting down the primary instance + temporarily—interrupting all write operations. The same caution applies to + single-instance clusters, even if you haven't explicitly set the target. -When the backup target is set to `prefer-standby`, such policy will ensure -backups are run on the most up-to-date available secondary instance, or if no -other instance is available, on the primary instance. +### Overriding the Cluster-Wide Target -By default, when not otherwise specified, target is automatically set to take -backups from a standby. - -The backup target specified in the `Cluster` can be overridden in the `Backup` -and `ScheduledBackup` types, like in the following example: +You can override the cluster-level target on a per-backup basis, using either +`Backup` or `ScheduledBackup` resources. Here's an example of an on-demand +backup: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -379,5 +461,24 @@ spec: target: "primary" ``` -In the previous example, CloudNativePG will invariably choose the primary -instance even if the `Cluster` is set to prefer replicas. +In this example, even if the cluster’s default target is `prefer-standby`, the +backup will be taken from the primary instance. + +## Retention Policies + +CloudNativePG is evolving toward a **backup-agnostic architecture**, where +backup responsibilities are delegated to external **CNPG-I plugins**. These +plugins are expected to offer advanced and customizable data protection +features, including sophisticated retention management, that go beyond the +built-in capabilities and scope of CloudNativePG. + +As part of this transition, the `spec.backup.retentionPolicy` field in the +`Cluster` resource is **deprecated** and will be removed in a future release. + +For more details on available retention features, refer to your chosen plugin’s documentation. +For example: ["Retention Policies" with Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/docs/retention/). + +!!! 
Important + Users are encouraged to rely on the retention mechanisms provided by the + backup plugin they are using. This ensures better flexibility and consistency + with the backup method in use. diff --git a/docs/src/bootstrap.md b/docs/src/bootstrap.md index ded39278c9..f074ced936 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -441,9 +441,9 @@ by `name` (our recommendation is to use the same `name` of the origin cluster). By default the `recovery` method strictly uses the `name` of the cluster in the `externalClusters` section to locate the main folder of the backup data within the object store, which is normally reserved - for the name of the server. You can specify a different one with the - `barmanObjectStore.serverName` property (by default assigned to the - value of `name` in the external cluster definition). + for the name of the server. Backup plugins provide ways to specify a + different one. For example, the Barman Cloud Plugin provides the [`serverName` parameter](https://cloudnative-pg.io/plugin-barman-cloud/docs/parameters/) + (by default assigned to the value of `name` in the external cluster definition). ### Bootstrap from a backup (`recovery`) diff --git a/docs/src/index.md b/docs/src/index.md index c1f8ef984a..540274a100 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -110,14 +110,22 @@ Additionally, the community provides images for the [PostGIS extension](postgis. - Support for Local Persistent Volumes with PVC templates. - Reuse of Persistent Volumes storage in Pods. - Separate volumes for WAL files and tablespaces. -- Backup and recovery options, including: - - Integration with the [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) - for continuous online backup via WAL archiving to AWS S3, S3-compatible - services, Azure Blob Storage, and Google Cloud Storage, with support for - retention policies based on a configurable recovery window. - - Backups using volume snapshots (where supported by storage classes). - - Full and Point-In-Time recovery from volume snapshots or object stores (via Barman Cloud plugin). - - Backup from standby replicas to reduce primary workload impact. +- Backup and Recovery via CNPG-I Plugins: + - Pluggable architecture for continuous physical backup and recovery. + - Hot and cold base backups. + - WAL archiving. + - Full and Point-In-Time Recovery (PITR). + - Scheduled and on-demand backups. + - Backup from standbys to reduce primary load. +- Community-Supported Barman Cloud Plugin: + - WAL archiving to object stores with support for full/PITR recovery. + - Retention policies based on configurable recovery windows. + - Supported as a CNPG-I plugin (recommended approach). +- Native Backup Methods: + - Continuous backup and full/PITR recovery via volume snapshots (if + supported by the storage class). + - Native integration with Barman Cloud for object store backups via + `.spec.backup.barmanObjectStore` (*deprecated since v1.26*). - Offline in-place major upgrades of PostgreSQL - Offline and online import of PostgreSQL databases, including major upgrades: - *Offline Import*: Direct restore from existing databases. diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index b7eeb208dd..b69204c882 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -379,39 +379,37 @@ in the archive. 
In addition, `Instance Manager` checks the correctness of the archive destination by performing the `barman-cloud-check-wal-archive` command before beginning to ship the first set of WAL files. -### PostgreSQL backups +### PostgreSQL Backups -The operator was designed to provide application-level backups using -PostgreSQL’s native continuous hot backup technology based on -physical base backups and continuous WAL archiving. -Base backups can be saved on: +CloudNativePG provides a pluggable interface (CNPG-I) for managing +application-level backups using PostgreSQL’s native physical backup +mechanisms—namely base backups and continuous WAL archiving. This +design enables flexibility and extensibility while ensuring consistency and +performance. -- Kubernetes volume snapshots -- Object stores (AWS S3 and S3-compatible, Azure Blob Storage, Google Cloud - Storage, and gateways like MinIO) +The CloudNativePG Community officially supports the [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/), +which enables continuous physical backups to object stores, along with full and +Point-In-Time Recovery (PITR) capabilities. -Base backups are defined at the cluster level, declaratively, -through the `backup` parameter in the cluster definition. +In addition to CNPG-I plugins, CloudNativePG also natively supports backups +using Kubernetes volume snapshots, when supported by the underlying storage +class and CSI driver. -You can define base backups in two ways: +You can initiate base backups in two ways: -- On-demand, through the `Backup` custom resource definition -- Scheduled, through the `ScheduledBackup`custom resource definition, using a cron-like syntax +- On-demand, using the `Backup` custom resource +- Scheduled, using the `ScheduledBackup` custom resource, with a cron-like + schedule format -Volume snapshots rely directly on the Kubernetes API, which delegates this -capability to the underlying storage classes and CSI drivers. Volume snapshot -backups are suitable for very large database (VLDB) contexts. +Volume snapshots leverage the Kubernetes API and are particularly effective for +very large databases (VLDBs) due to their speed and storage efficiency. -Object store backups rely on `barman-cloud-backup` for the job (distributed as -part of the application container image) to relay backups in the same endpoint, -alongside WAL files. +Both volume snapshots and CNPG-I-based backups support: -Both `barman-cloud-wal-restore` and `barman-cloud-backup` are distributed in -the application container image under GNU GPL 3 terms. - -Object store backups and volume snapshot backups are taken while PostgreSQL is -up and running (hot backups). Volume snapshots also support taking consistent -database snapshots with cold backups. +- Hot backups: Taken while PostgreSQL is running, ensuring minimal + disruption. +- Cold backups: Performed by temporarily stopping PostgreSQL to ensure a + fully consistent snapshot, when required. ### Backups from a standby @@ -423,8 +421,8 @@ operations. ### Full restore from a backup The operator enables you to bootstrap a new cluster (with its settings) -starting from an existing and accessible backup, either on a volume snapshot -or in an object store. +starting from an existing and accessible backup, either on a volume snapshot, +or in an object store, or via a plugin. Once the bootstrap process is completed, the operator initiates the instance in recovery mode. 
It replays all available WAL files from the specified archive, diff --git a/docs/src/recovery.md b/docs/src/recovery.md index 9f915aab31..5aba0eecc0 100644 --- a/docs/src/recovery.md +++ b/docs/src/recovery.md @@ -1,73 +1,86 @@ # Recovery -!!! Warning - With the deprecation of native Barman Cloud support in CloudNativePG in - favor of the Barman Cloud Plugin, this page—and the backup and recovery - documentation—may undergo changes before the official release of version - 1.26.0. - -In PostgreSQL terminology, recovery is the process of starting a PostgreSQL -instance using an existing backup. The PostgreSQL recovery mechanism -is very solid and rich. It also supports point-in-time recovery (PITR), which allows -you to restore a given cluster up to any point in time, from the first available -backup in your catalog to the last archived WAL. (The WAL -archive is mandatory in this case.) - -In CloudNativePG, you can't perform recovery in place on an existing -cluster. Recovery is instead a way to bootstrap a new Postgres cluster -starting from an available physical backup. +In PostgreSQL, **recovery** refers to the process of starting an instance from +an existing physical backup. PostgreSQL's recovery system is robust and +feature-rich, supporting **Point-In-Time Recovery (PITR)**—the ability to +restore a cluster to any specific moment, from the earliest available backup to +the latest archived WAL file. + +!!! Important + A valid WAL archive is required to perform PITR. + +In CloudNativePG, recovery is **not performed in-place** on an existing +cluster. Instead, it is used to **bootstrap a new cluster** from a physical +backup. !!! Note - For details on the `bootstrap` stanza, see + For more details on configuring the `bootstrap` stanza, refer to [Bootstrap](bootstrap.md). -The `recovery` bootstrap mode lets you create a cluster from an existing -physical base backup. You then reapply the WAL files containing the REDO log -from the archive. +The `recovery` bootstrap mode allows you to initialize a cluster from a +physical base backup and replay the associated WAL files to bring the system to +a consistent and optionally point-in-time state. -WAL files are pulled from the defined *recovery object store*. +CloudNativePG supports recovery via: -Base backups can be taken either on object stores or using volume snapshots. +- A **pluggable backup and recovery interface (CNPG-I)**, enabling integration + with external tools such as the [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/). +- **Native recovery from volume snapshots**, where supported by the underlying + Kubernetes storage infrastructure. +- **Native recovery from object stores via Barman Cloud**, which is + **deprecated** as of version 1.26 in favor of the plugin-based approach. -!!! Info - Starting with version 1.25, CloudNativePG includes experimental support for - backup and recovery using plugins, such as the - [Barman Cloud plugin](https://github.com/cloudnative-pg/plugin-barman-cloud). +With the deprecation of native Barman Cloud support in version 1.26, this +section now focuses on two supported recovery methods: using the **Barman Cloud +Plugin** for recovery from object stores, and the **native interface** for +recovery from volume snapshots. -You can achieve recovery from a *recovery object store* in two ways: +!!! Important + For legacy documentation, see + [Appendix B – Recovery from an Object Store](appendixes/backup_barmanobjectstore.md#recovery-from-an-object-store). 
-- We recommend using a recovery object store, that is, a backup of another cluster - created by Barman Cloud and defined by way of the `barmanObjectStore` option - in the `externalClusters` section. -- Alternatively, you can use an existing `Backup` object in the same namespace. +## Recovery from an Object Store with the Barman Cloud Plugin -Both recovery methods enable either full recovery (up to the last -available WAL) or up to a [point in time](#point-in-time-recovery-pitr). -When performing a full recovery, you can also start the cluster -in replica mode (see [replica clusters](replica_cluster.md) for reference). +This section outlines how to recover a PostgreSQL cluster from an object store +using the recommended Barman Cloud Plugin. !!! Important - If using replica mode, make sure that the PostgreSQL configuration - (`.spec.postgresql.parameters`) of the recovered cluster is compatible with - the original one from a physical replication standpoint. - -For recovery using *volume snapshots*: + The object store must contain backup data produced by a CloudNativePG + `Cluster`—either using the **deprecated native Barman Cloud integration** or + the **Barman Cloud Plugin**. -- Use a consistent set of `VolumeSnapshot` objects that all belong to the - same backup and are identified by the same `cnpg.io/cluster` and - `cnpg.io/backupName` labels. Then, recover through the `volumeSnapshots` - option in the `.spec.bootstrap.recovery` stanza, as described in - [Recovery from `VolumeSnapshot` objects](#recovery-from-volumesnapshot-objects). +!!! Info + For full details, refer to the + [“Recovery of a Postgres Cluster” section in the Barman Cloud Plugin documentation](https://cloudnative-pg.io/plugin-barman-cloud/docs/concepts/#recovery-of-a-postgres-cluster). -## Recovery from an object store +Begin by defining the object store that holds both your base backups and WAL +files. The Barman Cloud Plugin uses a custom `ObjectStore` resource for this +purpose. The following example shows how to configure one for Azure Blob +Storage: -You can recover from a backup created by Barman Cloud and stored on a supported -object store. After you define the external cluster, including all the required -configuration in the `barmanObjectStore` section, you need to reference it in -the `.spec.recovery.source` option. +```yaml +apiVersion: barmancloud.cnpg.io/v1 +kind: ObjectStore +metadata: + name: cluster-example-backup +spec: + configuration: + destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ + azureCredentials: + storageAccount: + name: recovery-object-store-secret + key: storage_account_name + storageKey: + name: recovery-object-store-secret + key: storage_account_key + wal: + maxParallel: 8 +``` -This example defines a recovery object store in a blob container in Azure: +Next, configure the `Cluster` resource to use the `ObjectStore` you defined. 
In +the `bootstrap` section, specify the recovery source, and define an +`externalCluster` entry that references the plugin: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -82,55 +95,39 @@ spec: bootstrap: recovery: - source: clusterBackup + source: origin externalClusters: - - name: clusterBackup - barmanObjectStore: - destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ - azureCredentials: - storageAccount: - name: recovery-object-store-secret - key: storage_account_name - storageKey: - name: recovery-object-store-secret - key: storage_account_key - wal: - maxParallel: 8 + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example-backup + serverName: cluster-example ``` -The previous example assumes that the application database and its owning user -are named `app` by default. If the PostgreSQL cluster being restored uses -different names, you must specify these names before exiting the recovery phase, -as documented in ["Configure the application database"](#configure-the-application-database). - -!!! Important - By default, the `recovery` method strictly uses the `name` of the - cluster in the `externalClusters` section as the name of the main folder - of the backup data within the object store. This name is normally reserved - for the name of the server. You can specify a different folder name - using the `barmanObjectStore.serverName` property. - -!!! Note - This example takes advantage of the parallel WAL restore feature, - dedicating up to 8 jobs to concurrently fetch the required WAL files from the - archive. This feature can appreciably reduce the recovery time. Make sure that - you plan ahead for this scenario and correctly tune the value of this parameter - for your environment. It will make a difference when you need it, and you will. - -## Recovery from `VolumeSnapshot` objects +## Recovery from `VolumeSnapshot` Objects !!! Warning - When creating replicas after recovering the primary instance from - the volume snapshot, the operator might end up using `pg_basebackup` - to synchronize them. This behavior results in a slower process, depending - on the size of the database. This limitation will be lifted in the future when - support for online backups and PVC cloning are introduced. - -CloudNativePG can create a new cluster from a `VolumeSnapshot` of a PVC of an -existing `Cluster` that's been taken using the declarative API for [volume -snapshot backups](backup_volumesnapshot.md). You must specify the name of the -snapshot, as in the following example: + When creating replicas after recovering a primary instance from a + `VolumeSnapshot`, the operator may fall back to using `pg_basebackup` to + synchronize them. This process can be significantly slower—especially for large + databases—because it involves a full base backup. This limitation will be + addressed in the future with support for online backups and PVC cloning in + the scale-up process. + +CloudNativePG allows you to create a new cluster from a `VolumeSnapshot` of a +`PersistentVolumeClaim` (PVC) that belongs to an existing `Cluster`. +These snapshots are created using the declarative API for +[volume snapshot backups](appendixes/backup_volumesnapshot.md). + +To complete the recovery process, the new cluster must also reference an +external cluster that provides access to the WAL archive needed to reapply +changes and finalize the recovery. 
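+
+As a reminder, such a set of snapshots is typically produced by a declarative
+backup request that uses the `volumeSnapshot` method. A minimal, illustrative
+sketch of such an on-demand `Backup` (resource names are hypothetical) might
+look like this:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Backup
+metadata:
+  name: backup-example
+spec:
+  method: volumeSnapshot
+  cluster:
+    name: cluster-example
+```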
+ +The following example shows a cluster being recovered using both a +`VolumeSnapshot` for the base backup and a WAL archive accessed through the +Barman Cloud Plugin: ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -142,11 +139,20 @@ spec: bootstrap: recovery: + source: origin volumeSnapshots: storage: name: kind: VolumeSnapshot apiGroup: snapshot.storage.k8s.io + + externalClusters: + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example-backup + serverName: cluster-example ``` In case the backed-up cluster was using a separate PVC to store the WAL files, @@ -260,9 +266,9 @@ feature to work if you specify a recovery target. ### PITR from an object store -This example uses a recovery object store in Azure that contains both -the base backups and the WAL archive. The recovery target is based on a -requested timestamp. +This example uses the same recovery object store in Azure defined earlier for +the Barman Cloud plugin, containing both the base backups and the WAL archive. +The recovery target is based on a requested timestamp. ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -278,24 +284,18 @@ spec: bootstrap: recovery: # Recovery object store containing WAL archive and base backups - source: clusterBackup + source: origin recoveryTarget: # Time base target for the recovery targetTime: "2023-08-11 11:14:21.00000+02" externalClusters: - - name: clusterBackup - barmanObjectStore: - destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ - azureCredentials: - storageAccount: - name: recovery-object-store-secret - key: storage_account_name - storageKey: - name: recovery-object-store-secret - key: storage_account_key - wal: - maxParallel: 8 + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example-backup + serverName: cluster-example ``` In this example, you had to specify only the `targetTime` in the form of a @@ -320,17 +320,20 @@ the recovery as follows: - Otherwise, the operator selects the last available backup, in chronological order. -### PITR from `VolumeSnapshot` objects +### Point-in-Time Recovery (PITR) from `VolumeSnapshot` Objects -The example that follows uses: +The following example demonstrates how to perform a **Point-in-Time Recovery (PITR)** using: -- A Kubernetes volume snapshot for the `PGDATA` containing the base backup from - which to start the recovery process. This snapshot is identified in the - `recovery.volumeSnapshots` section and called `test-snapshot-1`. -- A recovery object store in MinIO containing the WAL archive. The object store is identified by - the `recovery.source` option in the form of an external cluster definition. +- A Kubernetes `VolumeSnapshot` of the `PGDATA` directory, which provides the + base backup. This snapshot is specified in the `recovery.volumeSnapshots` + section and is named `test-snapshot-1`. +- A recovery object store (in this case, MinIO) containing the archived WAL + files. The object store is defined via a Barman Cloud Plugin `ObjectStore` + resource (not shown here), and referenced using the `recovery.source` field, + which points to an external cluster configuration. -The recovery target is based on a requested timestamp. +The cluster will be restored to a specific point in time using the +`recoveryTarget.targetTime` option. ```yaml apiVersion: postgresql.cnpg.io/v1 @@ -341,7 +344,7 @@ spec: # ... 
bootstrap: recovery: - source: cluster-example-with-backup + source: origin volumeSnapshots: storage: name: test-snapshot-1 @@ -350,19 +353,18 @@ spec: recoveryTarget: targetTime: "2023-07-06T08:00:39" externalClusters: - - name: cluster-example-with-backup - barmanObjectStore: - destinationPath: s3://backups/ - endpointURL: http://minio:9000 - s3Credentials: - accessKeyId: - name: minio - key: ACCESS_KEY_ID - secretAccessKey: - name: minio - key: ACCESS_SECRET_KEY + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: minio-backup + serverName: cluster-example ``` +This setup enables CloudNativePG to restore the base data from a volume +snapshot and apply WAL segments from the object store to reach the desired +recovery target. + !!! Note If the backed-up cluster had `walStorage` enabled, you also must specify the volume snapshot containing the `PGWAL` directory, as mentioned in @@ -381,6 +383,11 @@ targetTime [RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339) format. (The precise stopping point is also influenced by the `exclusive` option.) +!!! Warning + PostgreSQL recovery will stop when it encounters the first transaction that + occurs after the specified time. If no such transaction exists after the + target time, the recovery process will fail. + targetXID : Transaction ID up to which recovery proceeds. (The precise stopping point is also influenced by the `exclusive` option.) @@ -416,7 +423,7 @@ kind: Cluster [...] bootstrap: recovery: - source: clusterBackup + source: origin recoveryTarget: backupID: 20220616T142236 targetName: 'restore_point_1' @@ -451,25 +458,19 @@ spec: bootstrap: recovery: - source: clusterBackup + source: origin recoveryTarget: backupID: 20220616T142236 targetName: "maintenance-activity" exclusive: true externalClusters: - - name: clusterBackup - barmanObjectStore: - destinationPath: https://STORAGEACCOUNTNAME.blob.core.windows.net/CONTAINERNAME/ - azureCredentials: - storageAccount: - name: recovery-object-store-secret - key: storage_account_name - storageKey: - name: recovery-object-store-secret - key: storage_account_key - wal: - maxParallel: 8 + - name: origin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-example-backup + serverName: cluster-example ``` ## Configure the application database @@ -551,10 +552,10 @@ When the base backup recovery process is complete, the operator starts the Postgres instance in recovery mode. In this phase, PostgreSQL is up, though not able to accept connections, and the pod is healthy according to the liveness probe. By way of the `restore_command`, PostgreSQL starts fetching WAL -files from the archive. (You can speed up this phase by setting the -`maxParallel` option and enabling the parallel WAL restore capability.) +files from the archive. You can speed up this phase by setting the +`maxParallel` option and enabling the parallel WAL restore capability. -This phase terminates when PostgreSQL reaches the target (either the end of the +This phase terminates when PostgreSQL reaches the target, either the end of the WAL or the required target in case of PITR. You can optionally specify a `recoveryTarget` to perform a PITR. If left unspecified, the recovery continues up to the latest available WAL on the default target timeline (`latest`). @@ -566,57 +567,28 @@ remaining instances join the cluster as replicas. The process is transparent for the user and is managed by the instance manager running in the pods. 
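+
+As noted earlier, if the backed-up cluster had `walStorage` enabled, the
+recovery must also reference a snapshot of the `PGWAL` volume. A minimal,
+illustrative fragment of the `Cluster` spec (snapshot names are hypothetical)
+could look like this:
+
+```yaml
+bootstrap:
+  recovery:
+    source: origin
+    volumeSnapshots:
+      storage:
+        name: test-snapshot-1
+        kind: VolumeSnapshot
+        apiGroup: snapshot.storage.k8s.io
+      walStorage:
+        name: test-snapshot-1-wal
+        kind: VolumeSnapshot
+        apiGroup: snapshot.storage.k8s.io
+```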
-## Restoring into a cluster with a backup section - - - -A manifest for a cluster restore might include a `backup` section. This means -that,after recovery, the new cluster starts archiving WALs and taking backups -if configured to do so. - -For example, this section is part of a manifest for a cluster bootstrapping -from the cluster `cluster-example-backup`. In the storage bucket, it creates a -folder named `recoveredCluster`, where the base backups and WALs of the -recovered cluster are stored. - -``` yaml - backup: - barmanObjectStore: - destinationPath: s3://backups/ - endpointURL: http://minio:9000 - serverName: "recoveredCluster" - s3Credentials: - accessKeyId: - name: minio - key: ACCESS_KEY_ID - secretAccessKey: - name: minio - key: ACCESS_SECRET_KEY - retentionPolicy: "30d" +## Restoring into a Cluster with a Backup Section - externalClusters: - - name: cluster-example-backup - barmanObjectStore: - destinationPath: s3://backups/ - endpointURL: http://minio:9000 - s3Credentials: -``` +When restoring a cluster, the manifest may include a `plugins` section with +Barman Cloud plugin pointing to a *backup* object store resource. This enables +the newly created cluster to begin archiving WAL files and taking backups +immediately after recovery—provided backup policies are configured. -Don't reuse the same `barmanObjectStore` configuration for different clusters. -There might be cases where the existing information in the storage buckets -could be overwritten by the new cluster. +Avoid reusing the same `ObjectStore` configuration for both *backup* and +*recovery* in the same cluster. If you must, ensure that each cluster uses a +unique `serverName` to prevent accidental overwrites of backup or WAL archive +data. !!! Warning - The operator includes a safety check to ensure a cluster doesn't overwrite - a storage bucket that contained information. A cluster that would overwrite - existing storage remains in the state `Setting up primary` with pods in an - error state. The pod logs show: `ERROR: WAL archive check failed for server - recoveredCluster: Expected empty archive`. + CloudNativePG includes a safety check to prevent a cluster from overwriting + existing data in a shared storage bucket. If a conflict is detected, the + cluster remains in the `Setting up primary` state, and the associated pods will + fail with an error. The pod logs will display: + `ERROR: WAL archive check failed for server recoveredCluster: Expected empty archive`. !!! Important - If you set the `cnpg.io/skipEmptyWalArchiveCheck` annotation to `enabled` - in the recovered cluster, you can skip the safety check. We don't recommend - skipping the check because, for the general use case, the check works fine. - Skip this check only if you're familiar with the PostgreSQL recovery system, as - severe data loss can occur. - + You can bypass this safety check by setting the + `cnpg.io/skipEmptyWalArchiveCheck` annotation to `enabled` on the recovered + cluster. However, this is strongly discouraged unless you are highly + familiar with PostgreSQL's recovery process. Skipping the check incorrectly can + lead to severe data loss. Use with caution and only in expert scenarios. diff --git a/docs/src/replica_cluster.md b/docs/src/replica_cluster.md index 71fd23a594..13548b4530 100644 --- a/docs/src/replica_cluster.md +++ b/docs/src/replica_cluster.md @@ -1,12 +1,6 @@ # Replica clusters -!!! 
Warning
-    With the deprecation of native Barman Cloud support in CloudNativePG in
-    favor of the Barman Cloud Plugin, this page—and the backup and recovery
-    documentation—may undergo changes before the official release of version
-    1.26.0.
-
 A replica cluster is a CloudNativePG `Cluster` resource designed to replicate
 data from another PostgreSQL instance, ideally also managed by CloudNativePG.

@@ -95,8 +89,8 @@ recovery. There are three main options:
    seamless data transfer.
 2. **WAL Archive**: Use the WAL (Write-Ahead Logging) archive stored in an
    object store. WAL files are regularly transferred from the source cluster to
-   the object store, from where the `barman-cloud-wal-restore` utility retrieves
-   them for the replica cluster.
+   the object store, from where a CNPG-I plugin like [Barman Cloud](https://cloudnative-pg.io/plugin-barman-cloud/)
+   retrieves them for the replica cluster via the `restore_command`.
 3. **Hybrid Approach**: Combine both streaming replication and WAL archive
    methods. PostgreSQL can manage and switch between these two approaches as
    needed to ensure data consistency and availability.
@@ -105,13 +99,14 @@ recovery. There are three main options:

 When configuring the external cluster, you have the following options:

-
+- **`plugin` section**:
+  - Enables bootstrapping the replica cluster using a [CNPG-I](https://github.com/cloudnative-pg/cnpg-i)
+    plugin that supports the
+    [`restore_job`](https://github.com/cloudnative-pg/cnpg-i/blob/main/docs/protocol.md#restore_job-proto)
+    and the [`wal`](https://github.com/cloudnative-pg/cnpg-i/blob/main/docs/protocol.md#wal-proto) protocols.
+  - CloudNativePG supports the [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/docs/usage/#restoring-a-cluster)
+    to allow bootstrapping the replica cluster from an object store.
-- **`barmanObjectStore` section**:
-  - Enables use of the WAL archive, with CloudNativePG automatically setting
-    the `restore_command` in the designated primary instance.
-  - Allows bootstrapping the replica cluster from an object store using the
-    `recovery` section if volume snapshots are not feasible.
 - **`connectionParameters` section**:
   - Enables bootstrapping the replica cluster via streaming replication using
     the `pg_basebackup` section.
@@ -119,6 +114,13 @@ When configuring the external cluster, you have the following options:
    designated primary instance, initiating a WAL receiver process to connect
    to the source cluster and receive data.

+You still have access to the **`barmanObjectStore` section**, although it is deprecated:
+
+- Enables use of the WAL archive, with CloudNativePG automatically setting
+  the `restore_command` in the designated primary instance.
+- Allows bootstrapping the replica cluster from an object store using the
+  `recovery` section if volume snapshots are not feasible.
+
 ### Backup and Symmetric Architectures

 The replica cluster can perform backups to a reserved object store from the
@@ -204,19 +206,23 @@ replication with WAL shipping as a fallback—as described in the
 section.
Here’s how you would configure the `externalClusters` section for both -`Cluster` resources: +`Cluster` resources, relying on Barman Cloud Plugin for the object store: ```yaml # Distributed topology configuration externalClusters: - name: cluster-eu-south - barmanObjectStore: - destinationPath: s3://cluster-eu-south/ - # Additional configuration + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-eu-south + serverName: cluster-eu-south - name: cluster-eu-central - barmanObjectStore: - destinationPath: s3://cluster-eu-central/ - # Additional configuration + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: cluster-eu-central + serverName: cluster-eu-central ``` The `.spec.replica` stanza for the `cluster-eu-south` PostgreSQL primary @@ -508,10 +514,12 @@ a backup of the source cluster has been created already. ```yaml externalClusters: - name: - barmanObjectStore: - destinationPath: s3://backups/ - endpointURL: http://minio:9000 - s3Credentials: + # Example with Barman Cloud Plugin + plugin: + name: barman-cloud.cloudnative-pg.io + parameters: + barmanObjectName: + serverName: … connectionParameters: host: -rw.default.svc diff --git a/docs/src/wal_archiving.md b/docs/src/wal_archiving.md index 06872e6829..b82bfa2c5d 100644 --- a/docs/src/wal_archiving.md +++ b/docs/src/wal_archiving.md @@ -1,83 +1,51 @@ # WAL archiving -!!! Warning - With the deprecation of native Barman Cloud support in CloudNativePG in - favor of the Barman Cloud Plugin, this page—and the backup and recovery - documentation—may undergo changes before the official release of version - 1.26.0. +Write-Ahead Log (WAL) archiving in CloudNativePG is the process of continuously +shipping WAL files to a designated object store from the PostgreSQL primary. +These archives are essential for enabling Point-In-Time Recovery (PITR) and are +a foundational component for both object store and volume snapshot-based backup +strategies. -WAL archiving is the process that feeds a [WAL archive](backup.md#wal-archive) -in CloudNativePG. +## Plugin-Based Architecture -!!! Important - CloudNativePG currently only supports WAL archives on object stores. Such - WAL archives serve for both object store backups and volume snapshot backups. - -The WAL archive is defined in the `.spec.backup.barmanObjectStore` stanza of -a `Cluster` resource. Please proceed with the same instructions you find in -the ["Backup on object stores" section](backup_barmanobjectstore.md) to set up -the WAL archive. +CloudNativePG supports WAL archiving through a **plugin-based mechanism**, +defined via the [`spec.pluginConfiguration`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ClusterSpec) +section of the `Cluster` resource. -!!! Info - Please refer to [`BarmanObjectStoreConfiguration`](https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api#BarmanObjectStoreConfiguration) - in the barman-cloud API for a full list of options. +Only **one plugin at a time** can be responsible for WAL archiving. This is +configured by setting the `isWALArchiver` field to `true` within the plugin +configuration. -If required, you can choose to compress WAL files as soon as they -are uploaded and/or encrypt them: +## Supported Plugins -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -[...] -spec: - backup: - barmanObjectStore: - [...] 
-      wal:
-        compression: gzip
-        encryption: AES256
-```
+Currently, the **Barman Cloud Plugin** is the only officially supported WAL
+archiving plugin maintained by the CloudNativePG Community.
+For full documentation, configuration options, and best practices, see the
+[Barman Cloud Plugin documentation](https://cloudnative-pg.io/plugin-barman-cloud/docs/intro/).

-You can configure the encryption directly in your bucket, and the operator
-will use it unless you override it in the cluster configuration.
+## Deprecation Notice: Native Barman Cloud

-PostgreSQL implements a sequential archiving scheme, where the
-`archive_command` will be executed sequentially for every WAL
-segment to be archived.
+CloudNativePG still supports WAL archiving natively through the
+`.spec.backup.barmanObjectStore` field. While still functional, **this
+interface is deprecated** and will be removed in a future release.

 !!! Important
-    By default, CloudNativePG sets `archive_timeout` to `5min`, ensuring
-    that WAL files, even in case of low workloads, are closed and archived
-    at least every 5 minutes, providing a deterministic time-based value for
-    your Recovery Point Objective ([RPO](before_you_start.md#rpo)). Even though you change the value
-    of the [`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT),
-    our experience suggests that the default value set by the operator is
-    suitable for most use cases.
+    All new deployments are strongly encouraged to adopt the plugin-based
+    architecture, which offers a more flexible and maintainable approach.

-When the bandwidth between the PostgreSQL instance and the object
-store allows archiving more than one WAL file in parallel, you
-can use the parallel WAL archiving feature of the instance manager
-like in the following example:
+If you are currently using the native `.spec.backup.barmanObjectStore`
+approach, refer to the official guide for a smooth transition:
+[Migrating from Built-in CloudNativePG Backup](https://cloudnative-pg.io/plugin-barman-cloud/docs/migration/).

-```yaml
-apiVersion: postgresql.cnpg.io/v1
-kind: Cluster
-[...]
-spec:
-  backup:
-    barmanObjectStore:
-      [...]
-      wal:
-        compression: gzip
-        maxParallel: 8
-        encryption: AES256
-```
+## About the Archive Timeout

-In the previous example, the instance manager optimizes the WAL
-archiving process by archiving in parallel at most eight ready
-WALs, including the one requested by PostgreSQL.
+By default, CloudNativePG sets `archive_timeout` to `5min`, ensuring
+that WAL files, even in case of low workloads, are closed and archived
+at least every 5 minutes, providing a deterministic time-based value for
+your Recovery Point Objective ([RPO](before_you_start.md#rpo)).

-When PostgreSQL will request the archiving of a WAL that has
-already been archived by the instance manager as an optimization,
-that archival request will be just dismissed with a positive status.
+Even though you can change the value of the
+[`archive_timeout` setting in the PostgreSQL configuration](https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-ARCHIVE-TIMEOUT),
+our experience suggests that the default value set by the operator is suitable
+for most use cases.
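+
+For reference, a minimal, illustrative sketch of a `Cluster` that delegates
+WAL archiving to the Barman Cloud Plugin follows. The `barmanObjectName`
+value is hypothetical and must match an existing `ObjectStore` resource:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: cluster-example
+spec:
+  instances: 3
+  storage:
+    size: 1Gi
+  plugins:
+    - name: barman-cloud.cloudnative-pg.io
+      isWALArchiver: true
+      parameters:
+        barmanObjectName: cluster-example-backup
+```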
From 6607a88a0d1668a611620d4f01a4b050806d92e7 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 21 May 2025 21:40:47 +0200 Subject: [PATCH 597/836] fix: guard for nil pgDataImageInfo in major version upgrades reconciler (#7602) Closes #7601 Signed-off-by: Marco Nenciarini --- pkg/reconciler/majorupgrade/reconciler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/reconciler/majorupgrade/reconciler.go b/pkg/reconciler/majorupgrade/reconciler.go index 40f9a4c2a6..673b7684d2 100644 --- a/pkg/reconciler/majorupgrade/reconciler.go +++ b/pkg/reconciler/majorupgrade/reconciler.go @@ -69,7 +69,7 @@ func Reconcile( contextLogger.Error(err, "Unable to retrieve the requested PostgreSQL version") return nil, err } - if requestedMajor <= cluster.Status.PGDataImageInfo.MajorVersion { + if cluster.Status.PGDataImageInfo == nil || requestedMajor <= cluster.Status.PGDataImageInfo.MajorVersion { return nil, nil } From 5ee6eb83a4bb621ad04a5cbb4b88a8212bd4c0e3 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 22 May 2025 15:33:42 +0200 Subject: [PATCH 598/836] chore: move dataDurability defaulting logic to webhook to avoid OLM issues (#7600) Defining an OpenAPI default for a field within an optional section (such as `spec.postgresql.synchronous`) causes OLM to treat the entire section as required. This leads to validation errors when other mandatory fields in that section are not set. To resolve this, the defaulting logic for `spec.postgresql.synchronous.dataDurability` has been moved from the CRD schema to the defaulting webhook. Closes #7599 Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- api/v1/cluster_defaults.go | 5 +++ api/v1/cluster_defaults_test.go | 42 +++++++++++++++++++ api/v1/cluster_types.go | 1 - .../bases/postgresql.cnpg.io_clusters.yaml | 1 - 4 files changed, 47 insertions(+), 2 deletions(-) diff --git a/api/v1/cluster_defaults.go b/api/v1/cluster_defaults.go index a851f3cedd..d026d70db0 100644 --- a/api/v1/cluster_defaults.go +++ b/api/v1/cluster_defaults.go @@ -132,6 +132,11 @@ func (r *Cluster) setDefaults(preserveUserSettings bool) { r.defaultTablespaces() } + if r.Spec.PostgresConfiguration.Synchronous != nil && + r.Spec.PostgresConfiguration.Synchronous.DataDurability == "" { + r.Spec.PostgresConfiguration.Synchronous.DataDurability = DataDurabilityLevelRequired + } + r.setDefaultPlugins(configuration.Current) } diff --git a/api/v1/cluster_defaults_test.go b/api/v1/cluster_defaults_test.go index 1c6f02e0e2..4250ef8bd3 100644 --- a/api/v1/cluster_defaults_test.go +++ b/api/v1/cluster_defaults_test.go @@ -317,3 +317,45 @@ var _ = Describe("setDefaultPlugins", func() { ContainElement(PluginConfiguration{Name: "predefined-plugin1", Enabled: ptr.To(true)})) }) }) + +var _ = Describe("default dataDurability", func() { + It("should default dataDurability to 'required' when synchronous is present", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + PostgresConfiguration: PostgresConfiguration{ + Synchronous: &SynchronousReplicaConfiguration{}, + }, + }, + } + cluster.SetDefaults() + Expect(cluster.Spec.PostgresConfiguration.Synchronous).ToNot(BeNil()) + Expect(cluster.Spec.PostgresConfiguration.Synchronous.DataDurability).To(Equal(DataDurabilityLevelRequired)) + }) + + It("should not touch synchronous if nil", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + PostgresConfiguration: PostgresConfiguration{ + Synchronous: nil, + }, + }, + } + cluster.SetDefaults() + 
Expect(cluster.Spec.PostgresConfiguration.Synchronous).To(BeNil()) + }) + + It("should not change the dataDurability when set", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + PostgresConfiguration: PostgresConfiguration{ + Synchronous: &SynchronousReplicaConfiguration{ + DataDurability: DataDurabilityLevelPreferred, + }, + }, + }, + } + cluster.SetDefaults() + Expect(cluster.Spec.PostgresConfiguration.Synchronous).ToNot(BeNil()) + Expect(cluster.Spec.PostgresConfiguration.Synchronous.DataDurability).To(Equal(DataDurabilityLevelPreferred)) + }) +}) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 21c43565cf..3602933fd7 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1317,7 +1317,6 @@ type SynchronousReplicaConfiguration struct { // to allow for operational continuity. This setting is only applicable if both // `standbyNamesPre` and `standbyNamesPost` are unset (empty). // +kubebuilder:validation:Enum=required;preferred - // +kubebuilder:default:=required // +optional DataDurability DataDurabilityLevel `json:"dataDurability,omitempty"` } diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 7b54737091..d6d25fc4ae 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -4146,7 +4146,6 @@ spec: feature properties: dataDurability: - default: required description: |- If set to "required", data durability is strictly enforced. Write operations with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will From 40abc00e831c4d03a30f1430f717a9d988e1d3ec Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 22 May 2025 16:38:17 +0200 Subject: [PATCH 599/836] chore: improve OLM interface by adding fields (#7617) Added some missing fields in the section `spec.postgresql.synchronous` and `spec.probes` for the OLM UI look better. Partially-closes #7616 Signed-off-by: Jonathan Gonzalez V. 
--- .../cloudnative-pg.clusterserviceversion.yaml | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index 1a5de3dca5..46a72df498 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -139,6 +139,7 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: + # Backup Section - kind: Backup name: backups.postgresql.cnpg.io displayName: Backups @@ -163,6 +164,7 @@ spec: path: phase x-descriptors: - 'urn:alm:descriptor:io.kubernetes.phase' + # Cluster Section - kind: Cluster name: clusters.postgresql.cnpg.io version: v1 @@ -319,6 +321,18 @@ spec: description: Boolean to enable TLS x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:booleanSwitch' + - path: postgresql.synchronous + displayName: Synchronous Replication Configuration + description: Configuration of the synchronous replication feature + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' + - path: postgresql.synchronous.method + displayName: Synchronous Replication Configuration Method + description: The method to use for synchronous replication feature + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:select:any' + - 'urn:alm:descriptor:com.tectonic.ui:select:first' + - 'urn:alm:descriptor:com.tectonic.ui:advanced' - path: tablespaces displayName: Tablespaces description: Configuration of the tablespaces @@ -452,6 +466,12 @@ spec: x-descriptors: - 'urn:alm:descriptor:com.tectonic.ui:number' - 'urn:alm:descriptor:com.tectonic.ui:advanced' + # Probes configuration section + - path: probes + display: Probes Configuration + description: Configuration of the probes to be injected in the PostgreSQL instances + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:advanced' # Affinity section - path: affinity displayName: Pod Affinity From 51bb12eaf6bc7095fb4a33e4f9439e68339a3499 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Thu, 22 May 2025 16:55:39 +0200 Subject: [PATCH 600/836] docs: Release Notes for 1.26.0, 1.25.2, and 1.24.4 (#7595) Closes #7550 Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- docs/src/installation_upgrade.md | 10 +- docs/src/preview_version.md | 8 +- docs/src/release_notes/v1.26.md | 196 ++++++++++++++----------------- docs/src/supported_releases.md | 6 +- 4 files changed, 100 insertions(+), 120 deletions(-) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 9a8bd66381..fd4db6a7c3 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -254,7 +254,6 @@ removed before installing the new one. This won't affect user data but only the operator itself. - +In this release, the `cnpg` plugin for `kubectl` transitions from an imperative +to a declarative approach for cluster hibernation. The `hibernate on` and +`hibernate off` commands are now convenient shortcuts that apply declarative +changes to enable or disable hibernation. The `hibernate status` command has +been removed, as its purpose is now fulfilled by the standard `status` command. 
### Upgrading to 1.25 from a previous minor version diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index 587280b89a..9741b5c5c1 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -33,13 +33,13 @@ may contain serious bugs. Features in preview versions may change in ways that are not backwards compatible and could be removed entirely. ## Current Preview Version - -The current preview version is **1.26.0-rc3**. + diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index a61ffb0083..981388679e 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -7,98 +7,9 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.26) on the release branch in GitHub. -## Version 1.26.0-rc3 +## Version 1.26.0 -**Release date:** May 13, 2025 - -### Enhancements - -- Added a new field in the `status` of the `Cluster` resource to track the - latest known Pod IP (#7546). -- Introduced an opt-in experimental feature to enhance the liveness probe with - network isolation detection for primary instances. This feature can be - activated via the `alpha.cnpg.io/livenessPinger` annotation (#7466). -- Updated the default PostgreSQL version to 17.5 for new cluster - definitions. (#7556) - -### Fixes (since RC2) - -- Corrected replication lag comparison logic in custom health probes to accept - equality, enabling probes to succeed when lag is exactly zero (#7442). -- Fixed native replication slot synchronization and logical replication - failover for PostgreSQL 17 by appending the `dbname` parameter to - `primary_conninfo` in replica configurations (#7298). -- Improved performance and resilience of CNPG-I by removing timeouts for local - plugin operations, avoiding failures during longer backup or WAL archiving - executions (#7496). -- Fixed a regression in WAL restore operations that prevented fallback to the - in-tree `barmanObjectStore` configuration defined in the `externalCluster` - source when a plugin failed to locate a WAL file (#7507). -- Improved backup efficiency by introducing a fail-fast mechanism in WAL - archiving, allowing quicker detection of unexpected primary demotion and - avoiding unnecessary retries (#7483). -- Fixed an off-by-one error in parallel WAL archiving that could cause one - extra worker process to be spawned beyond the requested number (#7389). -- Enhanced declarative major version upgrade logic by prioritizing the declared - PostgreSQL version when using image catalogs. This change also replaces the - `majorVersionUpgradeFromImage` field with the new `pgDataImageInfo` object in - cluster status (#7387, #7403). -- `cnpg` plugin: - - - Increased the buffer size in the `logs pretty` command to better handle - larger log output (#7281). - - Ensured the `plugin-name` parameter is required for plugin-based backups - and disallowed for non-plugin backup methods (#7506). - -### Changes - -- Initiated deprecation of in-tree Barman Cloud support: - - - The `.spec.backup.barmanObjectStore` and `.spec.backup.retentionPolicy` - fields are now deprecated in favor of the external Barman Cloud Plugin, and a - warning is now emitted by the admission webhook when these fields are used in - the `Cluster` specification (#7500). - - - While support for in-tree object store backup and recovery remains - available in this release, it is planned for removal in version 1.28.0. 
- - - *We strongly encourage users to begin planning the migration of their - fleet to the new plugin-based approach. This release candidate provides - an opportunity to test the new plugin workflow ahead of the final 1.26.0 - release. Feedback is welcome*. - -- Updated the default PgBouncer version to **1.24.1** for new `Pooler` - deployments (#7399). - -## Version 1.26.0-rc2 - -**Release date:** April 16, 2025 - -### Enhancements - -- Introduced support for WAL recovery via CNPG-I plugins during snapshot - restore. (#7284) - -- Removed the `ENABLE_AZURE_PVC_UPDATES` configuration, as it is no longer - required to resize Azure volumes correctly. The Azure CSI driver includes the - necessary fix as of version [1.11.0](https://github.com/kubernetes-sigs/azuredisk-csi-driver/releases/tag/v1.11.0). (#7297) - -### Security - -- Set `imagePullPolicy` to `Always` for the operator deployment to ensure that - images are always pulled from the registry, reducing the risk of using - outdated or potentially unsafe local images. (#7250) - -### Fixes (since RC1) - -- Improved declarative major version upgrades by incorporating `pg_controldata` - information when creating the new data directory, ensuring a more reliable - and seamless upgrade process. (#7274) - - -## Version 1.26.0-rc1 - -**Release date:** March 28, 2025 +**Release date:** May 22, 2025 ### Important Changes @@ -110,9 +21,13 @@ on the release branch in GitHub. - **Deprecation of Native Barman Cloud Support**: Native support for Barman Cloud backups and recovery is now deprecated and will be fully removed in - CloudNativePG 1.28.0. Users must begin migrating their existing clusters to the - new [Barman Cloud Plugin](https://github.com/cloudnative-pg/plugin-barman-cloud) - to ensure a smooth transition. (#6876) + CloudNativePG version 1.28.0. Although still available in the current release, + users are strongly encouraged to begin migrating their existing clusters to the + new [Barman Cloud Plugin](https://cloudnative-pg.io/plugin-barman-cloud/) to + ensure a smooth and seamless transition. The plugin should also be used for all + new deployments. This change marks the first step toward making CloudNativePG a + backup-agnostic solution, a goal that will be fully realized when volume + snapshot support is also moved to a plugin-based architecture. (#6876) - **End of Support for Barman 3.4 and Earlier**: CloudNativePG no longer supports Barman versions 3.4 and earlier, including the capability detection @@ -129,7 +44,7 @@ on the release branch in GitHub. the operator unless you are prepared to migrate to the declarative hibernation method. (#7155) -### Features: +### Features - **Declarative Offline In-Place Major Upgrades of PostgreSQL**: Introduced support for offline in-place major upgrades when a new operand container @@ -150,26 +65,30 @@ on the release branch in GitHub. ### Enhancements +- Introduced an opt-in experimental feature to enhance the liveness probe with + network isolation detection for primary instances. This feature can be + activated via the `alpha.cnpg.io/livenessPinger` annotation (#7466). + - Introduced the `STANDBY_TCP_USER_TIMEOUT` operator configuration setting, allowing users to specify the `tcp_user_timeout` parameter on all standby instances managed by the operator. (#7036) -- Added the `pg_extensions` metric, providing information about installed - PostgreSQL extensions and their latest available versions. 
(#7195)
-
 - Introduced the `DRAIN_TAINTS` operator configuration option, enabling users
   to customize which node taints indicate a node is being drained. This
   replaces the previous fixed behavior of only recognizing
-  `node.kubernetes.io/unschedulable` as a drain signal.
+  `node.kubernetes.io/unschedulable` as a drain signal. (#6928)
+
+- Added a new field in the `status` of the `Cluster` resource to track the
+  latest known Pod IP (#7546).
+
+- Added the `pg_extensions` metric, providing information about installed
+  PostgreSQL extensions and their latest available versions. (#7195)

 - Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator,
   allowing users to specify the domain suffix for fully qualified domain names
   (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to
   `cluster.local`. (#6989)

-- Added support for LZ4, XZ, and Zstandard compression methods when archiving
-  WAL files via Barman Cloud (*deprecated*). (#7151)
-
 - Implemented the `cnpg.io/validation` annotation, enabling users to disable
   the validation webhook on CloudNativePG-managed resources. Use with caution,
   as this allows unrestricted changes. (#7196)
@@ -181,13 +100,49 @@ on the release branch in GitHub.

 - Added support for collecting `pg_stat_wal` metrics in PostgreSQL 18. (#7005)

+- Removed the `ENABLE_AZURE_PVC_UPDATES` configuration, as it is no longer
+  required to resize Azure volumes correctly. The Azure CSI driver includes the
+  necessary fix as of version [1.11.0](https://github.com/kubernetes-sigs/azuredisk-csi-driver/releases/tag/v1.11.0). (#7297) <!-- no 1.25 1.24 1.22 -->
+
+- The `.spec.backup.barmanObjectStore` and `.spec.backup.retentionPolicy`
+  fields are now deprecated in favor of the external Barman Cloud Plugin, and a
+  warning is now emitted by the admission webhook when these fields are used in
+  the `Cluster` specification (#7500).
+
+- Added support for LZ4, XZ, and Zstandard compression methods when archiving
+  WAL files via Barman Cloud (*deprecated*). (#7151)
+
 - CloudNativePG Interface (CNPG-I):

   - A plugin can now trigger instance rollouts by implementing the `EVALUATE`
     verb, ensuring that plugin-induced changes are properly reconciled. (#7126)

+  - Introduced support for WAL recovery via CNPG-I plugins during snapshot
+    restore. (#7284)
+
+### Security
+
+- Set `imagePullPolicy` to `Always` for the operator deployment to ensure that
+  images are always pulled from the registry, reducing the risk of using
+  outdated or potentially unsafe local images. (#7250)
+
 ### Fixes

+- Fixed native replication slot synchronization and logical replication
+  failover for PostgreSQL 17 by appending the `dbname` parameter to
+  `primary_conninfo` in replica configurations (#7298).
+
+- Fixed a regression in WAL restore operations that prevented fallback to the
+  in-tree `barmanObjectStore` configuration defined in the `externalCluster`
+  source when a plugin failed to locate a WAL file (#7507).
+
+- Improved backup efficiency by introducing a fail-fast mechanism in WAL
+  archiving, allowing quicker detection of unexpected primary demotion and
+  avoiding unnecessary retries (#7483).
+
+- Fixed an off-by-one error in parallel WAL archiving that could cause one
+  extra worker process to be spawned beyond the requested number (#7389).
+
 - Resolved a race condition that caused the operator to perform two switchovers
   when updating the PostgreSQL configuration. (#6991)

 - Corrected the `PodMonitor` configuration by adjusting the `matchLabels` scope
   for the targeted pooler and cluster pods. Previously, the `matchLabels` were
   too broad, inadvertently inheriting labels from the cluster and leading to data
   collection from unintended targets. (#7063)

 - Added a webhook warning for clusters with a missing unit (e.g., MB, GB) in
   the `shared_buffers` configuration. This will become an error in future
   releases. Users should update their configurations to include explicit units
   (e.g., `512MB` instead of `512`). (#7160)

 - Treated timeout errors during volume snapshot creation as retryable to
   prevent unnecessary backup failures. (#7010)
- Treated timeout errors during volume snapshot creation as retryable to prevent unnecessary backup failures. (#7010) -- `cnpg` plugin: - - Ensured that the primary Pod is recreated during an imperative restart when - `primaryUpdateMethod` is set to `restart`, aligning its definition with the - replicas. (#7122) +- Moved the defaulting logic for `.spec.postgresql.synchronous.dataDurability` + from the CRD to the webhook to avoid UI issues with OLM. (#7600) - CloudNativePG Interface (CNPG-I): + - Implemented automatic reloading of TLS certificates for plugins when they change. (#7029) + - Ensured the operator properly closes the plugin connection when performing a backup using the plugin. (#7095, #7096) + - Fixed an issue that prevented WALs from being archived on a former primary node when using a plugin. (#6964) + - Improved performance and resilience of CNPG-I by removing timeouts for local + plugin operations, avoiding failures during longer backup or WAL archiving + executions (#7496). + +- `cnpg` plugin: + + - Increased the buffer size in the `logs pretty` command to better handle + larger log output (#7281). + + - Ensured the `plugin-name` parameter is required for plugin-based backups + and disallowed for non-plugin backup methods (#7506). + + - Ensured that the primary Pod is recreated during an imperative restart when + `primaryUpdateMethod` is set to `restart`, aligning its definition with the + replicas. (#7122) + +### Changes + +- Updated the default PostgreSQL version to 17.5 for new cluster + definitions. (#7556) + +- Updated the default PgBouncer version to **1.24.1** for new `Pooler` + deployments (#7399). + ### Supported versions -- Kubernetes 1.32, 1.31, and 1.30 +- Kubernetes 1.33, 1.32, 1.31, and 1.30 - PostgreSQL 17, 16, 15, 14, and 13 - - PostgreSQL 17.X is the default image + - PostgreSQL 17.5 is the default image - PostgreSQL 13 support ends on November 12, 2025 diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index c6c364f9e6..bcb5d2765c 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -83,8 +83,9 @@ Git tags for versions are prefixed with `v`. | Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | |-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------| -| 1.25.x | Yes | Dec 23, 2024 | ~ Aug/Sep 2025 | 1.29, 1.30, 1.31, 1.32 | 1.27, 1.28 | 13 - 17 | -| 1.24.x | Yes | Aug 22, 2024 | Mar 23, 2025 | 1.28, 1.29, 1.30, 1.31 | 1.27 | 13 - 17 | +| 1.26.x | Yes | May 22, 2025 | ~ Nov 2025 | 1.30, 1.31, 1.32, 1.33 | 1.29 | 13 - 17 | +| 1.25.x | Yes | Dec 23, 2024 | 22 Aug 2025 | 1.29, 1.30, 1.31, 1.32 | 1.33 | 13 - 17 | +| 1.24.x | Yes | Aug 22, 2024 | Mar 23, 2025 | 1.28, 1.29, 1.30, 1.31 | | 13 - 17 | | main | No, development only | | | | | 13 - 17 | @@ -122,7 +123,6 @@ version of PostgreSQL, we might not be able to help you. 
| Version | Release date | End of life |
|---------|--------------|-------------|
-| 1.26.0 | ~ May, 2025 | ~ Nov, 2025 |
| 1.27.0 | ~ Aug, 2025 | ~ Feb, 2026 |
| 1.28.0 | ~ Nov, 2025 | ~ May, 2026 |
| 1.29.0 | ~ Feb, 2026 | ~ Aug, 2026 |

From e40343ba3ea5cabab20d618718ca9a822f1a0ad9 Mon Sep 17 00:00:00 2001
From: Marco Nenciarini
Date: Fri, 23 May 2025 08:45:49 +0200
Subject: [PATCH 601/836] docs: fix missing Release Notes for 1.25.2 and 1.24.4 (#7620)

Closes #7550

Signed-off-by: Marco Nenciarini
---
 docs/src/release_notes.md | 2 +-
 docs/src/release_notes/v1.24.md | 92 ++++++++++++++++++++++++++
 docs/src/release_notes/v1.25.md | 110 ++++++++++++++++++++++++++++++++
 docs/src/release_notes/v1.26.md | 2 +-
 4 files changed, 204 insertions(+), 2 deletions(-)

diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md
index 24fc667355..5d66b75da5 100644
--- a/docs/src/release_notes.md
+++ b/docs/src/release_notes.md
@@ -4,7 +4,7 @@
 History of user-visible changes for CloudNativePG, classified for each minor
 release.
 
-- [CloudNativePG 1.26 - Release Candidate](release_notes/v1.26.md)
+- [CloudNativePG 1.26](release_notes/v1.26.md)
 - [CloudNativePG 1.25](release_notes/v1.25.md)
 - [CloudNativePG 1.24](release_notes/v1.24.md)
 
diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/v1.24.md
index 96c103fee0..37518b0e99 100644
--- a/docs/src/release_notes/v1.24.md
+++ b/docs/src/release_notes/v1.24.md
@@ -7,6 +7,98 @@ For a complete list of changes, please refer to the
 [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.24)
 on the release branch in GitHub.
 
+
+## Version 1.24.4
+
+**Release date:** May 23, 2025
+
+!!! Warning
+    This is the final release in the 1.24.x series.
+    Users are strongly encouraged to upgrade to a newer minor version, as 1.24
+    is no longer supported.
+
+### Important Changes
+
+- **CloudNativePG is now officially a CNCF project**: CloudNativePG has been
+  accepted into the Cloud Native Computing Foundation (CNCF), marking a
+  significant milestone in its evolution. As part of this transition, the project
+  is now governed under **CloudNativePG, a Series of LF Projects, LLC**, ensuring
+  long-term sustainability and community-driven innovation. (#7203)
+
+### Enhancements
+
+- Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator,
+  allowing users to specify the domain suffix for fully qualified domain names
+  (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to
+  `cluster.local`. (#6989)
+
+- Implemented the `cnpg.io/validation` annotation, enabling users to disable
+  the validation webhook on CloudNativePG-managed resources. Use with caution,
+  as this allows unrestricted changes. (#7196)
+
+- Added support for collecting `pg_stat_wal` metrics in PostgreSQL 18. (#7005)
+
+- Added support for LZ4, XZ, and Zstandard compression methods when archiving
+  WAL files via Barman Cloud (*deprecated*). (#7151)
+
+### Security
+
+- Set `imagePullPolicy` to `Always` for the operator deployment to ensure that
+  images are always pulled from the registry, reducing the risk of using
+  outdated or potentially unsafe local images. (#7250)
+
+### Fixes
+
+- Fixed native replication slot synchronization and logical replication
+  failover for PostgreSQL 17 by appending the `dbname` parameter to
+  `primary_conninfo` in replica configurations (#7298).
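For context on the fix above, a sketch of what the generated `primary_conninfo` on a replica can look like once the `dbname` parameter is included; the host, application name, and the `postgres` database value are illustrative assumptions, not taken from the patch:

```ini
# Illustrative excerpt from a replica's postgresql.auto.conf after the fix;
# the trailing dbname is what enables slot synchronization on PostgreSQL 17.
primary_conninfo = 'host=cluster-example-rw user=streaming_replica port=5432 sslmode=verify-ca application_name=cluster-example-2 dbname=postgres'
```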
+ +- Improved backup efficiency by introducing a fail-fast mechanism in WAL + archiving, allowing quicker detection of unexpected primary demotion and + avoiding unnecessary retries (#7483). + +- Fixed an off-by-one error in parallel WAL archiving that could cause one + extra worker process to be spawned beyond the requested number (#7389). + +- Resolved a race condition that caused the operator to perform two switchovers + when updating the PostgreSQL configuration. (#6991) + +- Corrected the `PodMonitor` configuration by adjusting the `matchLabels` scope + for the targeted pooler and cluster pods. Previously, the `matchLabels` were + too broad, inadvertently inheriting labels from the cluster and leading to data + collection from unintended targets. (#7063) + +- Added a webhook warning for clusters with a missing unit (e.g., MB, GB) in + the `shared_buffers` configuration. This will become an error in future + releases. Users should update their configurations to include explicit units + (e.g., `512MB` instead of `512`). (#7160) + +- CloudNativePG Interface (CNPG-I): + + - Implemented automatic reloading of TLS certificates for plugins when they + change. (#7029) + + - Ensured the operator properly closes the plugin connection when + performing a backup using the plugin. (#7095, #7096) + + - Improved performance and resilience of CNPG-I by removing timeouts for local + plugin operations, avoiding failures during longer backup or WAL archiving + executions (#7496). + +- `cnpg` plugin: + + - Ensured that the primary Pod is recreated during an imperative restart when + `primaryUpdateMethod` is set to `restart`, aligning its definition with the + replicas. (#7122) + +### Changes + +- Updated the default PostgreSQL version to 17.5 for new cluster + definitions. (#7556) + +- Updated the default PgBouncer version to **1.24.1** for new `Pooler` + deployments (#7399). + ## Version 1.24.3 **Release Date:** February 28, 2025 diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md index 550c684160..5c0ba24b1d 100644 --- a/docs/src/release_notes/v1.25.md +++ b/docs/src/release_notes/v1.25.md @@ -7,6 +7,116 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.25) on the release branch in GitHub. +## Version 1.25.2 + +**Release date:** May 23, 2025 + +### Important Changes + +- **CloudNativePG is now officially a CNCF project**: CloudNativePG has been + accepted into the Cloud Native Computing Foundation (CNCF), marking a + significant milestone in its evolution. As part of this transition, the project + is now governed under **CloudNativePG, a Series of LF Projects, LLC**, ensuring + long-term sustainability and community-driven innovation. (#7203) + +### Enhancements + +- Added the `KUBERNETES_CLUSTER_DOMAIN` configuration option to the operator, + allowing users to specify the domain suffix for fully qualified domain names + (FQDNs) generated within the Kubernetes cluster. If not set, it defaults to + `cluster.local`. (#6989) + +- Implemented the `cnpg.io/validation` annotation, enabling users to disable + the validation webhook on CloudNativePG-managed resources. Use with caution, + as this allows unrestricted changes. (#7196) + +- Added support for collecting `pg_stat_wal` metrics in PostgreSQL 18. (#7005) + +- Added support for LZ4, XZ, and Zstandard compression methods when archiving + WAL files via Barman Cloud (*deprecated*). 
(#7151) + +- CloudNativePG Interface (CNPG-I): + + - A plugin can now trigger instance rollouts by implementing the `EVALUATE` + verb, ensuring that plugin-induced changes are properly reconciled. (#7126) + + - Introduced support for WAL recovery via CNPG-I plugins during snapshot + restore. (#7284) + +### Security + +- Set `imagePullPolicy` to `Always` for the operator deployment to ensure that + images are always pulled from the registry, reducing the risk of using + outdated or potentially unsafe local images. (#7250) + +### Fixes + +- Fixed native replication slot synchronization and logical replication + failover for PostgreSQL 17 by appending the `dbname` parameter to + `primary_conninfo` in replica configurations (#7298). + +- Fixed a regression in WAL restore operations that prevented fallback to the + in-tree `barmanObjectStore` configuration defined in the `externalCluster` + source when a plugin failed to locate a WAL file (#7507). + +- Improved backup efficiency by introducing a fail-fast mechanism in WAL + archiving, allowing quicker detection of unexpected primary demotion and + avoiding unnecessary retries (#7483). + +- Fixed an off-by-one error in parallel WAL archiving that could cause one + extra worker process to be spawned beyond the requested number (#7389). + +- Resolved a race condition that caused the operator to perform two switchovers + when updating the PostgreSQL configuration. (#6991) + +- Corrected the `PodMonitor` configuration by adjusting the `matchLabels` scope + for the targeted pooler and cluster pods. Previously, the `matchLabels` were + too broad, inadvertently inheriting labels from the cluster and leading to data + collection from unintended targets. (#7063) + +- Added a webhook warning for clusters with a missing unit (e.g., MB, GB) in + the `shared_buffers` configuration. This will become an error in future + releases. Users should update their configurations to include explicit units + (e.g., `512MB` instead of `512`). (#7160) + +- Treated timeout errors during volume snapshot creation as retryable to + prevent unnecessary backup failures. (#7010) + +- Moved the defaulting logic for `.spec.postgresql.synchronous.dataDurability` + from the CRD to the webhook to avoid UI issues with OLM. (#7600) + +- CloudNativePG Interface (CNPG-I): + + - Implemented automatic reloading of TLS certificates for plugins when they + change. (#7029) + + - Ensured the operator properly closes the plugin connection when + performing a backup using the plugin. (#7095, #7096) + + - Improved performance and resilience of CNPG-I by removing timeouts for local + plugin operations, avoiding failures during longer backup or WAL archiving + executions (#7496). + +- `cnpg` plugin: + + - Increased the buffer size in the `logs pretty` command to better handle + larger log output (#7281). + + - Ensured the `plugin-name` parameter is required for plugin-based backups + and disallowed for non-plugin backup methods (#7506). + + - Ensured that the primary Pod is recreated during an imperative restart when + `primaryUpdateMethod` is set to `restart`, aligning its definition with the + replicas. (#7122) + +### Changes + +- Updated the default PostgreSQL version to 17.5 for new cluster + definitions. (#7556) + +- Updated the default PgBouncer version to **1.24.1** for new `Pooler` + deployments (#7399). 
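For reference, the `dataDurability` option touched by the defaulting fix above lives under `.spec.postgresql.synchronous`. A minimal sketch of a `Cluster` that sets it explicitly instead of relying on the webhook default (names and sizes are illustrative):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  postgresql:
    synchronous:
      method: any              # quorum-based synchronous replication
      number: 1                # number of synchronous standbys
      dataDurability: required # stated explicitly rather than defaulted
```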
+ ## Version 1.25.1 **Release Date:** February 28, 2025 diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 981388679e..cd73badb64 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -9,7 +9,7 @@ on the release branch in GitHub. ## Version 1.26.0 -**Release date:** May 22, 2025 +**Release date:** May 23, 2025 ### Important Changes From f13fd08889b1e59fd6843d0b017c889148c171ef Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 23 May 2025 10:27:29 +0200 Subject: [PATCH 602/836] docs: fix 1.26 release date in supported releases (#7626) Signed-off-by: Marco Nenciarini --- docs/src/supported_releases.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index bcb5d2765c..9b212bfb7c 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -83,7 +83,7 @@ Git tags for versions are prefixed with `v`. | Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | |-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------| -| 1.26.x | Yes | May 22, 2025 | ~ Nov 2025 | 1.30, 1.31, 1.32, 1.33 | 1.29 | 13 - 17 | +| 1.26.x | Yes | May 23, 2025 | ~ Nov 2025 | 1.30, 1.31, 1.32, 1.33 | 1.29 | 13 - 17 | | 1.25.x | Yes | Dec 23, 2024 | 22 Aug 2025 | 1.29, 1.30, 1.31, 1.32 | 1.33 | 13 - 17 | | 1.24.x | Yes | Aug 22, 2024 | Mar 23, 2025 | 1.28, 1.29, 1.30, 1.31 | | 13 - 17 | | main | No, development only | | | | | 13 - 17 | From fe3e6f2fe8696fef5b553d71e6d72b297823a580 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 23 May 2025 10:39:33 +0200 Subject: [PATCH 603/836] chore(github): update issue template (#7629) Signed-off-by: Marco Nenciarini --- .github/ISSUE_TEMPLATE/bug.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 9b7a3bed78..5cdee79b94 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -48,11 +48,10 @@ body: label: Version description: What is the version of CloudNativePG you are running? options: - - "1.26.0-rc3" + - "1.26 (latest patch)" - "1.25 (latest patch)" - - "1.24 (latest patch)" - "trunk (main)" - - "older in 1.24.x" + - "older in 1.25.x" - "older minor (unsupported)" validations: required: true @@ -61,6 +60,7 @@ body: attributes: label: What version of Kubernetes are you using? options: + - "1.33" - "1.32" - "1.31" - "1.30" From b1a8080a48d3492d039b7391652240aae8f26b4e Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 23 May 2025 11:18:59 +0200 Subject: [PATCH 604/836] docs: update operator capability levels page (#7636) Signed-off-by: Marco Nenciarini Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. 
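The patch below consolidates the hibernation documentation around the declarative approach, which toggles hibernation through the `cnpg.io/hibernation` annotation. A usage sketch (the cluster name is illustrative):

```sh
# Hibernate the cluster: database Pods are removed, PVCs are kept
kubectl annotate cluster cluster-example --overwrite cnpg.io/hibernation=on

# Resume the cluster
kubectl annotate cluster cluster-example --overwrite cnpg.io/hibernation=off
```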
--- .../cloudnative-pg.clusterserviceversion.yaml | 1 + docs/src/operator_capability_levels.md | 28 +++++-------------- docs/src/release_notes/v1.26.md | 2 +- 3 files changed, 9 insertions(+), 22 deletions(-) diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index 46a72df498..83704fdaaa 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -65,6 +65,7 @@ spec: - Offline and online import of PostgreSQL databases, including major upgrades: - *Offline Import*: Direct restore from existing databases. - *Online Import*: PostgreSQL native logical replication via the `Subscription` resource. + - Offline In-Place Major Upgrades of PostgreSQL - High Availability physical replication slots, including synchronization of user-defined replication slots. - Parallel WAL archiving and restore, ensuring high-performance data diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index b69204c882..a98993d7a6 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -505,25 +505,20 @@ scalability of PostgreSQL databases, ensuring a streamlined and optimized experience for managing large scale data storage in cloud-native environments. Support for temporary tablespaces is also included. -### Startup, Liveness, and Readiness Probes +### Customizable Startup, Liveness, and Readiness Probes CloudNativePG configures startup, liveness, and readiness probes for PostgreSQL containers, which are managed by the Kubernetes kubelet. These probes interact -with the `/healthz` and `/readyz` endpoints exposed by the instance manager's -web server to monitor the Pod's health and readiness. - -The startup and liveness probes use the `pg_isready` utility. A Pod is -considered healthy if `pg_isready` returns an exit code of 0 (indicating the -server is accepting connections) or 1 (indicating the server is rejecting -connections, such as during startup). - -The readiness probe executes a simple SQL query (`;`) to verify that the -PostgreSQL server is ready to accept client connections. +with the `/startupz`, `/healthz`, and `/readyz` endpoints exposed by +the instance manager's web server to monitor the Pod's health and readiness. All probes are configured with default settings but can be fully customized to meet specific needs, allowing for fine-tuning to align with your environment and workloads. +For detailed configuration options and advanced usage, +refer to the [Postgres instance manager](instance_manager.md) documentation. + ### Rolling deployments The operator supports rolling deployments to minimize the downtime. If a @@ -567,22 +562,13 @@ that, until the fence is lifted, data on the pod isn't modified by PostgreSQL and that you can investigate file system for debugging and troubleshooting purposes. -### Hibernation (declarative) +### Hibernation CloudNativePG supports [hibernation of a running PostgreSQL cluster](declarative_hibernation.md) in a declarative manner, through the `cnpg.io/hibernation` annotation. Hibernation enables saving CPU power by removing the database pods while keeping the database PVCs. This feature simulates scaling to 0 instances. -### Hibernation (imperative) - -CloudNativePG supports [hibernation of a running PostgreSQL cluster](kubectl-plugin.md#cluster-hibernation) -by way of the `cnpg` plugin. 
Hibernation shuts down all Postgres instances in the -high-availability cluster and keeps a static copy of the PVC group of the -primary. The copy contains `PGDATA` and WALs. The plugin enables you to exit the -hibernation phase by resuming the primary and then recreating all the -replicas, if they exist. - ### Reuse of persistent volumes storage in pods When the operator needs to create a pod that was deleted by the user or diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index cd73badb64..6e54b07411 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -102,7 +102,7 @@ on the release branch in GitHub. - Removed the `ENABLE_AZURE_PVC_UPDATES` configuration, as it is no longer required to resize Azure volumes correctly. The Azure CSI driver includes the - necessary fix as of version [1.11.0](https://github.com/kubernetes-sigs/azuredisk-csi-driver/releases/tag/v1.11.0). (#7297) <-- no 1.25 1.24 1.22 --> + necessary fix as of version [1.11.0](https://github.com/kubernetes-sigs/azuredisk-csi-driver/releases/tag/v1.11.0). (#7297) - The `.spec.backup.barmanObjectStore` and `.spec.backup.retentionPolicy` fields are now deprecated in favor of the external Barman Cloud Plugin, and a From 916ca9fc3adfd15a45af07330726fb80a1c8f533 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 23 May 2025 11:20:24 +0200 Subject: [PATCH 605/836] docs: release 1.24 is EOL (#7637) Signed-off-by: Marco Nenciarini --- docs/src/release_notes.md | 2 +- docs/src/release_notes/{ => old}/v1.24.md | 0 docs/src/supported_releases.md | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename docs/src/release_notes/{ => old}/v1.24.md (100%) diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md index 5d66b75da5..ea434985d4 100644 --- a/docs/src/release_notes.md +++ b/docs/src/release_notes.md @@ -6,13 +6,13 @@ History of user-visible changes for CloudNativePG, classified for each minor rel - [CloudNativePG 1.26](release_notes/v1.26.md) - [CloudNativePG 1.25](release_notes/v1.25.md) -- [CloudNativePG 1.24](release_notes/v1.24.md) For information on the community support policy for CloudNativePG, please refer to ["Supported releases"](supported_releases.md). Older releases: +- [CloudNativePG 1.24](release_notes/old/v1.24.md) - [CloudNativePG 1.23](release_notes/old/v1.23.md) - [CloudNativePG 1.22](release_notes/old/v1.22.md) - [CloudNativePG 1.21](release_notes/old/v1.21.md) diff --git a/docs/src/release_notes/v1.24.md b/docs/src/release_notes/old/v1.24.md similarity index 100% rename from docs/src/release_notes/v1.24.md rename to docs/src/release_notes/old/v1.24.md diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 9b212bfb7c..be7e97dd6d 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -85,7 +85,6 @@ Git tags for versions are prefixed with `v`. |-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------| | 1.26.x | Yes | May 23, 2025 | ~ Nov 2025 | 1.30, 1.31, 1.32, 1.33 | 1.29 | 13 - 17 | | 1.25.x | Yes | Dec 23, 2024 | 22 Aug 2025 | 1.29, 1.30, 1.31, 1.32 | 1.33 | 13 - 17 | -| 1.24.x | Yes | Aug 22, 2024 | Mar 23, 2025 | 1.28, 1.29, 1.30, 1.31 | | 13 - 17 | | main | No, development only | | | | | 13 - 17 | @@ -142,6 +141,7 @@ version of PostgreSQL, we might not be able to help you. 
| Version | Release date | End of life | Compatible Kubernetes versions |
|-----------------|-------------------|---------------------|--------------------------------|
+| 1.24.x | Aug 22, 2024 | May 23, 2025 | 1.28, 1.29, 1.30, 1.31 |
| 1.23.x | April 24, 2024 | November 24, 2024 | 1.27, 1.28, 1.29 |
| 1.22.x | December 21, 2023 | July 24, 2024 | 1.26, 1.27, 1.28 |
| 1.21.x | October 12, 2023 | Jun 12, 2024 | 1.25, 1.26, 1.27, 1.28 |

From 1535f3c1742525b93f4f8bbb7dd37e42e122f41f Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Fri, 23 May 2025 13:15:59 +0200
Subject: [PATCH 606/836] Version tag to 1.26.0 (#7642)

Signed-off-by: Marco Nenciarini
Co-authored-by: Marco Nenciarini
---
 docs/src/installation_upgrade.md | 4 +-
 docs/src/kubectl-plugin.md | 30 +-
 pkg/versions/versions.go | 6 +-
 releases/cnpg-1.26.0.yaml | 18020 +++++++++++++++++++++++++++++
 4 files changed, 18040 insertions(+), 20 deletions(-)
 create mode 100644 releases/cnpg-1.26.0.yaml

diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index fd4db6a7c3..dfedef1b97 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -8,12 +8,12 @@
 The operator can be installed like any other resource in Kubernetes,
 through a YAML manifest applied via `kubectl`.
 
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc3.yaml)
+You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.0.yaml)
 for this minor release as follows:
 
 ```sh
 kubectl apply --server-side -f \
-  https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.26.0-rc3.yaml
+  https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.0.yaml
 ```
 
 You can verify that with:
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index d8e3d55a1b..dfb1147c41 100644
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -31,11 +31,11 @@ them in your systems.
 
 #### Debian packages
 
-For example, let's install the 1.26.0-rc3 release of the plugin, for an Intel based
+For example, let's install the 1.26.0 release of the plugin, for an Intel based
 64 bit server. First, we download the right `.deb` file.
 
 ```sh
-wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc3/kubectl-cnpg_1.26.0-rc3_linux_x86_64.deb \
+wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0/kubectl-cnpg_1.26.0_linux_x86_64.deb \
   --output-document kube-plugin.deb
 ```
 
@@ -46,17 +46,17 @@
 $ sudo dpkg -i kube-plugin.deb
 Selecting previously unselected package cnpg.
 (Reading database ... 6688 files and directories currently installed.)
 Preparing to unpack kube-plugin.deb ...
-Unpacking cnpg (1.26.0-rc3) ...
-Setting up cnpg (1.26.0-rc3) ...
+Unpacking cnpg (1.26.0) ...
+Setting up cnpg (1.26.0) ...
 ```
 
 #### RPM packages
 
-As in the example for `.rpm` packages, let's install the 1.26.0-rc3 release for an
+As in the example for `.deb` packages, let's install the 1.26.0 release for an
 Intel 64 bit machine. Note the `--output` flag to provide a file name.
```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0-rc3/kubectl-cnpg_1.26.0-rc3_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0/kubectl-cnpg_1.26.0_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -70,7 +70,7 @@ Dependencies resolved. Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.26.0-rc3 @commandline 20 M + cnpg x86_64 1.26.0 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0-rc3 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.26.0-rc3 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0-rc3 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.26.0 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc3","build":{"Version":"1.26.0-rc3+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0","build":{"Version":"1.26.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0-rc3","build":{"Version":"1.26.0-rc3+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0","build":{"Version":"1.26.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 6304f15667..60d372f7e6 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.26.0-rc3" + Version = "1.26.0" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0-rc3" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.26.0-rc3" + buildVersion = "1.26.0" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.26.0.yaml b/releases/cnpg-1.26.0.yaml new file mode 100644 index 0000000000..5b77b32d14 --- /dev/null +++ b/releases/cnpg-1.26.0.yaml @@ -0,0 +1,18020 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. 
If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set
                            when type is PG_TABLESPACE
                          type: string
                        type:
                          description: Type is the role of the snapshot in the cluster,
                            such as PG_DATA, PG_WAL and PG_TABLESPACE
                          type: string
                      required:
                      - name
                      - type
                      type: object
                    type: array
                type: object
              startedAt:
                description: When the backup was started
                format: date-time
                type: string
              stoppedAt:
                description: When the backup was terminated
                format: date-time
                type: string
              tablespaceMapFile:
                description: Tablespace map file content as returned by Postgres in
                  case of online (hot) backups
                format: byte
                type: string
            type: object
        required:
        - metadata
        - spec
        type: object
    served: true
    storage: true
    subresources:
      status: {}
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.17.3
  name: clusterimagecatalogs.postgresql.cnpg.io
spec:
  group: postgresql.cnpg.io
  names:
    kind: ClusterImageCatalog
    listKind: ClusterImageCatalogList
    plural: clusterimagecatalogs
    singular: clusterimagecatalog
  scope: Cluster
  versions:
  - additionalPrinterColumns:
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
    name: v1
    schema:
      openAPIV3Schema:
        description: ClusterImageCatalog is the Schema for the clusterimagecatalogs
          API
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: |-
              Specification of the desired behavior of the ClusterImageCatalog.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
            properties:
              images:
                description: List of CatalogImages available in the catalog
                items:
                  description: CatalogImage defines the image and major version
                  properties:
                    image:
                      description: The image reference
                      type: string
                    major:
                      description: The PostgreSQL major version of the image. Must
                        be unique within the catalog.
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. 
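+              # Illustrative aside, not part of the generated schema: a hedged
+              # sketch of a preferred anti-affinity term added on top of the
+              # operator-generated rules; the weight and labels are hypothetical.
+              #
+              #   affinity:
+              #     enablePodAntiAffinity: true
+              #     additionalPodAntiAffinity:
+              #       preferredDuringSchedulingIgnoredDuringExecution:
+              #       - weight: 50
+              #         podAffinityTerm:
+              #           labelSelector:
+              #             matchLabels:
+              #               workload: batch
+              #           topologyKey: topology.kubernetes.io/zone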
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. 
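+              # Illustrative aside, not part of the generated schema: one way the
+              # node affinity fields above might be used to pin instances to an
+              # instance type; the key and values are examples, not defaults.
+              #
+              #   affinity:
+              #     nodeAffinity:
+              #       requiredDuringSchedulingIgnoredDuringExecution:
+              #         nodeSelectorTerms:
+              #         - matchExpressions:
+              #           - key: node.kubernetes.io/instance-type
+              #             operator: In
+              #             values: ["m5.xlarge", "m5.2xlarge"]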
+                  More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                type: object
+              podAntiAffinityType:
+                description: |-
+                  PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instances has to be
+                  considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+                  "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are
+                  added if all the existing nodes don't match the required pod anti-affinity rule.
+                  More info:
+                  https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+                type: string
+              tolerations:
+                description: |-
+                  Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+                  on tainted nodes.
+                  More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+                items:
+                  description: |-
+                    The pod this Toleration is attached to tolerates any taint that matches
+                    the triple <key,value,effect> using the matching operator <operator>.
+                  properties:
+                    effect:
+                      description: |-
+                        Effect indicates the taint effect to match. Empty means match all taint effects.
+                        When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                      type: string
+                    key:
+                      description: |-
+                        Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                        If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                      type: string
+                    operator:
+                      description: |-
+                        Operator represents a key's relationship to the value.
+                        Valid operators are Exists and Equal. Defaults to Equal.
+                        Exists is equivalent to wildcard for value, so that a pod can
+                        tolerate all taints of a particular category.
+                      type: string
+                    tolerationSeconds:
+                      description: |-
+                        TolerationSeconds represents the period of time the toleration (which must be
+                        of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                        it is not set, which means tolerate the taint forever (do not evict). Zero and
+                        negative values will be treated as 0 (evict immediately) by the system.
+                      format: int64
+                      type: integer
+                    value:
+                      description: |-
+                        Value is the taint value the toleration matches to.
+                        If the operator is Exists, the value should be empty, otherwise just a regular string.
+                      type: string
+                  type: object
+                type: array
+              topologyKey:
+                description: |-
+                  TopologyKey to use for anti-affinity configuration. See k8s documentation
+                  for more info on that
+                type: string
+            type: object
+          backup:
+            description: The configuration to be used for backups
+            properties:
+              barmanObjectStore:
+                description: The configuration for the barman-cloud tool suite
+                properties:
+                  azureCredentials:
+                    description: The credentials to use to upload data to Azure
+                      Blob Storage
+                    properties:
+                      connectionString:
+                        description: The connection string to be used
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                      inheritFromAzureAD:
+                        description: Use the Azure AD based authentication without
+                          providing explicitly the keys.
+                        type: boolean
+                      storageAccount:
+                        description: The storage account where to upload data
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
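+                  # Illustrative aside, not part of the generated schema: the
+                  # scheduling fields described above, combined; the taint and
+                  # label names are hypothetical.
+                  #
+                  #   affinity:
+                  #     nodeSelector:
+                  #       workload: database
+                  #     podAntiAffinityType: required
+                  #     topologyKey: kubernetes.io/hostname
+                  #     tolerations:
+                  #     - key: dedicated
+                  #       operator: Equal
+                  #       value: postgres
+                  #       effect: NoSchedule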
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
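+                  # Illustrative aside, not part of the generated schema: a minimal
+                  # sketch of the barman object store fields above; the bucket path
+                  # is a placeholder.
+                  #
+                  #   backup:
+                  #     barmanObjectStore:
+                  #       destinationPath: s3://backups/cluster-example
+                  #       data:
+                  #         compression: gzip
+                  #         encryption: AES256
+                  #         jobs: 2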
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. 
Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. + pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. 
+ type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. + This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. 
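+                # Illustrative aside, not part of the generated schema: a hedged
+                # example of the volume snapshot backup configuration;
+                # `csi-snapclass` is a hypothetical VolumeSnapshotClass name.
+                #
+                #   backup:
+                #     volumeSnapshot:
+                #       className: csi-snapclass
+                #       online: true
+                #       onlineConfiguration:
+                #         immediateCheckpoint: true
+                #         waitForArchive: true
+                #       snapshotOwnerReference: backup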
+ type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. + Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. 
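+                    # Illustrative aside, not part of the generated schema: a
+                    # microservice-type import per the fields above; the external
+                    # cluster name is a placeholder.
+                    #
+                    #   bootstrap:
+                    #     initdb:
+                    #       import:
+                    #         type: microservice
+                    #         databases: ["app"]
+                    #         source:
+                    #           externalCluster: cluster-source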
+ Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). 
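+                # Illustrative aside, not part of the generated schema:
+                # bootstrapping from a physical backup of a compatible instance,
+                # as described above; `cluster-source` is a hypothetical
+                # externalCluster name.
+                #
+                #   bootstrap:
+                #     pg_basebackup:
+                #       source: cluster-source
+                #       database: app
+                #       owner: app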
However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
+                type: string
+              serverTLSSecret:
+                description: |-
+                  The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+                  `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+                  If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+                  created using the provided CA.
+                type: string
+            type: object
+          description:
+            description: Description of this PostgreSQL cluster
+            type: string
+          enablePDB:
+            default: true
+            description: |-
+              Manage the `PodDisruptionBudget` resources within the cluster. When
+              configured as `true` (default setting), the pod disruption budgets
+              will safeguard the primary node from being terminated. Conversely,
+              setting it to `false` will result in the absence of any
+              `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+              hosting the PostgreSQL cluster. This latter configuration is
+              advisable for any PostgreSQL cluster employed for
+              development/staging purposes.
+            type: boolean
+          enableSuperuserAccess:
+            default: false
+            description: |-
+              When this option is enabled, the operator will use the `SuperuserSecret`
+              to update the `postgres` user password (if the secret is
+              not present, the operator will automatically create one). When this
+              option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+              it when automatically created, and then blank the password of the `postgres`
+              user by setting it to `NULL`. Disabled by default.
+            type: boolean
+          env:
+            description: |-
+              Env follows the Env format to pass environment variables
+              to the pods created in the cluster
+            items:
+              description: EnvVar represents an environment variable present in
+                a Container.
+              properties:
+                name:
+                  description: Name of the environment variable. Must be a C_IDENTIFIER.
+                  type: string
+                value:
+                  description: |-
+                    Variable references $(VAR_NAME) are expanded
+                    using the previously defined environment variables in the container and
+                    any service environment variables. If a variable cannot be resolved,
+                    the reference in the input string will be unchanged. Double $$ are reduced
+                    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                    "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                    Escaped references will never be expanded, regardless of whether the variable
+                    exists or not.
+                    Defaults to "".
+                  type: string
+                valueFrom:
+                  description: Source for the environment variable's value. Cannot
+                    be used if value is not empty.
+                  properties:
+                    configMapKeyRef:
+                      description: Selects a key of a ConfigMap.
+                      properties:
+                        key:
+                          description: The key to select.
+                          type: string
+                        name:
+                          default: ""
+                          description: |-
+                            Name of the referent.
+                            This field is effectively required, but due to backwards compatibility is
+                            allowed to be empty. Instances of this type with an empty value here are
+                            almost certainly wrong.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                          type: string
+                        optional:
+                          description: Specify whether the ConfigMap or its key
+                            must be defined
+                          type: boolean
+                      required:
+                      - key
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    fieldRef:
+                      description: |-
+                        Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                        spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                      properties:
+                        apiVersion:
+                          description: Version of the schema the FieldPath is
+                            written in terms of, defaults to "v1".
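+          # Illustrative aside, not part of the generated schema: user-provided
+          # certificates plus an environment variable, per the fields above; the
+          # secret names are placeholders.
+          #
+          #   certificates:
+          #     serverCASecret: cluster-example-ca
+          #     serverTLSSecret: cluster-example-server
+          #   enableSuperuserAccess: true
+          #   env:
+          #   - name: TZ
+          #     value: Europe/Rome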
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in + the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. 
+ The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
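+ # Editor's note: an illustrative volumeClaimTemplate for the
+ # ephemeralVolumeSource described above; the storage class and size are
+ # placeholders.
+ #   spec:
+ #     ephemeralVolumeSource:
+ #       volumeClaimTemplate:
+ #         spec:
+ #           accessModes: ["ReadWriteOnce"]
+ #           storageClassName: standard
+ #           resources:
+ #             requests:
+ #               storage: 1Gi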
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
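+ # Editor's note: example limits for the ephemeral volumes documented above;
+ # the quantities are illustrative.
+ #   spec:
+ #     ephemeralVolumesSizeLimit:
+ #       shm: 256Mi
+ #       temporaryData: 1Gi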
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
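+ # Editor's note: a hedged example of an external cluster's barmanObjectStore;
+ # the bucket path, secret name, and secret keys are placeholders.
+ #   externalClusters:
+ #     - name: origin
+ #       barmanObjectStore:
+ #         destinationPath: s3://backups/origin
+ #         s3Credentials:
+ #           accessKeyId:
+ #             name: aws-creds
+ #             key: ACCESS_KEY_ID
+ #           secretAccessKey:
+ #             name: aws-creds
+ #             key: ACCESS_SECRET_KEY
+ #         data:
+ #           compression: gzip
+ #           jobs: 2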
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
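+ # Editor's note: illustrative WAL archive/restore tuning for the `wal`
+ # section above; the additional command argument is hypothetical and must be
+ # a flag actually supported by barman-cloud-wal-restore.
+ #         wal:
+ #           compression: zstd
+ #           maxParallel: 4
+ #           restoreAdditionalCommandArgs:
+ #             - "--some-supported-flag"   # hypothetical, for illustration only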
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
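+ # Editor's note: a sketch of image selection through an ImageCatalog,
+ # consistent with the validation rules above; the catalog and pull-secret
+ # names are placeholders.
+ #   spec:
+ #     imageCatalogRef:
+ #       apiGroup: postgresql.cnpg.io
+ #       kind: ImageCatalog
+ #       name: postgresql
+ #       major: 16
+ #     imagePullPolicy: IfNotPresent
+ #     imagePullSecrets:
+ #       - name: registry-credentials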
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
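+ # Editor's note: an illustrative managed role using the fields documented
+ # above; the role name and password secret are placeholders.
+ #   spec:
+ #     managed:
+ #       roles:
+ #         - name: app_reader
+ #           ensure: present
+ #           login: true
+ #           connectionLimit: 10
+ #           inRoles:
+ #             - pg_read_all_data
+ #           passwordSecret:
+ #             name: app-reader-password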
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port. 
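+ # Editor's note: a hedged example of an additional managed service exposing
+ # the read-write endpoint through a LoadBalancer; the service name is a
+ # placeholder.
+ #     managed:
+ #       services:
+ #         additional:
+ #           - selectorType: rw
+ #             updateStrategy: patch
+ #             serviceTemplate:
+ #               metadata:
+ #                 name: cluster-rw-lb
+ #               spec:
+ #                 type: LoadBalancer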
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
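+ # Illustrative, commented-out sketch (not part of the schema): how the
+ # `podMonitorMetricRelabelings` field above could drop a high-cardinality
+ # metric before ingestion, using the RelabelConfig fields documented here.
+ # The metric name is a hypothetical example value.
+ #
+ #   spec:
+ #     monitoring:
+ #       enablePodMonitor: true
+ #       podMonitorMetricRelabelings:
+ #       - sourceLabels: ["__name__"]
+ #         regex: pg_stat_user_tables_n_tup_upd
+ #         action: drop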
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ tls:
+ description: |-
+ Configure TLS communication for the metrics endpoint.
+ Changing tls.enabled option will force a rollout of all instances.
+ properties:
+ enabled:
+ default: false
+ description: |-
+ Enable TLS for the monitoring endpoint.
+ Changing this option will force a rollout of all instances.
+ type: boolean
+ type: object
+ type: object
+ nodeMaintenanceWindow:
+ description: Define a maintenance window for the Kubernetes nodes
+ properties:
+ inProgress:
+ default: false
+ description: Is there a node maintenance activity in progress?
+ type: boolean
+ reusePVC:
+ default: true
+ description: |-
+ Reuse the existing PVC (wait for the node to come
+ up again) or not (recreate it elsewhere - when `instances` >1)
+ type: boolean
+ type: object
+ plugins:
+ description: |-
+ The plugins configuration, containing
+ any plugin to be loaded with the corresponding configuration
+ items:
+ description: |-
+ PluginConfiguration specifies a plugin that needs to be loaded for this
+ cluster to be reconciled
+ properties:
+ enabled:
+ default: true
+ description: Enabled is true if this plugin will be used
+ type: boolean
+ isWALArchiver:
+ default: false
+ description: |-
+ Only one plugin can be declared as WALArchiver.
+ Cannot be active if ".spec.backup.barmanObjectStore" configuration is present.
+ type: boolean
+ name:
+ description: Name is the plugin name
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is the configuration of the plugin
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ postgresGID:
+ default: 26
+ description: The GID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresUID:
+ default: 26
+ description: The UID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresql:
+ description: Configuration of the PostgreSQL server
+ properties:
+ enableAlterSystem:
+ description: |-
+ If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+ on this CloudNativePG Cluster.
+ This should only be used for debugging and troubleshooting.
+ Defaults to false.
+ type: boolean
+ ldap:
+ description: Options to specify LDAP configuration
+ properties:
+ bindAsAuth:
+ description: Bind as authentication configuration
+ properties:
+ prefix:
+ description: Prefix for the bind authentication option
+ type: string
+ suffix:
+ description: Suffix for the bind authentication option
+ type: string
+ type: object
+ bindSearchAuth:
+ description: Bind+Search authentication configuration
+ properties:
+ baseDN:
+ description: Root DN to begin the user search
+ type: string
+ bindDN:
+ description: DN of the user to bind to the directory
+ type: string
+ bindPassword:
+ description: Secret with the password for the user to
+ bind to the directory
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty.
Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ searchAttribute:
+ description: Attribute to match against the username
+ type: string
+ searchFilter:
+ description: Search filter to use when doing the search+bind
+ authentication
+ type: string
+ type: object
+ port:
+ description: LDAP server port
+ type: integer
+ scheme:
+ description: LDAP scheme to be used, possible options are
+ `ldap` and `ldaps`
+ enum:
+ - ldap
+ - ldaps
+ type: string
+ server:
+ description: LDAP hostname or IP address
+ type: string
+ tls:
+ description: Set to 'true' to enable LDAP over TLS. 'false'
+ is default
+ type: boolean
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: PostgreSQL configuration options (postgresql.conf)
+ type: object
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ pg_ident:
+ description: |-
+ PostgreSQL User Name Maps rules (lines to be appended
+ to the pg_ident.conf file)
+ items:
+ type: string
+ type: array
+ promotionTimeout:
+ description: |-
+ Specifies the maximum number of seconds to wait when promoting an instance to primary.
+ Default value is 40000000, greater than one year in seconds,
+ big enough to simulate an infinite timeout
+ format: int32
+ type: integer
+ shared_preload_libraries:
+ description: List of shared preload libraries to add to the default
+ ones
+ items:
+ type: string
+ type: array
+ syncReplicaElectionConstraint:
+ description: |-
+ Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+ set up.
+ properties:
+ enabled:
+ description: This flag enables the constraints for sync replicas
+ type: boolean
+ nodeLabelsAntiAffinity:
+ description: A list of node label values to extract and compare
+ to evaluate if the pods reside in the same topology or not
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ synchronous:
+ description: Configuration of the PostgreSQL synchronous replication
+ feature
+ properties:
+ dataDurability:
+ description: |-
+ If set to "required", data durability is strictly enforced. Write operations
+ with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+ block if there are insufficient healthy replicas, ensuring data persistence.
+ If set to "preferred", data durability is maintained when healthy replicas
+ are available, but the required number of instances will adjust dynamically
+ if replicas become unavailable. This setting relaxes strict durability enforcement
+ to allow for operational continuity. This setting is only applicable if both
+ `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+ enum:
+ - required
+ - preferred
+ type: string
+ maxStandbyNamesFromCluster:
+ description: |-
+ Specifies the maximum number of local cluster pods that can be
+ automatically included in the `synchronous_standby_names` option in
+ PostgreSQL.
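+ # Illustrative, commented-out sketch (not part of the schema): quorum-based
+ # synchronous replication using the fields above, asking for one synchronous
+ # standby and capping how many local pods enter `synchronous_standby_names`.
+ # All values are examples only.
+ #
+ #   spec:
+ #     postgresql:
+ #       synchronous:
+ #         method: any
+ #         number: 1
+ #         maxStandbyNamesFromCluster: 2
+ #         dataDurability: required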
+ type: integer
+ method:
+ description: |-
+ Method to select synchronous replication standbys from the listed
+ servers, accepting 'any' (quorum-based synchronous replication) or
+ 'first' (priority-based synchronous replication) as values.
+ enum:
+ - any
+ - first
+ type: string
+ number:
+ description: |-
+ Specifies the number of synchronous standby servers that
+ transactions must wait for responses from.
+ type: integer
+ x-kubernetes-validations:
+ - message: The number of synchronous replicas should be greater
+ than zero
+ rule: self > 0
+ standbyNamesPost:
+ description: |-
+ A user-defined list of application names to be added to
+ `synchronous_standby_names` after local cluster pods (the order is
+ only useful for priority-based synchronous replication).
+ items:
+ type: string
+ type: array
+ standbyNamesPre:
+ description: |-
+ A user-defined list of application names to be added to
+ `synchronous_standby_names` before local cluster pods (the order is
+ only useful for priority-based synchronous replication).
+ items:
+ type: string
+ type: array
+ required:
+ - method
+ - number
+ type: object
+ x-kubernetes-validations:
+ - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre'
+ and empty 'standbyNamesPost'
+ rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre)
+ || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost)
+ || self.standbyNamesPost.size()==0))
+ type: object
+ primaryUpdateMethod:
+ default: restart
+ description: |-
+ Method to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be with a switchover (`switchover`) or in-place (`restart` - default)
+ enum:
+ - switchover
+ - restart
+ type: string
+ primaryUpdateStrategy:
+ default: unsupervised
+ description: |-
+ Deployment strategy to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be automated (`unsupervised` - default) or manual (`supervised`)
+ enum:
+ - unsupervised
+ - supervised
+ type: string
+ priorityClassName:
+ description: |-
+ Name of the priority class which will be used in every generated Pod. If the PriorityClass
+ specified does not exist, the pod will not be able to schedule. Please refer to
+ https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+ for more information
+ type: string
+ probes:
+ description: |-
+ The configuration of the probes to be injected
+ in the PostgreSQL Pods.
+ properties:
+ liveness:
+ description: The liveness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
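+ # Illustrative, commented-out sketch (not part of the schema): opting into a
+ # manual, switchover-based rollout of the primary, per the two enums above.
+ #
+ #   spec:
+ #     primaryUpdateMethod: switchover
+ #     primaryUpdateStrategy: supervised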
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ readiness:
+ description: The readiness probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before readiness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ maximumLag:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Lag limit. Used only for `streaming` strategy
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type:
+ description: The probe strategy
+ enum:
+ - pg_isready
+ - streaming
+ - query
+ type: string
+ type: object
+ startup:
+ description: The startup probe configuration
+ properties:
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before startup probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ maximumLag:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Lag limit. Used only for `streaming` strategy
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Defaults to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type:
+ description: The probe strategy
+ enum:
+ - pg_isready
+ - streaming
+ - query
+ type: string
+ type: object
+ type: object
+ projectedVolumeTemplate:
+ description: |-
+ Template to be used to define projected volumes; projected volumes will be mounted
+ under the `/projected` base folder
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: |-
+ sources is the list of volume projections. Each entry in this list
+ handles one source.
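+ # Illustrative, commented-out sketch (not part of the schema): projecting a
+ # ConfigMap into the instance pods; per the description above, sources are
+ # mounted under the `/projected` base folder. The ConfigMap name
+ # `app-settings` is a hypothetical example.
+ #
+ #   spec:
+ #     projectedVolumeTemplate:
+ #       sources:
+ #       - configMap:
+ #           name: app-settings
+ #           items:
+ #           - key: settings.ini
+ #             path: settings.ini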
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
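+ # Illustrative, commented-out sketch (not part of the schema): a
+ # clusterTrustBundle projection entry selected by signer name, written as a
+ # single deduplicated PEM file. Alpha feature, and the signer name is a
+ # hypothetical example.
+ #
+ #   - clusterTrustBundle:
+ #       signerName: example.com/my-signer
+ #       path: ca-bundle.pem
+ #       optional: true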
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
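+ # Illustrative, commented-out sketch (not part of the schema): a
+ # serviceAccountToken projection entry with a custom audience; the kubelet
+ # rotates the token as it nears expiry. The audience value is a hypothetical
+ # example.
+ #
+ #   - serviceAccountToken:
+ #       audience: https://vault.example.com
+ #       expirationSeconds: 3600
+ #       path: vault-token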
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ replica:
+ description: Replica cluster configuration
+ properties:
+ enabled:
+ description: |-
+ If replica mode is enabled, this cluster will be a replica of an
+ existing cluster. A replica cluster can be created from a recovery
+ object store or via streaming through pg_basebackup.
+ Refer to the Replica clusters page of the documentation for more information.
+ type: boolean
+ minApplyDelay:
+ description: |-
+ When replica mode is enabled, this parameter allows you to replay
+ transactions only when the system time is at least the configured
+ time past the commit time. This provides an opportunity to correct
+ data loss errors. Note that when this parameter is set, a promotion
+ token cannot be used.
+ type: string
+ primary:
+ description: |-
+ Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+ topology specified in externalClusters
+ type: string
+ promotionToken:
+ description: |-
+ A demotion token generated by an external cluster used to
+ check if the promotion requirements are met.
+ type: string
+ self:
+ description: |-
+ Self defines the name of this cluster. It is used to determine if this is a primary
+ or a replica cluster, comparing it with `primary`
+ type: string
+ source:
+ description: The name of the external cluster which is the replication
+ origin
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
+ replicationSlots:
+ default:
+ highAvailability:
+ enabled: true
+ description: Replication slots management configuration
+ properties:
+ highAvailability:
+ default:
+ enabled: true
+ description: Replication slots for high availability configuration
+ properties:
+ enabled:
+ default: true
+ description: |-
+ If enabled (default), the operator will automatically manage replication slots
+ on the primary instance and use them in streaming replication
+ connections with all the standby instances that are part of the HA
+ cluster. If disabled, the operator will not take advantage
+ of replication slots in streaming connections with the replicas.
+ This feature also controls replication slots in replica clusters,
+ from the designated primary to its cascading replicas.
+ type: boolean
+ slotPrefix:
+ default: _cnpg_
+ description: |-
+ Prefix for replication slots managed by the operator for HA.
+ It may only contain lower case letters, numbers, and the underscore character.
+ This can only be set at creation time. By default set to `_cnpg_`.
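+ # Illustrative, commented-out sketch (not part of the schema): HA replication
+ # slots with a custom prefix (lower case letters, numbers and underscores
+ # only, settable only at creation time) and a tighter status-update interval.
+ # All values are examples only.
+ #
+ #   spec:
+ #     replicationSlots:
+ #       highAvailability:
+ #         enabled: true
+ #         slotPrefix: _ha_
+ #       updateInterval: 15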
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ superuserSecret:
+ description: |-
+ The secret containing the superuser password. If not defined a new
+ secret will be created with a randomly generated password
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ switchoverDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a primary PostgreSQL instance
+ to gracefully shutdown during a switchover.
+ Default value is 3600 seconds (1 hour).
+ format: int32
+ type: integer
+ tablespaces:
+ description: The tablespaces configuration
+ items:
+ description: |-
+ TablespaceConfiguration is the configuration of a tablespace, and includes
+ the storage specification for the tablespace
+ properties:
+ name:
+ description: The name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ properties:
+ name:
+ type: string
+ type: object
+ storage:
+ description: The storage configuration for the tablespace
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent
+ Volume Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
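+ # Illustrative sketch (not part of the generated schema): a hypothetical
+ # `.spec.topologySpreadConstraints` entry for a Cluster named
+ # "cluster-example", spreading instances across zones via the standard
+ # topology.kubernetes.io/zone node label and the cnpg.io/cluster pod label.
+ #
+ #   topologySpreadConstraints:
+ #     - maxSkew: 1
+ #       topologyKey: topology.kubernetes.io/zone
+ #       whenUnsatisfiable: DoNotSchedule
+ #       labelSelector:
+ #         matchLabels:
+ #           cnpg.io/cluster: cluster-example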
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. 
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; this can be omitted
+ if ReplicationTLSSecret is provided.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server SSL certificates; this can be omitted
+ if ServerTLSSecret is provided.
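+ # Illustrative sketch (not part of the generated schema): a hypothetical
+ # `.spec.certificates` stanza supplying user-managed secrets instead of the
+ # self-signed defaults described above; all secret names are placeholders.
+ #
+ #   certificates:
+ #     serverCASecret: my-server-ca
+ #     serverTLSSecret: my-server-tls
+ #     clientCASecret: my-client-ca
+ #     replicationTLSSecret: my-replication-tls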
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: The first recoverability point, stored as a date in RFC3339 + format, per backup method type + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. 
+ type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: Stored as a date in RFC3339 format + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format + This field is calculated from the content of LastSuccessfulBackupByMethod + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: Last successful backup, stored as a date in RFC3339 format, + per backup method type + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. 
+ properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. 
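+ # Illustrative sketch (not part of the generated schema): a possible
+ # `.status` excerpt for a healthy hypothetical three-instance cluster,
+ # combining a few of the fields described above; names are placeholders.
+ #
+ #   status:
+ #     phase: Cluster in healthy state
+ #     instances: 3
+ #     readyInstances: 3
+ #     currentPrimary: cluster-example-1
+ #     targetPrimary: cluster-example-1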
+ type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. + type: boolean + type: object + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance, this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extract. 
It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. 
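+ # Illustrative sketch (not part of the generated schema): a minimal
+ # hypothetical Database manifest using the required fields (cluster, name,
+ # owner); all names are placeholders.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Database
+ #   metadata:
+ #     name: db-app
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     name: app
+ #     owner: app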
+ type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. 
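+ # Illustrative sketch (not part of the generated schema): a hypothetical
+ # `.spec.extensions` list declaratively managing one extension in the
+ # database; the extension name is only an example.
+ #
+ #   extensions:
+ #     - name: pg_stat_statements
+ #       ensure: present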
+ type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. + type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + applied: + description: Applied is true if the database was reconciled correctly + type: boolean + extensions: + description: Extensions is the status of the managed extensions + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + schemas: + description: Schemas is the status of the managed schemas + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
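+ # Illustrative sketch (not part of the generated schema): a hypothetical
+ # ImageCatalog with one entry per PostgreSQL major version; the image
+ # references below are placeholders.
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql-catalog
+ #   spec:
+ #     images:
+ #       - major: 16
+ #         image: ghcr.io/cloudnative-pg/postgresql:16.4
+ #       - major: 17
+ #         image: ghcr.io/cloudnative-pg/postgresql:17.0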
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
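+ # Illustrative sketch (not part of the generated schema): a hypothetical
+ # relabeling entry dropping a noisy label from scraped targets; the label
+ # name is only an example.
+ #
+ #   podMonitorRelabelings:
+ #     - action: labeldrop
+ #       regex: pod_template_hash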
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
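+            # A usage sketch, not produced by controller-gen: pausing PgBouncer
+            # on a Pooler via the `paused` field documented above. Resource
+            # names are hypothetical.
+            #
+            #   apiVersion: postgresql.cnpg.io/v1
+            #   kind: Pooler
+            #   metadata:
+            #     name: pooler-example
+            #   spec:
+            #     cluster:
+            #       name: cluster-example
+            #     instances: 1
+            #     pgbouncer:
+            #       poolMode: transaction
+            #       paused: true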
+ type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. 
If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. 
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature. + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic is + distributed to Service endpoints. Implementations can use this field as a + hint, but are not required to guarantee strict adherence. 
If the field is + not set, the implementation will apply its default routing strategy. If set + to "PreferClose", implementations should prioritize endpoints that are + topologically close (e.g., same zone). + This is a beta field and requires enabling ServiceTrafficDistribution feature. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
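+            # A usage sketch, not produced by controller-gen: the same
+            # WeightedPodAffinityTerm shape, applied through the Pooler pod
+            # template as a preferred anti-affinity to spread pooler pods
+            # across nodes. The label key is an assumption, not taken from
+            # this schema.
+            #
+            #   spec:
+            #     template:
+            #       spec:
+            #         affinity:
+            #           podAntiAffinity:
+            #             preferredDuringSchedulingIgnoredDuringExecution:
+            #               - weight: 100
+            #                 podAffinityTerm:
+            #                   topologyKey: kubernetes.io/hostname
+            #                   labelSelector:
+            #                     matchLabels:
+            #                       cnpg.io/poolerName: pooler-example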
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
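+            # A brief sketch, not produced by controller-gen: per the
+            # matchLabelKeys semantics above, listing `pod-template-hash`
+            # scopes the term to pods of the same ReplicaSet revision; the
+            # `app` label is a hypothetical example.
+            #
+            #   podAffinityTerm:
+            #     topologyKey: kubernetes.io/hostname
+            #     labelSelector:
+            #       matchLabels:
+            #         app: pooler
+            #     matchLabelKeys:
+            #       - pod-template-hash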
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. 
+                                The container image's CMD is used if this is not provided.
+                                Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                                cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                                produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                                of whether the variable exists or not. Cannot be updated.
+                                More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            command:
+                              description: |-
+                                Entrypoint array. Not executed within a shell.
+                                The container image's ENTRYPOINT is used if this is not provided.
+                                Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                                cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                                produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                                of whether the variable exists or not. Cannot be updated.
+                                More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            env:
+                              description: |-
+                                List of environment variables to set in the container.
+                                Cannot be updated.
+                              items:
+                                description: EnvVar represents an environment variable
+                                  present in a Container.
+                                properties:
+                                  name:
+                                    description: Name of the environment variable.
+                                      Must be a C_IDENTIFIER.
+                                    type: string
+                                  value:
+                                    description: |-
+                                      Variable references $(VAR_NAME) are expanded
+                                      using the previously defined environment variables in the container and
+                                      any service environment variables. If a variable cannot be resolved,
+                                      the reference in the input string will be unchanged. Double $$ are reduced
+                                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                      "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                      Escaped references will never be expanded, regardless of whether the variable
+                                      exists or not.
+                                      Defaults to "".
+                                    type: string
+                                  valueFrom:
+                                    description: Source for the environment variable's
+                                      value. Cannot be used if value is not empty.
+                                    properties:
+                                      configMapKeyRef:
+                                        description: Selects a key of a ConfigMap.
+                                        properties:
+                                          key:
+                                            description: The key to select.
+                                            type: string
+                                          name:
+                                            default: ""
+                                            description: |-
+                                              Name of the referent.
+                                              This field is effectively required, but due to backwards compatibility is
+                                              allowed to be empty. Instances of this type with an empty value here are
+                                              almost certainly wrong.
+                                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                            type: string
+                                          optional:
+                                            description: Specify whether the ConfigMap
+                                              or its key must be defined
+                                            type: boolean
+                                        required:
+                                        - key
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      fieldRef:
+                                        description: |-
+                                          Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                          spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                        properties:
+                                          apiVersion:
+                                            description: Version of the schema the
+                                              FieldPath is written in terms of, defaults
+                                              to "v1".
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
+ format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+                                  format: int32
+                                  type: integer
+                                tcpSocket:
+                                  description: TCPSocket specifies a connection to
+                                    a TCP port.
+                                  properties:
+                                    host:
+                                      description: 'Optional: Host name to connect
+                                        to, defaults to the pod IP.'
+                                      type: string
+                                    port:
+                                      anyOf:
+                                      - type: integer
+                                      - type: string
+                                      description: |-
+                                        Number or name of the port to access on the container.
+                                        Number must be in the range 1 to 65535.
+                                        Name must be an IANA_SVC_NAME.
+                                      x-kubernetes-int-or-string: true
+                                  required:
+                                  - port
+                                  type: object
+                                terminationGracePeriodSeconds:
+                                  description: |-
+                                    Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+                                    The grace period is the duration in seconds after the processes running in the pod are sent
+                                    a termination signal and the time when the processes are forcibly halted with a kill signal.
+                                    Set this value longer than the expected cleanup time for your process.
+                                    If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+                                    value overrides the value provided by the pod spec.
+                                    Value must be non-negative integer. The value zero indicates stop immediately via
+                                    the kill signal (no opportunity to shut down).
+                                    This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+                                    Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+                                  format: int64
+                                  type: integer
+                                timeoutSeconds:
+                                  description: |-
+                                    Number of seconds after which the probe times out.
+                                    Defaults to 1 second. Minimum value is 1.
+                                    More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                                  format: int32
+                                  type: integer
+                              type: object
+                            stdin:
+                              description: |-
+                                Whether this container should allocate a buffer for stdin in the container runtime. If this
+                                is not set, reads from stdin in the container will always result in EOF.
+                                Default is false.
+                              type: boolean
+                            stdinOnce:
+                              description: |-
+                                Whether the container runtime should close the stdin channel after it has been opened by
+                                a single attach. When stdin is true the stdin stream will remain open across multiple attach
+                                sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                                first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                                at which time stdin is closed and remains closed until the container is restarted. If this
+                                flag is false, a container process that reads from stdin will never receive an EOF.
+                                Default is false.
+                              type: boolean
+                            terminationMessagePath:
+                              description: |-
+                                Optional: Path at which the file to which the container's termination message
+                                will be written is mounted into the container's filesystem.
+                                Message written is intended to be brief final status, such as an assertion failure message.
+                                Will be truncated by the node if greater than 4096 bytes. The total message length across
+                                all containers will be limited to 12kb.
+                                Defaults to /dev/termination-log.
+                                Cannot be updated.
+                              type: string
+                            terminationMessagePolicy:
+                              description: |-
+                                Indicate how the termination message should be populated. File will use the contents of
+                                terminationMessagePath to populate the container status message on both success and failure.
+                                FallbackToLogsOnError will use the last chunk of container log output if the termination
+                                message file is empty and the container exited with an error.
+                                The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                                Defaults to File.
+                                Cannot be updated.
+ type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless
+                                of whether the variable exists or not. Cannot be updated.
+                                More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            command:
+                              description: |-
+                                Entrypoint array. Not executed within a shell.
+                                The image's ENTRYPOINT is used if this is not provided.
+                                Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                                cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                                produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                                of whether the variable exists or not. Cannot be updated.
+                                More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            env:
+                              description: |-
+                                List of environment variables to set in the container.
+                                Cannot be updated.
+                              items:
+                                description: EnvVar represents an environment variable
+                                  present in a Container.
+                                properties:
+                                  name:
+                                    description: Name of the environment variable.
+                                      Must be a C_IDENTIFIER.
+                                    type: string
+                                  value:
+                                    description: |-
+                                      Variable references $(VAR_NAME) are expanded
+                                      using the previously defined environment variables in the container and
+                                      any service environment variables. If a variable cannot be resolved,
+                                      the reference in the input string will be unchanged. Double $$ are reduced
+                                      to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                      "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                      Escaped references will never be expanded, regardless of whether the variable
+                                      exists or not.
+                                      Defaults to "".
+                                    type: string
+                                  valueFrom:
+                                    description: Source for the environment variable's
+                                      value. Cannot be used if value is not empty.
+                                    properties:
+                                      configMapKeyRef:
+                                        description: Selects a key of a ConfigMap.
+                                        properties:
+                                          key:
+                                            description: The key to select.
+                                            type: string
+                                          name:
+                                            default: ""
+                                            description: |-
+                                              Name of the referent.
+                                              This field is effectively required, but due to backwards compatibility is
+                                              allowed to be empty. Instances of this type with an empty value here are
+                                              almost certainly wrong.
+                                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                            type: string
+                                          optional:
+                                            description: Specify whether the ConfigMap
+                                              or its key must be defined
+                                            type: boolean
+                                        required:
+                                        - key
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      fieldRef:
+                                        description: |-
+                                          Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                          spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                        properties:
+                                          apiVersion:
+                                            description: Version of the schema the
+                                              FieldPath is written in terms of, defaults
+                                              to "v1".
+                                            type: string
+                                          fieldPath:
+                                            description: Path of the field to select
+                                              in the specified API version.
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
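+                                  # Illustrative sketch, not part of the generated schema: lifecycle is
+                                  # rejected on ephemeral containers, but on a regular container in this
+                                  # template a preStop hook matching this shape could read:
+                                  #
+                                  #   lifecycle:
+                                  #     preStop:
+                                  #       exec:
+                                  #         command: ["sh", "-c", "sleep 5"]  # hypothetical drain delay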
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
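+                              # Illustrative sketch, not part of the generated schema: probes are
+                              # rejected on ephemeral containers, but the exec form of a probe on a
+                              # regular container could read as follows (the check is hypothetical):
+                              #
+                              #   readinessProbe:
+                              #     exec:
+                              #       command: ["pg_isready", "-q"]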
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
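+                                  # Illustrative sketch, not part of the generated schema: a hardened
+                                  # container securityContext combining these fields; the values are
+                                  # example choices, not defaults:
+                                  #
+                                  #   securityContext:
+                                  #     runAsNonRoot: true
+                                  #     allowPrivilegeEscalation: false
+                                  #     seccompProfile:
+                                  #       type: RuntimeDefault
+                                  #     capabilities:
+                                  #       drop: ["ALL"]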
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                            first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                            at which time stdin is closed and remains closed until the container is restarted. If this
+                            flag is false, a container process that reads from stdin will never receive an EOF.
+                            Default is false.
+                          type: boolean
+                        targetContainerName:
+                          description: |-
+                            If set, the name of the container from PodSpec that this ephemeral container targets.
+                            The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+                            If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+                            The container runtime must implement support for this feature. If the runtime does not
+                            support namespace targeting then the result of setting this field is undefined.
+                          type: string
+                        terminationMessagePath:
+                          description: |-
+                            Optional: Path at which the file to which the container's termination message
+                            will be written is mounted into the container's filesystem.
+                            Message written is intended to be brief final status, such as an assertion failure message.
+                            Will be truncated by the node if greater than 4096 bytes. The total message length across
+                            all containers will be limited to 12kb.
+                            Defaults to /dev/termination-log.
+                            Cannot be updated.
+                          type: string
+                        terminationMessagePolicy:
+                          description: |-
+                            Indicate how the termination message should be populated. File will use the contents of
+                            terminationMessagePath to populate the container status message on both success and failure.
+                            FallbackToLogsOnError will use the last chunk of container log output if the termination
+                            message file is empty and the container exited with an error.
+                            The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                            Defaults to File.
+                            Cannot be updated.
+                          type: string
+                        tty:
+                          description: |-
+                            Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                            Default is false.
+                          type: boolean
+                        volumeDevices:
+                          description: volumeDevices is the list of block devices
+                            to be used by the container.
+                          items:
+                            description: volumeDevice describes a mapping of a
+                              raw block device within a container.
+                            properties:
+                              devicePath:
+                                description: devicePath is the path inside of
+                                  the container that the device will be mapped
+                                  to.
+                                type: string
+                              name:
+                                description: name must match the name of a persistentVolumeClaim
+                                  in the pod
+                                type: string
+                            required:
+                            - devicePath
+                            - name
+                            type: object
+                          type: array
+                          x-kubernetes-list-map-keys:
+                          - devicePath
+                          x-kubernetes-list-type: map
+                        volumeMounts:
+                          description: |-
+                            Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+                            Cannot be updated.
+                          items:
+                            description: VolumeMount describes a mounting of a
+                              Volume within a container.
+                            properties:
+                              mountPath:
+                                description: |-
+                                  Path within the container at which the volume should be mounted. Must
+                                  not contain ':'.
+                                type: string
+                              mountPropagation:
+                                description: |-
+                                  mountPropagation determines how mounts are propagated from the host
+                                  to container and the other way around.
+                                  When not set, MountPropagationNone is used.
+                                  This field is beta in 1.10.
+                                  When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+                                  (which defaults to None).
+                                type: string
+                              name:
+                                description: This must match the Name of a Volume.
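+                                # Illustrative sketch, not part of the generated schema: a volumeMounts
+                                # entry matching this shape; the volume name and path are hypothetical
+                                # and must match a volume declared in the pod spec:
+                                #
+                                #   volumeMounts:
+                                #   - name: scratch
+                                #     mountPath: /scratch
+                                #     readOnly: false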
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. 
Setting false is useful for
+                      mitigating container breakout vulnerabilities while still allowing users to run their
+                      containers as root without actually having root privileges on the host.
+                      This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+                    type: boolean
+                  hostname:
+                    description: |-
+                      Specifies the hostname of the Pod.
+                      If not specified, the pod's hostname will be set to a system-defined value.
+                    type: string
+                  imagePullSecrets:
+                    description: |-
+                      ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+                      If specified, these secrets will be passed to individual puller implementations for them to use.
+                      More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+                    items:
+                      description: |-
+                        LocalObjectReference contains enough information to let you locate the
+                        referenced object inside the same namespace.
+                      properties:
+                        name:
+                          default: ""
+                          description: |-
+                            Name of the referent.
+                            This field is effectively required, but due to backwards compatibility is
+                            allowed to be empty. Instances of this type with an empty value here are
+                            almost certainly wrong.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    type: array
+                    x-kubernetes-list-map-keys:
+                    - name
+                    x-kubernetes-list-type: map
+                  initContainers:
+                    description: |-
+                      List of initialization containers belonging to the pod.
+                      Init containers are executed in order prior to containers being started. If any
+                      init container fails, the pod is considered to have failed and is handled according
+                      to its restartPolicy. The name for an init container or normal container must be
+                      unique among all containers.
+                      Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+                      The resourceRequirements of an init container are taken into account during scheduling
+                      by finding the highest request/limit for each resource type, and then using the max of
+                      that value or the sum of the normal containers. Limits are applied to init containers
+                      in a similar fashion.
+                      Init containers cannot currently be added or removed.
+                      Cannot be updated.
+                      More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+                    items:
+                      description: A single application container that you want
+                        to run within a pod.
+                      properties:
+                        args:
+                          description: |-
+                            Arguments to the entrypoint.
+                            The container image's CMD is used if this is not provided.
+                            Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                            cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                            to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                            produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                            of whether the variable exists or not. Cannot be updated.
+                            More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        command:
+                          description: |-
+                            Entrypoint array. Not executed within a shell.
+                            The container image's ENTRYPOINT is used if this is not provided.
+                            Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
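+                                    # Illustrative sketch, not part of the generated schema: exposing a
+                                    # container's CPU limit as an environment variable via resourceFieldRef;
+                                    # the variable name CPU_LIMIT is a hypothetical choice:
+                                    #
+                                    #   env:
+                                    #   - name: CPU_LIMIT
+                                    #     valueFrom:
+                                    #       resourceFieldRef:
+                                    #         resource: limits.cpu
+                                    #         divisor: "1"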
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
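+                          # Illustrative sketch, not part of the generated schema: a minimal
+                          # initContainers entry; the name, image, and command are hypothetical:
+                          #
+                          #   initContainers:
+                          #   - name: wait-for-dns
+                          #     image: busybox:1.36
+                          #     imagePullPolicy: IfNotPresent
+                          #     command: ["sh", "-c", "until nslookup example-rw; do sleep 1; done"]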
+ type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
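+# Illustrative sketch only: a livenessProbe combining the httpGet handler with
+# the threshold and timing fields described above; path and port are assumptions.
+#
+#   livenessProbe:
+#     httpGet:
+#       path: /healthz
+#       port: 8080
+#     initialDelaySeconds: 10
+#     periodSeconds: 10
+#     failureThreshold: 3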
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
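+# Illustrative sketch only: a readinessProbe using the gRPC handler described
+# above; the port and service name are assumptions.
+#
+#   readinessProbe:
+#     grpc:
+#       port: 9090
+#       service: readiness
+#     periodSeconds: 5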
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. 
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. 
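+# Illustrative sketch only: a volumeMounts list for the field described above
+# (item fields follow below); the volume name and mount path are assumptions.
+#
+#   volumeMounts:
+#     - name: data
+#       mountPath: /var/lib/app/data
+#       readOnly: true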
+ items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. 
+                            More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                          type: object
+                          x-kubernetes-map-type: atomic
+                        os:
+                          description: |-
+                            Specifies the OS of the containers in the pod.
+                            Some pod and container fields are restricted if this is set.
+
+                            If the OS field is set to linux, the following fields must be unset:
+                            - securityContext.windowsOptions
+
+                            If the OS field is set to windows, the following fields must be unset:
+                            - spec.hostPID
+                            - spec.hostIPC
+                            - spec.hostUsers
+                            - spec.securityContext.appArmorProfile
+                            - spec.securityContext.seLinuxOptions
+                            - spec.securityContext.seccompProfile
+                            - spec.securityContext.fsGroup
+                            - spec.securityContext.fsGroupChangePolicy
+                            - spec.securityContext.sysctls
+                            - spec.shareProcessNamespace
+                            - spec.securityContext.runAsUser
+                            - spec.securityContext.runAsGroup
+                            - spec.securityContext.supplementalGroups
+                            - spec.securityContext.supplementalGroupsPolicy
+                            - spec.containers[*].securityContext.appArmorProfile
+                            - spec.containers[*].securityContext.seLinuxOptions
+                            - spec.containers[*].securityContext.seccompProfile
+                            - spec.containers[*].securityContext.capabilities
+                            - spec.containers[*].securityContext.readOnlyRootFilesystem
+                            - spec.containers[*].securityContext.privileged
+                            - spec.containers[*].securityContext.allowPrivilegeEscalation
+                            - spec.containers[*].securityContext.procMount
+                            - spec.containers[*].securityContext.runAsUser
+                            - spec.containers[*].securityContext.runAsGroup
+                          properties:
+                            name:
+                              description: |-
+                                Name is the name of the operating system. The currently supported values are linux and windows.
+                                Additional values may be defined in the future and can be one of:
+                                https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+                                Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+                              type: string
+                          required:
+                          - name
+                          type: object
+                        overhead:
+                          additionalProperties:
+                            anyOf:
+                            - type: integer
+                            - type: string
+                            pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                            x-kubernetes-int-or-string: true
+                          description: |-
+                            Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+                            This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+                            the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+                            The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+                            set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+                            defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+                            More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+                          type: object
+                        preemptionPolicy:
+                          description: |-
+                            PreemptionPolicy is the Policy for preempting pods with lower priority.
+                            One of Never, PreemptLowerPriority.
+                            Defaults to PreemptLowerPriority if unset.
+                          type: string
+                        priority:
+                          description: |-
+                            The priority value. Various system components use this field to find the
+                            priority of the pod. When Priority Admission Controller is enabled, it
+                            prevents users from setting this field. The admission controller populates
+                            this field from PriorityClassName.
+                            The higher the value, the higher the priority.
+                          format: int32
+                          type: integer
+                        priorityClassName:
+                          description: |-
+                            If specified, indicates the pod's priority.
"system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. 
+ Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. 
+ Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
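+# Illustrative sketch only: a pod-level securityContext combining the fsGroup,
+# runAs*, and seccompProfile fields described above; all values are assumptions.
+#
+#   securityContext:
+#     runAsNonRoot: true
+#     runAsUser: 1000
+#     fsGroup: 2000
+#     seccompProfile:
+#       type: RuntimeDefault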
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. 
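+# Illustrative sketch only: namespaced sysctls set through the field described
+# above; the kernel parameter name and value are assumptions.
+#
+#   securityContext:
+#     sysctls:
+#       - name: net.ipv4.tcp_keepalive_time
+#         value: "600"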
+ type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "...svc.". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. 
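+# Illustrative sketch only: a toleration using the key/operator/effect semantics
+# described above; the taint key and value are assumptions.
+#
+#   tolerations:
+#     - key: "dedicated"
+#       operator: "Equal"
+#       value: "postgres"
+#       effect: "NoSchedule"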
+ type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+                        | zone1 | zone2 | zone3 |
+                        | P P | P P | P |
+                        - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+                        scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+                        violate MaxSkew(1).
+                        - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+                        When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+                        to topologies that satisfy it.
+                        It's a required field. Default value is 1 and 0 is not allowed.
+                      format: int32
+                      type: integer
+                    minDomains:
+                      description: |-
+                        MinDomains indicates a minimum number of eligible domains.
+                        When the number of eligible domains with matching topology keys is less than minDomains,
+                        Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+                        And when the number of eligible domains with matching topology keys equals or exceeds minDomains,
+                        this value has no effect on scheduling.
+                        As a result, when the number of eligible domains is less than minDomains,
+                        scheduler won't schedule more than maxSkew Pods to those domains.
+                        If value is nil, the constraint behaves as if MinDomains is equal to 1.
+                        Valid values are integers greater than 0.
+                        When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+                        For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+                        labelSelector spread as 2/2/2:
+                        | zone1 | zone2 | zone3 |
+                        | P P | P P | P P |
+                        The number of domains is less than 5 (MinDomains), so "global minimum" is treated as 0.
+                        In this situation, a new pod with the same labelSelector cannot be scheduled,
+                        because the computed skew will be 3 (3 - 0) if the new Pod is scheduled to any of the three zones;
+                        it will violate MaxSkew.
+                      format: int32
+                      type: integer
+                    nodeAffinityPolicy:
+                      description: |-
+                        NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+                        when calculating pod topology spread skew. Options are:
+                        - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+                        - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+                        If this value is nil, the behavior is equivalent to the Honor policy.
+                        This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+                      type: string
+                    nodeTaintsPolicy:
+                      description: |-
+                        NodeTaintsPolicy indicates how we will treat node taints when calculating
+                        pod topology spread skew. Options are:
+                        - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+                        has a toleration, are included.
+                        - Ignore: node taints are ignored. All nodes are included.
+
+                        If this value is nil, the behavior is equivalent to the Ignore policy.
+                        This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+                      type: string
+                    topologyKey:
+                      description: |-
+                        TopologyKey is the key of node labels. Nodes that have a label with this key
+                        and identical values are considered to be in the same topology.
+                        We consider each <key, value> as a "bucket", and try to put balanced number
+                        of pods into each bucket.
+                        We define a domain as a particular instance of a topology.
+                        Also, we define an eligible domain as a domain whose nodes meet the requirements of
+                        nodeAffinityPolicy and nodeTaintsPolicy.
+                        e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+                        And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+                        It's a required field.
+ type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+                                  YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                                  If not specified, the volume defaultMode will be used.
+                                  This might be in conflict with other options that affect the file
+                                  mode, like fsGroup, and the result can be other mode bits set.
+                                format: int32
+                                type: integer
+                              path:
+                                description: |-
+                                  path is the relative path of the file to map the key to.
+                                  May not be an absolute path.
+                                  May not contain the path element '..'.
+                                  May not start with the string '..'.
+                                type: string
+                            required:
+                            - key
+                            - path
+                            type: object
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        name:
+                          default: ""
+                          description: |-
+                            Name of the referent.
+                            This field is effectively required, but due to backwards compatibility is
+                            allowed to be empty. Instances of this type with an empty value here are
+                            almost certainly wrong.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                          type: string
+                        optional:
+                          description: optional specify whether the ConfigMap
+                            or its keys must be defined
+                          type: boolean
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    csi:
+                      description: csi (Container Storage Interface) represents
+                        ephemeral storage that is handled by certain external
+                        CSI drivers.
+                      properties:
+                        driver:
+                          description: |-
+                            driver is the name of the CSI driver that handles this volume.
+                            Consult with your admin for the correct name as registered in the cluster.
+                          type: string
+                        fsType:
+                          description: |-
+                            fsType to mount. Ex. "ext4", "xfs", "ntfs".
+                            If not provided, the empty value is passed to the associated CSI driver
+                            which will determine the default filesystem to apply.
+                          type: string
+                        nodePublishSecretRef:
+                          description: |-
+                            nodePublishSecretRef is a reference to the secret object containing
+                            sensitive information to pass to the CSI driver to complete the CSI
+                            NodePublishVolume and NodeUnpublishVolume calls.
+                            This field is optional, and may be empty if no secret is required. If the
+                            secret object contains more than one secret, all secret references are passed.
+                          properties:
+                            name:
+                              default: ""
+                              description: |-
+                                Name of the referent.
+                                This field is effectively required, but due to backwards compatibility is
+                                allowed to be empty. Instances of this type with an empty value here are
+                                almost certainly wrong.
+                                More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                              type: string
+                          type: object
+                          x-kubernetes-map-type: atomic
+                        readOnly:
+                          description: |-
+                            readOnly specifies a read-only configuration for the volume.
+                            Defaults to false (read/write).
+                          type: boolean
+                        volumeAttributes:
+                          additionalProperties:
+                            type: string
+                          description: |-
+                            volumeAttributes stores driver-specific properties that are passed to the CSI
+                            driver. Consult your driver's documentation for supported values.
+                          type: object
+                      required:
+                      - driver
+                      type: object
+                    downwardAPI:
+                      description: downwardAPI represents downward API about
+                        the pod that should populate this volume
+                      properties:
+                        defaultMode:
+                          description: |-
+                            Optional: mode bits used to set permissions on created files by default.
+                            Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                            YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                            Defaults to 0644.
+                            Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                      type: object
+                    ephemeral:
+                      description: |-
+                        ephemeral represents a volume that is handled by a cluster storage driver.
+                        The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+                        and deleted when the pod is removed.
+
+                        Use this if:
+                        a) the volume is only needed while the pod runs,
+                        b) features of normal volumes like restoring from snapshot or capacity
+                           tracking are needed,
+                        c) the storage driver is specified through a storage class, and
+                        d) the storage driver supports dynamic volume provisioning through
+                           a PersistentVolumeClaim (see EphemeralVolumeSource for more
+                           information on the connection between this volume type
+                           and PersistentVolumeClaim).
+
+                        Use PersistentVolumeClaim or one of the vendor-specific
+                        APIs for volumes that persist for longer than the lifecycle
+                        of an individual pod.
+
+                        Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+                        be used that way - see the documentation of the driver for
+                        more information.
+
+                        A pod can use both types of ephemeral volumes and
+                        persistent volumes at the same time.
+                      properties:
+                        volumeClaimTemplate:
+                          description: |-
+                            Will be used to create a stand-alone PVC to provision the volume.
+                            The pod in which this EphemeralVolumeSource is embedded will be the
+                            owner of the PVC, i.e. the PVC will be deleted together with the
+                            pod. The name of the PVC will be `<pod name>-<volume name>` where
+                            `<volume name>` is the name from the `PodSpec.Volumes` array
+                            entry. Pod validation will reject the pod if the concatenated name
+                            is not valid for a PVC (for example, too long).
+
+                            An existing PVC with that name that is not owned by the pod
+                            will *not* be used for the pod to avoid using an unrelated
+                            volume by mistake. Starting the pod is then blocked until
+                            the unrelated PVC is removed. If such a pre-created PVC is
+                            meant to be used by the pod, the PVC has to be updated with an
+                            owner reference to the pod once the pod exists. Normally
+                            this should not be necessary, but it may be useful when
+                            manually reconstructing a broken cluster.
+
+                            This field is read-only and no changes will be made by Kubernetes
+                            to the PVC after it has been created.
+
+                            Required, must not be nil.
+                          properties:
+                            metadata:
+                              description: |-
+                                May contain labels and annotations that will be copied into the PVC
+                                when creating it. No other fields are allowed and will be rejected during
+                                validation.
+                              type: object
+                            spec:
+                              description: |-
+                                The specification for the PersistentVolumeClaim. The entire content is
+                                copied unchanged into the PVC that gets created from this
+                                template. The same fields as in a PersistentVolumeClaim
+                                are also valid here.
+                              properties:
+                                accessModes:
+                                  description: |-
+                                    accessModes contains the desired access modes the volume should have.
+                                    More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                dataSource:
+                                  description: |-
+                                    dataSource field can be used to specify either:
+                                    * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                                    * An existing PVC (PersistentVolumeClaim)
+                                    If the provisioner or an external controller can support the specified data source,
+                                    it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+                                    will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+                                    If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+                                    will be set by the persistentvolume controller if it exists.
+                                    If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                                    set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                                    exists.
+                                    More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                                    (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+                                  type: string
+                                volumeMode:
+                                  description: |-
+                                    volumeMode defines what type of volume is required by the claim.
+                                    Value of Filesystem is implied when not included in claim spec.
+                                  type: string
+                                volumeName:
+                                  description: volumeName is the binding reference
+                                    to the PersistentVolume backing this claim.
+                                  type: string
+                              type: object
+                          required:
+                          - spec
+                          type: object
+                      type: object
+                    fc:
+                      description: fc represents a Fibre Channel resource
+                        that is attached to a kubelet's host machine and then
+                        exposed to the pod.
+                      properties:
+                        fsType:
+                          description: |-
+                            fsType is the filesystem type to mount.
+                            Must be a filesystem type supported by the host operating system.
+                            Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                          type: string
+                        lun:
+                          description: 'lun is Optional: FC target lun number'
+                          format: int32
+                          type: integer
+                        readOnly:
+                          description: |-
+                            readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+                            the ReadOnly setting in VolumeMounts.
+                          type: boolean
+                        targetWWNs:
+                          description: 'targetWWNs is Optional: FC target
+                            worldwide names (WWNs)'
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        wwids:
+                          description: |-
+                            wwids Optional: FC volume world wide identifiers (wwids)
+                            Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                      type: object
+                    flexVolume:
+                      description: |-
+                        flexVolume represents a generic volume resource that is
+                        provisioned/attached using an exec based plugin.
+                        Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+                      properties:
+                        driver:
+                          description: driver is the name of the driver to
+                            use for this volume.
+                          type: string
+                        fsType:
+                          description: |-
+                            fsType is the filesystem type to mount.
+                            Must be a filesystem type supported by the host operating system.
+                            Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+                          type: string
+                        options:
+                          additionalProperties:
+                            type: string
+                          description: 'options is Optional: this field holds
+                            extra command options if any.'
+                          type: object
+                        readOnly:
+                          description: |-
+                            readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+                            the ReadOnly setting in VolumeMounts.
+                          type: boolean
+                        secretRef:
+                          description: |-
+                            secretRef is Optional: secretRef is reference to the secret object containing
+                            sensitive information to pass to the plugin scripts. This may be
+                            empty if no secret object is specified. If the secret object
+                            contains more than one secret, all secrets are passed to the plugin
+                            scripts.
+                          properties:
+                            name:
+                              default: ""
+                              description: |-
+                                Name of the referent.
+                                This field is effectively required, but due to backwards compatibility is
+                                allowed to be empty. Instances of this type with an empty value here are
+                                almost certainly wrong.
+                                More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                              type: string
+                          type: object
+                          x-kubernetes-map-type: atomic
+                      required:
+                      - driver
+                      type: object
+                    flocker:
+                      description: |-
+                        flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+                        Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+                      properties:
+                        datasetName:
+                          description: |-
+                            datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker;
+                            it should be considered as deprecated
+                          type: string
+                        datasetUUID:
+                          description: datasetUUID is the UUID of the dataset.
+                            This is the unique identifier of a Flocker dataset
+                          type: string
+                      type: object
+                    gcePersistentDisk:
+                      description: |-
+                        gcePersistentDisk represents a GCE Disk resource that is attached to a
+                        kubelet's host machine and then exposed to the pod.
+                        Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+                        gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+                        More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                      properties:
+                        fsType:
+                          description: |-
+                            fsType is filesystem type of the volume that you want to mount.
+                            Tip: Ensure that the filesystem type is supported by the host operating system.
+                            Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                          type: string
+                        partition:
+                          description: |-
+                            partition is the partition in the volume that you want to mount.
+                            If omitted, the default is to mount by volume name.
+                            Examples: For volume /dev/sda1, you specify the partition as "1".
+                            Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                          format: int32
+                          type: integer
+                        pdName:
+                          description: |-
+                            pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                          type: string
+                        readOnly:
+                          description: |-
+                            readOnly here will force the ReadOnly setting in VolumeMounts.
+                            Defaults to false.
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                          type: boolean
+                      required:
+                      - pdName
+                      type: object
+                    gitRepo:
+                      description: |-
+                        gitRepo represents a git repository at a particular revision.
+                        Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+                        EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+                        into the Pod's container.
+                      properties:
+                        directory:
+                          description: |-
+                            directory is the target directory name.
+                            Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+                            git repository. Otherwise, if specified, the volume will contain the git repository in
+                            the subdirectory with the given name.
+                          type: string
+                        repository:
+                          description: repository is the URL
+                          type: string
+                        revision:
+                          description: revision is the commit hash for the
+                            specified revision.
+                          type: string
+                      required:
+                      - repository
+                      type: object
+                    glusterfs:
+                      description: |-
+                        glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+                        Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+                        More info: https://examples.k8s.io/volumes/glusterfs/README.md
+                      properties:
+                        endpoints:
+                          description: |-
+                            endpoints is the endpoint name that details Glusterfs topology.
+                            More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                          type: string
+                        path:
+                          description: |-
+                            path is the Glusterfs volume path.
+                            More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                          type: string
+                        readOnly:
+                          description: |-
+                            readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+                            Defaults to false.
+                            More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                          type: boolean
+                      required:
+                      - endpoints
+                      - path
+                      type: object
+                    hostPath:
+                      description: |-
+                        hostPath represents a pre-existing file or directory on the host
+                        machine that is directly exposed to the container. This is generally
+                        used for system agents or other privileged things that are allowed
+                        to see the host machine. Most containers will NOT need this.
+                        More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                      properties:
+                        path:
+                          description: |-
+                            path of the directory on the host.
+                            If the path is a symlink, it will follow the link to the real path.
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                          type: string
+                        type:
+                          description: |-
+                            type for HostPath Volume
+                            Defaults to ""
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                          type: string
+                      required:
+                      - path
+                      type: object
+                    image:
+                      description: |-
+                        image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+                        The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+                        - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+                        - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+                        - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+                        The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+                        A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+                        The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+                        The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+                        The volume will be mounted read-only (ro) and non-executable files (noexec).
+                        Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+                        The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+                      properties:
+                        pullPolicy:
+                          description: |-
+                            Policy for pulling OCI objects. Possible values are:
+                            Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+                            Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+                            IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+                            Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+                          type: string
+                        reference:
+                          description: |-
+                            Required: Image or artifact reference to be used.
+                            Behaves in the same way as pod.spec.containers[*].image.
+                            Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+                            More info: https://kubernetes.io/docs/concepts/containers/images
+                            This field is optional to allow higher level config management to default or override
+                            container images in workload controllers like Deployments and StatefulSets.
+                          type: string
+                      type: object
+                    iscsi:
+                      description: |-
+                        iscsi represents an ISCSI Disk resource that is attached to a
+                        kubelet's host machine and then exposed to the pod.
+                        More info: https://examples.k8s.io/volumes/iscsi/README.md
+                      properties:
+                        chapAuthDiscovery:
+                          description: chapAuthDiscovery defines whether support
+                            iSCSI Discovery CHAP authentication
+                          type: boolean
+                        chapAuthSession:
+                          description: chapAuthSession defines whether support
+                            iSCSI Session CHAP authentication
+                          type: boolean
+                        fsType:
+                          description: |-
+                            fsType is the filesystem type of the volume that you want to mount.
+                            Tip: Ensure that the filesystem type is supported by the host operating system.
+                            Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+                          type: string
+                        initiatorName:
+                          description: |-
+                            initiatorName is the custom iSCSI Initiator Name.
+                            If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+                            <target portal>:<volume name> will be created for the connection.
+                          type: string
+                        iqn:
+                          description: iqn is the target iSCSI Qualified Name.
+                          type: string
+                        iscsiInterface:
+                          default: default
+                          description: |-
+                            iscsiInterface is the interface Name that uses an iSCSI transport.
+                            Defaults to 'default' (tcp).
+                          type: string
+                        lun:
+                          description: lun represents iSCSI Target Lun number.
+                          format: int32
+                          type: integer
+                        portals:
+                          description: |-
+                            portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+                            is other than default (typically TCP ports 860 and 3260).
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        readOnly:
+                          description: |-
+                            readOnly here will force the ReadOnly setting in VolumeMounts.
+                            Defaults to false.
+                          type: boolean
+                        secretRef:
+                          description: secretRef is the CHAP Secret for iSCSI
+                            target and initiator authentication
+                          properties:
+                            name:
+                              default: ""
+                              description: |-
+                                Name of the referent.
+                                This field is effectively required, but due to backwards compatibility is
+                                allowed to be empty. Instances of this type with an empty value here are
+                                almost certainly wrong.
+                                More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                              type: string
+                          type: object
+                          x-kubernetes-map-type: atomic
+                        targetPortal:
+                          description: |-
+                            targetPortal is iSCSI Target Portal.
The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. 
Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ optional:
+ description: optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
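# As a hedged, minimal sketch of the Pooler spec described above (`cluster`,
# `pgbouncer`, and the `type` service selector) — all names and values here
# are illustrative assumptions, not taken from this manifest — a Pooler
# routing traffic to the primary could look like:
#
#   apiVersion: postgresql.cnpg.io/v1
#   kind: Pooler
#   metadata:
#     name: pooler-example-rw
#   spec:
#     cluster:
#       name: cluster-example    # hypothetical Cluster to pool for
#     instances: 3               # PgBouncer pods; tracked by status.instances
#     type: rw                   # forward traffic to the primary; `rw` is the default
#     pgbouncer:
#       poolMode: session
#       parameters:
#         max_client_conn: "1000"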
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
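# To make the `target` rules above concrete, a minimal Publication sketch
# publishing a single table might look as follows; the names used here
# (pub-sample, cluster-example, app, public.orders) are illustrative
# assumptions only:
#
#   apiVersion: postgresql.cnpg.io/v1
#   kind: Publication
#   metadata:
#     name: pub-sample
#   spec:
#     cluster:
#       name: cluster-example   # the "publisher" cluster
#     dbname: app               # database that will hold the publication
#     name: pub_sample          # publication name inside PostgreSQL
#     target:
#       objects:
#         - table:
#             schema: public
#             name: orders      # corresponds to `FOR TABLE public.orders`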
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.3 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: set the cluster as owner of the backup
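# As a sketch of how these ownership options combine with the rest of the
# spec, a ScheduledBackup that owns its Backup objects could be declared as
# below; the names and the schedule value are illustrative assumptions (note
# the six-field cron format with a leading seconds specifier, described under
# `schedule` further down):
#
#   apiVersion: postgresql.cnpg.io/v1
#   kind: ScheduledBackup
#   metadata:
#     name: backup-example
#   spec:
#     schedule: "0 0 0 * * *"      # seconds minutes hours day month weekday
#     backupOwnerReference: self   # each Backup is owned by this ScheduledBackup
#     cluster:
#       name: cluster-example      # hypothetical target cluster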
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: Whether the first backup has to start immediately
+ after creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: Whether this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. 
This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule was checked
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: The last time that a backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: The next time a backup will run
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.3
+ name: subscriptions.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Subscription
+ listKind: SubscriptionList
+ plural: subscriptions
+ singular: subscription
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Subscription is the Schema for the subscriptions API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SubscriptionSpec defines the desired state of Subscription
+ properties:
+ cluster:
+ description: The name of the PostgreSQL cluster that identifies the
+ "subscriber"
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ dbname:
+ description: |-
+ The name of the database where the subscription will be installed in
+ the "subscriber" cluster
+ type: string
+ x-kubernetes-validations:
+ - message: dbname is immutable
+ rule: self == oldSelf
+ externalClusterName:
+ description: The name of the external cluster with the publication
+ ("publisher")
+ type: string
+ name:
+ description: The name of the subscription inside PostgreSQL
+ type: string
+ x-kubernetes-validations:
+ - message: name is immutable
+ rule: self == oldSelf
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Subscription parameters part of the `WITH` clause as expected by
+ PostgreSQL `CREATE SUBSCRIPTION` command
+ type: object
+ publicationDBName:
+ description: |-
+ The name of the database containing the publication on the external
+ cluster. Defaults to the one in the external cluster definition.
+ type: string
+ publicationName:
+ description: |-
+ The name of the publication inside the PostgreSQL database in the
+ "publisher"
+ type: string
+ subscriptionReclaimPolicy:
+ default: retain
+ description: The policy for end-of-life maintenance of this subscription
+ enum:
+ - delete
+ - retain
+ type: string
+ required:
+ - cluster
+ - dbname
+ - externalClusterName
+ - name
+ - publicationName
+ type: object
+ status:
+ description: SubscriptionStatus defines the observed state of Subscription
+ properties:
+ applied:
+ description: Applied is true if the subscription was reconciled correctly
+ type: boolean
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-editor-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: cloudnative-pg-kubebuilderv4
+ name: cnpg-database-viewer-role
+rules:
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - databases/status
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+
resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + 
verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + 
description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving 
succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" 
+ - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + 
, pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + 
app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - 
apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From b01202cfd46394aba1665e78ddb1aac9459242b0 Mon Sep 17 00:00:00 2001 From: German Eichberger Date: Fri, 23 May 2025 07:54:10 -0700 Subject: [PATCH 607/836] chore: update ADOPTERS.md (#7621) Adds DocumentDB Operator to the ADOPTERS list Signed-off-by: German Eichberger --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index b2c4ad1816..d3767f1b57 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -63,3 +63,4 @@ This list is sorted in chronological order, based on the submission date. | [Mirakl](https://www.mirakl.com/) | @ThomasBoussekey | 2025-02-03 | CloudNativePG is our default hosting solution for marketplace instances. With over 300 CloudNativePG clusters managing 8 TB of data, we have developed highly customizable Helm charts that support connection pooling, logical replication, and many other advanced features. | | [Bitnami](https://bitnami.com) | [@carrodher](https://github.com/carrodher) | 2025-03-04 | Bitnami provides CloudNativePG as part of its open-source [Helm charts catalog](https://github.com/bitnami/charts), enabling users to easily deploy PostgreSQL clusters on Kubernetes. 
Additionally, CloudNativePG is available through [Tanzu Application Catalog](https://www.vmware.com/products/app-platform/tanzu-application-catalog) and [Bitnami Premium](https://www.arrow.com/globalecs/na/vendors/bitnami-premium/), where customers can benefit from advanced security and compliance features such as VEX, SBOM, SLSA3, and CVE scanning. | | [Giant Swarm](https://www.giantswarm.io/) | [@stone-z](https://github.com/stone-z) | 2025-05-02 | Giant Swarm's full-service Kubernetes security and observability platforms are powered by PostgreSQL clusters delightfully managed with CloudNativePG. | +| [DocumentDB Operator](https://github.com/microsoft/documentdb-kubernetes-operator) | [@xgerman](https://github.com/xgerman) | 2025-05-22 | The DocumentDB Kubernetes Operator is an open-source project to run and manage DocumentDB on Kubernetes. [DocumentDB](https://github.com/microsoft/documentdb) is the engine powering vCore-based [Azure Cosmos DB for MongoDB](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/). The operator uses CloudNativePG behind the scenes. | From e244b06818eba7ab9c9cd02a9936b9b2ac15a200 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 26 May 2025 10:53:32 +0200 Subject: [PATCH 608/836] chore(ci): remove release-1.24 branch and add release-1.26 (#7650) The branch release-1.24 is now locked, and no changes should be made to it. The new branch added to the list is release-1.26. Closes #7649 Signed-off-by: Jonathan Gonzalez V. --- .github/renovate.json5 | 4 ++-- .github/workflows/backport.yml | 6 +++--- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 601d4502f0..7fa2706846 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -11,8 +11,8 @@ baseBranches: [ 'main', 'release-1.22', - 'release-1.24', - 'release-1.25' + 'release-1.25', + 'release-1.26', ], ignorePaths: [ 'docs/**', diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 43f4102c0a..1eec5327a0 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -36,8 +36,8 @@ jobs: labels: | backport-requested :arrow_backward: release-1.22 - release-1.24 release-1.25 + release-1.26 - name: Create comment uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 @@ -60,8 +60,8 @@ jobs: labels: | backport-requested :arrow_backward: release-1.22 - release-1.24 release-1.25 + release-1.26 ## backport pull request in condition when pr contains 'backport-requested' label and contains target branches labels back-porting-pr: @@ -77,7 +77,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.24, release-1.25] + branch: [release-1.22, release-1.25, release-1.26] env: PR: ${{ github.event.pull_request.number }} outputs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 1c91224e3e..efe79f0789 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -74,7 +74,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.24, release-1.25] + branch: [release-1.22, release-1.25, release-1.26] steps: - name: Invoke workflow with inputs uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 diff --git a/.github/workflows/continuous-integration.yml
b/.github/workflows/continuous-integration.yml index 8b6a8878c9..440f79de8c 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -57,7 +57,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.24, release-1.25] + branch: [release-1.22, release-1.25, release-1.26] steps: - name: Invoke workflow with inputs uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 From bf81e33b73d72e30c6090c948df168fe55c5f5a5 Mon Sep 17 00:00:00 2001 From: Peggie Date: Mon, 26 May 2025 10:55:27 +0200 Subject: [PATCH 609/836] feat: Public Cloud K8S versions update (#7643) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index 4f53fabd97..bea3c37cce 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,6 +1,6 @@ [ "1.33.0", - "1.32.3", - "1.31.7", + "1.32.4", + "1.31.8", "1.30.9" ] From 67b8594ac18c37c46ea5a3b4c9d373fc2492f9d3 Mon Sep 17 00:00:00 2001 From: Francesco Canovai Date: Mon, 26 May 2025 11:31:55 +0200 Subject: [PATCH 610/836] test(e2e): strengthen readiness checks (#7603) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes some cases in the e2e where the tests would race to the next test when the operator is not ready yet, or when the nodes are not ready. Refactor the code to improve reuse and simplify signatures. Closes #7589 Signed-off-by: Francesco Canovai Signed-off-by: Niccolò Fei Co-authored-by: Niccolò Fei --- tests/e2e/asserts_test.go | 19 ---- tests/e2e/config_support_test.go | 4 +- tests/e2e/openshift_upgrade_test.go | 7 +- tests/e2e/operator_deployment_test.go | 6 +- tests/e2e/operator_unavailable_test.go | 2 +- tests/e2e/self_fencing_test.go | 11 +- tests/e2e/webhook_test.go | 4 +- tests/utils/deployments/deployment.go | 31 +++++- tests/utils/nodes/nodes.go | 29 +++++ tests/utils/operator/operator.go | 144 +++++++++---------------- tests/utils/operator/webhooks.go | 62 ++++++++--- 11 files changed, 171 insertions(+), 148 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index e28d510f66..45acd2428d 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -20,7 +20,6 @@ SPDX-License-Identifier: Apache-2.0 package e2e import ( - "context" "database/sql" "errors" "fmt" @@ -38,7 +37,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" "k8s.io/utils/strings/slices" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -452,23 +450,6 @@ func AssertConnection( }) } -// AssertOperatorIsReady verifies that the operator is ready -func AssertOperatorIsReady( - ctx context.Context, - crudClient ctrlclient.Client, - kubeInterface kubernetes.Interface, -) { - Eventually(func() (bool, error) { - ready, err := operator.IsReady(ctx, crudClient, kubeInterface) - if ready && err == nil { - return true, nil - } - // Waiting a bit to avoid overloading the API server - time.Sleep(1 * time.Second) - return ready, err - }, testTimeouts[timeouts.OperatorIsReady]).Should(BeTrue(), "Operator pod is not ready") -} - type TableLocator struct { Namespace string
ClusterName string diff --git a/tests/e2e/config_support_test.go b/tests/e2e/config_support_test.go index 92977539d7..4d06170296 100644 --- a/tests/e2e/config_support_test.go +++ b/tests/e2e/config_support_test.go @@ -122,7 +122,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, Expect(err).ToNot(HaveOccurred()) } - err = operator.ReloadDeployment(env.Ctx, env.Client, env.Interface, 120) + err = operator.ReloadDeployment(env.Ctx, env.Client, 120) Expect(err).ToNot(HaveOccurred()) }) @@ -156,7 +156,7 @@ var _ = Describe("Config support", Serial, Ordered, Label(tests.LabelDisruptive, }, 10).Should(HaveLen(1)) // Reload the operator with the new config - err = operator.ReloadDeployment(env.Ctx, env.Client, env.Interface, 120) + err = operator.ReloadDeployment(env.Ctx, env.Client, 120) Expect(err).ToNot(HaveOccurred()) }) diff --git a/tests/e2e/openshift_upgrade_test.go b/tests/e2e/openshift_upgrade_test.go index 9672475b24..189c851904 100644 --- a/tests/e2e/openshift_upgrade_test.go +++ b/tests/e2e/openshift_upgrade_test.go @@ -31,6 +31,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/tests/utils/openshift" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -123,7 +124,8 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere By("Applying the initial subscription", func() { err := openshift.CreateSubscription(env.Ctx, env.Client, initialSubscription) Expect(err).ToNot(HaveOccurred()) - AssertOperatorIsReady(env.Ctx, env.Client, env.Interface) + Expect(operator.WaitForReady(env.Ctx, env.Client, uint(testTimeouts[timeouts.OperatorIsReady]), + true)).Should(Succeed()) }) // Gather the version and semantic Versions of the operator @@ -155,7 +157,8 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere return openshift.GetSubscriptionVersion(env.Ctx, env.Client) }, 300). ShouldNot(BeEquivalentTo(currentVersion)) - AssertOperatorIsReady(env.Ctx, env.Client, env.Interface) + Expect(operator.WaitForReady(env.Ctx, env.Client, uint(testTimeouts[timeouts.OperatorIsReady]), + true)).Should(Succeed()) }) // Check if the upgrade was successful by making sure all the pods diff --git a/tests/e2e/operator_deployment_test.go b/tests/e2e/operator_deployment_test.go index 95263e6bc4..17e1ff6c77 100644 --- a/tests/e2e/operator_deployment_test.go +++ b/tests/e2e/operator_deployment_test.go @@ -22,6 +22,7 @@ package e2e import ( "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -38,10 +39,11 @@ var _ = Describe("PostgreSQL operator deployment", Label(tests.LabelBasic, tests It("sets up the operator", func() { By("having a pod for the operator in state ready", func() { - AssertOperatorIsReady(env.Ctx, env.Client, env.Interface) + Expect(operator.WaitForReady(env.Ctx, env.Client, uint(testTimeouts[timeouts.OperatorIsReady]), + true)).Should(Succeed()) }) By("having a deployment for the operator in state ready", func() { - ready, err := operator.IsDeploymentReady(env.Ctx, env.Client) + ready, err := operator.IsReady(env.Ctx, env.Client, true) Expect(err).ToNot(HaveOccurred()) Expect(ready).To(BeTrue()) }) diff --git a/tests/e2e/operator_unavailable_test.go b/tests/e2e/operator_unavailable_test.go index e170c92071..4b3b635cae 100644 --- a/tests/e2e/operator_unavailable_test.go +++ b/tests/e2e/operator_unavailable_test.go @@ -211,7 +211,7 @@ var _ = Describe("Operator unavailable", Serial, Label(tests.LabelDisruptive, te g.Expect(podList.Items[0].Name).NotTo(BeEquivalentTo(operatorPodName)) }, timeout).Should(Succeed()) Eventually(func() (bool, error) { - return operator.IsDeploymentReady(env.Ctx, env.Client) + return operator.IsReady(env.Ctx, env.Client, true) }, timeout).Should(BeTrue()) }) diff --git a/tests/e2e/self_fencing_test.go b/tests/e2e/self_fencing_test.go index 072ed32791..17d072d914 100644 --- a/tests/e2e/self_fencing_test.go +++ b/tests/e2e/self_fencing_test.go @@ -31,6 +31,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/nodes" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/operator" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" @@ -132,11 +133,15 @@ var _ = Describe("Self-fencing with liveness probe", Serial, Label(tests.LabelDi Name: oldPrimaryPod.Name, } timeout := 180 - Eventually(func() (bool, error) { + Eventually(func(g Gomega) { pod := corev1.Pod{} err := env.Client.Get(env.Ctx, namespacedName, &pod) - return utils.IsPodActive(pod) && utils.IsPodReady(pod) && specs.IsPodStandby(pod), err - }, timeout).Should(BeTrue()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(utils.IsPodActive(pod)).To(BeTrue()) + g.Expect(utils.IsPodReady(pod)).To(BeTrue()) + g.Expect(specs.IsPodStandby(pod)).To(BeTrue()) + g.Expect(nodes.IsNodeReachable(env.Ctx, env.Client, isolatedNode)).To(BeTrue()) + }, timeout).Should(Succeed()) }) }) }) diff --git a/tests/e2e/webhook_test.go b/tests/e2e/webhook_test.go index 0931112fe3..2960c05006 100644 --- a/tests/e2e/webhook_test.go +++ b/tests/e2e/webhook_test.go @@ -71,7 +71,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper err := operator.ScaleOperatorDeployment(env.Ctx, env.Client, 1) Expect(err).ToNot(HaveOccurred()) - ready, err := operator.IsDeploymentReady(env.Ctx, env.Client) + ready, err := operator.IsReady(env.Ctx, env.Client, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ready).To(BeTrue()) }) @@ -124,7 +124,7 @@ var _ = Describe("webhook", Serial, Label(tests.LabelDisruptive, tests.LabelOper // Make sure the operator is intact and not crashing By("having a deployment for the operator in state ready", func() { - ready, err := operator.IsDeploymentReady(env.Ctx, env.Client) + ready, err := operator.IsReady(env.Ctx, env.Client, false) Expect(err).ShouldNot(HaveOccurred()) 
Expect(ready).To(BeTrue()) }) diff --git a/tests/utils/deployments/deployment.go b/tests/utils/deployments/deployment.go index 89031c8ae3..ae05f1e4d6 100644 --- a/tests/utils/deployments/deployment.go +++ b/tests/utils/deployments/deployment.go @@ -30,9 +30,32 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// isReady checks if a Deployment is ready -func isReady(deployment appsv1.Deployment) bool { - return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas +// IsReady checks if a Deployment is ready +func IsReady(deployment appsv1.Deployment) bool { + // If the deployment has been scaled down to 0 replicas, we consider it ready + if deployment.Status.Replicas == 0 && *deployment.Spec.Replicas == 0 { + return true + } + + if deployment.Status.ObservedGeneration < deployment.Generation || + deployment.Status.UpdatedReplicas < deployment.Status.Replicas || + deployment.Status.AvailableReplicas < deployment.Status.Replicas || + deployment.Status.ReadyReplicas < deployment.Status.Replicas { + return false + } + + if deployment.Status.Conditions == nil { + return false + } + for _, condition := range deployment.Status.Conditions { + if condition.Type == appsv1.DeploymentAvailable && condition.Status != "True" { + return false + } + if condition.Type == appsv1.DeploymentProgressing && condition.Status != "True" { + return false + } + } + return true } // WaitForReady waits for a Deployment to be ready @@ -50,7 +73,7 @@ func WaitForReady( }, deployment); err != nil { return err } - if !isReady(*deployment) { + if !IsReady(*deployment) { return fmt.Errorf( "deployment not ready. Namespace: %v, Name: %v", deployment.Namespace, diff --git a/tests/utils/nodes/nodes.go b/tests/utils/nodes/nodes.go index faabde4c13..c07a75186b 100644 --- a/tests/utils/nodes/nodes.go +++ b/tests/utils/nodes/nodes.go @@ -132,3 +132,32 @@ func DescribeKubernetesNodes(ctx context.Context, crudClient client.Client) (str } return report.String(), nil } + +// IsNodeReachable checks if a node is: +// 1. Ready +// 2. 
Not tainted with the unreachable taint +func IsNodeReachable( + ctx context.Context, + crudClient client.Client, + nodeName string, +) (bool, error) { + node := &v1.Node{} + err := crudClient.Get(ctx, client.ObjectKey{Name: nodeName}, node) + if err != nil { + return false, err + } + for _, condition := range node.Status.Conditions { + if condition.Type == v1.NodeReady && condition.Status == v1.ConditionFalse { + return false, nil + } + } + + // check that the node does not have the unreachable taint + for _, taint := range node.Spec.Taints { + if taint.Key == v1.TaintNodeUnreachable { + return false, nil + } + } + + return true, nil +} diff --git a/tests/utils/operator/operator.go b/tests/utils/operator/operator.go index f5f333eb26..fe333045dd 100644 --- a/tests/utils/operator/operator.go +++ b/tests/utils/operator/operator.go @@ -34,14 +34,13 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/kubernetes" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/deployments" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/pods" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/run" @@ -52,35 +51,21 @@ import ( func ReloadDeployment( ctx context.Context, crudClient client.Client, - kubeInterface kubernetes.Interface, timeoutSeconds uint, ) error { operatorPod, err := GetPod(ctx, crudClient) if err != nil { return err } - zero := int64(0) + err = crudClient.Delete(ctx, &operatorPod, - &client.DeleteOptions{GracePeriodSeconds: &zero}, + &client.DeleteOptions{GracePeriodSeconds: ptr.To(int64(1))}, ) if err != nil { return err } - err = retry.Do( - func() error { - ready, err := IsReady(ctx, crudClient, kubeInterface) - if err != nil { - return err - } - if !ready { - return fmt.Errorf("operator pod %v is not ready", operatorPod.Name) - } - return nil - }, - retry.Delay(time.Second), - retry.Attempts(timeoutSeconds), - ) - return err + // Wait for the operator pod to be ready + return WaitForReady(ctx, crudClient, timeoutSeconds, true) } // Dump logs the JSON for the deployment in an operator namespace, its pods and endpoints @@ -155,7 +140,7 @@ func GetDeployment(ctx context.Context, crudClient client.Client) (appsv1.Deploy func GetPod(ctx context.Context, crudClient client.Client) (corev1.Pod, error) { podList := &corev1.PodList{} - // This will work for newer version of the operator, which are using + // This will work for newer versions of the operator, which are using // our custom label if err := objects.List( ctx, crudClient, @@ -163,29 +148,6 @@ func GetPod(ctx context.Context, crudClient client.Client) (corev1.Pod, error) { return corev1.Pod{}, err } activePods := utils.FilterActivePods(podList.Items) - switch { - case len(activePods) > 1: - err := fmt.Errorf("number of running operator pods greater than 1: %v pods running", len(activePods)) - return corev1.Pod{}, err - - case len(activePods) == 1: - return activePods[0], nil - } - - operatorNamespace, err := NamespaceName(ctx, crudClient) - if err != nil { - return corev1.Pod{}, err - } - - // This will work for older version of the operator, which are using - // the default label from kube-builder - if 
err := objects.List( - ctx, crudClient, podList, - client.MatchingLabels{"control-plane": "controller-manager"}, - client.InNamespace(operatorNamespace)); err != nil { - return corev1.Pod{}, err - } - activePods = utils.FilterActivePods(podList.Items) if len(activePods) != 1 { err := fmt.Errorf("number of running operator different than 1: %v pods running", len(activePods)) return corev1.Pod{}, err @@ -207,23 +169,26 @@ func NamespaceName(ctx context.Context, crudClient client.Client) (string, error func IsReady( ctx context.Context, crudClient client.Client, - kubeInterface kubernetes.Interface, + checkWebhook bool, ) (bool, error) { - pod, err := GetPod(ctx, crudClient) - if err != nil { - return false, err + if ready, err := isDeploymentReady(ctx, crudClient); err != nil || !ready { + return ready, err } - isPodReady := utils.IsPodReady(pod) - if !isPodReady { - return false, err + // If the operator is not managing webhooks, we don't need to check. Exit early + if !checkWebhook { + return true, nil } - namespace := pod.Namespace + deploy, err := GetDeployment(ctx, crudClient) + if err != nil { + return false, err + } + namespace := deploy.GetNamespace() // Detect if we are running under OLM var webhookManagedByOLM bool - for _, envVar := range pod.Spec.Containers[0].Env { + for _, envVar := range deploy.Spec.Template.Spec.Containers[0].Env { if envVar.Name == "WEBHOOK_CERT_DIR" { webhookManagedByOLM = true } @@ -231,58 +196,52 @@ func IsReady( // If the operator is managing certificates for webhooks, check that the setup is completed if !webhookManagedByOLM { - err = checkWebhookReady(ctx, crudClient, kubeInterface, namespace) + err = checkWebhookSetup(ctx, crudClient, namespace) if err != nil { return false, err } } - // Dry run object creation to check that webhook Service is correctly running - testCluster := &apiv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "readiness-check-" + rand.String(5), - Namespace: "default", - }, - Spec: apiv1.ClusterSpec{ - Instances: 3, - StorageConfiguration: apiv1.StorageConfiguration{ - Size: "1Gi", - }, + return isWebhookWorking(ctx, crudClient) +} + +// WaitForReady waits for the operator deployment to be ready. +// If checkWebhook is true, it will also check that the webhook is replying +func WaitForReady( + ctx context.Context, + crudClient client.Client, + timeoutSeconds uint, + checkWebhook bool, +) error { + return retry.Do( + func() error { + ready, err := IsReady(ctx, crudClient, checkWebhook) + if err != nil || !ready { + return fmt.Errorf("operator deployment is not ready") + } + return nil }, - } - _, err = objects.Create( - ctx, - crudClient, - testCluster, - &client.CreateOptions{DryRun: []string{metav1.DryRunAll}}, + retry.Delay(time.Second), + retry.Attempts(timeoutSeconds), ) - if err != nil { - return false, err - } - - return true, err } -// IsDeploymentReady returns true if the operator deployment has the expected number +// isDeploymentReady returns true if the operator deployment has the expected number // of ready pods. 
// It returns an error if there was a problem getting the operator deployment -func IsDeploymentReady(ctx context.Context, crudClient client.Client) (bool, error) { +func isDeploymentReady(ctx context.Context, crudClient client.Client) (bool, error) { operatorDeployment, err := GetDeployment(ctx, crudClient) if err != nil { return false, err } - if operatorDeployment.Spec.Replicas != nil && - operatorDeployment.Status.ReadyReplicas != *operatorDeployment.Spec.Replicas { - return false, fmt.Errorf("deployment not ready %v of %v ready", - operatorDeployment.Status.ReadyReplicas, operatorDeployment.Status.ReadyReplicas) - } - - return true, nil + return deployments.IsReady(operatorDeployment), nil } -// ScaleOperatorDeployment will scale the operator to n replicas and return error in case of failure -func ScaleOperatorDeployment(ctx context.Context, crudClient client.Client, replicas int32) error { +// ScaleOperatorDeployment will scale the operator to n replicas and return an error in case of failure +func ScaleOperatorDeployment( + ctx context.Context, crudClient client.Client, replicas int32, +) error { operatorDeployment, err := GetDeployment(ctx, crudClient) if err != nil { return err @@ -291,20 +250,13 @@ func ScaleOperatorDeployment(ctx context.Context, crudClient client.Client, repl updatedOperatorDeployment := *operatorDeployment.DeepCopy() updatedOperatorDeployment.Spec.Replicas = ptr.To(replicas) - // Scale down operator deployment to zero replicas err = crudClient.Patch(ctx, &updatedOperatorDeployment, client.MergeFrom(&operatorDeployment)) if err != nil { return err } - return retry.Do( - func() error { - _, err := IsDeploymentReady(ctx, crudClient) - return err - }, - retry.Delay(time.Second), - retry.Attempts(120), - ) + // Wait for the operator deployment to be ready + return WaitForReady(ctx, crudClient, 120, replicas > 0) } // PodRenamed checks if the operator pod was renamed diff --git a/tests/utils/operator/webhooks.go b/tests/utils/operator/webhooks.go index 351606c58c..0e4ece0c95 100644 --- a/tests/utils/operator/webhooks.go +++ b/tests/utils/operator/webhooks.go @@ -26,11 +26,13 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/objects" ) @@ -75,16 +77,16 @@ func UpdateMutatingWebhookConf( } // getCNPGsValidatingWebhookConf get the ValidatingWebhook linked to the operator -func getCNPGsValidatingWebhookConf(kubeInterface kubernetes.Interface) ( +func getCNPGsValidatingWebhookConf( + ctx context.Context, + crudClient client.Client, +) ( *admissionregistrationv1.ValidatingWebhookConfiguration, error, ) { - ctx := context.Background() - validatingWebhookConfig, err := kubeInterface.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get( - ctx, controller.ValidatingWebhookConfigurationName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return validatingWebhookConfig, nil + validatingWebhookConf := &admissionregistrationv1.ValidatingWebhookConfiguration{} + err := crudClient.Get(ctx, types.NamespacedName{Name: controller.ValidatingWebhookConfigurationName}, + validatingWebhookConf) + return validatingWebhookConf, err } // 
GetValidatingWebhookByName get ValidatingWebhook by the name of one @@ -126,11 +128,10 @@ func UpdateValidatingWebhookConf( return nil } -// checkWebhookReady ensures that the operator has finished the webhook setup. -func checkWebhookReady( +// checkWebhookSetup ensures that the operator has finished the webhook setup. +func checkWebhookSetup( ctx context.Context, crudClient client.Client, - kubeInterface kubernetes.Interface, namespace string, ) error { // Check CA @@ -146,7 +147,7 @@ func checkWebhookReady( ca := secret.Data["tls.crt"] - mutatingWebhookConfig, err := getCNPGsMutatingWebhookConf(ctx, kubeInterface) + mutatingWebhookConfig, err := getCNPGsMutatingWebhookConf(ctx, crudClient) if err != nil { return err } @@ -158,7 +159,7 @@ func checkWebhookReady( } } - validatingWebhookConfig, err := getCNPGsValidatingWebhookConf(kubeInterface) + validatingWebhookConfig, err := getCNPGsValidatingWebhookConf(ctx, crudClient) if err != nil { return err } @@ -176,11 +177,38 @@ func checkWebhookReady( // getCNPGsMutatingWebhookConf get the MutatingWebhook linked to the operator func getCNPGsMutatingWebhookConf( ctx context.Context, - kubeInterface kubernetes.Interface, + crudClient client.Client, ) ( *admissionregistrationv1.MutatingWebhookConfiguration, error, ) { - return kubeInterface.AdmissionregistrationV1(). - MutatingWebhookConfigurations(). - Get(ctx, controller.MutatingWebhookConfigurationName, metav1.GetOptions{}) + mutatingWebhookConfiguration := &admissionregistrationv1.MutatingWebhookConfiguration{} + err := crudClient.Get(ctx, types.NamespacedName{Name: controller.MutatingWebhookConfigurationName}, + mutatingWebhookConfiguration) + return mutatingWebhookConfiguration, err +} + +// isWebhookWorking checks if the webhook denies an invalid request +func isWebhookWorking( + ctx context.Context, + crudClient client.Client, +) (bool, error) { + invalidCluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "invalid"}, + Spec: apiv1.ClusterSpec{Instances: 1}, + } + _, err := objects.Create( + ctx, + crudClient, + invalidCluster, + &client.CreateOptions{DryRun: []string{metav1.DryRunAll}}, + ) + // If the error is not an invalid error, return false + if !errors.IsInvalid(err) { + return false, fmt.Errorf("expected invalid error, got: %v", err) + } + // If the error doesn't contain the expected message, return false + if !bytes.Contains([]byte(err.Error()), []byte("spec.storage.size")) { + return false, fmt.Errorf("expected error to contain 'spec.storage.size', got: %v", err) + } + return true, nil }
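As an aside before the next patch: the dry-run rejection pattern behind isWebhookWorking is easy to reproduce outside the test utilities. A minimal sketch under stated assumptions (a reachable cluster with the CloudNativePG CRDs installed, the conventional kubebuilder-style AddToScheme in the api/v1 package, a made-up object name, and panics standing in for real error handling):

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = apiv1.AddToScheme(scheme)

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// An intentionally invalid Cluster: no storage size is set, so the
	// validating webhook should reject it with an Invalid status error.
	invalid := &apiv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "webhook-probe"},
		Spec:       apiv1.ClusterSpec{Instances: 1},
	}

	// Server-side dry run: nothing is persisted, but the request still
	// traverses the admission chain, so a rejection proves the webhook
	// service is up and answering.
	err = c.Create(context.Background(), invalid, client.DryRunAll)
	if apierrors.IsInvalid(err) {
		fmt.Println("webhook is validating requests:", err)
		return
	}
	fmt.Println("unexpected outcome, webhook may not be ready:", err)
}

Because the request is submitted with DryRunAll, nothing is ever created, even when the webhook is down; a rejection carrying an Invalid status is the signal that the admission chain is wired up.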
From 4757415b1eabdfa9f9680721001dfd8fc3889cd6 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 26 May 2025 14:26:16 +0200 Subject: [PATCH 611/836] feat(plugin): remove 386 and arm architectures from the build (#7648) The 386 and arm v5/6/7 architectures are rarely used; dropping them halves the number of os/arch combinations we build the plugin for. Closes #7564 Signed-off-by: Jonathan Gonzalez V. --- .goreleaser.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.goreleaser.yml b/.goreleaser.yml index 3dad477e41..1b6e40c5b8 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -71,9 +71,7 @@ builds: - windows goarch: - amd64 - - 386 - arm64 - - arm - ppc64le - s390x goarm: - 5 - 6 - 7 ignore: - - goos: darwin - goarch: 386 - goos: windows goarch: ppc64le - goos: windows @@ -93,7 +89,6 @@ archives: kubectl-cnpg_{{ .Version }}_ {{- .Os }}_ {{- if eq .Arch "amd64" }}x86_64 - {{- else if eq .Arch "386" }}i386 {{- else }}{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ end }} ids: - kubectl-cnpg @@ -104,7 +99,6 @@ nfpms: kubectl-cnpg_{{ .Version }}_ {{- .Os }}_ {{- if eq .Arch "amd64" }}x86_64 - {{- else if eq .Arch "386" }}i386 {{- else }}{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ end }} homepage: https://github.com/cloudnative-pg/cloudnative-pg bindir: /usr/local/bin From f65cde38a68125382e25c4f8841a04504023ae11 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 26 May 2025 17:32:47 +0200 Subject: [PATCH 612/836] ci: add missing permissions for the catalog creation (#7652) The bundle and catalog for OLM weren't being created due to the lack of permission to push to the registry. Closes #7630 Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/continuous-delivery.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index efe79f0789..79ea3d4cc4 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1877,6 +1877,9 @@ jobs: max-parallel: 6 matrix: ${{ fromJSON(needs.generate-jobs.outputs.openshiftMatrix) }} runs-on: ubuntu-24.04 + permissions: + contents: read + packages: write env: # TEST_DEPTH determines the maximum test level the suite should be running TEST_DEPTH: ${{ needs.evaluate_options.outputs.test_level }} From 8f702e0ee3345c1da8c1d725f34c9791ccbfa718 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 28 May 2025 11:47:07 +0200 Subject: [PATCH 613/836] test(e2e): replace `corev1.Endpoints` with `discoveryv1.EndpointSlice` (#7544) The corev1.Endpoints API has been deprecated in Kubernetes 1.33 and replaced by discoveryv1.EndpointSlice. Closes #7543 Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- tests/e2e/asserts_test.go | 42 ++++++++++++--------------- tests/e2e/fastswitchover_test.go | 21 ++++---------- tests/e2e/rolling_update_test.go | 23 ++++----------- tests/utils/endpoints.go | 44 +++++++++++++++++++++++++---- tests/utils/namespaces/namespace.go | 9 +++--- 5 files changed, 74 insertions(+), 65 deletions(-) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 45acd2428d..85bbe3d7d3 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -35,6 +35,7 @@ import ( "github.com/thoas/go-funk" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" @@ -1229,24 +1230,15 @@ func AssertFastFailOver( // Node 1 should be the primary, so the -rw service should // point there. We verify this.
By("having the current primary on node1", func() { - endpointName := clusterName + "-rw" - endpoint := &corev1.Endpoints{} - endpointNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: endpointName, - } - podName := clusterName + "-1" + rwServiceName := clusterName + "-rw" + endpointSlice, err := testsUtils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, rwServiceName) + Expect(err).ToNot(HaveOccurred()) + pod := &corev1.Pod{} - podNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: podName, - } - err = env.Client.Get(env.Ctx, endpointNamespacedName, - endpoint) + podName := clusterName + "-1" + err = env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: podName}, pod) Expect(err).ToNot(HaveOccurred()) - err = env.Client.Get(env.Ctx, podNamespacedName, pod) - Expect(testsUtils.FirstEndpointIP(endpoint), err).To( - BeEquivalentTo(pod.Status.PodIP)) + Expect(testsUtils.FirstEndpointSliceIP(endpointSlice)).To(BeEquivalentTo(pod.Status.PodIP)) }) By("preparing the db for the test scenario", func() { @@ -2220,13 +2212,13 @@ func assertPGBouncerEndpointsContainsPodsIP( poolerYamlFilePath string, expectedPodCount int, ) { - var pgBouncerPods []*corev1.Pod - endpoint := &corev1.Endpoints{} - endpointName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) + poolerServiceName, err := yaml.GetResourceNameFromYAML(env.Scheme, poolerYamlFilePath) Expect(err).ToNot(HaveOccurred()) + endpointSlice := &discoveryv1.EndpointSlice{} Eventually(func(g Gomega) { - err := env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: endpointName}, endpoint) + var err error + endpointSlice, err = testsUtils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, poolerServiceName) g.Expect(err).ToNot(HaveOccurred()) }).Should(Succeed()) @@ -2236,18 +2228,20 @@ func assertPGBouncerEndpointsContainsPodsIP( err = env.Client.List(env.Ctx, podList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{utils.PgbouncerNameLabel: poolerName}) Expect(err).ToNot(HaveOccurred()) - Expect(endpoint.Subsets).ToNot(BeEmpty()) + Expect(endpointSlice.Endpoints).ToNot(BeEmpty()) - for _, ip := range endpoint.Subsets[0].Addresses { + var pgBouncerPods []*corev1.Pod + for _, endpoint := range endpointSlice.Endpoints { + ip := endpoint.Addresses[0] for podIndex, pod := range podList.Items { - if pod.Status.PodIP == ip.IP { + if pod.Status.PodIP == ip { pgBouncerPods = append(pgBouncerPods, &podList.Items[podIndex]) continue } } } - Expect(pgBouncerPods).Should(HaveLen(expectedPodCount), "Pod length or IP mismatch in ep") + Expect(pgBouncerPods).Should(HaveLen(expectedPodCount), "Pod length or IP mismatch in endpoint") } // assertPGBouncerHasServiceNameInsideHostParameter makes sure that the service name is contained inside the host file diff --git a/tests/e2e/fastswitchover_test.go b/tests/e2e/fastswitchover_test.go index e8a478e646..6a7909eb6a 100644 --- a/tests/e2e/fastswitchover_test.go +++ b/tests/e2e/fastswitchover_test.go @@ -115,24 +115,15 @@ func assertFastSwitchover(namespace, sampleFile, clusterName, webTestFile, webTe // Node 1 should be the primary, so the -rw service should // point there. We verify this. 
By("having the current primary on node1", func() { - endpointName := clusterName + "-rw" - endpoint := &corev1.Endpoints{} - endpointNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: endpointName, - } + rwServiceName := clusterName + "-rw" + endpointSlice, err := utils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, rwServiceName) + Expect(err).ToNot(HaveOccurred()) + oldPrimary = clusterName + "-1" pod := &corev1.Pod{} - podNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: oldPrimary, - } - err := env.Client.Get(env.Ctx, endpointNamespacedName, - endpoint) + err = env.Client.Get(env.Ctx, types.NamespacedName{Namespace: namespace, Name: oldPrimary}, pod) Expect(err).ToNot(HaveOccurred()) - err = env.Client.Get(env.Ctx, podNamespacedName, pod) - Expect(utils.FirstEndpointIP(endpoint), err).To( - BeEquivalentTo(pod.Status.PodIP)) + Expect(utils.FirstEndpointSliceIP(endpointSlice)).To(BeEquivalentTo(pod.Status.PodIP)) }) By("preparing the db for the test scenario", func() { // Create the table used by the scenario diff --git a/tests/e2e/rolling_update_test.go b/tests/e2e/rolling_update_test.go index 174591400f..02370589e1 100644 --- a/tests/e2e/rolling_update_test.go +++ b/tests/e2e/rolling_update_test.go @@ -227,38 +227,27 @@ var _ = Describe("Rolling updates", Label(tests.LabelPostgresConfiguration), fun Expect(err).ToNot(HaveOccurred()) endpointName := clusterName + "-rw" - endpointNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: endpointName, - } // we give 10 seconds to the apiserver to update the endpoint timeout := 10 Eventually(func() (string, error) { - endpoint := &corev1.Endpoints{} - err := env.Client.Get(env.Ctx, endpointNamespacedName, endpoint) - return testsUtils.FirstEndpointIP(endpoint), err + endpointSlice, err := testsUtils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, endpointName) + return testsUtils.FirstEndpointSliceIP(endpointSlice), err }, timeout).Should(BeEquivalentTo(currentPrimaryPod.Status.PodIP)) } // Verify that the IPs of the podutils match the ones in the -r endpoint and // that the amount of podutils is the expected one AssertReadyEndpoint := func(namespace string, clusterName string, expectedEndpoints int) { - endpointName := clusterName + "-r" - endpoint := &corev1.Endpoints{} - endpointNamespacedName := types.NamespacedName{ - Namespace: namespace, - Name: endpointName, - } - err := env.Client.Get(env.Ctx, endpointNamespacedName, - endpoint) + readServiceName := clusterName + "-r" + endpointSlice, err := testsUtils.GetEndpointSliceByServiceName(env.Ctx, env.Client, namespace, readServiceName) Expect(err).ToNot(HaveOccurred()) podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) Expect(expectedEndpoints, err).To(BeEquivalentTo(len(podList.Items))) matchingIP := 0 for _, pod := range podList.Items { ip := pod.Status.PodIP - for _, addr := range endpoint.Subsets[0].Addresses { - if ip == addr.IP { + for _, endpoint := range endpointSlice.Endpoints { + if ip == endpoint.Addresses[0] { matchingIP++ } } diff --git a/tests/utils/endpoints.go b/tests/utils/endpoints.go index 2773ecfbfd..66eabd4849 100644 --- a/tests/utils/endpoints.go +++ b/tests/utils/endpoints.go @@ -19,15 +19,49 @@ SPDX-License-Identifier: Apache-2.0 package utils -import corev1 "k8s.io/api/core/v1" +import ( + "context" + "fmt" -// FirstEndpointIP returns the IP of first Address in the Endpoint -func FirstEndpointIP(endpoint *corev1.Endpoints) string { + discoveryv1 
"k8s.io/api/discovery/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// FirstEndpointSliceIP returns the IP of the first Address in the EndpointSlice +func FirstEndpointSliceIP(endpoint *discoveryv1.EndpointSlice) string { if endpoint == nil { return "" } - if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { + if len(endpoint.Endpoints) == 0 || len(endpoint.Endpoints[0].Addresses) == 0 { return "" } - return endpoint.Subsets[0].Addresses[0].IP + return endpoint.Endpoints[0].Addresses[0] +} + +// GetEndpointSliceByServiceName returns the EndpointSlice for a given service name in a given namespace +func GetEndpointSliceByServiceName( + ctx context.Context, + crudClient client.Client, + namespace, serviceName string, +) (*discoveryv1.EndpointSlice, error) { + endpointSliceList := &discoveryv1.EndpointSliceList{} + + if err := crudClient.List( + ctx, + endpointSliceList, + client.InNamespace(namespace), + client.MatchingLabels{"kubernetes.io/service-name": serviceName}, + ); err != nil { + return nil, err + } + + if len(endpointSliceList.Items) == 0 { + return nil, fmt.Errorf("no endpointslice found for service %s in namespace %s", serviceName, namespace) + } + + if len(endpointSliceList.Items) > 1 { + return nil, fmt.Errorf("multiple endpointslice found for service %s in namespace %s", serviceName, namespace) + } + + return &endpointSliceList.Items[0], nil } diff --git a/tests/utils/namespaces/namespace.go b/tests/utils/namespaces/namespace.go index b2d33756b3..8fabffcb2f 100644 --- a/tests/utils/namespaces/namespace.go +++ b/tests/utils/namespaces/namespace.go @@ -36,6 +36,7 @@ import ( "github.com/onsi/ginkgo/v2" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" v1 "k8s.io/api/events/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -318,10 +319,10 @@ func DumpNamespaceObjects( Namespace: namespace, Name: cluster.Name + suffix, } - endpoint := &corev1.Endpoints{} - _ = crudClient.Get(ctx, namespacedName, endpoint) - out, _ := json.MarshalIndent(endpoint, "", " ") - _, _ = fmt.Fprintf(w, "Dumping %v/%v endpoint\n", namespace, endpoint.Name) + endpointSlice := &discoveryv1.EndpointSlice{} + _ = crudClient.Get(ctx, namespacedName, endpointSlice) + out, _ := json.MarshalIndent(endpointSlice, "", " ") + _, _ = fmt.Fprintf(w, "Dumping %v/%v endpointSlice\n", namespace, endpointSlice.Name) _, _ = fmt.Fprintln(w, string(out)) } } From a9d81ac8223d3232ed0bf16cd24d50007209b262 Mon Sep 17 00:00:00 2001 From: Floor Drees Date: Wed, 28 May 2025 12:03:24 +0200 Subject: [PATCH 614/836] Rewording CONTRIBUTING file (#7615) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reworded the contributing file slightly for grammar and clarity 🙏 Signed-off-by: Floor Drees --- CONTRIBUTING.md | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cb39b99447..39738a451f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,23 +1,21 @@ # Contributing to CloudNativePG -Welcome! We are glad that you want to contribute to our CloudNativePG project! 💖 +Welcome! We are glad that you want to contribute to the CloudNativePG project! 
💖 -As you get started, you are in the best position to give us feedbacks on areas of -our project that we need help with, including: +To get started, here are some areas the project could really use some help with: * Problems found while setting up the development environment * Gaps in our documentation -* Bugs in our Github actions +* Bugs in our GitHub actions * Promotion of PostgreSQL on Kubernetes with our operator First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md). The guidelines below are a starting point. We don't want to limit your -creativity, passion, and initiative. If you think there's a better way, please -feel free to bring it up in a Github discussion, or open a pull request. We're -certain there are always better ways to do things, we just need to start some -constructive dialogue! +creativity, passion, and initiative. If you think there are other things +you can contribute, please feel free to bring it up in a GitHub Issue, +or open a Pull Request! ## Ways to contribute @@ -28,10 +26,10 @@ We welcome many types of contributions including: * Bug fixes * [Documentation](docs/README.md) * Issue Triage -* Answering questions on Slack or Github Discussions -* [Website](https://github.com/cloudnative-pg/cloudnative-pg.github.io) +* Answering questions on [Slack](README.md#communications) or GitHub Discussions +* The [website](https://github.com/cloudnative-pg/cloudnative-pg.github.io) * Communications / Social Media / Blog Posts -* Events participation +* Advocacy at Events (let us know when your talk about CloudNativePG is accepted!) * Release management For development contributions, please refer to the separate section called @@ -40,16 +38,16 @@ For development contributions, please refer to the separate section called ## Ask for Help The best way to reach us with a question when contributing is to drop a line in -our [Slack channel](README.md#communications), or start a new Github discussion. +our [Slack channel](README.md#communications), or start a new GitHub discussion. ## Raising Issues -When raising issues, please specify the following: +When raising [Issues](https://github.com/cloudnative-pg/cloudnative-pg/issues), please specify the following: -- Setup details as specified in the issue template -- A scenario where the issue occurred (with details on how to reproduce it) -- Errors and log messages that are displayed by the involved software -- Any other detail that might be useful +* Setup details as specified in the Issue template +* A scenario where the issue occurred (with details on how to reproduce it) +* Errors and log messages that are displayed by the involved software +* Any other detail that might be useful If you are trying to report a vulnerability, please refer to the [security policy](SECURITY.md). From 9369dc56b9c39ea749e015497eeb2f94afa47ddb Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 00:18:23 +0200 Subject: [PATCH 615/836] fix(deps): update kubernetes packages to v0.33.1 (main) (#7485) This PR contains the following updates: https://github.com/kubernetes/api `v0.32.3` -> `v0.33.1` https://github.com/kubernetes/apiextensions-apiserver `v0.32.3` -> `v0.33.1` https://github.com/kubernetes/apimachinery `v0.32.3` -> `v0.33.1` https://github.com/kubernetes/cli-runtime `v0.32.3` -> `v0.33.1` https://github.com/kubernetes/client-go `v0.32.3` -> `v0.33.1` Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .../bases/postgresql.cnpg.io_clusters.yaml | 28 +++------ .../crd/bases/postgresql.cnpg.io_poolers.yaml | 63 ++++++++++--------- go.mod | 19 +++--- go.sum | 42 +++++++------ 4 files changed, 75 insertions(+), 77 deletions(-) diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index d6d25fc4ae..d9aa43c9d4 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -146,7 +146,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -161,7 +160,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -327,7 +325,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -342,7 +339,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -505,7 +501,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -520,7 +515,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -686,7 +680,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -701,7 +694,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -2252,6 +2244,7 @@ spec: sources to the pods to be used by Env items: description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets properties: configMapRef: description: The ConfigMap to select from @@ -2271,8 +2264,8 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend to each key in - the ConfigMap. Must be a C_IDENTIFIER. + description: Optional text to prepend to the name of each environment + variable. Must be a C_IDENTIFIER. type: string secretRef: description: The Secret to select from @@ -3638,13 +3631,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is a beta field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -5623,7 +5615,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -5634,7 +5625,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index f6a927772c..97abe1054e 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -701,13 +701,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is a beta field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. 
type: string type: description: |- @@ -1061,7 +1060,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1076,7 +1074,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1244,7 +1241,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1259,7 +1255,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1425,7 +1420,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1440,7 +1434,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1608,7 +1601,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1623,7 +1615,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1885,7 +1876,7 @@ spec: Cannot be updated. items: description: EnvFromSource represents the source of - a set of ConfigMaps + a set of ConfigMaps or Secrets properties: configMapRef: description: The ConfigMap to select from @@ -1906,8 +1897,8 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. 
type: string secretRef: description: The Secret to select from @@ -2173,6 +2164,12 @@ spec: - port type: object type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string type: object livenessProbe: description: |- @@ -3399,7 +3396,7 @@ spec: Cannot be updated. items: description: EnvFromSource represents the source of - a set of ConfigMaps + a set of ConfigMaps or Secrets properties: configMapRef: description: The ConfigMap to select from @@ -3420,8 +3417,8 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. type: string secretRef: description: The Secret to select from @@ -3684,6 +3681,12 @@ spec: - port type: object type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string type: object livenessProbe: description: Probes are not allowed for ephemeral containers. @@ -4731,7 +4734,7 @@ spec: Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of - of that value or the sum of the normal containers. Limits are applied to init containers + that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. @@ -4906,7 +4909,7 @@ spec: Cannot be updated. items: description: EnvFromSource represents the source of - a set of ConfigMaps + a set of ConfigMaps or Secrets properties: configMapRef: description: The ConfigMap to select from @@ -4927,8 +4930,8 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: An optional identifier to prepend - to each key in the ConfigMap. Must be a C_IDENTIFIER. + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. type: string secretRef: description: The Secret to select from @@ -5194,6 +5197,12 @@ spec: - port type: object type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string type: object livenessProbe: description: |- @@ -6908,7 +6917,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -6919,7 +6927,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -7898,7 +7905,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: diff --git a/go.mod b/go.mod index 67a61e0b79..710dbcb70a 100644 --- a/go.mod +++ b/go.mod @@ -38,11 +38,11 @@ require ( golang.org/x/term v0.32.0 google.golang.org/grpc v1.72.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.3 - k8s.io/apiextensions-apiserver v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/cli-runtime v0.32.3 - k8s.io/client-go v0.32.3 + k8s.io/api v0.33.1 + k8s.io/apiextensions-apiserver v0.33.1 + k8s.io/apimachinery v0.33.1 + k8s.io/cli-runtime v0.33.1 + k8s.io/client-go v0.33.1 k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/yaml v1.4.0 @@ -64,14 +64,12 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -112,9 +110,10 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kustomize/api v0.19.0 // indirect sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index 931f79b1a2..d209e8919b 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,8 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= 
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0= @@ -151,9 +151,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 h1:DP+PUNVOc+Bkft8a4QunLzaZ0RspWuD3tBbcPHr2PeE= @@ -168,8 +167,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -283,20 +282,20 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= -k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= -k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod 
h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/cli-runtime v0.32.3 h1:khLF2ivU2T6Q77H97atx3REY9tXiA3OLOjWJxUrdvss= -k8s.io/cli-runtime v0.32.3/go.mod h1:vZT6dZq7mZAca53rwUfdFSZjdtLyfF61mkf/8q+Xjak= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= +k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= +k8s.io/apiextensions-apiserver v0.33.1 h1:N7ccbSlRN6I2QBcXevB73PixX2dQNIW0ZRuguEE91zI= +k8s.io/apiextensions-apiserver v0.33.1/go.mod h1:uNQ52z1A1Gu75QSa+pFK5bcXc4hq7lpOXbweZgi4dqA= +k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/cli-runtime v0.33.1 h1:TvpjEtF71ViFmPeYMj1baZMJR4iWUEplklsUQ7D3quA= +k8s.io/cli-runtime v0.33.1/go.mod h1:9dz5Q4Uh8io4OWCLiEf/217DXwqNgiTS/IOuza99VZE= +k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4= +k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 h1:jgJW5IePPXLGB8e/1wvd0Ich9QE97RvvF3a8J3fP/Lg= k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= @@ -307,7 +306,10 @@ sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= -sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= -sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= From 851e9583fee6b6e1c7aba8f3c7a317d7381ef43c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 00:43:31 +0200 Subject: [PATCH 616/836] chore(deps): update all non-major github action (main) (#7667) This PR contains the following updates: https://github.com/docker/bake-action 
`76f9fa3` -> `37816e7` https://github.com/docker/build-push-action `14487ce` -> `2634353` https://github.com/github/codeql-action `60168ef` -> `ff0a06e` https://github.com/kubernetes-sigs/kind `v0.27.0` -> `v0.29.0` https://github.com/rojopolis/spellcheck-github-actions `0.48.0` -> `0.49.0` https://github.com/sigstore/cosign-installer `d7d6bc7` -> `3454372` Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-delivery.yml | 8 ++++---- .github/workflows/continuous-integration.yml | 8 ++++---- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/release-publish.yml | 4 ++-- .github/workflows/snyk.yml | 4 ++-- .github/workflows/spellcheck.yml | 2 +- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b59f4d6125..75c3510bc1 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -75,7 +75,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 with: languages: "go" build-mode: manual @@ -92,6 +92,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 79ea3d4cc4..41d944eab5 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -38,7 +38,7 @@ permissions: read-all env: GOLANG_VERSION: "1.24.x" KUBEBUILDER_VERSION: "2.3.1" - KIND_VERSION: "v0.27.0" + KIND_VERSION: "v0.29.0" ROOK_VERSION: "v1.17.1" EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.1" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" @@ -373,7 +373,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/bake-action@76f9fa3a758507623da19f6092dc4089a7e61592 # v6 + uses: docker/bake-action@37816e747588cb137173af99ab33873600c46ea8 # v6 id: bake-push env: environment: "testing" @@ -389,7 +389,7 @@ jobs: - name: Install cosign if: env.SIGN_IMAGES == 'true' - uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3 + uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. 
@@ -458,7 +458,7 @@ jobs: # NOTE: we only fire this in TEST DEPTH = 4, as that is the level of the # upgrade test name: Build and push image for upgrade test - uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6 + uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6 id: build-prime if: | always() && !cancelled() && diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 440f79de8c..ef37b20554 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -21,7 +21,7 @@ env: GOLANG_VERSION: "1.24.x" GOLANGCI_LINT_VERSION: "v2.1.6" KUBEBUILDER_VERSION: "2.3.1" - KIND_VERSION: "v0.27.0" + KIND_VERSION: "v0.29.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" API_DOC_NAME: "cloudnative-pg.v1.md" SLACK_USERNAME: "cnpg-bot" @@ -556,7 +556,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/bake-action@76f9fa3a758507623da19f6092dc4089a7e61592 # v6 + uses: docker/bake-action@37816e747588cb137173af99ab33873600c46ea8 # v6 id: bake-push env: environment: "testing" @@ -614,7 +614,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork @@ -626,7 +626,7 @@ jobs: if: | env.SIGN_IMAGES == 'true' && env.PUSH == 'true' - uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3 + uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index dc92ed236c..425c5cb499 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 05cf6f1ecf..43fdd9e6f1 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -197,7 +197,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/bake-action@76f9fa3a758507623da19f6092dc4089a7e61592 # v6 + uses: docker/bake-action@37816e747588cb137173af99ab33873600c46ea8 # v6 id: bake-push env: environment: "production" @@ -213,7 +213,7 @@ jobs: targets: "default" - name: Install cosign - uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3 + uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 2633dc340a..d420805b62 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 with: sarif_file: snyk-test.sarif diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index ff7eb7dc6c..599ada9355 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -30,4 +30,4 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@23dc186319866e1de224f94fe1d31b72797aeec7 # 0.48.0 + uses: rojopolis/spellcheck-github-actions@584b2ae95998967a53af7fbfb7f5b15352c38748 # 0.49.0 From aa3e41d8bf912b16a5ed771ec51f9446b34be76c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 01:28:27 +0200 Subject: [PATCH 617/836] chore(deps): update backup test tools (main) (#7668) This PR contains the following updates: https://redirect.github.com/vmware-tanzu/velero `1.16.0` -> `1.16.1` https://redirect.github.com/vmware-tanzu/velero-plugin-for-aws `1.12.0` -> `1.12.1` Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/continuous-delivery.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 41d944eab5..46368a11e6 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1321,8 +1321,8 @@ jobs: name: Setup Velero uses: 
nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 env: - VELERO_VERSION: "v1.16.0" - VELERO_AWS_PLUGIN_VERSION: "v1.12.0" + VELERO_VERSION: "v1.16.1" + VELERO_AWS_PLUGIN_VERSION: "v1.12.1" with: timeout_minutes: 10 max_attempts: 3 From ccccd6587621bf953a00f72148ae8973e1eacf6b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 01:54:18 +0200 Subject: [PATCH 618/836] chore(deps): update dependency rook/rook to v1.17.2 (main) (#7672) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Update | Change | |---|---|---| | [rook/rook](https://redirect.github.com/rook/rook) | patch | `v1.17.1` -> `v1.17.2` | --- ### Release Notes
rook/rook (rook/rook)

### [`v1.17.2`](https://redirect.github.com/rook/rook/releases/tag/v1.17.2)

[Compare Source](https://redirect.github.com/rook/rook/compare/v1.17.1...v1.17.2)

### Improvements

Rook v1.17.2 is a patch release limited in scope and focusing on feature additions and bug fixes to the Ceph operator.

- block: Add more deletion conditions to blockpool and radosnamespace status ([#15817](https://redirect.github.com/rook/rook/issues/15817), [@travisn](https://redirect.github.com/travisn))
- object: fix uppercase serialization of fields in KafkaEndpointSpec ([#15815](https://redirect.github.com/rook/rook/issues/15815), [@jhoblitt](https://redirect.github.com/jhoblitt))
- cephfs: Update ceph-csi CephFS caps to include executable permission ([#15793](https://redirect.github.com/rook/rook/issues/15793), [@flx5](https://redirect.github.com/flx5))
- object: Change `CephObjectStore "foo" found` log level to debug ([#15829](https://redirect.github.com/rook/rook/issues/15829), [@jhoblitt](https://redirect.github.com/jhoblitt))
- rgw: Use pod name in ops log filename ([#15605](https://redirect.github.com/rook/rook/issues/15605), [@arttor](https://redirect.github.com/arttor))
- exporter: Add name to containerPort ([#15801](https://redirect.github.com/rook/rook/issues/15801), [@jrcichra](https://redirect.github.com/jrcichra))
- rbdmirror: Log message clarification with namespace ([#15798](https://redirect.github.com/rook/rook/issues/15798), [@subhamkrai](https://redirect.github.com/subhamkrai))
- osd: Fix osd disk cleanup for mpath setups ([#15761](https://redirect.github.com/rook/rook/issues/15761), [@sp98](https://redirect.github.com/sp98))
- ci: Add test support for latest K8s version 1.33 ([#15795](https://redirect.github.com/rook/rook/issues/15795), [@subhamkrai](https://redirect.github.com/subhamkrai))
---

### Configuration

📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined).

🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.

♻ **Rebasing**: Never, or you tick the rebase/retry checkbox.

🔕 **Ignore**: Close this PR and you won't be reminded about this update again.

---

- [ ] If you want to rebase/retry this PR, check this box

---

This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg).
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8ccadec256..70a95ad303 100644 --- a/Makefile +++ b/Makefile @@ -55,7 +55,7 @@ KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.17.3 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca GORELEASER_VERSION ?= v2.9.0 -SPELLCHECK_VERSION ?= 0.48.0 +SPELLCHECK_VERSION ?= 0.49.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 OPM_VERSION ?= v1.54.0 From b46fbaf206707c8d7f67ca86547d411d019dbe4e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 11:15:54 +0200 Subject: [PATCH 620/836] chore(deps): update operator framework (main) (#7686) --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 70a95ad303..d63a7eb810 100644 --- a/Makefile +++ b/Makefile @@ -58,8 +58,8 @@ GORELEASER_VERSION ?= v2.9.0 SPELLCHECK_VERSION ?= 0.49.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.39.2 -OPM_VERSION ?= v1.54.0 -PREFLIGHT_VERSION ?= 1.13.0 +OPM_VERSION ?= v1.55.0 +PREFLIGHT_VERSION ?= 1.13.1 OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 From 6cd4ce59e977e8aa4be322745b25996912e9360f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 16:13:24 +0200 Subject: [PATCH 621/836] chore(deps): update kindest/node docker tag to v1.33.1 (main) (#7685) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 1b9341a978..7217b30390 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -33,7 +33,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.33.0 +KIND_NODE_DEFAULT_VERSION=v1.33.1 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 0488a0fc62..eaa9f7fda3 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -27,7 +27,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.33.0 +KIND_NODE_DEFAULT_VERSION=v1.33.1 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.16.1 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.1 EXTERNAL_PROVISIONER_VERSION=v5.2.0 From f361b72a31340eda65ee2d7081235bfc9b388af0 Mon Sep 17 00:00:00 2001 From: Tudor Golubenco Date: Fri, 30 May 2025 13:45:07 +0200 Subject: [PATCH 622/836] docs: add Xata to `ADOPTERS.md` (#7701) Signed-off-by: Tudor Golubenco --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index d3767f1b57..78ab5bfcc5 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -64,3 +64,4 @@ This list is sorted in chronological order, based on the submission date. | [Bitnami](https://bitnami.com) | [@carrodher](https://github.com/carrodher) | 2025-03-04 | Bitnami provides CloudNativePG as part of its open-source [Helm charts catalog](https://github.com/bitnami/charts), enabling users to easily deploy PostgreSQL clusters on Kubernetes. 
Additionally, CloudNativePG is available through [Tanzu Application Catalog](https://www.vmware.com/products/app-platform/tanzu-application-catalog) and [Bitnami Premium](https://www.arrow.com/globalecs/na/vendors/bitnami-premium/), where customers can benefit from advanced security and compliance features such as VEX, SBOM, SLSA3, and CVE scanning. | | [Giant Swarm](https://www.giantswarm.io/) | [@stone-z](https://github.com/stone-z) | 2025-05-02 | Giant Swarm's full-service Kubernetes security and observability platforms are powered by PostgreSQL clusters delightfully managed with CloudNativePG. | | [DocumentDB Operator](https://github.com/microsoft/documentdb-kubernetes-operator) | [@xgerman](https://github.com/xgerman) | 2025-05-22 | The DocumentDB Kubernetes Operator is an open-source project to run and manage DocumentDB on Kubernetes. [DocumentDB](https://github.com/microsoft/documentdb) is the engine powering vCore-based [Azure Cosmos DB for MongoDB](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/). The operator uses CloudNativePG behind the scenes. | +| [Xata](https://xata.io) | [@tsg](https://github.com/tsg) | 2025-05-29 | Xata is a PostgreSQL platform offering instant database branching, separation of storage/compute, and PII anonymization. It uses CloudNativePG for the compute part. | \ No newline at end of file From 3c5a017e56d3cf1dccfda3761d24d19609b5a534 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 3 Jun 2025 11:08:01 +0200 Subject: [PATCH 623/836] feat: allow postgres executable name customization (#7640) The default PostgreSQL executable name can be changed by adding the `POSTGRES_NAME` environment variable to the instance container. Closes #7625 Signed-off-by: Armando Ruocco --- .../cmd/manager/instance/run/lifecycle/reaper.go | 2 +- pkg/management/postgres/instance.go | 16 ++++++++++++---- pkg/management/postgres/pidfile_test.go | 4 ++-- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/internal/cmd/manager/instance/run/lifecycle/reaper.go b/internal/cmd/manager/instance/run/lifecycle/reaper.go index 55d17106d3..7a4f47b4cd 100644 --- a/internal/cmd/manager/instance/run/lifecycle/reaper.go +++ b/internal/cmd/manager/instance/run/lifecycle/reaper.go @@ -102,7 +102,7 @@ func (z *PostgresOrphansReaper) handleSignal(contextLogger log.Logger) error { pidFile := path.Join(z.instance.PgData, postgres.PostgresqlPidFile) _, postMasterPid, _ := z.instance.GetPostmasterPidFromFile(pidFile) for _, p := range processes { - if p.PPid() == 1 && p.Executable() == "postgres" { + if p.PPid() == 1 && p.Executable() == postgres.GetPostgresExecutableName() { pid := p.Pid() if pid == postMasterPid { continue diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 8ea2efc439..6aa7f4b9ff 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -59,7 +59,6 @@ import ( ) const ( - postgresName = "postgres" pgCtlName = "pg_ctl" pgRewindName = "pg_rewind" pgBaseBackupName = "pg_basebackup" @@ -73,6 +72,15 @@ const ( pgPingNoAttempt = 3 // connection not attempted (bad params) ) +// GetPostgresExecutableName returns the name of the PostgreSQL executable +func GetPostgresExecutableName() string { + if name := os.Getenv("POSTGRES_NAME"); name != "" { + return name + } + + return "postgres" +} + // shutdownMode represent a way to request the postmaster shutdown type shutdownMode string @@ -670,7 +678,7 @@ func (instance *Instance) Reload(ctx context.Context) error { // Run this instance 
returning an OS process needed // to control the instance execution func (instance *Instance) Run() (*execlog.StreamingCmd, error) { - process, err := instance.CheckForExistingPostmaster(postgresName) + process, err := instance.CheckForExistingPostmaster(GetPostgresExecutableName()) if err != nil { return nil, err } @@ -703,11 +711,11 @@ func (instance *Instance) Run() (*execlog.StreamingCmd, error) { return nil, err } - postgresCmd := exec.Command(postgresName, options...) // #nosec + postgresCmd := exec.Command(GetPostgresExecutableName(), options...) // #nosec postgresCmd.Env = instance.Env compatibility.AddInstanceRunCommands(postgresCmd) - streamingCmd, err := execlog.RunStreamingNoWait(postgresCmd, postgresName) + streamingCmd, err := execlog.RunStreamingNoWait(postgresCmd, GetPostgresExecutableName()) if err != nil { return nil, err } diff --git a/pkg/management/postgres/pidfile_test.go b/pkg/management/postgres/pidfile_test.go index 271b00b243..460a61b13a 100644 --- a/pkg/management/postgres/pidfile_test.go +++ b/pkg/management/postgres/pidfile_test.go @@ -52,7 +52,7 @@ var _ = Describe("the detection of a postmaster process using the pid file", fun instance := NewInstance() instance.PgData = pgdata instance.SocketDirectory = socketDir - process, err := instance.CheckForExistingPostmaster(postgresName) + process, err := instance.CheckForExistingPostmaster(GetPostgresExecutableName()) Expect(err).ShouldNot(HaveOccurred()) Expect(process).To(BeNil()) }) @@ -70,7 +70,7 @@ var _ = Describe("the detection of a postmaster process using the pid file", fun err = os.WriteFile(filepath.Join(socketDir, ".s.PGSQL.5432.lock"), []byte("1234"), 0o400) Expect(err).ShouldNot(HaveOccurred()) - process, err := instance.CheckForExistingPostmaster(postgresName) + process, err := instance.CheckForExistingPostmaster(GetPostgresExecutableName()) Expect(err).ShouldNot(HaveOccurred()) Expect(process).To(BeNil()) From 17af8d46922e036a5a07931085ef8c511f71e9d6 Mon Sep 17 00:00:00 2001 From: Peggie Date: Tue, 3 Jun 2025 14:00:22 +0200 Subject: [PATCH 624/836] chore: refresh licenses directory (#7706) Refresh the licenses directory Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: license-updater --- .../github.com/golang/protobuf/proto/LICENSE | 28 ------------------- .../gofuzz => sigs.k8s.io/randfill}/LICENSE | 6 ++-- .../go-licenses/sigs.k8s.io/randfill/NOTICE | 24 ++++++++++++++++ 3 files changed, 27 insertions(+), 31 deletions(-) delete mode 100644 licenses/go-licenses/github.com/golang/protobuf/proto/LICENSE rename licenses/go-licenses/{github.com/google/gofuzz => sigs.k8s.io/randfill}/LICENSE (99%) create mode 100644 licenses/go-licenses/sigs.k8s.io/randfill/NOTICE diff --git a/licenses/go-licenses/github.com/golang/protobuf/proto/LICENSE b/licenses/go-licenses/github.com/golang/protobuf/proto/LICENSE deleted file mode 100644 index 0f646931a4..0000000000 --- a/licenses/go-licenses/github.com/golang/protobuf/proto/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/licenses/go-licenses/github.com/google/gofuzz/LICENSE b/licenses/go-licenses/sigs.k8s.io/randfill/LICENSE similarity index 99% rename from licenses/go-licenses/github.com/google/gofuzz/LICENSE rename to licenses/go-licenses/sigs.k8s.io/randfill/LICENSE index d645695673..9dd29274c3 100644 --- a/licenses/go-licenses/github.com/google/gofuzz/LICENSE +++ b/licenses/go-licenses/sigs.k8s.io/randfill/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -179,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -187,7 +186,8 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2014 The gofuzz Authors + Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/licenses/go-licenses/sigs.k8s.io/randfill/NOTICE b/licenses/go-licenses/sigs.k8s.io/randfill/NOTICE new file mode 100644 index 0000000000..6984e71f65 --- /dev/null +++ b/licenses/go-licenses/sigs.k8s.io/randfill/NOTICE @@ -0,0 +1,24 @@ +When donating the randfill project to the CNCF, we could not reach all the +gofuzz contributors to sign the CNCF CLA. As such, according to the CNCF rules +to donate a repository, we must add a NOTICE referencing section 7 of the CLA +with a list of developers who could not be reached. + +`7. 
Should You wish to submit work that is not Your original creation, You may +submit it to the Foundation separately from any Contribution, identifying the +complete details of its source and of any license or other restriction +(including, but not limited to, related patents, trademarks, and license +agreements) of which you are personally aware, and conspicuously marking the +work as "Submitted on behalf of a third-party: [named here]".` + +Submitted on behalf of a third-party: @dnephin (Daniel Nephin) +Submitted on behalf of a third-party: @AlekSi (Alexey Palazhchenko) +Submitted on behalf of a third-party: @bbigras (Bruno Bigras) +Submitted on behalf of a third-party: @samirkut (Samir) +Submitted on behalf of a third-party: @posener (Eyal Posener) +Submitted on behalf of a third-party: @Ashikpaul (Ashik Paul) +Submitted on behalf of a third-party: @kwongtailau (Kwongtai) +Submitted on behalf of a third-party: @ericcornelissen (Eric Cornelissen) +Submitted on behalf of a third-party: @eclipseo (Robert-André Mauchin) +Submitted on behalf of a third-party: @yanzhoupan (Andrew Pan) +Submitted on behalf of a third-party: @STRRL (Zhiqiang ZHOU) +Submitted on behalf of a third-party: @disconnect3d (Disconnect3d) From 4c68b961d6d3f850cb4ca9bb8d8b23d413bd09aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Thu, 5 Jun 2025 17:22:52 +0200 Subject: [PATCH 625/836] chore(e2e): update minio images (#7730) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #7729 Signed-off-by: Niccolò Fei --- tests/utils/minio/minio.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go index 2e1189f0ef..6b1c337d80 100644 --- a/tests/utils/minio/minio.go +++ b/tests/utils/minio/minio.go @@ -49,8 +49,8 @@ import ( ) const ( - minioImage = "minio/minio:RELEASE.2022-06-20T23-13-45Z" - minioClientImage = "minio/mc:RELEASE.2022-06-11T21-10-36Z" + minioImage = "minio/minio:RELEASE.2025-05-24T17-08-30Z" + minioClientImage = "minio/mc:RELEASE.2025-05-21T01-59-54Z" ) // Env contains all the information related or required by MinIO deployment and From 4c9049bfe5efd714375149e2248ed6abbd4e038a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 6 Jun 2025 09:39:29 +0200 Subject: [PATCH 626/836] fix(deps): update all non-major go dependencies (main) (#7689) This PR contains the following updates: https://redirect.github.com/go-logr/logr `v1.4.2` -> `v1.4.3` https://redirect.github.com/jackc/pgx `v5.7.4` -> `v5.7.5` https://redirect.github.com/grpc/grpc-go `v1.72.0` -> `v1.73.0` --- go.mod | 16 ++++++++-------- go.sum | 52 ++++++++++++++++++++++++++-------------------------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/go.mod b/go.mod index 710dbcb70a..ded35df29f 100644 --- a/go.mod +++ b/go.mod @@ -13,10 +13,10 @@ require ( github.com/cloudnative-pg/machinery v0.2.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 - github.com/jackc/pgx/v5 v5.7.4 + github.com/jackc/pgx/v5 v5.7.5 github.com/jackc/puddle/v2 v2.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0 @@ -36,7 +36,7 @@ require ( 
go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/term v0.32.0 - google.golang.org/grpc v1.72.0 + google.golang.org/grpc v1.73.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.1 k8s.io/apiextensions-apiserver v0.33.1 @@ -96,16 +96,16 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect - golang.org/x/crypto v0.36.0 // indirect + golang.org/x/crypto v0.37.0 // indirect golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.12.0 // indirect + golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/sync v0.13.0 // indirect golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index d209e8919b..2400587a09 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,8 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -91,8 +91,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg= -github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -198,16 +198,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -221,8 +221,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -231,13 +231,13 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 
v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= +golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -249,8 +249,8 @@ golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -265,10 +265,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod 
h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 6d32d1a2218382903a2fec29a2fa96928eb7f48f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 6 Jun 2025 10:14:29 +0200 Subject: [PATCH 627/836] chore(deps): update all non-major github action (main) (#7734) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [github/codeql-action](https://redirect.github.com/github/codeql-action) | action | digest | `ff0a06e` -> `fca7ace` | | [ossf/scorecard-action](https://redirect.github.com/ossf/scorecard-action) | action | patch | `v2.4.1` -> `v2.4.2` | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Release Notes
ossf/scorecard-action (ossf/scorecard-action)

### [`v2.4.2`](https://redirect.github.com/ossf/scorecard-action/releases/tag/v2.4.2)

[Compare Source](https://redirect.github.com/ossf/scorecard-action/compare/v2.4.1...v2.4.2)

#### What's Changed

This update bumps the Scorecard version to the v5.2.1 release. For a complete list of changes, please refer to the Scorecard [v5.2.0](https://redirect.github.com/ossf/scorecard/releases/tag/v5.2.0) and [v5.2.1](https://redirect.github.com/ossf/scorecard/releases/tag/v5.2.1) release notes.

**Full Changelog**: https://github.com/ossf/scorecard-action/compare/v2.4.1...v2.4.2
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 4 ++-- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 75c3510bc1..bef638971f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -75,7 +75,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 with: languages: "go" build-mode: manual @@ -92,6 +92,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index ef37b20554..2acad1b1e0 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -614,7 +614,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 425c5cb499..1b3ebae855 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -40,7 +40,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index d420805b62..0e835f40f3 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 with: sarif_file: snyk-test.sarif From acbe9462a1489af3ae872d19bac30ed4f3b46e3b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 6 Jun 2025 10:43:42 +0200 Subject: [PATCH 628/836] chore(deps): update operator framework to v1.40.0 (main) (#7740) --- Makefile | 2 +- config/olm-scorecard/patches/basic.config.yaml | 2 +- config/olm-scorecard/patches/olm.config.yaml | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index d63a7eb810..d07cfc227f 100644 --- a/Makefile +++ b/Makefile @@ -57,7 +57,7 @@ GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca GORELEASER_VERSION ?= v2.9.0 SPELLCHECK_VERSION ?= 0.49.0 WOKE_VERSION ?= 0.19.0 -OPERATOR_SDK_VERSION ?= v1.39.2 +OPERATOR_SDK_VERSION ?= v1.40.0 OPM_VERSION ?= v1.55.0 PREFLIGHT_VERSION ?= 1.13.1 OPENSHIFT_VERSIONS ?= v4.12-v4.19 diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml index cea781d2ee..f7e0ed492b 100644 --- a/config/olm-scorecard/patches/basic.config.yaml +++ b/config/olm-scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.39.2 + image: quay.io/operator-framework/scorecard-test:v1.40.0 labels: suite: basic test: basic-check-spec-test diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml index fdde08eb9a..85895c9e93 100644 --- a/config/olm-scorecard/patches/olm.config.yaml +++ b/config/olm-scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.39.2 + image: quay.io/operator-framework/scorecard-test:v1.40.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.39.2 + image: quay.io/operator-framework/scorecard-test:v1.40.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.39.2 + image: quay.io/operator-framework/scorecard-test:v1.40.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: 
quay.io/operator-framework/scorecard-test:v1.39.2 + image: quay.io/operator-framework/scorecard-test:v1.40.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.39.2 + image: quay.io/operator-framework/scorecard-test:v1.40.0 labels: suite: olm test: olm-status-descriptors-test From 56d41a5469100fef4a914502149127df8dd948f8 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 6 Jun 2025 13:54:05 +0200 Subject: [PATCH 629/836] chore: improve renovate configuration (#7663) As of now, the renovate configuration was about adding more and more `customManagers` to handle different versions, now, we adopt the concept of using a comment on top with the required fields to update the versions inside the specified files. Closes #7423 Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- .github/renovate.json5 | 301 +++++-------------- .github/workflows/backport.yml | 1 + .github/workflows/codeql-analysis.yml | 1 + .github/workflows/continuous-delivery.yml | 5 + .github/workflows/continuous-integration.yml | 3 + .github/workflows/refresh-licenses.yml | 1 + .github/workflows/release-publish.yml | 1 + .github/workflows/require-labels.yml | 2 +- Makefile | 8 + 9 files changed, 88 insertions(+), 235 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 7fa2706846..7514f68745 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -20,382 +20,215 @@ 'contribute/**', 'licenses/**', 'pkg/versions/**', - 'pkg/specs/pgbouncer/', + 'pkg/specs/pgbouncer/' ], postUpdateOptions: [ - 'gomodTidy', + 'gomodTidy' ], semanticCommits: 'enabled', labels: [ 'automated', 'do not backport', - 'no-issue', + 'no-issue' ], customManagers: [ { - customType: 'regex', + customType: "regex", fileMatch: [ - '^Makefile$', + "^Makefile$" ], matchStrings: [ - 'KUSTOMIZE_VERSION \\?= (?.*?)\\n', - ], - datasourceTemplate: 'go', - depNameTemplate: 'sigs.k8s.io/kustomize/kustomize/v5', + "# renovate: datasource=(?[a-z-.]+?) depName=(?[^\\s]+?)(?: (?:lookupName|packageName)=(?[^\\s]+?))?(?: versioning=(?[^\\s]+?))?(?: extractVersion=(?[^\\s]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*\\?=\\s*[\"']?(?.+?)[\"']?\\s" + ] }, { - customType: 'regex', - fileMatch: [ - '^Makefile$', - ], - matchStrings: [ - 'CONTROLLER_TOOLS_VERSION \\?= (?.*?)\\n', - ], - datasourceTemplate: 'go', - depNameTemplate: 'sigs.k8s.io/controller-tools', - }, -{ - customType: 'regex', + customType: "regex", fileMatch: [ - '^Makefile$', + '^\\.github\\/workflows\\/[^/]+\\.ya?ml$' ], matchStrings: [ - 'GENREF_VERSION \\?= (?.*?)\\n', - ], - datasourceTemplate: 'go', - depNameTemplate: 'github.com/kubernetes-sigs/reference-docs/genref', - }, - { - customType: 'regex', - fileMatch: [ - '^Makefile$', - ], - matchStrings: [ - 'GORELEASER_VERSION \\?= (?.*?)\\n', - ], - datasourceTemplate: 'go', - versioningTemplate: 'loose', - depNameTemplate: 'github.com/goreleaser/goreleaser', + "# renovate: datasource=(?[a-z-.]+?) 
depName=(?[^\\s]+?)(?: (?:lookupName|packageName)=(?[^\\s]+?))?(?: versioning=(?[^\\s]+?))?(?: extractVersion=(?[^\\s]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*\: \\s*[\"']?(?.+?)[\"']?\\s" + ] }, { customType: 'regex', fileMatch: [ '^.github/workflows/continuous-delivery.yml', - '^hack/setup-cluster.sh$', + '^hack/setup-cluster.sh$' ], matchStrings: [ 'EXTERNAL_SNAPSHOTTER_VERSION: "(?.*?)"', - 'EXTERNAL_SNAPSHOTTER_VERSION=(?.*?)\\n', + 'EXTERNAL_SNAPSHOTTER_VERSION=(?.*?)\\n' ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/external-snapshotter', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)' }, { customType: 'regex', fileMatch: [ - '^hack/setup-cluster.sh$', + '^hack/setup-cluster.sh$' ], matchStrings: [ - 'EXTERNAL_PROVISIONER_VERSION=(?.*?)\\n', + 'EXTERNAL_PROVISIONER_VERSION=(?.*?)\\n' ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/external-provisioner', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)' }, { customType: 'regex', fileMatch: [ - '^hack/setup-cluster.sh$', + '^hack/setup-cluster.sh$' ], matchStrings: [ - 'EXTERNAL_RESIZER_VERSION=(?.*?)\\n', + 'EXTERNAL_RESIZER_VERSION=(?.*?)\\n' ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/external-resizer', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)' }, { customType: 'regex', fileMatch: [ - '^hack/setup-cluster.sh$', + '^hack/setup-cluster.sh$' ], matchStrings: [ - 'EXTERNAL_ATTACHER_VERSION=(?.*?)\\n', + 'EXTERNAL_ATTACHER_VERSION=(?.*?)\\n' ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/external-attacher', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)' }, { customType: 'regex', fileMatch: [ - '^hack/setup-cluster.sh$', + '^hack/setup-cluster.sh$' ], matchStrings: [ - 'CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?.*?)\\n', + 'CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?.*?)\\n' ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/csi-driver-host-path', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', - }, - { - customType: 'regex', - fileMatch: [ - '^.github/workflows/continuous-delivery.yml', - ], - matchStrings: [ - 'ROOK_VERSION: "(?.*?)"', - ], - datasourceTemplate: 'github-releases', - versioningTemplate: 'loose', - depNameTemplate: 'rook/rook', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', - }, - { - customType: 'regex', - fileMatch: [ - '^.github/workflows/continuous-delivery.yml', - '^.github/workflows/continuous-integration.yml', - ], - matchStrings: [ - 'KIND_VERSION: "(?.*?)"', - ], - datasourceTemplate: 'github-tags', - depNameTemplate: 'kubernetes-sigs/kind', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)' }, { customType: 'regex', fileMatch: [ '^hack/setup-cluster.sh$', - '^hack/e2e/run-e2e-kind.sh$', - ], - matchStrings: [ - 'KIND_NODE_DEFAULT_VERSION=(?.*?)\\n', - ], - datasourceTemplate: 'docker', - versioningTemplate: 'loose', - depNameTemplate: 'kindest/node', - }, - { - customType: 'regex', - fileMatch: [ - '^Makefile$', + '^hack/e2e/run-e2e-kind.sh$' ], matchStrings: [ - 'SPELLCHECK_VERSION \\?= (?.*?)\\n', + 'KIND_NODE_DEFAULT_VERSION=(?.*?)\\n' ], datasourceTemplate: 'docker', versioningTemplate: 'loose', - 
depNameTemplate: 'jonasbn/github-action-spellcheck', - }, - { - customType: 'regex', - fileMatch: [ - '^Makefile$', - ], - matchStrings: [ - 'WOKE_VERSION \\?= (?.*?)\\n', - ], - datasourceTemplate: 'docker', - versioningTemplate: 'loose', - depNameTemplate: 'getwoke/woke', - }, - { - customType: 'regex', - fileMatch: [ - '^Makefile$', - ], - matchStrings: [ - 'OPERATOR_SDK_VERSION \\?= (?.*?)\\n', - ], - datasourceTemplate: 'github-releases', - depNameTemplate: 'operator-framework/operator-sdk', - versioningTemplate: 'loose', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', - }, - { - customType: 'regex', - fileMatch: [ - '^Makefile$', - ], - matchStrings: [ - 'OPM_VERSION \\?= (?.*?)\\n', - ], - datasourceTemplate: 'github-releases', - depNameTemplate: 'operator-framework/operator-registry', - versioningTemplate: 'loose', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', - }, - { - customType: 'regex', - fileMatch: [ - '^Makefile$', - ], - matchStrings: [ - 'PREFLIGHT_VERSION \\?= (?.*?)\\n', - ], - datasourceTemplate: 'github-releases', - depNameTemplate: 'redhat-openshift-ecosystem/openshift-preflight', - versioningTemplate: 'loose', - extractVersionTemplate: '^(?\\d+\\.\\d+\\.\\d+)', + depNameTemplate: 'kindest/node' }, { customType: 'regex', fileMatch: [ '^config\\/olm-scorecard\\/patches\\/basic\\.config\\.yaml$', - '^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$', + '^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$' ], matchStrings: [ - 'image: quay.io/operator-framework/scorecard-test:(?.*?)\\n', + 'image: quay.io/operator-framework/scorecard-test:(?.*?)\\n' ], datasourceTemplate: 'docker', versioningTemplate: 'loose', depNameTemplate: 'quay.io/operator-framework/scorecard-test', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)' }, { customType: 'regex', fileMatch: [ '^pkg\\/versions\\/versions\\.go$', - '^pkg\\/specs\\/pgbouncer\\/deployments\\.go$', + '^pkg\\/specs\\/pgbouncer\\/deployments\\.go$' ], matchStrings: [ 'DefaultImageName = "(?.+?):(?.*?)"\\n', - 'DefaultPgbouncerImage = "(?.+?):(?.*?)"\\n', + 'DefaultPgbouncerImage = "(?.+?):(?.*?)"\\n' ], datasourceTemplate: 'docker', - versioningTemplate: 'loose', - }, - { - customType: 'regex', - fileMatch: [ - '^\\.github\\/workflows\\/[^/]+\\.ya?ml$', - ], - matchStrings: [ - 'GOLANG_VERSION: "(?.*?)\\.x"', - ], - datasourceTemplate: 'golang-version', - depNameTemplate: 'golang', - versioningTemplate: 'loose', - extractVersionTemplate: '^(?\\d+\\.\\d+)', - }, - { - customType: 'regex', - fileMatch: [ - '^\\.github\\/workflows\\/[^/]+\\.ya?ml$', - ], - matchStrings: [ - 'GOLANGCI_LINT_VERSION: "v(?.*?)"', - ], - datasourceTemplate: 'github-releases', - depNameTemplate: 'golangci/golangci-lint', - versioningTemplate: 'loose', - extractVersionTemplate: '^v(?\\d+\\.\\d+\\.\\d+)', - }, - { - customType: 'regex', - fileMatch: [ - '^.github/workflows/continuous-delivery.yml', - ], - matchStrings: [ - 'VELERO_VERSION: "v(?.*?)"', - ], - datasourceTemplate: 'github-releases', - depNameTemplate: 'vmware-tanzu/velero', - versioningTemplate: 'loose', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', - }, - { - customType: 'regex', - fileMatch: [ - '^.github/workflows/continuous-delivery.yml', - ], - matchStrings: [ - 'VELERO_AWS_PLUGIN_VERSION: "v(?.*?)"', - ], - datasourceTemplate: 'github-releases', - depNameTemplate: 'vmware-tanzu/velero-plugin-for-aws', - versioningTemplate: 'loose', - extractVersionTemplate: '^(?v\\d+\\.\\d+\\.\\d+)', + 
versioningTemplate: 'loose' }, ], packageRules: [ { matchDatasources: [ - 'docker', + 'docker' ], - allowedVersions: '!/alpha/', + allowedVersions: '!/alpha/' }, { matchDatasources: [ - 'go', + 'go' ], matchDepNames: [ - 'k8s.io/client-go', + 'k8s.io/client-go' ], - allowedVersions: '<1.0', + allowedVersions: '<1.0' }, { matchDatasources: [ - 'go', + 'go' ], groupName: 'kubernetes patches', matchUpdateTypes: [ 'patch', - 'digest', + 'digest' ], matchPackageNames: [ 'k8s.io{/,}**', 'sigs.k8s.io{/,}**', - 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**' ], matchDepNames: [ '!sigs.k8s.io/kustomize/kustomize/v5', - '!sigs.k8s.io/controller-tools', - ], + '!sigs.k8s.io/controller-tools' + ] }, { matchDatasources: [ - 'go', + 'go' ], matchUpdateTypes: [ 'major', - 'minor', + 'minor' ], matchPackageNames: [ 'k8s.io{/,}**', 'sigs.k8s.io{/,}**', - 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', - ], + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**' + ] }, { matchDatasources: [ - 'go', + 'go' ], matchUpdateTypes: [ - 'major', + 'major' ], matchPackageNames: [ '*', '!k8s.io{/,}**', '!sigs.k8s.io{/,}**', - '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', - ], + '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**' + ] }, { matchDatasources: [ - 'go', + 'go' ], matchUpdateTypes: [ 'minor', 'patch', - 'digest', + 'digest' ], groupName: 'all non-major go dependencies', matchPackageNames: [ @@ -403,8 +236,8 @@ '!k8s.io{/,}**', '!sigs.k8s.io{/,}**', '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', - '!github.com/cloudnative-pg/{/,}**', - ], + '!github.com/cloudnative-pg/{/,}**' + ] }, { matchDatasources: [ @@ -425,16 +258,16 @@ pinDigests: false, matchPackageNames: [ 'kubernetes-csi{/,}**', - 'rook{/,}**', - ], + 'rook{/,}**' + ] }, { groupName: 'backup test tools', separateMajorMinor: false, pinDigests: false, matchPackageNames: [ - 'vmware-tanzu{/,}**', - ], + 'vmware-tanzu{/,}**' + ] }, { groupName: 'operator framework', @@ -443,16 +276,16 @@ matchPackageNames: [ 'operator-framework{/,}**', 'redhat-openshift-ecosystem{/,}**', - 'quay.io/operator-framework{/,}**', - ], + 'quay.io/operator-framework{/,}**' + ] }, { groupName: 'cnpg', matchPackageNames: [ - 'github.com/cloudnative-pg/', + 'github.com/cloudnative-pg/' ], separateMajorMinor: false, - pinDigests: false, - }, - ], + pinDigests: false + } + ] } diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 1eec5327a0..4485d9a575 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -11,6 +11,7 @@ on: permissions: read-all env: + # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.x" jobs: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index bef638971f..9619376b2a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -34,6 +34,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: + # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.x" jobs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index c9b78431db..8122dae670 100644 --- a/.github/workflows/continuous-delivery.yml +++ 
b/.github/workflows/continuous-delivery.yml @@ -36,9 +36,12 @@ permissions: read-all # set up environment variables to be used across all the jobs env: + # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.x" KUBEBUILDER_VERSION: "2.3.1" + # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" + # renovate: datasource=github-releases depName=rook/rook versioning=loose ROOK_VERSION: "v1.17.2" EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.1" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" @@ -1321,7 +1324,9 @@ jobs: name: Setup Velero uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 env: + # renovate: datasource=github-releases depName=vmware-tanzu/velero VELERO_VERSION: "v1.16.1" + # renovate: datasource=github-releases depName=vmware-tanzu/velero VELERO_AWS_PLUGIN_VERSION: "v1.12.1" with: timeout_minutes: 10 diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 2acad1b1e0..5e8d1fd99b 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -18,9 +18,12 @@ permissions: read-all # set up environment variables to be used across all the jobs env: + # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.x" + # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose GOLANGCI_LINT_VERSION: "v2.1.6" KUBEBUILDER_VERSION: "2.3.1" + # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" API_DOC_NAME: "cloudnative-pg.v1.md" diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index b1e2c5e104..247fb2c856 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -9,6 +9,7 @@ on: permissions: read-all env: + # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.x" jobs: diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 43fdd9e6f1..3a7a89087a 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -10,6 +10,7 @@ on: permissions: read-all env: + # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.x" REGISTRY: "ghcr.io" diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index 4c428c81ec..775024a720 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Require labels - uses: docker://agilepathway/pull-request-label-checker:v1.6.65 + uses: agilepathway/label-checker@v1.6.65 with: any_of: "ok to merge :ok_hand:" none_of: "do not merge" diff --git a/Makefile b/Makefile index d07cfc227f..8d0716efd4 100644 --- a/Makefile +++ b/Makefile @@ -51,14 +51,22 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") +# renovate: datasource=github-releases depName=kubernetes-sigs/kustomize versioning=loose KUSTOMIZE_VERSION ?= v5.6.0 +# renovate: datasource=go depName=sigs.k8s.io/controller-tools CONTROLLER_TOOLS_VERSION ?= v0.17.3 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca +# renovate: datasource=go 
depName=github.com/goreleaser/goreleaser GORELEASER_VERSION ?= v2.9.0 +# renovate: datasource=docker depName=jonasbn/github-action-spellcheck versioning=docker SPELLCHECK_VERSION ?= 0.49.0 +# renovate: datasource=docker depName=getwoke/woke versioning=docker WOKE_VERSION ?= 0.19.0 +# renovate: datasource=github-releases depName=operator-framework/operator-sdk versioning=loose OPERATOR_SDK_VERSION ?= v1.40.0 +# renovate: datasource=github-tags depName=operator-framework/operator-registry OPM_VERSION ?= v1.55.0 +# renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight PREFLIGHT_VERSION ?= 1.13.1 OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 From c8628fc3b0bdcb4dd340f8b1c913725076ac8fab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Fri, 6 Jun 2025 14:20:22 +0200 Subject: [PATCH 630/836] chore(devcontainer): fix kubectx-kubens image location (#7751) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The repository hosting the image has been migrated. Adjusting the image reference. Closes #7750 Signed-off-by: Niccolò Fei --- .devcontainer/devcontainer.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index e66ed8dbf3..f8469ae5a3 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -12,7 +12,7 @@ "cilium": "none" }, "ghcr.io/guiyomh/features/golangci-lint:0": {}, - "ghcr.io/devcontainers-contrib/features/kubectx-kubens:1": {}, + "ghcr.io/devcontainers-extra/features/kubectx-kubens:1": {}, "ghcr.io/dhoeric/features/stern:1": {} }, From 447a07961f932a72680ade17d24a11e7a5148c2b Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 6 Jun 2025 14:43:36 +0200 Subject: [PATCH 631/836] feat: keep track of the PG system ID in the status (#7717) This patch adds a new field, `.status.SystemID`, which keeps track of the system ID reported by the PostgreSQL instances. The new field is set only if the reported system ID is consistent across all the instances. If there is no reported system ID or if there are inconsistencies, the field is left empty. A new condition keeps track of the consistency status.
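For illustration, a sketch of how the new status could look on a healthy cluster, assuming every instance reports the same system ID (the `systemID` value below is invented; the condition type, reason, and message are the ones introduced by this patch):

```yaml
status:
  systemID: "7489123456789012345"  # illustrative value, not a real system ID
  conditions:
    - type: ConsistentSystemID
      status: "True"
      reason: Unique
      message: A single, unique system ID was found across reporting instances.
```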
Closes: #7716 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- .wordlist-en-custom.txt | 2 + api/v1/cluster_types.go | 7 + .../bases/postgresql.cnpg.io_clusters.yaml | 3 + docs/src/cloudnative-pg.v1.md | 7 + internal/controller/cluster_status.go | 43 ++++ internal/controller/cluster_status_test.go | 214 ++++++++++++++++++ 6 files changed, 276 insertions(+) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index cc086513df..db131a2293 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -480,6 +480,7 @@ SynchronizeReplicasConfiguration SynchronousReplicaConfiguration SynchronousReplicaConfigurationMethod Synopsys +SystemID TCP TLS TLSv @@ -1331,6 +1332,7 @@ synchronizeReplicas synchronizeReplicasCache sys syslog +systemID systemd sysv tAc diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 3602933fd7..66c16160e1 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -956,6 +956,10 @@ type ClusterStatus struct { // WAL file, and Time of latest checkpoint // +optional DemotionToken string `json:"demotionToken,omitempty"` + + // SystemID is the latest detected PostgreSQL SystemID + // +optional + SystemID string `json:"systemID,omitempty"` } // ImageInfo contains the information about a PostgreSQL image @@ -996,6 +1000,9 @@ const ( ConditionBackup ClusterConditionType = "LastBackupSucceeded" // ConditionClusterReady represents whether a cluster is Ready ConditionClusterReady ClusterConditionType = "Ready" + // ConditionConsistentSystemID is true when the all the instances of the + // cluster report the same System ID. + ConditionConsistentSystemID ClusterConditionType = "ConsistentSystemID" ) // ConditionStatus defines conditions of resources diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index d9aa43c9d4..2eb4f627cb 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -6376,6 +6376,9 @@ spec: of switching a cluster to a replica cluster. type: boolean type: object + systemID: + description: SystemID is the latest detected PostgreSQL SystemID + type: string tablespacesStatus: description: TablespacesStatus reports the state of the declarative tablespaces in the cluster diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 4ffd8790ec..777443256f 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2289,6 +2289,13 @@ TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO WAL file, and Time of latest checkpoint
+<tr><td><code>systemID</code><br/>
+<em>string</em>
+</td>
+<td>
+   <p>SystemID is the latest detected PostgreSQL SystemID</p>
+ + diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index 6cf6d3c3f2..8819e7038a 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -28,9 +28,11 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "github.com/cloudnative-pg/machinery/pkg/stringset" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/strings/slices" "sigs.k8s.io/controller-runtime/pkg/client" @@ -763,12 +765,53 @@ func (r *ClusterReconciler) updateClusterStatusThatRequiresInstancesState( } // we update any relevant cluster status that depends on the primary instance + detectedSystemID := stringset.New() for _, item := range statuses.Items { // we refresh the last known timeline on the status root. // This avoids to have a zero timeline id in case that no primary instance is up during reconciliation. if item.IsPrimary && item.TimeLineID != 0 { cluster.Status.TimelineID = item.TimeLineID } + if item.SystemID != "" { + detectedSystemID.Put(item.SystemID) + } + } + + // we update the system ID field in the cluster status + switch detectedSystemID.Len() { + case 0: + cluster.Status.SystemID = "" + + message := "No instances are present in the cluster to report a system ID." + if len(statuses.Items) > 0 { + message = "Instances are present, but none have reported a system ID." + } + + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: string(apiv1.ConditionConsistentSystemID), + Status: metav1.ConditionFalse, + Reason: "NotFound", + Message: message, + }) + + case 1: + cluster.Status.SystemID = detectedSystemID.ToList()[0] + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: string(apiv1.ConditionConsistentSystemID), + Status: metav1.ConditionTrue, + Reason: "Unique", + Message: "A single, unique system ID was found across reporting instances.", + }) + + default: + // the instances are reporting an inconsistent system ID + cluster.Status.SystemID = "" + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: string(apiv1.ConditionConsistentSystemID), + Status: metav1.ConditionFalse, + Reason: "Mismatch", + Message: fmt.Sprintf("Multiple differing system IDs reported by instances: %q", detectedSystemID.ToSortedList()), + }) } if !reflect.DeepEqual(existingClusterStatus, cluster.Status) { diff --git a/internal/controller/cluster_status_test.go b/internal/controller/cluster_status_test.go index 605c0b32ea..4e9d4786b3 100644 --- a/internal/controller/cluster_status_test.go +++ b/internal/controller/cluster_status_test.go @@ -24,11 +24,14 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" v1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/reconciler/persistentvolumeclaim" . 
"github.com/onsi/ginkgo/v2" @@ -174,3 +177,214 @@ var _ = Describe("cluster_status unit tests", func() { }) }) }) + +var _ = Describe("updateClusterStatusThatRequiresInstancesState tests", func() { + var ( + env *testingEnvironment + cluster *v1.Cluster + ) + + BeforeEach(func() { + env = buildTestEnvironment() + cluster = newFakeCNPGCluster(env.client, newFakeNamespace(env.client)) + }) + + It("should handle empty status list", func(ctx SpecContext) { + statuses := postgres.PostgresqlStatusList{} + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(BeEmpty()) + Expect(cluster.Status.SystemID).To(BeEmpty()) + + condition := meta.FindStatusCondition(cluster.Status.Conditions, string(v1.ConditionConsistentSystemID)) + Expect(condition).ToNot(BeNil()) + Expect(condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(condition.Reason).To(Equal("NotFound")) + Expect(condition.Message).To(Equal("No instances are present in the cluster to report a system ID.")) + }) + + It("should handle instances without SystemID", func(ctx SpecContext) { + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: 123, + SystemID: "", + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + SystemID: "", + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(HaveLen(2)) + Expect(cluster.Status.TimelineID).To(Equal(123)) + Expect(cluster.Status.SystemID).To(BeEmpty()) + + condition := meta.FindStatusCondition(cluster.Status.Conditions, string(v1.ConditionConsistentSystemID)) + Expect(condition).ToNot(BeNil()) + Expect(condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(condition.Reason).To(Equal("NotFound")) + Expect(condition.Message).To(Equal("Instances are present, but none have reported a system ID.")) + }) + + It("should handle instances with a single SystemID", func(ctx SpecContext) { + const systemID = "system123" + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: 123, + SystemID: systemID, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + SystemID: systemID, + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(HaveLen(2)) + Expect(cluster.Status.TimelineID).To(Equal(123)) + Expect(cluster.Status.SystemID).To(Equal(systemID)) + + condition := meta.FindStatusCondition(cluster.Status.Conditions, string(v1.ConditionConsistentSystemID)) + Expect(condition).ToNot(BeNil()) + Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + Expect(condition.Reason).To(Equal("Unique")) + Expect(condition.Message).To(Equal("A single, unique system ID was found across reporting instances.")) + }) + + It("should 
handle instances with multiple SystemIDs", func(ctx SpecContext) { + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: 123, + SystemID: "system1", + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + SystemID: "system2", + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(HaveLen(2)) + Expect(cluster.Status.TimelineID).To(Equal(123)) + Expect(cluster.Status.SystemID).To(BeEmpty()) + + condition := meta.FindStatusCondition(cluster.Status.Conditions, string(v1.ConditionConsistentSystemID)) + Expect(condition).ToNot(BeNil()) + Expect(condition.Status).To(Equal(metav1.ConditionFalse)) + Expect(condition.Reason).To(Equal("Mismatch")) + Expect(condition.Message).To(ContainSubstring("Multiple differing system IDs reported by instances:")) + Expect(condition.Message).To(ContainSubstring("system1")) + Expect(condition.Message).To(ContainSubstring("system2")) + }) + + It("should update timeline ID from the primary instance", func(ctx SpecContext) { + const timelineID = 999 + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: timelineID, + SystemID: "system1", + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + TimeLineID: 123, + SystemID: "system1", + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.TimelineID).To(Equal(timelineID)) + }) + + It("should correctly populate InstancesReportedState", func(ctx SpecContext) { + statuses := postgres.PostgresqlStatusList{ + Items: []postgres.PostgresqlStatus{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}, + Status: corev1.PodStatus{PodIP: "192.168.1.1"}, + }, + IsPrimary: true, + TimeLineID: 123, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-2"}, + Status: corev1.PodStatus{PodIP: "192.168.1.2"}, + }, + IsPrimary: false, + TimeLineID: 123, + }, + }, + } + + err := env.clusterReconciler.updateClusterStatusThatRequiresInstancesState(ctx, cluster, statuses) + Expect(err).ToNot(HaveOccurred()) + + Expect(cluster.Status.InstancesReportedState).To(HaveLen(2)) + + state1 := cluster.Status.InstancesReportedState["pod-1"] + Expect(state1.IsPrimary).To(BeTrue()) + Expect(state1.TimeLineID).To(Equal(123)) + Expect(state1.IP).To(Equal("192.168.1.1")) + + state2 := cluster.Status.InstancesReportedState["pod-2"] + Expect(state2.IsPrimary).To(BeFalse()) + Expect(state2.TimeLineID).To(Equal(123)) + Expect(state2.IP).To(Equal("192.168.1.2")) + }) +}) From 013e20e0cf32597a09f9cad594e43beb418ce868 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Fri, 6 Jun 2025 17:56:11 +0200 Subject: [PATCH 632/836] test: adjust E2E tests to run with minimal images (#7655) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Minor 
adjustments to a few E2E tests to be able to run the testing suite using a -minimal image, specifically: - Avoid referencing locales that are not available in minimal images - Label Replica Cluster E2Es which require bootstrapping from a backup with LabelBackupRestore. This is useful because it allows running E2Es excluding all the tests that require backup/recovery capabilities, given that -minimal images do not contain Barman cloud. They can be skipped by setting FEATURE_TYPE=!backup-restore Closes #7654 Signed-off-by: Niccolò Fei Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- .../fixtures/declarative_databases/database.yaml.template | 2 +- .../e2e/fixtures/initdb/cluster-custom-locale.yaml.template | 2 +- tests/e2e/initdb_test.go | 2 +- tests/e2e/replica_mode_cluster_test.go | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template index 1b2cfc8fdc..a9c43b0d1e 100644 --- a/tests/e2e/fixtures/declarative_databases/database.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template @@ -5,7 +5,7 @@ metadata: spec: name: declarative owner: app - localeCType: "en_US.utf8" + localeCType: C localeCollate: C encoding: SQL_ASCII template: template0 diff --git a/tests/e2e/fixtures/initdb/cluster-custom-locale.yaml.template b/tests/e2e/fixtures/initdb/cluster-custom-locale.yaml.template index 73e2206fd3..e402a2fb9d 100644 --- a/tests/e2e/fixtures/initdb/cluster-custom-locale.yaml.template +++ b/tests/e2e/fixtures/initdb/cluster-custom-locale.yaml.template @@ -25,7 +25,7 @@ spec: owner: app options: - "--locale" - - "en_US.utf8" + - "C" # Persistent storage configuration storage: diff --git a/tests/e2e/initdb_test.go b/tests/e2e/initdb_test.go index 29a3635667..5906d70a2a 100644 --- a/tests/e2e/initdb_test.go +++ b/tests/e2e/initdb_test.go @@ -176,7 +176,7 @@ var _ = Describe("InitDB settings", Label(tests.LabelSmoke, tests.LabelBasic), f }, "postgres", "select datcollate from pg_catalog.pg_database where datname='template0'") Expect(err).ToNot(HaveOccurred()) - Expect(stdout, err).To(Equal("en_US.utf8\n")) + Expect(strings.TrimSpace(stdout), err).To(Equal("C")) }) }) }) diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go index f2c2f06c3d..03eaba9040 100644 --- a/tests/e2e/replica_mode_cluster_test.go +++ b/tests/e2e/replica_mode_cluster_test.go @@ -269,7 +269,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { }) }) - Context("archive mode set to 'always' on designated primary", func() { + Context("archive mode set to 'always' on designated primary", Label(tests.LabelBackupRestore), func() { It("verifies replica cluster can archive WALs from the designated primary", func() { const ( replicaClusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-archive-mode-always.yaml.template" @@ -340,7 +340,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { }) }) - Context("can bootstrap a replica cluster from a backup", Ordered, func() { + Context("can bootstrap a replica cluster from a backup", Label(tests.LabelBackupRestore), Ordered, func() { const ( clusterSample = fixturesDir + replicaModeClusterDir + "cluster-replica-src-with-backup.yaml.template" namespacePrefix = "replica-cluster-from-backup" @@ -492,7 +492,7 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() { // In this test we create a 
replica cluster from a backup and then promote it to a primary. // We expect the original primary to be demoted to a replica and be able to follow the new primary. -var _ = Describe("Replica switchover", Label(tests.LabelReplication), Ordered, func() { +var _ = Describe("Replica switchover", Label(tests.LabelReplication, tests.LabelBackupRestore), Ordered, func() { const ( replicaSwitchoverClusterDir = "/replica_mode_cluster/" namespacePrefix = "replica-switchover" From 7bfd9c1f34d3904fbea549d21015221f4f6256f6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 7 Jun 2025 13:22:02 +0200 Subject: [PATCH 633/836] chore(deps): update kubernetes csi (main) (#7739) This PR contains the following updates: https://github.com/kubernetes-csi/external-attacher `v4.8.1` -> `v4.9.0` https://github.com/kubernetes-csi/external-provisioner `v5.2.0` -> `v5.3.0` https://github.com/rook/rook `v1.17.2` -> `v1.17.4` --- .github/workflows/continuous-delivery.yml | 2 +- hack/setup-cluster.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 8122dae670..f941c047cb 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -42,7 +42,7 @@ env: # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" # renovate: datasource=github-releases depName=rook/rook versioning=loose - ROOK_VERSION: "v1.17.2" + ROOK_VERSION: "v1.17.4" EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.1" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index eaa9f7fda3..4c0a747cc8 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -30,9 +30,9 @@ fi KIND_NODE_DEFAULT_VERSION=v1.33.1 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.16.1 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.1 -EXTERNAL_PROVISIONER_VERSION=v5.2.0 +EXTERNAL_PROVISIONER_VERSION=v5.3.0 EXTERNAL_RESIZER_VERSION=v1.13.2 -EXTERNAL_ATTACHER_VERSION=v4.8.1 +EXTERNAL_ATTACHER_VERSION=v4.9.0 K8S_VERSION=${K8S_VERSION-} KUBECTL_VERSION=${KUBECTL_VERSION-} CSI_DRIVER_HOST_PATH_VERSION=${CSI_DRIVER_HOST_PATH_VERSION:-$CSI_DRIVER_HOST_PATH_DEFAULT_VERSION} From 02981f7ed9418a4b5956dcb8d39cbebad11bdb24 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 7 Jun 2025 15:53:35 +0200 Subject: [PATCH 634/836] chore(deps): update dependency golang to v1.24.4 (main) (#7757) --- .github/workflows/backport.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/refresh-licenses.yml | 2 +- .github/workflows/release-publish.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 4485d9a575..b08fc29c6a 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -12,7 +12,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.x" + GOLANG_VERSION: "1.24.4" jobs: # Label the source pull request with 'backport-requested' and all supported releases label, the goal is, by default diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9619376b2a..9998b84e35 100644 --- 
a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -35,7 +35,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.x" + GOLANG_VERSION: "1.24.4" jobs: duplicate_runs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f941c047cb..a703d1a629 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.x" + GOLANG_VERSION: "1.24.4" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 5e8d1fd99b..152a175f05 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -19,7 +19,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.x" + GOLANG_VERSION: "1.24.4" # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose GOLANGCI_LINT_VERSION: "v2.1.6" KUBEBUILDER_VERSION: "2.3.1" diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index 247fb2c856..08de202dc2 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -10,7 +10,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.x" + GOLANG_VERSION: "1.24.4" jobs: licenses: diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 3a7a89087a..f3b7b37f1e 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -11,7 +11,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.x" + GOLANG_VERSION: "1.24.4" REGISTRY: "ghcr.io" jobs: From 61a0537c45a394c94fa3d9e31312f95ab90afe2a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 7 Jun 2025 23:13:17 +0200 Subject: [PATCH 635/836] chore(deps): pin agilepathway/label-checker action to c3d16ad (main) (#7756) --- .github/workflows/require-labels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index 775024a720..66638a0c73 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Require labels - uses: agilepathway/label-checker@v1.6.65 + uses: agilepathway/label-checker@c3d16ad512e7cea5961df85ff2486bb774caf3c5 # v1.6.65 with: any_of: "ok to merge :ok_hand:" none_of: "do not merge" From 282a2cf8c2bdc8ff2827c00d760b5e410150acb4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 8 Jun 2025 01:47:12 +0200 Subject: [PATCH 636/836] chore(deps): update dependency vmware-tanzu/velero to v1.16.1 (main) (#7761) --- .github/workflows/continuous-delivery.yml | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index a703d1a629..6e3556fe8e 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1327,7 +1327,7 @@ jobs: # renovate: datasource=github-releases depName=vmware-tanzu/velero VELERO_VERSION: "v1.16.1" # renovate: datasource=github-releases depName=vmware-tanzu/velero - VELERO_AWS_PLUGIN_VERSION: "v1.12.1" + VELERO_AWS_PLUGIN_VERSION: "v1.16.1" with: timeout_minutes: 10 max_attempts: 3 From fefe48bb99e13b41a4e68f6b1a4cf948ab6c898a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 8 Jun 2025 09:40:37 +0200 Subject: [PATCH 637/836] fix(deps): update k8s.io/utils digest to 4c0f3b2 (main) (#7735) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ded35df29f..d5eee109e3 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( k8s.io/apimachinery v0.33.1 k8s.io/cli-runtime v0.33.1 k8s.io/client-go v0.33.1 - k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 2400587a09..d1cd2bba13 100644 --- a/go.sum +++ b/go.sum @@ -296,8 +296,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 h1:jgJW5IePPXLGB8e/1wvd0Ich9QE97RvvF3a8J3fP/Lg= -k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= From be9f03ffac55ba38a68b3ee0ea43fccc2953615b Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 9 Jun 2025 10:50:09 +0200 Subject: [PATCH 638/836] chore(ci): link velero plugin version to container images in Renovate (#7780) The velero plugin should be updated based on the availability of the images since those are the one required when deploying Velero. The Velero version should be always the GitHub release, since that's what we use to install Velero Closes #7779 Signed-off-by: Jonathan Gonzalez V. 
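To make the distinction concrete, a sketch of the resulting configuration, assuming the Renovate comment convention introduced earlier in this series (the plugin version shown reflects the correction applied in the follow-up commit below):

```yaml
env:
  # renovate: datasource=github-releases depName=vmware-tanzu/velero
  VELERO_VERSION: "v1.16.1"
  # renovate: datasource=docker depName=velero/velero-plugin-for-aws
  VELERO_AWS_PLUGIN_VERSION: "v1.12.1"
```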
--- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 6e3556fe8e..922a1ca501 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1326,7 +1326,7 @@ jobs: env: # renovate: datasource=github-releases depName=vmware-tanzu/velero VELERO_VERSION: "v1.16.1" - # renovate: datasource=github-releases depName=vmware-tanzu/velero + # renovate: datasource=docker depName=velero/velero-plugin-for-aws VELERO_AWS_PLUGIN_VERSION: "v1.16.1" with: timeout_minutes: 10 From 9e3b53949bf4eec40fb91ec170f2049501805b62 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 9 Jun 2025 13:23:51 +0200 Subject: [PATCH 639/836] chore: set the proper velero plugin version (#7782) Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 922a1ca501..3aa5f21dea 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1327,7 +1327,7 @@ jobs: # renovate: datasource=github-releases depName=vmware-tanzu/velero VELERO_VERSION: "v1.16.1" # renovate: datasource=docker depName=velero/velero-plugin-for-aws - VELERO_AWS_PLUGIN_VERSION: "v1.16.1" + VELERO_AWS_PLUGIN_VERSION: "v1.12.1" with: timeout_minutes: 10 max_attempts: 3 From bccac20d68d5efd19cc344c7884f28d94e78989d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 13:53:02 +0200 Subject: [PATCH 640/836] chore(config): migrate renovate config (#7781) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Renovate config in this repository needs migrating. Typically this is because one or more configuration options you are using have been renamed. You don't need to merge this PR right away, because Renovate will continue to migrate these fields internally each time it runs. But later some of these fields may be fully deprecated and the migrations removed. So it's a good idea to merge this migration PR soon. #### [PLEASE NOTE](https://docs.renovatebot.com/configuration-options#configmigration): JSON5 config file migrated! All comments & trailing commas were removed. 🔕 **Ignore**: Close this PR and you won't be reminded about config migration again, but one day your current config may no longer be valid. ❓ Got questions? Does something look wrong to you? Please don't hesitate to [request help here](https://redirect.github.com/renovatebot/renovate/discussions). --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). --------- Signed-off-by: Jonathan Gonzalez V. Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Jonathan Gonzalez V. 
--- .github/renovate.json5 | 168 +++++++++---------- .github/workflows/continuous-integration.yml | 2 +- 2 files changed, 85 insertions(+), 85 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 7514f68745..5cfb345463 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -20,215 +20,215 @@ 'contribute/**', 'licenses/**', 'pkg/versions/**', - 'pkg/specs/pgbouncer/' + 'pkg/specs/pgbouncer/', ], postUpdateOptions: [ - 'gomodTidy' + 'gomodTidy', ], semanticCommits: 'enabled', labels: [ 'automated', 'do not backport', - 'no-issue' + 'no-issue', ], customManagers: [ { - customType: "regex", - fileMatch: [ - "^Makefile$" + customType: 'regex', + managerFilePatterns: [ + '/^Makefile$/', ], matchStrings: [ - "# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*\\?=\\s*[\"']?(?<currentValue>.+?)[\"']?\\s" - ] + '# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*\\?=\\s*["\']?(?<currentValue>.+?)["\']?\\s', + ], }, { - customType: "regex", - fileMatch: [ - '^\\.github\\/workflows\\/[^/]+\\.ya?ml$' + customType: 'regex', + managerFilePatterns: [ + '/^\\.github\\/workflows\\/[^/]+\\.ya?ml$/', ], matchStrings: [ - "# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*\: \\s*[\"']?(?<currentValue>.+?)[\"']?\\s" - ] + '# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*: \\s*["\']?(?<currentValue>.+?)["\']?\\s', + ], }, { customType: 'regex', - fileMatch: [ - '^.github/workflows/continuous-delivery.yml', - '^hack/setup-cluster.sh$' + managerFilePatterns: [ + '/^.github/workflows/continuous-delivery.yml/', + '/^hack/setup-cluster.sh$/', ], matchStrings: [ 'EXTERNAL_SNAPSHOTTER_VERSION: "(?<currentValue>.*?)"', - 'EXTERNAL_SNAPSHOTTER_VERSION=(?<currentValue>.*?)\\n' + 'EXTERNAL_SNAPSHOTTER_VERSION=(?<currentValue>.*?)\\n', ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/external-snapshotter', - extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)' + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', }, { customType: 'regex', - fileMatch: [ - '^hack/setup-cluster.sh$' + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', ], matchStrings: [ - 'EXTERNAL_PROVISIONER_VERSION=(?<currentValue>.*?)\\n' + 'EXTERNAL_PROVISIONER_VERSION=(?<currentValue>.*?)\\n', ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/external-provisioner', - extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)' + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', }, { customType: 'regex', - fileMatch: [ - '^hack/setup-cluster.sh$' + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', ], matchStrings: [ - 'EXTERNAL_RESIZER_VERSION=(?<currentValue>.*?)\\n' + 'EXTERNAL_RESIZER_VERSION=(?<currentValue>.*?)\\n', ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/external-resizer', - extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)' + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', }, { customType: 'regex', - fileMatch: [ - '^hack/setup-cluster.sh$' + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', ], matchStrings: [ -
'EXTERNAL_ATTACHER_VERSION=(?<currentValue>.*?)\\n' + 'EXTERNAL_ATTACHER_VERSION=(?<currentValue>.*?)\\n', ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/external-attacher', - extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)' + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', }, { customType: 'regex', - fileMatch: [ - '^hack/setup-cluster.sh$' + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', ], matchStrings: [ - 'CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?<currentValue>.*?)\\n' + 'CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=(?<currentValue>.*?)\\n', ], datasourceTemplate: 'github-releases', versioningTemplate: 'loose', depNameTemplate: 'kubernetes-csi/csi-driver-host-path', - extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)' + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', }, { customType: 'regex', - fileMatch: [ - '^hack/setup-cluster.sh$', - '^hack/e2e/run-e2e-kind.sh$' + managerFilePatterns: [ + '/^hack/setup-cluster.sh$/', + '/^hack/e2e/run-e2e-kind.sh$/', ], matchStrings: [ - 'KIND_NODE_DEFAULT_VERSION=(?<currentValue>.*?)\\n' + 'KIND_NODE_DEFAULT_VERSION=(?<currentValue>.*?)\\n', ], datasourceTemplate: 'docker', versioningTemplate: 'loose', - depNameTemplate: 'kindest/node' + depNameTemplate: 'kindest/node', }, { customType: 'regex', - fileMatch: [ - '^config\\/olm-scorecard\\/patches\\/basic\\.config\\.yaml$', - '^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$' + managerFilePatterns: [ + '/^config\\/olm-scorecard\\/patches\\/basic\\.config\\.yaml$/', + '/^config\\/olm-scorecard\\/patches\\/olm\\.config\\.yaml$/', ], matchStrings: [ - 'image: quay.io/operator-framework/scorecard-test:(?<currentValue>.*?)\\n' + 'image: quay.io/operator-framework/scorecard-test:(?<currentValue>.*?)\\n', ], datasourceTemplate: 'docker', versioningTemplate: 'loose', depNameTemplate: 'quay.io/operator-framework/scorecard-test', - extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)' + extractVersionTemplate: '^(?<version>v\\d+\\.\\d+\\.\\d+)', }, { customType: 'regex', - fileMatch: [ - '^pkg\\/versions\\/versions\\.go$', - '^pkg\\/specs\\/pgbouncer\\/deployments\\.go$' + managerFilePatterns: [ + '/^pkg\\/versions\\/versions\\.go$/', + '/^pkg\\/specs\\/pgbouncer\\/deployments\\.go$/', ], matchStrings: [ 'DefaultImageName = "(?<depName>.+?):(?<currentValue>.*?)"\\n', - 'DefaultPgbouncerImage = "(?<depName>.+?):(?<currentValue>.*?)"\\n' + 'DefaultPgbouncerImage = "(?<depName>.+?):(?<currentValue>.*?)"\\n', ], datasourceTemplate: 'docker', - versioningTemplate: 'loose' + versioningTemplate: 'loose', }, ], packageRules: [ { matchDatasources: [ - 'docker' + 'docker', ], - allowedVersions: '!/alpha/' + allowedVersions: '!/alpha/', }, { matchDatasources: [ - 'go' + 'go', ], matchDepNames: [ - 'k8s.io/client-go' + 'k8s.io/client-go', ], - allowedVersions: '<1.0' + allowedVersions: '<1.0', }, { matchDatasources: [ - 'go' + 'go', ], groupName: 'kubernetes patches', matchUpdateTypes: [ 'patch', - 'digest' + 'digest', ], matchPackageNames: [ 'k8s.io{/,}**', 'sigs.k8s.io{/,}**', 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**' + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', ], matchDepNames: [ '!sigs.k8s.io/kustomize/kustomize/v5', - '!sigs.k8s.io/controller-tools' - ] + '!sigs.k8s.io/controller-tools', + ], }, { matchDatasources: [ - 'go' + 'go', ], matchUpdateTypes: [ 'major', - 'minor' + 'minor', ], matchPackageNames: [ 'k8s.io{/,}**', 'sigs.k8s.io{/,}**', - 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**' - ] + 'github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + ], }, { matchDatasources: [ - 'go' + 'go', ],
matchUpdateTypes: [ - 'major' + 'major', ], matchPackageNames: [ '*', '!k8s.io{/,}**', '!sigs.k8s.io{/,}**', - '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**' - ] + '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', + ], }, { matchDatasources: [ - 'go' + 'go', ], matchUpdateTypes: [ 'minor', 'patch', - 'digest' + 'digest', ], groupName: 'all non-major go dependencies', matchPackageNames: [ @@ -236,21 +236,21 @@ '!k8s.io{/,}**', '!sigs.k8s.io{/,}**', '!github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring{/,}**', - '!github.com/cloudnative-pg/{/,}**' - ] + '!github.com/cloudnative-pg/{/,}**', + ], }, { matchDatasources: [ - 'github-tags' + 'github-tags', ], matchUpdateTypes: [ 'digest', 'pinDigest', 'minor', - 'patch' + 'patch', ], groupName: 'all non-major github action', - pinDigests: true + pinDigests: true, }, { groupName: 'kubernetes CSI', @@ -258,16 +258,16 @@ pinDigests: false, matchPackageNames: [ 'kubernetes-csi{/,}**', - 'rook{/,}**' - ] + 'rook{/,}**', + ], }, { groupName: 'backup test tools', separateMajorMinor: false, pinDigests: false, matchPackageNames: [ - 'vmware-tanzu{/,}**' - ] + 'vmware-tanzu{/,}**', + ], }, { groupName: 'operator framework', @@ -276,16 +276,16 @@ matchPackageNames: [ 'operator-framework{/,}**', 'redhat-openshift-ecosystem{/,}**', - 'quay.io/operator-framework{/,}**' - ] + 'quay.io/operator-framework{/,}**', + ], }, { groupName: 'cnpg', matchPackageNames: [ - 'github.com/cloudnative-pg/' + 'github.com/cloudnative-pg/', ], separateMajorMinor: false, - pinDigests: false - } - ] + pinDigests: false, + }, + ], } diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 152a175f05..6543023ecf 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -187,7 +187,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Validate Renovate JSON - run: npx --yes --package renovate -- renovate-config-validator + run: npx --yes --package renovate@40.48.6 -- renovate-config-validator go-vulncheck: name: Run govulncheck From e90a71b266b10c87e8b31a9d1512feacdc8d05e7 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 9 Jun 2025 14:14:02 +0200 Subject: [PATCH 641/836] chore(lint): strict nil check in termination grace period comparison (#7770) Add proper nil checks before dereferencing TerminationGracePeriodSeconds pointers to prevent panic when only one of the pointers is nil. 
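As a minimal standalone sketch of the safe comparison pattern (the helper name is illustrative, not part of the codebase):

    package main

    import "fmt"

    // equalInt64Ptr compares two *int64 values without risking a nil
    // dereference: two nil pointers are equal, a nil/non-nil pair is not,
    // and the pointed-to values are compared only when both are non-nil.
    func equalInt64Ptr(a, b *int64) bool {
        return (a == nil && b == nil) ||
            (a != nil && b != nil && *a == *b)
    }

    func main() {
        v := int64(30)
        fmt.Println(equalInt64Ptr(nil, nil)) // true
        fmt.Println(equalInt64Ptr(&v, nil))  // false (a naive *a == *b would panic here)
        fmt.Println(equalInt64Ptr(&v, &v))   // true
    }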
In practice, only the proposed pod could have a nil value; for this reason, I'm tagging the PR as a strict nil-check lint fix. Signed-off-by: Armando Ruocco --- pkg/specs/podspec_diff.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/specs/podspec_diff.go b/pkg/specs/podspec_diff.go index 4e4e04403c..8317aed787 100644 --- a/pkg/specs/podspec_diff.go +++ b/pkg/specs/podspec_diff.go @@ -92,8 +92,9 @@ func ComparePodSpecs( return currentPodSpec.Hostname == targetPodSpec.Hostname }, "termination-grace-period": func() bool { - return currentPodSpec.TerminationGracePeriodSeconds == nil && targetPodSpec.TerminationGracePeriodSeconds == nil || - *currentPodSpec.TerminationGracePeriodSeconds == *targetPodSpec.TerminationGracePeriodSeconds + return (currentPodSpec.TerminationGracePeriodSeconds == nil && targetPodSpec.TerminationGracePeriodSeconds == nil) || + (currentPodSpec.TerminationGracePeriodSeconds != nil && targetPodSpec.TerminationGracePeriodSeconds != nil && + *currentPodSpec.TerminationGracePeriodSeconds == *targetPodSpec.TerminationGracePeriodSeconds) }, } From 7d856afa37af963280327870d98a852f84a7ca1e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 14:27:54 +0200 Subject: [PATCH 642/836] fix(deps): update module sigs.k8s.io/controller-runtime to v0.21.0 (main) (#7692) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d5eee109e3..7dc1f7792f 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( k8s.io/cli-runtime v0.33.1 k8s.io/client-go v0.33.1 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index d1cd2bba13..250863b36e 100644 --- a/go.sum +++ b/go.sum @@ -298,8 +298,8 @@ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUy k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= From 1089ec92f155e88f82a0965de5f0f1d5686a7848 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 9 Jun 2025 14:40:19 +0200 Subject: [PATCH 643/836] feat(cnpgi): add `Postgres` interface support to the operator (#7179) The `Postgres` CNPG-I interface allows plugin developers to change the configuration of the PostgreSQL instance after the manager has generated it.
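As a minimal, self-contained sketch of the merge semantics (plain Go functions stand in for the gRPC plugin clients; the example parameter is illustrative):

    package main

    import "fmt"

    // enrichFn stands in for a plugin's EnrichConfiguration RPC: it receives
    // the generated configuration and may return a replacement map.
    type enrichFn func(config map[string]string) map[string]string

    // enrich mirrors the operator-side loop: every capable plugin is called
    // with the generated configuration, an empty response is ignored, and a
    // non-empty response replaces the result, so the last responding plugin wins.
    func enrich(config map[string]string, plugins []enrichFn) map[string]string {
        result := config
        for _, p := range plugins {
            if res := p(config); len(res) > 0 {
                result = res
            }
        }
        return result
    }

    func main() {
        addExtension := func(cfg map[string]string) map[string]string {
            out := make(map[string]string, len(cfg)+1)
            for k, v := range cfg {
                out[k] = v
            }
            out["shared_preload_libraries"] = "my_extension" // hypothetical value
            return out
        }
        fmt.Println(enrich(map[string]string{"max_connections": "100"}, []enrichFn{addExtension}))
    }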
Signed-off-by: Jaime Silvela Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi --- api/v1/cluster_funcs.go | 45 ++++ go.mod | 2 +- go.sum | 4 +- internal/cmd/manager/instance/run/cmd.go | 10 +- .../manager/instance/upgrade/execute/cmd.go | 20 +- internal/cmd/plugin/status/status.go | 2 + internal/cnpi/plugin/client/cluster_test.go | 5 +- internal/cnpi/plugin/client/contracts.go | 1 + internal/cnpi/plugin/client/interfaces.go | 39 +++ internal/cnpi/plugin/client/postgres.go | 71 ++++++ internal/cnpi/plugin/client/postgres_test.go | 226 ++++++++++++++++++ internal/cnpi/plugin/client/suite_test.go | 14 ++ internal/cnpi/plugin/connection/connection.go | 44 ++++ internal/cnpi/plugin/connection/metadata.go | 1 + internal/controller/cluster_upgrade_test.go | 1 + .../controller/instance_controller.go | 29 ++- internal/management/controller/manager.go | 4 + pkg/management/postgres/configuration.go | 26 +- pkg/management/postgres/configuration_test.go | 71 ++++-- pkg/management/postgres/initdb.go | 19 +- pkg/management/postgres/restore.go | 17 +- pkg/postgres/configuration.go | 5 + pkg/postgres/plugin/config.go | 72 ++++++ pkg/postgres/plugin/doc.go | 21 ++ 24 files changed, 720 insertions(+), 29 deletions(-) create mode 100644 internal/cnpi/plugin/client/interfaces.go create mode 100644 internal/cnpi/plugin/client/postgres.go create mode 100644 internal/cnpi/plugin/client/postgres_test.go create mode 100644 pkg/postgres/plugin/config.go create mode 100644 pkg/postgres/plugin/doc.go diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 7234cbcfe3..8c1f7aa393 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -27,6 +27,7 @@ import ( "strings" "time" + "github.com/cloudnative-pg/cnpg-i/pkg/identity" "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" @@ -85,6 +86,50 @@ func GetPluginConfigurationEnabledPluginNames(pluginList []PluginConfiguration) return pluginNames } +// GetInstanceEnabledPluginNames gets the name of the plugins that are available to the instance container +func (cluster *Cluster) GetInstanceEnabledPluginNames() (result []string) { + var instance []string + for _, pluginStatus := range cluster.Status.PluginStatus { + if slices.Contains(pluginStatus.Capabilities, + identity.PluginCapability_Service_TYPE_INSTANCE_SIDECAR_INJECTION.String()) { + instance = append(instance, pluginStatus.Name) + } + } + + enabled := GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) + + var instanceEnabled []string + for _, pluginName := range instance { + if slices.Contains(enabled, pluginName) { + instanceEnabled = append(instanceEnabled, pluginName) + } + } + + return instanceEnabled +} + +// GetJobEnabledPluginNames gets the name of the plugins that are available to the job container +func (cluster *Cluster) GetJobEnabledPluginNames() (result []string) { + var instance []string + for _, pluginStatus := range cluster.Status.PluginStatus { + if slices.Contains(pluginStatus.Capabilities, + identity.PluginCapability_Service_TYPE_INSTANCE_JOB_SIDECAR_INJECTION.String()) { + instance = append(instance, pluginStatus.Name) + } + } + + enabled := GetPluginConfigurationEnabledPluginNames(cluster.Spec.Plugins) + + var instanceEnabled []string + for _, pluginName := range instance { + if slices.Contains(enabled, pluginName) { + instanceEnabled = append(instanceEnabled, pluginName) + } + } + + return instanceEnabled +} + // 
GetExternalClustersEnabledPluginNames gets the name of the plugins that are // involved in the reconciliation of this external cluster list. This // list is usually composed by the plugins that need to be active to diff --git a/go.mod b/go.mod index 7dc1f7792f..38425da440 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.3.1 - github.com/cloudnative-pg/cnpg-i v0.2.1 + github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b github.com/cloudnative-pg/machinery v0.2.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 diff --git a/go.sum b/go.sum index 250863b36e..5fd2562a16 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.3.1 h1:kzkY77k2lN/caoyh7ibXDSZjJeSJTNvnVt6Gfa8Iq5M= github.com/cloudnative-pg/barman-cloud v0.3.1/go.mod h1:4HL3AjY9oEl2Ed0HSkyvTZEQPhwyFOaAnuCz9lfVeYQ= -github.com/cloudnative-pg/cnpg-i v0.2.1 h1:g96BE1ojdiFtDwtb7tg5wUF9a2kAh0eVg4SkjsO8jnk= -github.com/cloudnative-pg/cnpg-i v0.2.1/go.mod h1:kPfJpPGAKN1/2xvwBcC3WzMP46pj3sKLHLNB8NHr77U= +github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b h1:B7Ugp5epMIDNPe0bIOcqpErKkiQfuCM3nXoGh4GiPHM= +github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b/go.mod h1:FUA8ELMnqHpA2MIOeG425sX7D+u3m8SD/oFd1CnXSEw= github.com/cloudnative-pg/machinery v0.2.0 h1:x8OAwxdeL/6wkbxqorz+nX6UovTyx7/TBeCfiRebR2o= github.com/cloudnative-pg/machinery v0.2.0/go.mod h1:Kg8W8Tb/1UFGGtw3hR8S5SytSWddlHaCnJSgBo4x/nc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index b1e33a08b3..82b118f726 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -42,6 +42,8 @@ import ( apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance/run/lifecycle" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/externalservers" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/roles" @@ -210,8 +212,14 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { postgresStartConditions := concurrency.MultipleExecuted{} exitedConditions := concurrency.MultipleExecuted{} + pluginRepository := repository.New() + if _, err := pluginRepository.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + contextLogger.Error(err, "Unable to load sidecar CNPG-i plugins, skipping") + } + defer pluginRepository.Close() + metricsExporter := metricserver.NewExporter(instance) - reconciler := controller.NewInstanceReconciler(instance, mgr.GetClient(), metricsExporter) + reconciler := controller.NewInstanceReconciler(instance, mgr.GetClient(), metricsExporter, pluginRepository) err = ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). Named("instance-cluster"). 
diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index 9fc44816de..43c309738d 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -31,16 +31,19 @@ import ( "strings" "time" + cnpgiPostgres "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/machinery/pkg/env" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/spf13/cobra" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/management/istio" "github.com/cloudnative-pg/cloudnative-pg/internal/management/linkerd" "github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" @@ -402,8 +405,23 @@ func prepareConfigurationFiles(ctx context.Context, cluster apiv1.Cluster, destD tmpCluster.Spec.PostgresConfiguration.Parameters["idle_replication_slot_timeout"] = "0" } + enabledPluginNamesSet := stringset.From(cluster.GetJobEnabledPluginNames()) + pluginCli, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) + if err != nil { + return fmt.Errorf("error while creating the plugin client: %w", err) + } + defer pluginCli.Close(ctx) + + ctx = pluginClient.SetPluginClientInContext(ctx, pluginCli) + ctx = cluster.SetInContext(ctx) + newInstance := postgres.Instance{PgData: destDir} - if _, err := newInstance.RefreshConfigurationFilesFromCluster(ctx, tmpCluster, false); err != nil { + if _, err := newInstance.RefreshConfigurationFilesFromCluster( + ctx, + tmpCluster, + false, + cnpgiPostgres.OperationType_TYPE_UPGRADE, + ); err != nil { return fmt.Errorf("error while creating the configuration files for new datadir %q: %w", destDir, err) } diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 84720d7aa8..91db0ba383 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -1206,6 +1206,8 @@ func (fullStatus *PostgresqlStatus) printPluginStatus(verbosity int) { result[idx] = "Operator Service" case identity.PluginCapability_Service_TYPE_LIFECYCLE_SERVICE.String(): result[idx] = "Lifecycle Service" + case identity.PluginCapability_Service_TYPE_POSTGRES.String(): + result[idx] = "Postgres Service" case identity.PluginCapability_Service_TYPE_UNSPECIFIED.String(): continue default: diff --git a/internal/cnpi/plugin/client/cluster_test.go b/internal/cnpi/plugin/client/cluster_test.go index 01bbd4fd66..f2b8380257 100644 --- a/internal/cnpi/plugin/client/cluster_test.go +++ b/internal/cnpi/plugin/client/cluster_test.go @@ -25,7 +25,6 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/operator" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" . 
"github.com/onsi/ginkgo/v2" @@ -36,9 +35,9 @@ var _ = Describe("SetStatusInCluster", func() { const pluginName = "fake-plugin" const pluginName2 = "fake-plugin2" - var cluster *apiv1.Cluster + var cluster fakeCluster BeforeEach(func() { - cluster = &apiv1.Cluster{} + cluster = fakeCluster{} }) It("should correctly set the status of a single plugin", func(ctx SpecContext) { diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go index 6af9e7419e..22594ea65f 100644 --- a/internal/cnpi/plugin/client/contracts.go +++ b/internal/cnpi/plugin/client/contracts.go @@ -41,6 +41,7 @@ type Client interface { WalCapabilities BackupCapabilities RestoreJobHooksCapabilities + PostgresConfigurationCapabilities } // SetPluginClientInContext records the plugin client in the given context diff --git a/internal/cnpi/plugin/client/interfaces.go b/internal/cnpi/plugin/client/interfaces.go new file mode 100644 index 0000000000..162d341ec6 --- /dev/null +++ b/internal/cnpi/plugin/client/interfaces.go @@ -0,0 +1,39 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// PostgresConfigurationCapabilities is the interface that defines the +// capabilities of interacting with PostgreSQL. +type PostgresConfigurationCapabilities interface { + // EnrichConfiguration is the method that enriches the PostgreSQL configuration + EnrichConfiguration( + ctx context.Context, + cluster client.Object, + config map[string]string, + operationType postgres.OperationType_Type, + ) (map[string]string, error) +} diff --git a/internal/cnpi/plugin/client/postgres.go b/internal/cnpi/plugin/client/postgres.go new file mode 100644 index 0000000000..a3259a666c --- /dev/null +++ b/internal/cnpi/plugin/client/postgres.go @@ -0,0 +1,71 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "encoding/json" + "slices" + + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "github.com/cloudnative-pg/machinery/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func (data *data) EnrichConfiguration( + ctx context.Context, + cluster client.Object, + config map[string]string, + operationType postgresClient.OperationType_Type, +) (map[string]string, error) { + tempConfig := config + + contextLogger := log.FromContext(ctx).WithName("enrichConfiguration") + + clusterDefinition, marshalErr := json.Marshal(cluster) + if marshalErr != nil { + return nil, marshalErr + } + + for idx := range data.plugins { + plugin := data.plugins[idx] + + if !slices.Contains(plugin.PostgresCapabilities(), postgresClient.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION) { + contextLogger.Debug("skipping plugin", "plugin", plugin.Name()) + continue + } + req := &postgresClient.EnrichConfigurationRequest{ + Configs: config, + ClusterDefinition: clusterDefinition, + OperationType: &postgresClient.OperationType{Type: operationType}, + } + res, err := plugin.PostgresClient().EnrichConfiguration(ctx, req) + if err != nil { + return nil, err + } + contextLogger.Debug("received response", "resConfig", res.Configs) + if len(res.Configs) == 0 { + continue + } + tempConfig = res.Configs + } + + return tempConfig, nil +} diff --git a/internal/cnpi/plugin/client/postgres_test.go b/internal/cnpi/plugin/client/postgres_test.go new file mode 100644 index 0000000000..80f426cd86 --- /dev/null +++ b/internal/cnpi/plugin/client/postgres_test.go @@ -0,0 +1,226 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "google.golang.org/grpc" + + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +type fakePostgresClient struct { + enrichConfigResponse *postgres.EnrichConfigurationResult + enrichConfigError error +} + +type fakePostgresConnection struct { + name string + capabilities []postgres.PostgresCapability_RPC_Type + postgresClient *fakePostgresClient + connection.Interface +} + +func (f *fakePostgresClient) GetCapabilities( + _ context.Context, + _ *postgres.PostgresCapabilitiesRequest, + _ ...grpc.CallOption, +) (*postgres.PostgresCapabilitiesResult, error) { + return &postgres.PostgresCapabilitiesResult{ + Capabilities: []*postgres.PostgresCapability{ + { + Type: &postgres.PostgresCapability_Rpc{ + Rpc: &postgres.PostgresCapability_RPC{ + Type: postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION, + }, + }, + }, + }, + }, nil +} + +func (f *fakePostgresClient) EnrichConfiguration( + _ context.Context, + _ *postgres.EnrichConfigurationRequest, + _ ...grpc.CallOption, +) (*postgres.EnrichConfigurationResult, error) { + return f.enrichConfigResponse, f.enrichConfigError +} + +func (f *fakePostgresConnection) Name() string { + return f.name +} + +func (f *fakePostgresConnection) PostgresClient() postgres.PostgresClient { + return f.postgresClient +} + +func (f *fakePostgresConnection) PostgresCapabilities() []postgres.PostgresCapability_RPC_Type { + return f.capabilities +} + +var _ = Describe("EnrichConfiguration", func() { + var ( + d *data + cluster *fakeCluster + config map[string]string + ) + + BeforeEach(func() { + config = map[string]string{"key1": "value1"} + d = &data{plugins: []connection.Interface{}} + + cluster = &fakeCluster{} + }) + + It("should successfully enrich configuration", func(ctx SpecContext) { + postgresClient := &fakePostgresClient{ + enrichConfigResponse: &postgres.EnrichConfigurationResult{ + Configs: map[string]string{"key1": "value1", "key2": "value2"}, + }, + } + + plugin := &fakePostgresConnection{ + name: "test-plugin", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient, + } + + d.plugins = append(d.plugins, plugin) + + config, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(HaveKeyWithValue("key1", "value1")) + Expect(config).To(HaveKeyWithValue("key2", "value2")) + }) + + It("should return error when plugin returns error", func(ctx SpecContext) { + expectedErr := errors.New("plugin error") + + postgresClient := &fakePostgresClient{ + enrichConfigError: expectedErr, + } + + plugin := &fakePostgresConnection{ + name: "test-plugin", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient, + } + + d.plugins = append(d.plugins, plugin) + + _, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).To(HaveOccurred()) + Expect(err).To(Equal(expectedErr)) + }) + + It("should skip plugins without required capability", func(ctx SpecContext) { + plugin := &fakePostgresConnection{ + name: "test-plugin", + capabilities: []postgres.PostgresCapability_RPC_Type{}, + } + + d.plugins = append(d.plugins, plugin) + + origMap := cloneMap(config) + + config, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(BeEquivalentTo(origMap)) + }) + + It("should merge configurations from multiple 
plugins", func(ctx SpecContext) { + postgresClient1 := &fakePostgresClient{ + enrichConfigResponse: &postgres.EnrichConfigurationResult{ + Configs: map[string]string{"key2": "value2"}, + }, + } + + plugin1 := &fakePostgresConnection{ + name: "plugin1", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient1, + } + + postgresClient2 := &fakePostgresClient{ + enrichConfigResponse: &postgres.EnrichConfigurationResult{ + Configs: map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + }, + }, + } + + plugin2 := &fakePostgresConnection{ + name: "plugin2", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient2, + } + + d.plugins = append(d.plugins, plugin1, plugin2) + + config, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(HaveKeyWithValue("key1", "value1")) + Expect(config).To(HaveKeyWithValue("key2", "value2")) + Expect(config).To(HaveKeyWithValue("key3", "value3")) + }) + + It("should overwrite existing config key when plugin returns the same key", func(ctx SpecContext) { + postgresClient := &fakePostgresClient{ + enrichConfigResponse: &postgres.EnrichConfigurationResult{ + Configs: map[string]string{"key1": "overwritten-value"}, + }, + } + + plugin := &fakePostgresConnection{ + name: "test-plugin", + capabilities: []postgres.PostgresCapability_RPC_Type{postgres.PostgresCapability_RPC_TYPE_ENRICH_CONFIGURATION}, + postgresClient: postgresClient, + } + + d.plugins = append(d.plugins, plugin) + + config, err := d.EnrichConfiguration(ctx, cluster, config, postgres.OperationType_TYPE_UNSPECIFIED) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(HaveKeyWithValue("key1", "overwritten-value")) + Expect(config).To(HaveLen(1)) + }) +}) + +func cloneMap(original map[string]string) map[string]string { + clone := make(map[string]string, len(original)) + for k, v := range original { + clone[k] = v + } + return clone +} diff --git a/internal/cnpi/plugin/client/suite_test.go b/internal/cnpi/plugin/client/suite_test.go index 7be0300f18..6773c09b15 100644 --- a/internal/cnpi/plugin/client/suite_test.go +++ b/internal/cnpi/plugin/client/suite_test.go @@ -27,10 +27,12 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/identity" "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" "github.com/cloudnative-pg/cnpg-i/pkg/operator" + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/cnpg-i/pkg/reconciler" restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/cnpg-i/pkg/wal" "google.golang.org/grpc" + k8client "sigs.k8s.io/controller-runtime/pkg/client" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" @@ -107,6 +109,14 @@ type fakeConnection struct { operatorClient *fakeOperatorClient } +func (f *fakeConnection) PostgresClient() postgresClient.PostgresClient { + panic("implement me") +} + +func (f *fakeConnection) PostgresCapabilities() []postgresClient.PostgresCapability_RPC_Type { + panic("implement me") +} + func (f *fakeConnection) RestoreJobHooksClient() restore.RestoreJobHooksClient { panic("implement me") } @@ -188,3 +198,7 @@ func (f *fakeConnection) Ping(_ context.Context) error { func (f *fakeConnection) Close() error { panic("not implemented") // TODO: Implement } + +type fakeCluster struct { + 
k8client.Object +} diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go index fdfae176b5..44a096d409 100644 --- a/internal/cnpi/plugin/connection/connection.go +++ b/internal/cnpi/plugin/connection/connection.go @@ -30,6 +30,7 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/identity" "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" "github.com/cloudnative-pg/cnpg-i/pkg/operator" + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/cnpg-i/pkg/reconciler" restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/cnpg-i/pkg/wal" @@ -63,6 +64,7 @@ type Interface interface { BackupClient() backup.BackupClient ReconcilerHooksClient() reconciler.ReconcilerHooksClient RestoreJobHooksClient() restore.RestoreJobHooksClient + PostgresClient() postgresClient.PostgresClient PluginCapabilities() []identity.PluginCapability_Service_Type OperatorCapabilities() []operator.OperatorCapability_RPC_Type @@ -71,6 +73,7 @@ type Interface interface { BackupCapabilities() []backup.BackupCapability_RPC_Type ReconcilerCapabilities() []reconciler.ReconcilerHooksCapability_Kind RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind + PostgresCapabilities() []postgresClient.PostgresCapability_RPC_Type Ping(ctx context.Context) error Close() error @@ -85,6 +88,7 @@ type data struct { backupClient backup.BackupClient reconcilerHooksClient reconciler.ReconcilerHooksClient restoreJobHooksClient restore.RestoreJobHooksClient + postgresClient postgresClient.PostgresClient name string version string @@ -95,6 +99,7 @@ type data struct { backupCapabilities []backup.BackupCapability_RPC_Type reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind restoreJobHooksCapabilities []restore.RestoreJobHooksCapability_Kind + postgresCapabilities []postgresClient.PostgresCapability_RPC_Type } func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, error) { @@ -122,6 +127,7 @@ func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, backupClient: backup.NewBackupClient(connection), reconcilerHooksClient: reconciler.NewReconcilerHooksClient(connection), restoreJobHooksClient: restore.NewRestoreJobHooksClient(connection), + postgresClient: postgresClient.NewPostgresClient(connection), } return result, err @@ -264,6 +270,27 @@ func (pluginData *data) loadRestoreJobHooksCapabilities(ctx context.Context) err return nil } +func (pluginData *data) loadPostgresCapabilities(ctx context.Context) error { + var postgresCapabilitiesResponse *postgresClient.PostgresCapabilitiesResult + var err error + + if postgresCapabilitiesResponse, err = pluginData.postgresClient.GetCapabilities( + ctx, + &postgresClient.PostgresCapabilitiesRequest{}, + ); err != nil { + return fmt.Errorf("while querying plugin operator capabilities: %w", err) + } + + pluginData.postgresCapabilities = make( + []postgresClient.PostgresCapability_RPC_Type, + len(postgresCapabilitiesResponse.Capabilities)) + for i := range pluginData.postgresCapabilities { + pluginData.postgresCapabilities[i] = postgresCapabilitiesResponse.Capabilities[i].GetRpc().Type + } + + return nil +} + // Metadata extracts the plugin metadata reading from // the internal metadata func (pluginData *data) Metadata() Metadata { @@ -275,6 +302,7 @@ func (pluginData *data) Metadata() Metadata { WALCapabilities: make([]string, len(pluginData.walCapabilities)), BackupCapabilities: make([]string, 
len(pluginData.backupCapabilities)), RestoreJobHookCapabilities: make([]string, len(pluginData.restoreJobHooksCapabilities)), + PostgresCapabilities: make([]string, len(pluginData.postgresCapabilities)), } for i := range pluginData.capabilities { @@ -333,6 +361,10 @@ func (pluginData *data) ReconcilerHooksClient() reconciler.ReconcilerHooksClient return pluginData.reconcilerHooksClient } +func (pluginData *data) PostgresClient() postgresClient.PostgresClient { + return pluginData.postgresClient +} + func (pluginData *data) PluginCapabilities() []identity.PluginCapability_Service_Type { return pluginData.capabilities } @@ -361,6 +393,10 @@ func (pluginData *data) RestoreJobHooksCapabilities() []restore.RestoreJobHooksC return pluginData.restoreJobHooksCapabilities } +func (pluginData *data) PostgresCapabilities() []postgresClient.PostgresCapability_RPC_Type { + return pluginData.postgresCapabilities +} + func (pluginData *data) Ping(ctx context.Context) error { _, err := pluginData.identityClient.Probe(ctx, &identity.ProbeRequest{}) return err @@ -427,5 +463,13 @@ func LoadPlugin(ctx context.Context, handler Handler) (Interface, error) { } } + // If the plugin implements the postgres service, load its + // capabilities + if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_POSTGRES) { + if err = result.loadPostgresCapabilities(ctx); err != nil { + return nil, err + } + } + return &result, nil } diff --git a/internal/cnpi/plugin/connection/metadata.go b/internal/cnpi/plugin/connection/metadata.go index b8744a19e9..18643faebc 100644 --- a/internal/cnpi/plugin/connection/metadata.go +++ b/internal/cnpi/plugin/connection/metadata.go @@ -29,4 +29,5 @@ type Metadata struct { WALCapabilities []string BackupCapabilities []string RestoreJobHookCapabilities []string + PostgresCapabilities []string } diff --git a/internal/controller/cluster_upgrade_test.go b/internal/controller/cluster_upgrade_test.go index b9b97e792d..9b180df291 100644 --- a/internal/controller/cluster_upgrade_test.go +++ b/internal/controller/cluster_upgrade_test.go @@ -860,6 +860,7 @@ var _ = Describe("checkPodSpec with plugins", Ordered, func() { pluginCli := fakePluginClientRollout{ returnedPod: podModifiedByPlugins, } + ctx := pluginClient.SetPluginClientInContext(context.TODO(), pluginCli) rollout, err := checkPodSpecIsOutdated(ctx, pod, &cluster) diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index e659f1d778..085c0c3406 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -30,6 +30,7 @@ import ( "strconv" "time" + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" @@ -44,6 +45,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + cnpgiclient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" "github.com/cloudnative-pg/cloudnative-pg/internal/controller" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/roles" "github.com/cloudnative-pg/cloudnative-pg/internal/management/controller/slots/reconciler" @@ -116,9 +118,27 @@ func (r *InstanceReconciler) Reconcile( return reconcile.Result{}, fmt.Errorf("could not fetch Cluster: %w", err) } - // Print the Cluster 
contextLogger.Debug("Reconciling Cluster") + pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second) + defer cancelPluginLoading() + + pluginClient, err := cnpgiclient.WithPlugins( + pluginLoadingContext, + r.pluginRepository, + cluster.GetInstanceEnabledPluginNames()..., + ) + if err != nil { + contextLogger.Error(err, "Error loading plugins, retrying") + return ctrl.Result{}, err + } + defer func() { + pluginClient.Close(ctx) + }() + + ctx = cnpgiclient.SetPluginClientInContext(ctx, pluginClient) + ctx = cluster.SetInContext(ctx) + // Reconcile PostgreSQL instance parameters r.reconcileInstance(cluster) @@ -363,7 +383,12 @@ func (r *InstanceReconciler) refreshConfigurationFiles( // Reconcile PostgreSQL configuration // This doesn't need the PG connection, but it needs to reload it in case of changes - reloadConfig, err := r.instance.RefreshConfigurationFilesFromCluster(ctx, cluster, false) + reloadConfig, err := r.instance.RefreshConfigurationFilesFromCluster( + ctx, + cluster, + false, + postgresClient.OperationType_TYPE_RECONCILE, + ) if err != nil { return false, err } diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go index dba3414df8..76929fef62 100644 --- a/internal/management/controller/manager.go +++ b/internal/management/controller/manager.go @@ -31,6 +31,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/concurrency" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" @@ -52,6 +53,7 @@ type InstanceReconciler struct { metricsServerExporter *metricserver.Exporter certificateReconciler *instancecertificate.Reconciler + pluginRepository repository.Interface } // NewInstanceReconciler creates a new instance reconciler @@ -59,6 +61,7 @@ func NewInstanceReconciler( instance *postgres.Instance, client ctrl.Client, metricsExporter *metricserver.Exporter, + pluginRepository repository.Interface, ) *InstanceReconciler { return &InstanceReconciler{ instance: instance, @@ -68,6 +71,7 @@ func NewInstanceReconciler( systemInitialization: concurrency.NewExecuted(), metricsServerExporter: metricsExporter, certificateReconciler: instancecertificate.NewReconciler(client, instance), + pluginRepository: pluginRepository, } } diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index a4abbdffc9..da075978c2 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -28,6 +28,7 @@ import ( "sort" "strings" + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" @@ -36,6 +37,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants" postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres/plugin" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres/replication" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -68,13 +70,23 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster( ctx context.Context, cluster *apiv1.Cluster, preserveUserSettings bool, + 
operationType postgresClient.OperationType_Type, ) (bool, error) { pgMajor, err := postgresutils.GetMajorVersionFromPgData(instance.PgData) if err != nil { return false, err } - postgresConfiguration, sha256 := createPostgresqlConfiguration(cluster, preserveUserSettings, pgMajor) + postgresConfiguration, sha256, err := createPostgresqlConfiguration( + ctx, + cluster, + preserveUserSettings, + pgMajor, + operationType, + ) + if err != nil { + return false, fmt.Errorf("creating postgresql configuration: %w", err) + } postgresConfigurationChanged, err := InstallPgDataFileContent( ctx, instance.PgData, @@ -379,10 +391,12 @@ func (instance *Instance) migratePostgresAutoConfFile(ctx context.Context) (chan // createPostgresqlConfiguration creates the PostgreSQL configuration to be // used for this cluster and return it and its sha256 checksum func createPostgresqlConfiguration( + ctx context.Context, cluster *apiv1.Cluster, preserveUserSettings bool, majorVersion int, -) (string, string) { + operationType postgresClient.OperationType_Type, +) (string, string, error) { info := postgres.ConfigurationInfo{ Settings: postgres.CnpgConfigurationSettings, MajorVersion: majorVersion, @@ -417,7 +431,13 @@ func createPostgresqlConfiguration( info.RecoveryMinApplyDelay = cluster.Spec.ReplicaCluster.MinApplyDelay.Duration } - return postgres.CreatePostgresqlConfFile(postgres.CreatePostgresqlConfiguration(info)) + config, err := plugin.CreatePostgresqlConfigurationWithPlugins(ctx, info, operationType) + if err != nil { + return "", "", err + } + + file, sha := postgres.CreatePostgresqlConfFile(config) + return file, sha, nil } // configurePostgresForImport configures Postgres to be optimized for the first import diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go index 54bceec9da..38ffa9010a 100644 --- a/pkg/management/postgres/configuration_test.go +++ b/pkg/management/postgres/configuration_test.go @@ -24,6 +24,7 @@ import ( "strings" "time" + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/machinery/pkg/image/reference" "github.com/cloudnative-pg/machinery/pkg/postgres/version" corev1 "k8s.io/api/core/v1" @@ -122,8 +123,8 @@ var _ = Describe("testing the building of the ldap config string", func() { }) var _ = Describe("Test building of the list of temporary tablespaces", func() { - defaultVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) - Expect(err).ToNot(HaveOccurred()) + defaultVersion, defaultVersionErr := version.FromTag(reference.New(versions.DefaultImageName).Tag) + Expect(defaultVersionErr).ToNot(HaveOccurred()) defaultMajor := int(defaultVersion.Major()) clusterWithoutTablespaces := apiv1.Cluster{ @@ -175,18 +176,39 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() { }, } - It("doesn't set temp_tablespaces if there are no declared tablespaces", func() { - config, _ := createPostgresqlConfiguration(&clusterWithoutTablespaces, true, defaultMajor) + It("doesn't set temp_tablespaces if there are no declared tablespaces", func(ctx SpecContext) { + config, _, err := createPostgresqlConfiguration( + ctx, + &clusterWithoutTablespaces, + true, + defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).ToNot(ContainSubstring("temp_tablespaces")) }) - It("doesn't set temp_tablespaces if there are no temporary tablespaces", func() { - config, _ := createPostgresqlConfiguration(&clusterWithoutTemporaryTablespaces,
true, defaultMajor) + It("doesn't set temp_tablespaces if there are no temporary tablespaces", func(ctx SpecContext) { + config, _, err := createPostgresqlConfiguration( + ctx, + &clusterWithoutTemporaryTablespaces, + true, + defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).ToNot(ContainSubstring("temp_tablespaces")) }) - It("sets temp_tablespaces when there are temporary tablespaces", func() { - config, _ := createPostgresqlConfiguration(&clusterWithTemporaryTablespaces, true, defaultMajor) + It("sets temp_tablespaces when there are temporary tablespaces", func(ctx SpecContext) { + config, _, err := createPostgresqlConfiguration( + ctx, + &clusterWithTemporaryTablespaces, + true, + defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).To(ContainSubstring("temp_tablespaces = 'other_temporary_tablespace,temporary_tablespace'")) }) }) @@ -241,24 +263,45 @@ var _ = Describe("recovery_min_apply_delay", func() { }, } - It("do not set recovery_min_apply_delay in primary clusters", func() { + It("do not set recovery_min_apply_delay in primary clusters", func(ctx SpecContext) { Expect(primaryCluster.IsReplica()).To(BeFalse()) - config, _ := createPostgresqlConfiguration(&primaryCluster, true, defaultMajor) + config, _, err := createPostgresqlConfiguration( + ctx, + &primaryCluster, + true, + defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay")) }) - It("set recovery_min_apply_delay in replica clusters when set", func() { + It("set recovery_min_apply_delay in replica clusters when set", func(ctx SpecContext) { Expect(replicaCluster.IsReplica()).To(BeTrue()) - config, _ := createPostgresqlConfiguration(&replicaCluster, true, defaultMajor) + config, _, err := createPostgresqlConfiguration( + ctx, + &replicaCluster, + true, + defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).To(ContainSubstring("recovery_min_apply_delay = '3600s'")) }) - It("do not set recovery_min_apply_delay in replica clusters when not set", func() { + It("do not set recovery_min_apply_delay in replica clusters when not set", func(ctx SpecContext) { Expect(replicaClusterWithNoDelay.IsReplica()).To(BeTrue()) - config, _ := createPostgresqlConfiguration(&replicaClusterWithNoDelay, true, defaultMajor) + config, _, err := createPostgresqlConfiguration( + ctx, + &replicaClusterWithNoDelay, + true, + defaultMajor, + postgres.OperationType_TYPE_UNSPECIFIED, + ) + Expect(err).ToNot(HaveOccurred()) Expect(config).ToNot(ContainSubstring("recovery_min_apply_delay")) }) }) diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index a2c6ad2ed7..f3b7c6cd3c 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -33,14 +33,17 @@ import ( "sort" "time" + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/jackc/pgx/v5" ctrl "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" 
"github.com/cloudnative-pg/cloudnative-pg/pkg/configfile" "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external" @@ -447,6 +450,15 @@ func (info InitInfo) Bootstrap(ctx context.Context) error { return err } + enabledPluginNamesSet := stringset.From(cluster.GetJobEnabledPluginNames()) + pluginCli, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) + if err != nil { + return fmt.Errorf("error while creating the plugin client: %w", err) + } + defer pluginCli.Close(ctx) + ctx = pluginClient.SetPluginClientInContext(ctx, pluginCli) + ctx = cluster.SetInContext(ctx) + coredumpFilter := cluster.GetCoredumpFilter() if err := system.SetCoredumpFilter(coredumpFilter); err != nil { return err @@ -464,7 +476,12 @@ func (info InitInfo) Bootstrap(ctx context.Context) error { cluster.Spec.Bootstrap.InitDB != nil && cluster.Spec.Bootstrap.InitDB.Import != nil - if applied, err := instance.RefreshConfigurationFilesFromCluster(ctx, cluster, true); err != nil { + if applied, err := instance.RefreshConfigurationFilesFromCluster( + ctx, + cluster, + true, + postgres.OperationType_TYPE_INIT, + ); err != nil { return fmt.Errorf("while writing the config: %w", err) } else if !applied { return fmt.Errorf("could not apply the config") diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index 0b2041cf8d..cf053adbdc 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -40,6 +40,7 @@ import ( barmanCredentials "github.com/cloudnative-pg/barman-cloud/pkg/credentials" barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer" barmanUtils "github.com/cloudnative-pg/barman-cloud/pkg/utils" + "github.com/cloudnative-pg/cnpg-i/pkg/postgres" restore "github.com/cloudnative-pg/cnpg-i/pkg/restore/job" "github.com/cloudnative-pg/machinery/pkg/envmap" "github.com/cloudnative-pg/machinery/pkg/execlog" @@ -817,6 +818,15 @@ func (info InitInfo) WriteInitialPostgresqlConf(ctx context.Context, cluster *ap } }() + enabledPluginNamesSet := stringset.From(cluster.GetJobEnabledPluginNames()) + pluginCli, err := pluginClient.NewClient(ctx, enabledPluginNamesSet) + if err != nil { + return fmt.Errorf("error while creating the plugin client: %w", err) + } + defer pluginCli.Close(ctx) + ctx = pluginClient.SetPluginClientInContext(ctx, pluginCli) + ctx = cluster.SetInContext(ctx) + temporaryInitInfo := InitInfo{ PgData: tempDataDir, Temporary: true, @@ -838,7 +848,12 @@ func (info InitInfo) WriteInitialPostgresqlConf(ctx context.Context, cluster *ap if err != nil { return fmt.Errorf("while generating pg_ident.conf: %w", err) } - _, err = temporaryInstance.RefreshConfigurationFilesFromCluster(ctx, cluster, false) + _, err = temporaryInstance.RefreshConfigurationFilesFromCluster( + ctx, + cluster, + false, + postgres.OperationType_TYPE_RESTORE, + ) if err != nil { return fmt.Errorf("while generating Postgres configuration: %w", err) } diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 3c577d6a98..d591593db5 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -567,6 +567,11 @@ func (p *PgConfiguration) GetConfigurationParameters() map[string]string { return p.configs } +// SetConfigurationParameters sets the configuration parameters +func (p *PgConfiguration) SetConfigurationParameters(configs map[string]string) { + p.configs = configs +} + // OverwriteConfig overwrites a configuration in the map, given the key/value pair. 
// If the map is nil, it is created and the pair is added func (p *PgConfiguration) OverwriteConfig(key, value string) { diff --git a/pkg/postgres/plugin/config.go b/pkg/postgres/plugin/config.go new file mode 100644 index 0000000000..b63afdb37a --- /dev/null +++ b/pkg/postgres/plugin/config.go @@ -0,0 +1,72 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package plugin + +import ( + "context" + + postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" + "github.com/cloudnative-pg/machinery/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/client" + + cnpgiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" +) + +// CreatePostgresqlConfigurationWithPlugins creates a new PostgreSQL configuration and enriches it by invoking +// the registered Plugins +func CreatePostgresqlConfigurationWithPlugins( + ctx context.Context, + info postgres.ConfigurationInfo, + operationType postgresClient.OperationType_Type, +) (*postgres.PgConfiguration, error) { + contextLogger := log.FromContext(ctx).WithName("enrichConfigurationWithPlugins") + + pgConf := postgres.CreatePostgresqlConfiguration(info) + + cluster, ok := ctx.Value(contextutils.ContextKeyCluster).(client.Object) + if !ok || cluster == nil { + contextLogger.Trace("skipping CreatePostgresqlConfigurationWithPlugins, cannot find the cluster inside the context") + return pgConf, nil + } + + pluginClient := cnpgiClient.GetPluginClientFromContext(ctx) + if pluginClient == nil { + contextLogger.Trace( + "skipping CreatePostgresqlConfigurationWithPlugins, cannot find the plugin client inside the context") + return pgConf, nil + } + + conf, err := pluginClient.EnrichConfiguration( + ctx, + cluster, + pgConf.GetConfigurationParameters(), + operationType, + ) + if err != nil { + contextLogger.Error(err, "failed to enrich configuration with plugins") + return nil, err + } + + pgConf.SetConfigurationParameters(conf) + + return pgConf, nil +} diff --git a/pkg/postgres/plugin/doc.go b/pkg/postgres/plugin/doc.go new file mode 100644 index 0000000000..3d9f82c9ce --- /dev/null +++ b/pkg/postgres/plugin/doc.go @@ -0,0 +1,21 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package plugin contains the methods to interact with the plugins that have the Postgres capabilities +package plugin From 14324f815af387d17f6e92cdada66fd57ff5f0ca Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 14:43:30 +0200 Subject: [PATCH 644/836] chore(deps): update module sigs.k8s.io/controller-tools to v0.18.0 (main) (#7530) --- Makefile | 2 +- config/crd/bases/postgresql.cnpg.io_backups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusters.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_databases.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_poolers.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_publications.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_subscriptions.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 8d0716efd4..cb2c4a6652 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions # renovate: datasource=github-releases depName=kubernetes-sigs/kustomize versioning=loose KUSTOMIZE_VERSION ?= v5.6.0 # renovate: datasource=go depName=sigs.k8s.io/controller-tools -CONTROLLER_TOOLS_VERSION ?= v0.17.3 +CONTROLLER_TOOLS_VERSION ?= v0.18.0 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca # renovate: datasource=go depName=github.com/goreleaser/goreleaser GORELEASER_VERSION ?= v2.9.0 diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index c0923c1e5c..2a46956dd2 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: backups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml index 959811fa2e..c5358863e3 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: clusterimagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 2eb4f627cb..d2236b8354 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: clusters.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index bc3f23d8e1..50626148be 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ 
b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: databases.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml index 9c1b49f9ce..cd869be0b8 100644 --- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: imagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index 97abe1054e..054cfa85d4 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: poolers.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_publications.yaml b/config/crd/bases/postgresql.cnpg.io_publications.yaml index a6ae1ed765..a468639eca 100644 --- a/config/crd/bases/postgresql.cnpg.io_publications.yaml +++ b/config/crd/bases/postgresql.cnpg.io_publications.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: publications.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml index 92abc21fc9..051567e4bd 100644 --- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: scheduledbackups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml index 8213b82dd5..37fdac9121 100644 --- a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml +++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.3 + controller-gen.kubebuilder.io/version: v0.18.0 name: subscriptions.postgresql.cnpg.io spec: group: postgresql.cnpg.io From 31e1a08cc301324bc7edb684b4652511f6e0a701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Mon, 9 Jun 2025 14:47:55 +0200 Subject: [PATCH 645/836] chore: add MinIO to Renovate (#7754) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add MinIO images being used by the testing suite to Renovate. 
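To make the custom versioning concrete: a tag such as `RELEASE.2025-05-24T17-08-30Z` is decomposed by the regex below into `major=2025`, `minor=05`, and `patch=24`, so Renovate can compare MinIO's timestamped releases like ordinary versions.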
Closes #7748

Signed-off-by: Niccolò Fei 
---
 .github/renovate.json5     | 13 +++++++++++++
 tests/utils/minio/minio.go |  4 +++-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/.github/renovate.json5 b/.github/renovate.json5
index 5cfb345463..dc686a6ab5 100644
--- a/.github/renovate.json5
+++ b/.github/renovate.json5
@@ -157,6 +157,18 @@
       datasourceTemplate: 'docker',
       versioningTemplate: 'loose',
     },
+    {
+      customType: 'regex',
+      fileMatch: [
+        '^tests\\/utils\\/minio\\/minio\\.go$',
+      ],
+      matchStrings: [
+        'minioImage = "(?<depName>.+?):(?<currentValue>.*?)"',
+        'minioClientImage = "(?<depName>.+?):(?<currentValue>.*?)"',
+      ],
+      datasourceTemplate: 'docker',
+      versioningTemplate: "regex:^RELEASE\\.(?<major>\\d{4})-(?<minor>\\d{2})-(?<patch>\\d{2})T\\d{2}-\\d{2}-\\d{2}Z$"
+    },
   ],
   packageRules: [
     {
@@ -267,6 +279,7 @@
       pinDigests: false,
       matchPackageNames: [
         'vmware-tanzu{/,}**',
+        'minio{/,}**'
       ],
     },
     {
diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go
index 6b1c337d80..d81f2a8e0f 100644
--- a/tests/utils/minio/minio.go
+++ b/tests/utils/minio/minio.go
@@ -49,7 +49,9 @@ import (
 )

 const (
-	minioImage       = "minio/minio:RELEASE.2025-05-24T17-08-30Z"
+	// minioImage is the image used to run a MinIO server
+	minioImage       = "minio/minio:RELEASE.2025-05-24T17-08-30Z"
+	// minioClientImage is the image used to run a MinIO client
 	minioClientImage = "minio/mc:RELEASE.2025-05-21T01-59-54Z"
 )

From 9c2769815ff78cbfc28e2a2818f3a04add33477c Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 9 Jun 2025 15:12:53 +0200
Subject: [PATCH 646/836] chore(deps): update module
 github.com/goreleaser/goreleaser to v2.10.2 (main) (#7784)

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index cb2c4a6652..80c628be2a 100644
--- a/Makefile
+++ b/Makefile
@@ -57,7 +57,7 @@ KUSTOMIZE_VERSION ?= v5.6.0
 CONTROLLER_TOOLS_VERSION ?= v0.18.0
 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca
 # renovate: datasource=go depName=github.com/goreleaser/goreleaser
-GORELEASER_VERSION ?= v2.9.0
+GORELEASER_VERSION ?= v2.10.2
 # renovate: datasource=docker depName=jonasbn/github-action-spellcheck versioning=docker
 SPELLCHECK_VERSION ?= 0.49.0
 # renovate: datasource=docker depName=getwoke/woke versioning=docker

From d56ec45811c04d3f14abbff0ee889e1456b7336b Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini 
Date: Wed, 11 Jun 2025 09:41:14 +0200
Subject: [PATCH 647/836] docs: provide information about the LFX Mentorship
 Program (#7803)

Closes #7802

Signed-off-by: Gabriele Bartolini 
---
 contribute/lfx-mentorship-program.md | 69 ++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 contribute/lfx-mentorship-program.md

diff --git a/contribute/lfx-mentorship-program.md b/contribute/lfx-mentorship-program.md
new file mode 100644
index 0000000000..30c9d06edb
--- /dev/null
+++ b/contribute/lfx-mentorship-program.md
@@ -0,0 +1,69 @@
+# LFX Mentorship Program
+
+CloudNativePG, as a CNCF project, proudly supports the
+[LFX Mentorship Program](https://lfx.linuxfoundation.org/tools/mentorship/)
+by the Linux Foundation.
+
+This page lists the accepted CloudNativePG mentorship projects and provides
+resources for current mentees, prospective applicants, and contributors
+interested in getting involved.
+
+Each mentorship project spans **12 weeks** and is designed to be a **full-time
+learning opportunity**, requiring significant commitment and dedication from
+mentees.
+ +*Note:* we use the +["LFX Mentorship" label](https://github.com/cloudnative-pg/cloudnative-pg/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22LFX%20Mentorship%22) +to classify issues and pull requests. + +--- + +## Current Mentorship Programs + +If you’re interested in applying for a future term, we recommend preparing in +the areas outlined below. + +| Year | Term | Project | Mentee | +| ---- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------- | +| 2025 | 2 (Jun–Aug) | [Declarative Management of PostgreSQL FDWs](https://mentorship.lfx.linuxfoundation.org/project/53fa853e-b5fa-4d68-be71-f005c75aea89) | [Ying Zhu](https://github.com/EdwinaZhu) | + +--- + +## Past Mentorship Programs + +*To be updated when available.* + +--- + +## Recommended Preparation + +While each project has its own skill requirements, the program aims to deepen +mentees’ knowledge in the following areas: + +- **Go programming**, with a focus on operator development +- **Kubernetes** and **Custom Resource Definitions (CRDs)** +- **Git** and **GitHub workflows** +- **CloudNativePG architecture and usage** +- **PostgreSQL fundamentals** + +We encourage aspiring mentees to begin familiarising themselves with these +topics in preparation for upcoming application cycles. + +--- + +### Suggested Resources + +Below are some key resources to support your learning journey: + +- [Kubebuilder Book](https://book.kubebuilder.io/) +- [Programming Kubernetes](https://www.oreilly.com/library/view/programming-kubernetes/9781492047094/) + +--- + +## Getting Started as a Mentee + +To hit the ground running, make sure you: + +- Join the [CNCF Slack channel](../README.md#communications) for CloudNativePG +- [Set up your development environment](development_environment/README.md) +- [Run E2E tests locally](e2e_testing_environment/README.md) From 5adcf5fcfd985f6ee39eb99d39f4f018c61071fd Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 10:34:57 +0200 Subject: [PATCH 648/836] chore(deps): update all non-major github action (main) (#7795) This PR contains the following updates: | Package | Type | Update | Change | https://github.com/github/codeql-action `fca7ace` -> `ce28f5b` https://github.com/softprops/action-gh-release `da05d55` -> `72f2c25` --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/release-publish.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9998b84e35..de1bc628bd 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 6543023ecf..130ec813d0 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 1b3ebae855..1745a473a7 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index f3b7b37f1e..e8126a3c47 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -75,7 +75,7 @@ jobs: /src/docs/src/${{ env.FILE }} - name: Release - uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2 + uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2 with: body_path: release_notes.md draft: false diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 0e835f40f3..74a1a6d508 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 with: sarif_file: snyk-test.sarif From 9a519b6287e525b7346d5af92a78f2c6b6374d67 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 11:07:23 +0200 Subject: [PATCH 649/836] chore(deps): update dependency redhat-openshift-ecosystem/openshift-preflight to v1.13.2 (main) (#7815) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 80c628be2a..db09124d2d 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ 
OPERATOR_SDK_VERSION ?= v1.40.0 # renovate: datasource=github-tags depName=operator-framework/operator-registry OPM_VERSION ?= v1.55.0 # renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight -PREFLIGHT_VERSION ?= 1.13.1 +PREFLIGHT_VERSION ?= 1.13.2 OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 From d2b8d9f53d8cbeda245eaa5a652aabd7bbbb7994 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 16 Jun 2025 10:16:19 +0200 Subject: [PATCH 650/836] chore(deps): update dependency kubernetes-csi/external-resizer to v1.14.0 (main) (#7806) --- hack/setup-cluster.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 4c0a747cc8..4f8c86673b 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -31,7 +31,7 @@ KIND_NODE_DEFAULT_VERSION=v1.33.1 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.16.1 EXTERNAL_SNAPSHOTTER_VERSION=v8.2.1 EXTERNAL_PROVISIONER_VERSION=v5.3.0 -EXTERNAL_RESIZER_VERSION=v1.13.2 +EXTERNAL_RESIZER_VERSION=v1.14.0 EXTERNAL_ATTACHER_VERSION=v4.9.0 K8S_VERSION=${K8S_VERSION-} KUBECTL_VERSION=${KUBECTL_VERSION-} From f8cf2434f5939fed18ee42ec4e8f75aa9fe767e2 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 16 Jun 2025 15:40:52 +0200 Subject: [PATCH 651/836] chore(initdb) remove app-db-name and app-user defaults (#7811) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #7810 Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Signed-off-by: Niccolò Fei Co-authored-by: Armando Ruocco Co-authored-by: Niccolò Fei --- internal/cmd/manager/instance/initdb/cmd.go | 4 +- pkg/management/postgres/initdb.go | 57 +++++++++++---------- 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/internal/cmd/manager/instance/initdb/cmd.go b/internal/cmd/manager/instance/initdb/cmd.go index 36619bc9bd..9406d7b5d5 100644 --- a/internal/cmd/manager/instance/initdb/cmd.go +++ b/internal/cmd/manager/instance/initdb/cmd.go @@ -120,9 +120,9 @@ func NewCmd() *cobra.Command { }, } - cmd.Flags().StringVar(&appDBName, "app-db-name", "app", + cmd.Flags().StringVar(&appDBName, "app-db-name", "", "The name of the application containing the database") - cmd.Flags().StringVar(&appUser, "app-user", "app", + cmd.Flags().StringVar(&appUser, "app-user", "", "The name of the application user") cmd.Flags().StringVar(&clusterName, "cluster-name", os.Getenv("CLUSTER_NAME"), "The name of the "+ "current cluster in k8s, used to coordinate switchover and failover") diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index f3b7c6cd3c..e49cdb1737 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -303,23 +303,6 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error { return fmt.Errorf("while getting superuser database: %w", err) } - var existsRole bool - userRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1", - info.ApplicationUser) - err = userRow.Scan(&existsRole) - if err != nil { - return err - } - - if !existsRole { - _, err = dbSuperUser.Exec(fmt.Sprintf( - "CREATE ROLE %v LOGIN", - pgx.Identifier{info.ApplicationUser}.Sanitize())) - if err != nil { - return err - } - } - // Execute the custom set of init queries for the `postgres` database log.Info("Executing post-init SQL instructions") if err = info.executeQueries(dbSuperUser, info.PostInitSQL); err != nil { @@ -341,16 
+324,41 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error { if err = info.executeSQLRefs(dbTemplate, info.PostInitTemplateSQLRefsFolder); err != nil { return fmt.Errorf("could not execute post init application SQL refs: %w", err) } + + filePath := filepath.Join(info.PgData, constants.CheckEmptyWalArchiveFile) + // We create the check empty wal archive file to tell that we should check if the + // destination path it is empty + if err := fileutils.CreateEmptyFile(filepath.Clean(filePath)); err != nil { + return fmt.Errorf("could not create %v file: %w", filePath, err) + } + + if info.ApplicationUser == "" { + return nil + } + + var existsRole bool + userRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1", + info.ApplicationUser) + if err = userRow.Scan(&existsRole); err != nil { + return err + } + + if !existsRole { + if _, err = dbSuperUser.Exec(fmt.Sprintf( + "CREATE ROLE %v LOGIN", + pgx.Identifier{info.ApplicationUser}.Sanitize())); err != nil { + return err + } + } + if info.ApplicationDatabase == "" { return nil } var existsDB bool - dbRow := dbSuperUser.QueryRow( - "SELECT COUNT(*) > 0 FROM pg_catalog.pg_database WHERE datname = $1", + dbRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_database WHERE datname = $1", info.ApplicationDatabase) - err = dbRow.Scan(&existsDB) - if err != nil { + if err = dbRow.Scan(&existsDB); err != nil { return err } @@ -377,13 +385,6 @@ func (info InitInfo) ConfigureNewInstance(instance *Instance) error { return fmt.Errorf("could not execute post init application SQL refs: %w", err) } - filePath := filepath.Join(info.PgData, constants.CheckEmptyWalArchiveFile) - // We create the check empty wal archive file to tell that we should check if the - // destination path it is empty - if err := fileutils.CreateEmptyFile(filepath.Clean(filePath)); err != nil { - return fmt.Errorf("could not create %v file: %w", filePath, err) - } - return nil } From 149b32a4d42cae785c3c81b67d4d7fbbcd752ae9 Mon Sep 17 00:00:00 2001 From: Carlos Barria Date: Tue, 17 Jun 2025 17:32:58 -0400 Subject: [PATCH 652/836] docs: add Vera Rubin Observatory to ADOPTERS.md (#7837) Signed-off-by: Carlos Barria --- ADOPTERS.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 78ab5bfcc5..9ccc898c1d 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -64,4 +64,5 @@ This list is sorted in chronological order, based on the submission date. | [Bitnami](https://bitnami.com) | [@carrodher](https://github.com/carrodher) | 2025-03-04 | Bitnami provides CloudNativePG as part of its open-source [Helm charts catalog](https://github.com/bitnami/charts), enabling users to easily deploy PostgreSQL clusters on Kubernetes. Additionally, CloudNativePG is available through [Tanzu Application Catalog](https://www.vmware.com/products/app-platform/tanzu-application-catalog) and [Bitnami Premium](https://www.arrow.com/globalecs/na/vendors/bitnami-premium/), where customers can benefit from advanced security and compliance features such as VEX, SBOM, SLSA3, and CVE scanning. | | [Giant Swarm](https://www.giantswarm.io/) | [@stone-z](https://github.com/stone-z) | 2025-05-02 | Giant Swarm's full-service Kubernetes security and observability platforms are powered by PostgreSQL clusters delightfully managed with CloudNativePG. 
| | [DocumentDB Operator](https://github.com/microsoft/documentdb-kubernetes-operator) | [@xgerman](https://github.com/xgerman) | 2025-05-22 | The DocumentDB Kubernetes Operator is an open-source project to run and manage DocumentDB on Kubernetes. [DocumentDB](https://github.com/microsoft/documentdb) is the engine powering vCore-based [Azure Cosmos DB for MongoDB](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/). The operator uses CloudNativePG behind the scenes. | -| [Xata](https://xata.io) | [@tsg](https://github.com/tsg) | 2025-05-29 | Xata is a PostgreSQL platform offering instant database branching, separation of storage/compute, and PII anonymization. It uses CloudNativePG for the compute part. | \ No newline at end of file +| [Xata](https://xata.io) | [@tsg](https://github.com/tsg) | 2025-05-29 | Xata is a PostgreSQL platform offering instant database branching, separation of storage/compute, and PII anonymization. It uses CloudNativePG for the compute part. | +| [Vera Rubin Observatory](https://www.lsst.org) | [@cbarria](https://github.com/cbarria) | 2025-06-17 | At the heart of our operations, CloudNativePG supports the telescope's systems and plays a key role in making astronomical data openly accessible to the world. | From 55fd48dcbccf440d0fe70f3cf404d4ae4a10fe68 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 10:24:35 +0200 Subject: [PATCH 653/836] chore(deps): update all non-major github action (main) (#7828) This PR contains the following updates: | Package | Type | Update | Change | https://github.com/docker/setup-buildx-action `b5ca514` -> `18ce135` https://redirect.github.com/rojopolis/spellcheck-github-actions `0.49.0` -> `0.50.0` https://redirect.github.com/sigstore/cosign-installer `3454372` -> `fb28c2b` Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/continuous-delivery.yml | 6 +++--- .github/workflows/continuous-integration.yml | 8 ++++---- .github/workflows/release-publish.yml | 6 +++--- .github/workflows/spellcheck.yml | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 3aa5f21dea..0c1906a9ed 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -366,7 +366,7 @@ jobs: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 + uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 - name: Login into docker registry uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 @@ -392,7 +392,7 @@ jobs: - name: Install cosign if: env.SIGN_IMAGES == 'true' - uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3 + uses: sigstore/cosign-installer@fb28c2b6339dcd94da6e4cbcbc5e888961f6f8c3 # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. 
@@ -1944,7 +1944,7 @@ jobs: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 + uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 130ec813d0..f74c1f42a7 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -549,7 +549,7 @@ jobs: cache-image: false - name: Set up Docker Buildx - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 + uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 - name: Login into docker registry uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 @@ -629,7 +629,7 @@ jobs: if: | env.SIGN_IMAGES == 'true' && env.PUSH == 'true' - uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3 + uses: sigstore/cosign-installer@fb28c2b6339dcd94da6e4cbcbc5e888961f6f8c3 # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. @@ -677,7 +677,7 @@ jobs: check-latest: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 + uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 - name: Login into docker registry uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 @@ -787,7 +787,7 @@ jobs: cache-image: false - name: Set up Docker Buildx - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 + uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 - name: Login into docker registry uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index e8126a3c47..e484ea755c 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -188,7 +188,7 @@ jobs: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 + uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 - name: Login to ghcr.io uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 @@ -214,7 +214,7 @@ jobs: targets: "default" - name: Install cosign - uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3 + uses: sigstore/cosign-installer@fb28c2b6339dcd94da6e4cbcbc5e888961f6f8c3 # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. 
@@ -268,7 +268,7 @@ jobs: check-latest: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3 + uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 - name: Login to ghcr.io uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 599ada9355..0df6257e8b 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -30,4 +30,4 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@584b2ae95998967a53af7fbfb7f5b15352c38748 # 0.49.0 + uses: rojopolis/spellcheck-github-actions@63aba9473ee34d681dd48dee26b3d43ea0bbc462 # 0.50.0 From 07921c4834dbba68cb15e5bac122b3a624471dac Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 16:27:25 +0200 Subject: [PATCH 654/836] chore(deps): update dependency redhat-openshift-ecosystem/openshift-preflight to v1.13.3 (main) (#7839) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index db09124d2d..655a722747 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ OPERATOR_SDK_VERSION ?= v1.40.0 # renovate: datasource=github-tags depName=operator-framework/operator-registry OPM_VERSION ?= v1.55.0 # renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight -PREFLIGHT_VERSION ?= 1.13.2 +PREFLIGHT_VERSION ?= 1.13.3 OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 From 0da371279f2e2508f6c03ad19a135a752d697163 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 17:01:02 +0200 Subject: [PATCH 655/836] chore(deps): update docker/setup-buildx-action digest to e468171 (main) (#7841) --- .github/workflows/continuous-delivery.yml | 4 ++-- .github/workflows/continuous-integration.yml | 6 +++--- .github/workflows/release-publish.yml | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 0c1906a9ed..6d720a6514 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -366,7 +366,7 @@ jobs: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 @@ -1944,7 +1944,7 @@ jobs: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index f74c1f42a7..403ad0b44b 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -549,7 +549,7 @@ jobs: cache-image: false - name: Set up Docker Buildx - uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 + uses: 
docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 @@ -677,7 +677,7 @@ jobs: check-latest: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 @@ -787,7 +787,7 @@ jobs: cache-image: false - name: Set up Docker Buildx - uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index e484ea755c..aef4299ac2 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -188,7 +188,7 @@ jobs: platforms: ${{ env.PLATFORMS }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login to ghcr.io uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 @@ -268,7 +268,7 @@ jobs: check-latest: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@18ce135bb5112fa8ce4ed6c17ab05699d7f3a5e0 # v3 + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login to ghcr.io uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 From 391ca599050a14a2b0c03fb2e8f349e8ac00bb66 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 19 Jun 2025 10:34:49 +0200 Subject: [PATCH 656/836] chore(deps): update jonasbn/github-action-spellcheck docker tag to v0.50.0 (main) (#7832) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 655a722747..c0ebb71a84 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,7 @@ GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca # renovate: datasource=go depName=github.com/goreleaser/goreleaser GORELEASER_VERSION ?= v2.10.2 # renovate: datasource=docker depName=jonasbn/github-action-spellcheck versioning=docker -SPELLCHECK_VERSION ?= 0.49.0 +SPELLCHECK_VERSION ?= 0.50.0 # renovate: datasource=docker depName=getwoke/woke versioning=docker WOKE_VERSION ?= 0.19.0 # renovate: datasource=github-releases depName=operator-framework/operator-sdk versioning=loose From 9f9a5a270687216bf5a10f7a8b39201d05d9f04e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Jun 2025 14:42:40 +0200 Subject: [PATCH 657/836] chore(deps): update kubernetes csi (main) (#7846) This PR contains the following updates: | Package | Update | Change | https://github.com/kubernetes-csi/external-snapshotter `v8.2.1` -> `v8.3.0` https://github.com/rook/rook `v1.17.4` -> `v1.17.5` --- .github/workflows/continuous-delivery.yml | 4 ++-- hack/setup-cluster.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 6d720a6514..9a3f12b938 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ 
-42,8 +42,8 @@ env: # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" # renovate: datasource=github-releases depName=rook/rook versioning=loose - ROOK_VERSION: "v1.17.4" - EXTERNAL_SNAPSHOTTER_VERSION: "v8.2.1" + ROOK_VERSION: "v1.17.5" + EXTERNAL_SNAPSHOTTER_VERSION: "v8.3.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" BUILD_PUSH_CACHE_FROM: "" diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 4f8c86673b..6d8d008c30 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -29,7 +29,7 @@ fi # Defaults KIND_NODE_DEFAULT_VERSION=v1.33.1 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.16.1 -EXTERNAL_SNAPSHOTTER_VERSION=v8.2.1 +EXTERNAL_SNAPSHOTTER_VERSION=v8.3.0 EXTERNAL_PROVISIONER_VERSION=v5.3.0 EXTERNAL_RESIZER_VERSION=v1.14.0 EXTERNAL_ATTACHER_VERSION=v4.9.0 From b040f82520f96695d39dff22c4e7dd27307c8ed3 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Tue, 24 Jun 2025 09:27:07 +0200 Subject: [PATCH 658/836] fix(initdb): restore initialization order in initdb job (#7871) Restore the initialization order as it was before merging #7811 Closes #7870 --------- Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- pkg/management/postgres/initdb.go | 49 +++++++++-------- pkg/management/postgres/initdb_test.go | 74 ++++++++++++++++++++++++++ pkg/management/postgres/instance.go | 2 +- pkg/management/postgres/suite_test.go | 37 +++++++++++++ 4 files changed, 140 insertions(+), 22 deletions(-) diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index e49cdb1737..21d250fd68 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -53,6 +53,15 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/system" ) +type connectionProvider interface { + // GetSuperUserDB returns the superuser database connection + GetSuperUserDB() (*sql.DB, error) + // GetTemplateDB returns the template database connection + GetTemplateDB() (*sql.DB, error) + // ConnectionPool returns the connection pool for this instance + ConnectionPool() pool.Pooler +} + // InitInfo contains all the info needed to bootstrap a new PostgreSQL instance type InitInfo struct { // The data directory where to generate the new cluster @@ -295,7 +304,7 @@ func (info InitInfo) GetInstance() *Instance { // ConfigureNewInstance creates the expected users and databases in a new // PostgreSQL instance. 
If any error occurs, we return it
-func (info InitInfo) ConfigureNewInstance(instance *Instance) error {
+func (info InitInfo) ConfigureNewInstance(instance connectionProvider) error {
 	log.Info("Configuring new PostgreSQL instance")

 	dbSuperUser, err := instance.GetSuperUserDB()
@@ -303,6 +312,23 @@ func (info InitInfo) ConfigureNewInstance(instance connectionProvider) error {
 		return fmt.Errorf("while getting superuser database: %w", err)
 	}

+	if info.ApplicationUser != "" {
+		var existsRole bool
+		userRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1",
+			info.ApplicationUser)
+		if err = userRow.Scan(&existsRole); err != nil {
+			return err
+		}
+
+		if !existsRole {
+			if _, err = dbSuperUser.Exec(fmt.Sprintf(
+				"CREATE ROLE %v LOGIN",
+				pgx.Identifier{info.ApplicationUser}.Sanitize())); err != nil {
+				return err
+			}
+		}
+	}
+
 	// Execute the custom set of init queries for the `postgres` database
 	log.Info("Executing post-init SQL instructions")
 	if err = info.executeQueries(dbSuperUser, info.PostInitSQL); err != nil {
@@ -332,26 +358,7 @@ func (info InitInfo) ConfigureNewInstance(instance connectionProvider) error {
 		return fmt.Errorf("could not create %v file: %w", filePath, err)
 	}

-	if info.ApplicationUser == "" {
-		return nil
-	}
-
-	var existsRole bool
-	userRow := dbSuperUser.QueryRow("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1",
-		info.ApplicationUser)
-	if err = userRow.Scan(&existsRole); err != nil {
-		return err
-	}
-
-	if !existsRole {
-		if _, err = dbSuperUser.Exec(fmt.Sprintf(
-			"CREATE ROLE %v LOGIN",
-			pgx.Identifier{info.ApplicationUser}.Sanitize())); err != nil {
-			return err
-		}
-	}
-
-	if info.ApplicationDatabase == "" {
+	if info.ApplicationUser == "" || info.ApplicationDatabase == "" {
 		return nil
 	}
diff --git a/pkg/management/postgres/initdb_test.go b/pkg/management/postgres/initdb_test.go
index d6a6d5d906..bc81cfd3c0 100644
--- a/pkg/management/postgres/initdb_test.go
+++ b/pkg/management/postgres/initdb_test.go
@@ -21,7 +21,11 @@ package postgres

 import (
 	"os"
+	"path"
 	"path/filepath"
+	"regexp"
+
+	"github.com/DATA-DOG/go-sqlmock"

 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -125,3 +129,73 @@ var _ = Describe("renameExistingTargetDataDirectories", func() {
 		Expect(filelist).To(BeEmpty())
 	})
 })
+
+var _ = Describe("ConfigureNewInstance role creation", func() {
+	var (
+		info          InitInfo
+		mi            *mockInstance
+		mockSuperUser sqlmock.Sqlmock
+		testDir       string
+	)
+
+	BeforeEach(func() {
+		var err error
+
+		testDir = path.Join(GinkgoT().TempDir(), "initdb_test")
+
+		Expect(os.MkdirAll(testDir, 0o700)).To(Succeed())
+
+		mi = &mockInstance{}
+		mi.superUserDB, mockSuperUser, err = sqlmock.New()
+		Expect(err).NotTo(HaveOccurred())
+
+		mi.appDB, _, err = sqlmock.New()
+		Expect(err).NotTo(HaveOccurred())
+
+		info = InitInfo{
+			ApplicationUser: "app_user",
+			PostInitSQL:     []string{"CREATE ROLE post_init_role LOGIN"},
+			PgData:          testDir,
+		}
+	})
+
+	AfterEach(func() {
+		Expect(mockSuperUser.ExpectationsWereMet()).NotTo(HaveOccurred())
+	})
+
+	It("ensures that we create the application user before PostInitSQL", func() {
+		// Expect check if application role exists
+		mockSuperUser.ExpectQuery(regexp.QuoteMeta("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1")).
+			WithArgs("app_user").
+			WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(false))
+
+		mockSuperUser.ExpectExec(`CREATE ROLE \"app_user\" LOGIN`).
+ WillReturnResult(sqlmock.NewResult(1, 1)) + + err := info.ConfigureNewInstance(mi) + + Expect(err).NotTo(HaveOccurred()) + }) + + It("ensures that we do not create the application user if already exists", func() { + // Expect check if application role exists - return true this time + mockSuperUser.ExpectQuery(regexp.QuoteMeta("SELECT COUNT(*) > 0 FROM pg_catalog.pg_roles WHERE rolname = $1")). + WithArgs("app_user"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + + // No direct role creation expected + + mockSuperUser.ExpectExec("CREATE ROLE post_init_role LOGIN"). + WillReturnResult(sqlmock.NewResult(1, 1)) + + // Execute function under test + err := info.ConfigureNewInstance(mi) + + // Verify results + Expect(err).NotTo(HaveOccurred()) + Expect(mockSuperUser.ExpectationsWereMet()).NotTo(HaveOccurred()) + }) +}) diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 6aa7f4b9ff..bada828ec4 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -786,7 +786,7 @@ func (instance *Instance) GetPgVersion() (semver.Version, error) { } // ConnectionPool gets or initializes the connection pool for this instance -func (instance *Instance) ConnectionPool() *pool.ConnectionPool { +func (instance *Instance) ConnectionPool() pool.Pooler { const applicationName = "cnpg-instance-manager" if instance.pool == nil { socketDir := GetSocketDir() diff --git a/pkg/management/postgres/suite_test.go b/pkg/management/postgres/suite_test.go index 1d88c1b84f..b602dcf53f 100644 --- a/pkg/management/postgres/suite_test.go +++ b/pkg/management/postgres/suite_test.go @@ -20,8 +20,11 @@ SPDX-License-Identifier: Apache-2.0 package postgres import ( + "database/sql" "testing" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/pool" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -30,3 +33,37 @@ func TestPostgres(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "PostgreSQL instance manager test suite") } + +// mockInstance implements the minimum required functionality to test ConfigureNewInstance +type mockInstance struct { + superUserDB *sql.DB + templateDB *sql.DB + appDB *sql.DB +} + +type fakePooler struct { + db *sql.DB +} + +func (f fakePooler) Connection(_ string) (*sql.DB, error) { + return f.db, nil +} + +func (f fakePooler) GetDsn(dbName string) string { + return dbName +} + +func (f fakePooler) ShutdownConnections() { +} + +func (m *mockInstance) GetSuperUserDB() (*sql.DB, error) { + return m.superUserDB, nil +} + +func (m *mockInstance) GetTemplateDB() (*sql.DB, error) { + return m.templateDB, nil +} + +func (m *mockInstance) ConnectionPool() pool.Pooler { + return &fakePooler{db: m.appDB} +} From 7f8ddf4e09cb8b88fd460ee2409d94bf160f2591 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Thu, 26 Jun 2025 12:50:07 +0200 Subject: [PATCH 659/836] chore(typo): fix spelling of ParameterRecoveryMinApplyDelay (#7897) Signed-off-by: Marco Nenciarini --- pkg/postgres/configuration.go | 10 +++++----- pkg/postgres/configuration_test.go | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index d591593db5..66a0c10924 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -46,8 +46,8 @@ const ( // ParameterWalLogHints the configuration key containing the wal_log_hints value ParameterWalLogHints = "wal_log_hints" - // ParameterRecoveyMinApplyDelay is the configuration key containing the recovery_min_apply_delay parameter - ParameterRecoveyMinApplyDelay = "recovery_min_apply_delay" + // ParameterRecoveryMinApplyDelay is the configuration key containing the recovery_min_apply_delay parameter + ParameterRecoveryMinApplyDelay = "recovery_min_apply_delay" ) // An acceptable wal_level value @@ -354,8 +354,8 @@ type ManagedExtension struct { SkipCreateExtension bool } -// IsUsed checks whether a configuration namespace in the namespaces list -// is used in the user provided configuration +// IsUsed checks whether a configuration namespace in the extension namespaces list +// is used in the user-provided configuration func (e ManagedExtension) IsUsed(userConfigs map[string]string) bool { for k := range userConfigs { for _, namespace := range e.Namespaces { @@ -681,7 +681,7 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { // primary and on the replicas, setting it on both // is a safe approach. 
configuration.OverwriteConfig( - ParameterRecoveyMinApplyDelay, + ParameterRecoveryMinApplyDelay, fmt.Sprintf("%vs", math.Floor(info.RecoveryMinApplyDelay.Seconds()))) } diff --git a/pkg/postgres/configuration_test.go b/pkg/postgres/configuration_test.go index d27c8becc1..415c896de7 100644 --- a/pkg/postgres/configuration_test.go +++ b/pkg/postgres/configuration_test.go @@ -375,7 +375,7 @@ var _ = Describe("recovery_min_apply_delay", func() { RecoveryMinApplyDelay: 0, } config := CreatePostgresqlConfiguration(info) - Expect(config.GetConfig(ParameterRecoveyMinApplyDelay)).To(BeEmpty()) + Expect(config.GetConfig(ParameterRecoveryMinApplyDelay)).To(BeEmpty()) }) It("is added to the configuration when specified", func() { @@ -388,6 +388,6 @@ var _ = Describe("recovery_min_apply_delay", func() { RecoveryMinApplyDelay: 1 * time.Hour, } config := CreatePostgresqlConfiguration(info) - Expect(config.GetConfig(ParameterRecoveyMinApplyDelay)).To(Equal("3600s")) + Expect(config.GetConfig(ParameterRecoveryMinApplyDelay)).To(Equal("3600s")) }) }) From b05fee050ad4e32b0af8eb661545fd68a5457ec6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 10:27:04 +0200 Subject: [PATCH 660/836] chore(deps): update jonasbn/github-action-spellcheck docker tag to v0.51.0 (main) (#7874) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c0ebb71a84..f497c2118f 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,7 @@ GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca # renovate: datasource=go depName=github.com/goreleaser/goreleaser GORELEASER_VERSION ?= v2.10.2 # renovate: datasource=docker depName=jonasbn/github-action-spellcheck versioning=docker -SPELLCHECK_VERSION ?= 0.50.0 +SPELLCHECK_VERSION ?= 0.51.0 # renovate: datasource=docker depName=getwoke/woke versioning=docker WOKE_VERSION ?= 0.19.0 # renovate: datasource=github-releases depName=operator-framework/operator-sdk versioning=loose From 3a342b4d325e1f996786c151fac9a27e38f10fb5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 10:50:56 +0200 Subject: [PATCH 661/836] chore(deps): update all non-major github action (main) (#7873) This PR contains the following updates: | Package | Type | Update | Change | https://github.com/azure/setup-kubectl `3e0aec4` -> `776406b` https://github.com/rojopolis/spellcheck-github-actions `0.50.0` -> `0.51.0` https://github.com/sigstore/cosign-installer `fb28c2b` -> `398d4b0` --- .github/workflows/continuous-delivery.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/release-publish.yml | 2 +- .github/workflows/spellcheck.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 9a3f12b938..decff8ded9 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -392,7 +392,7 @@ jobs: - name: Install cosign if: env.SIGN_IMAGES == 'true' - uses: sigstore/cosign-installer@fb28c2b6339dcd94da6e4cbcbc5e888961f6f8c3 # v3 + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. 
@@ -921,7 +921,7 @@ jobs: creds: ${{ secrets.AZURE_CREDENTIALS }} - name: Install kubectl - uses: azure/setup-kubectl@3e0aec4d80787158d308d7b364cb1b702e7feb7f # v4 + uses: azure/setup-kubectl@776406bce94f63e41d621b960d78ee25c8b76ede # v4 with: version: v${{ env.K8S_VERSION }} - diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 403ad0b44b..acd691385e 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -629,7 +629,7 @@ jobs: if: | env.SIGN_IMAGES == 'true' && env.PUSH == 'true' - uses: sigstore/cosign-installer@fb28c2b6339dcd94da6e4cbcbc5e888961f6f8c3 # v3 + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index aef4299ac2..50fb67b8f4 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -214,7 +214,7 @@ jobs: targets: "default" - name: Install cosign - uses: sigstore/cosign-installer@fb28c2b6339dcd94da6e4cbcbc5e888961f6f8c3 # v3 + uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3 # See https://github.blog/security/supply-chain-security/safeguard-container-signing-capability-actions/ # and https://github.com/actions/starter-workflows/blob/main/ci/docker-publish.yml for more details on # how to use cosign. diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 0df6257e8b..80865b85ef 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -30,4 +30,4 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@63aba9473ee34d681dd48dee26b3d43ea0bbc462 # 0.50.0 + uses: rojopolis/spellcheck-github-actions@35a02bae020e6999c5c37fabaf447f2eb8822ca7 # 0.51.0 From 1689c5e6134a29cf0ca8e75405622bee9b15c1eb Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 11:15:19 +0200 Subject: [PATCH 662/836] chore(deps): update dependency redhat-openshift-ecosystem/openshift-preflight to v1.14.0 (main) (#7913) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f497c2118f..706957830d 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ OPERATOR_SDK_VERSION ?= v1.40.0 # renovate: datasource=github-tags depName=operator-framework/operator-registry OPM_VERSION ?= v1.55.0 # renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight -PREFLIGHT_VERSION ?= 1.13.3 +PREFLIGHT_VERSION ?= 1.14.0 OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 From 7564f538a49ec8c741623b76906016e15457ffee Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 14:36:35 +0200 Subject: [PATCH 663/836] chore(deps): update minio/minio docker tag to release.2025-06-13t11-33-47z (main) (#7917) --- tests/utils/minio/minio.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go index d81f2a8e0f..edcb997c9a 100644 --- a/tests/utils/minio/minio.go +++ b/tests/utils/minio/minio.go @@ -50,7 +50,7 @@ 
import ( const ( // minioImage is the image used to run a MinIO server - minioImage = "minio/minio:RELEASE.2025-05-24T17-08-30Z" + minioImage = "minio/minio:RELEASE.2025-06-13T11-33-47Z" // minioClientImage is the image used to run a MinIO client minioClientImage = "minio/mc:RELEASE.2025-05-21T01-59-54Z" ) From 31cefed5bdd9d5d260b361a11bdb159ce182bf49 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 17:28:32 +0200 Subject: [PATCH 664/836] chore(deps): update dependency kubernetes-csi/csi-driver-host-path to v1.17.0 (main) (#7909) --- hack/setup-cluster.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 6d8d008c30..254baeaca8 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -28,7 +28,7 @@ fi # Defaults KIND_NODE_DEFAULT_VERSION=v1.33.1 -CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.16.1 +CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.17.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.3.0 EXTERNAL_PROVISIONER_VERSION=v5.3.0 EXTERNAL_RESIZER_VERSION=v1.14.0 From 9aff1c8911cfae1f1383efb268c6db92f1c830d3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 18:46:02 +0200 Subject: [PATCH 665/836] chore(deps): update github/codeql-action digest to 39edc49 (main) (#7924) --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index de1bc628bd..b3c2e8f34f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index acd691385e..1ad79ebb25 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 1745a473a7..2f9ce05f03 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 74a1a6d508..1003979a22 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 with: sarif_file: snyk-test.sarif From 86d70684723c7ff37280128f1e357c687fb9cb00 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 1 Jul 2025 12:09:09 +0200 Subject: [PATCH 666/836] docs: explain `wal_level` in Postgres configuration page (#7893) Closes #7890 Signed-off-by: Gabriele Bartolini Signed-off-by: Jonathan Battiato Signed-off-by: Marco Nenciarini Co-authored-by: Jonathan Battiato Co-authored-by: Marco Nenciarini --- docs/src/postgresql_conf.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index d547767aa3..614ccd3a7a 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -130,6 +130,30 @@ Since the fixed parameters are added at the end, they can't be overridden by the user via the YAML configuration. Those parameters are required for correct WAL archiving and replication. +### Write-Ahead Log Level + +The [`wal_level`](https://www.postgresql.org/docs/current/runtime-config-wal.html) +parameter in PostgreSQL determines the amount of information written to the +Write-Ahead Log (WAL). It accepts the following values: + +- `minimal`: Writes only the information required for crash recovery. +- `replica`: Adds sufficient information to support WAL archiving and streaming + replication, including the ability to run read-only queries on standby + instances. +- `logical`: Includes all information from `replica`, plus additional information + required for logical decoding and replication. + +By default, upstream PostgreSQL sets `wal_level` to `replica`. CloudNativePG, +instead, sets `wal_level` to `logical` by default to enable logical replication +out of the box. This makes it easier to support use cases such as migrations +from external PostgreSQL servers. + +If your cluster does not require logical replication, it is recommended to set +`wal_level` to `replica` to reduce WAL volume and overhead. + +Finally, CloudNativePG allows `wal_level` to be set to `minimal` only for +single-instance clusters with WAL archiving disabled. 
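As an illustration of this default and its override, here is a minimal sketch using the operator's Go API (a sketch assuming the `apiv1` Cluster types from this repository; the cluster name is a placeholder). In manifest form it corresponds to setting `wal_level: replica` under `spec.postgresql.parameters`:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

func main() {
	// Override the operator default (wal_level=logical) for a cluster
	// that does not need logical decoding or replication.
	cluster := apiv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster-example"}, // placeholder name
		Spec: apiv1.ClusterSpec{
			Instances: 3,
			PostgresConfiguration: apiv1.PostgresConfiguration{
				Parameters: map[string]string{
					"wal_level": "replica",
				},
			},
		},
	}
	fmt.Println(cluster.Spec.PostgresConfiguration.Parameters["wal_level"])
}
```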
+ ### Replication Settings The `primary_conninfo`, `restore_command`, and `recovery_target_timeline` From 41cbd96bf09676f0f9ac545d55a8f12db5339dc6 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 1 Jul 2025 12:27:35 +0200 Subject: [PATCH 667/836] fix(probes): avoid liveness probe to timeout when not needed (#7902) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the API server is not reachable, the default liveness probe configuration may trigger an automatic shutdown of the PG instance, even when the isolation checker is not active. This happens because a call to the API server may time out after the time out of the Kubelet. This fix includes a safe limit, chosen after the default timeout of Pods' probes and is safe to use with the default CNPG configuration. Closes: #7901 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Niccolò Fei Co-authored-by: Armando Ruocco Co-authored-by: Niccolò Fei --- .../postgres/webserver/probes/liveness.go | 17 +++-- ...ter-liveness-pinger-disabled.yaml.template | 27 ++++++++ ...ter-liveness-pinger-enabled.yaml.template} | 2 +- tests/e2e/self_fencing_test.go | 68 +++++++++++++------ 4 files changed, 89 insertions(+), 25 deletions(-) create mode 100644 tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-disabled.yaml.template rename tests/e2e/fixtures/self-fencing/{cluster-self-fencing.yaml.template => cluster-liveness-pinger-enabled.yaml.template} (93%) diff --git a/pkg/management/postgres/webserver/probes/liveness.go b/pkg/management/postgres/webserver/probes/liveness.go index 0449b76752..bbb07601a6 100644 --- a/pkg/management/postgres/webserver/probes/liveness.go +++ b/pkg/management/postgres/webserver/probes/liveness.go @@ -23,6 +23,7 @@ import ( "context" "fmt" "net/http" + "time" "github.com/cloudnative-pg/machinery/pkg/log" "sigs.k8s.io/controller-runtime/pkg/client" @@ -49,12 +50,15 @@ func NewLivenessChecker( } } -// tryRefreshLatestCluster refreshes the latest cluster definition, returns a bool indicating if the operation was -// successful -func (e *livenessExecutor) tryRefreshLatestCluster(ctx context.Context) bool { +// tryRefreshLatestClusterWithTimeout refreshes the latest cluster definition, returns a bool indicating if the +// operation was successful +func (e *livenessExecutor) tryRefreshLatestClusterWithTimeout(ctx context.Context, timeout time.Duration) bool { + timeoutContext, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + var cluster apiv1.Cluster err := e.cli.Get( - ctx, + timeoutContext, client.ObjectKey{Namespace: e.instance.GetNamespaceName(), Name: e.instance.GetClusterName()}, &cluster, ) @@ -87,7 +91,10 @@ func (e *livenessExecutor) IsHealthy( return } - if clusterRefreshed := e.tryRefreshLatestCluster(ctx); clusterRefreshed { + // We set a safe context timeout of 500ms to avoid a failed request from taking + // more time than the minimum configurable timeout (1s) of the container's livenessProbe, + // which otherwise could have triggered a restart of the instance. + if clusterRefreshed := e.tryRefreshLatestClusterWithTimeout(ctx, 500*time.Millisecond); clusterRefreshed { // We correctly reached the API server but, as a failsafe measure, we // exercise the reachability checker and leave a log message if something // is not right. 
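As a self-contained aside, the following sketch shows the general pattern this patch applies: bound every API-server read issued from a probe with a context deadline shorter than the kubelet's minimum probe timeout of 1s, so a hung request can never make the instance look unhealthy. The helper name is illustrative and not part of the patch:

```go
package probes

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getWithDeadline mirrors the fix above: it caps a client-go read at
// 500ms, well below the kubelet's 1s minimum probe timeout, so the
// liveness endpoint always answers before the kubelet gives up.
func getWithDeadline(ctx context.Context, cli client.Client, key client.ObjectKey, obj client.Object) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
	defer cancel()
	return cli.Get(timeoutCtx, key, obj)
}
```

Keeping the deadline well under the probe timeout means a failed API-server round trip degrades into a logged warning rather than an instance restart.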
diff --git a/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-disabled.yaml.template b/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-disabled.yaml.template new file mode 100644 index 0000000000..daa6861370 --- /dev/null +++ b/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-disabled.yaml.template @@ -0,0 +1,27 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postgresql-liveness-pinger-disabled + annotations: + alpha.cnpg.io/livenessPinger: '{"enabled": false}' +spec: + instances: 3 + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + + # Persistent storage configuration + storage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/self-fencing/cluster-self-fencing.yaml.template b/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-enabled.yaml.template similarity index 93% rename from tests/e2e/fixtures/self-fencing/cluster-self-fencing.yaml.template rename to tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-enabled.yaml.template index 5235464f2a..37654455a5 100644 --- a/tests/e2e/fixtures/self-fencing/cluster-self-fencing.yaml.template +++ b/tests/e2e/fixtures/self-fencing/cluster-liveness-pinger-enabled.yaml.template @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Cluster metadata: - name: postgresql-self-fencing + name: postgresql-liveness-pinger-enabled annotations: alpha.cnpg.io/livenessPinger: '{"enabled": true}' spec: diff --git a/tests/e2e/self_fencing_test.go b/tests/e2e/self_fencing_test.go index 17d072d914..0e767ed669 100644 --- a/tests/e2e/self_fencing_test.go +++ b/tests/e2e/self_fencing_test.go @@ -44,7 +44,6 @@ import ( var _ = Describe("Self-fencing with liveness probe", Serial, Label(tests.LabelDisruptive), func() { const ( level = tests.Lowest - sampleFile = fixturesDir + "/self-fencing/cluster-self-fencing.yaml.template" namespacePrefix = "self-fencing" ) @@ -57,7 +56,38 @@ var _ = Describe("Self-fencing with liveness probe", Serial, Label(tests.LabelDi } }) - It("will terminate an isolated primary", func() { + verifyIsolatedPrimary := func(namespace, isolatedPod, isolatedNode string, livenessPingerEnabled bool) { + By("verifying the isolatedPod behaviour", func() { + defaultCommand := fmt.Sprintf( + "docker exec %v crictl ps -a -q "+ + "--label io.kubernetes.pod.namespace=%s,io.kubernetes.pod.name=%s "+ + "--name postgres", isolatedNode, namespace, isolatedPod) + + if livenessPingerEnabled { + Eventually(func(g Gomega) { + out, _, err := run.Unchecked(fmt.Sprintf("%s -s Exited", defaultCommand)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(out).ToNot(BeEmpty()) + if out != "" { + GinkgoWriter.Printf("Container %s (%s) has been terminated\n", + isolatedPod, strings.TrimSpace(out)) + } + }, 120).Should(Succeed()) + } else { + Consistently(func(g Gomega) { + out, _, err := run.Unchecked(fmt.Sprintf("%s -s Running", defaultCommand)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(out).ToNot(BeEmpty()) + if out != "" { + GinkgoWriter.Printf("Container %s (%s) is still running\n", + isolatedPod, strings.TrimSpace(out)) + } + }, 20, 5).Should(Succeed()) + } + }) + } + + assertLivenessPinger := func(clusterManifest string, livenessPingerEnabled bool) { var namespace, clusterName, isolatedNode string 
var err error var oldPrimaryPod *corev1.Pod @@ -70,11 +100,11 @@ var _ = Describe("Self-fencing with liveness probe", Serial, Label(tests.LabelDi }) By("creating a Cluster", func() { - clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, sampleFile) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) Expect(err).ToNot(HaveOccurred()) namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) - AssertCreateCluster(namespace, clusterName, sampleFile, env) + AssertCreateCluster(namespace, clusterName, clusterManifest, env) }) By("setting up the environment", func() { @@ -107,21 +137,7 @@ var _ = Describe("Self-fencing with liveness probe", Serial, Label(tests.LabelDi }, testTimeouts[timeouts.NewPrimaryAfterFailover]).Should(Succeed()) }) - By("verifying that oldPrimary will self isolate", func() { - // Assert that the oldPrimary is eventually terminated - Eventually(func(g Gomega) { - out, _, err := run.Unchecked(fmt.Sprintf( - "docker exec %v crictl ps -a "+ - "--label io.kubernetes.pod.namespace=%s,io.kubernetes.pod.name=%s "+ - "--name postgres -s Exited -q", isolatedNode, namespace, oldPrimaryPod.Name)) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(out).ToNot(BeEmpty()) - if out != "" { - GinkgoWriter.Printf("Container %s (%s) has been terminated\n", - oldPrimaryPod.Name, strings.TrimSpace(out)) - } - }, 120).Should(Succeed()) - }) + verifyIsolatedPrimary(namespace, oldPrimaryPod.Name, isolatedNode, livenessPingerEnabled) By("reconnecting the isolated Node", func() { _, _, err = run.Unchecked(fmt.Sprintf("docker network connect kind %v", isolatedNode)) @@ -143,5 +159,19 @@ var _ = Describe("Self-fencing with liveness probe", Serial, Label(tests.LabelDi g.Expect(nodes.IsNodeReachable(env.Ctx, env.Client, isolatedNode)).To(BeTrue()) }, timeout).Should(Succeed()) }) + } + + When("livenessPinger is enabled", func() { + const sampleFile = fixturesDir + "/self-fencing/cluster-liveness-pinger-enabled.yaml.template" + It("will terminate an isolated primary", func() { + assertLivenessPinger(sampleFile, true) + }) + }) + + When("livenessPinger is disabled", func() { + const sampleFile = fixturesDir + "/self-fencing/cluster-liveness-pinger-disabled.yaml.template" + It("will not restart an isolated primary", func() { + assertLivenessPinger(sampleFile, false) + }) }) }) From c425fc272e5ee958532685d45bc9d3195fee4408 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 1 Jul 2025 17:11:32 +0200 Subject: [PATCH 668/836] fix(deps): update module sigs.k8s.io/yaml to v1.5.0 (main) (#7918) --- go.mod | 4 +++- go.sum | 7 ++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 38425da440..8a999a7769 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( k8s.io/client-go v0.33.1 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.21.0 - sigs.k8s.io/yaml v1.4.0 + sigs.k8s.io/yaml v1.5.0 ) require ( @@ -96,6 +96,8 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.3 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect diff --git a/go.sum b/go.sum index 5fd2562a16..4b3d824ab5 100644 --- a/go.sum +++ b/go.sum @@ -218,6 +218,10 @@ go.uber.org/multierr v1.11.0 
h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -311,5 +315,6 @@ sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ= +sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4= From 72f666a3e06f65194c1909df07a7e1a57ffefb9b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 2 Jul 2025 11:03:18 +0200 Subject: [PATCH 669/836] chore(deps): update github/codeql-action digest to 181d5ee (main) (#7948) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [github/codeql-action](https://redirect.github.com/github/codeql-action) | action | digest | `39edc49` -> `181d5ee` | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b3c2e8f34f..12e2f2fb19 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 1ad79ebb25..c4d8267b55 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 2f9ce05f03..10b1a8c287 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 1003979a22..50853e5665 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3 with: sarif_file: snyk-test.sarif From bf3ed0e56f5af971a7ecb7a344b117bbb178929b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 2 Jul 2025 11:44:02 +0200 Subject: [PATCH 670/836] chore(config): migrate renovate config (#7953) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/renovate.json5 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index dc686a6ab5..3364ab9d1d 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -159,15 +159,15 @@ }, { customType: 'regex', - fileMatch: [ - '^tests\\/utils\\/minio\\/minio\\.go$', + managerFilePatterns: [ + '/^tests\\/utils\\/minio\\/minio\\.go$/', ], matchStrings: [ 'minioImage = "(?.+?):(?.*?)"', 'minioClientImage = "(?.+?):(?.*?)"', ], datasourceTemplate: 'docker', - versioningTemplate: "regex:^RELEASE\\.(?\\d{4})-(?\\d{2})-(?\\d{2})T\\d{2}-\\d{2}-\\d{2}Z$" + versioningTemplate: 
'regex:^RELEASE\\.(?\\d{4})-(?\\d{2})-(?\\d{2})T\\d{2}-\\d{2}-\\d{2}Z$', }, ], packageRules: [ @@ -279,7 +279,7 @@ pinDigests: false, matchPackageNames: [ 'vmware-tanzu{/,}**', - 'minio{/,}**' + 'minio{/,}**', ], }, { From a05791a569a3b6632cb239cc335f6ffdbc52c884 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 2 Jul 2025 13:16:07 +0200 Subject: [PATCH 671/836] ci(linter): configure the new varname check in latest revive linter (#7954) Signed-off-by: Marco Nenciarini --- .golangci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 071690d162..59f0bd4d11 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -68,6 +68,10 @@ linters: - staticcheck path: api/ text: 'ST1016:' + - linters: + - revive + path: /(utils|common)/[^/]+.go + text: avoid meaningless package names paths: - zz_generated.* - third_party$ From e333277fe815e043918059791d2e97b0f42c0fbe Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 2 Jul 2025 13:43:58 +0200 Subject: [PATCH 672/836] chore(deps): update dependency golangci/golangci-lint to v2.2.1 (main) (#7955) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Update | Change | |---|---|---| | [golangci/golangci-lint](https://redirect.github.com/golangci/golangci-lint) | minor | `v2.1.6` -> `v2.2.1` | --- ### Release Notes
golangci/golangci-lint (golangci/golangci-lint) ### [`v2.2.1`](https://redirect.github.com/golangci/golangci-lint/blob/HEAD/CHANGELOG.md#v221) [Compare Source](https://redirect.github.com/golangci/golangci-lint/compare/v2.2.0...v2.2.1) 1. Linters bug fixes - `varnamelen`: fix configuration ### [`v2.2.0`](https://redirect.github.com/golangci/golangci-lint/blob/HEAD/CHANGELOG.md#v220) [Compare Source](https://redirect.github.com/golangci/golangci-lint/compare/v2.1.6...v2.2.0) 1. New linters - Add `arangolint` linter https://github.com/Crocmagnon/arangolint - Add `embeddedstructfieldcheck` linter https://github.com/manuelarte/embeddedstructfieldcheck - Add `noinlineerr` linter https://github.com/AlwxSin/noinlineerr - Add `swaggo` formatter https://github.com/golangci/swaggoswag 2. Linters new features or changes - `errcheck`: add `verbose` option - `funcorder`: from 0.2.1 to 0.5.0 (new option `alphabetical`) - `gomoddirectives`: from 0.6.1 to 0.7.0 (new option `ignore-forbidden`) - `iface`: from 1.3.1 to 1.4.0 (new option `unexported`) - `noctx`: from 0.1.0 to 0.3.3 (new report messages, and new rules related to `database/sql`) - `noctx`: from 0.3.3 to 0.3.4 (new SQL functions detection) - `revive`: from 1.9.0 to 1.10.0 (new rules: `time-date`, `unnecessary-format`, `use-fmt-print`) - `usestdlibvars`: from 1.28.0 to 1.29.0 (new option `time-date-month`) - `wsl`: deprecation - `wsl_v5`: from 4.7.0 to 5.0.0 (major version with new configuration) 3. Linters bug fixes - `dupword`: from 0.1.3 to 0.1.6 - `exptostd`: from 0.4.3 to 0.4.4 - `forbidigo`: from 1.6.0 to 2.1.0 - `gci`: consistently format the code - `go-spancheck`: from 0.6.4 to 0.6.5 - `goconst`: from 1.8.1 to 1.8.2 - `gosec`: from 2.22.3 to 2.22.4 - `gosec`: from 2.22.4 to 2.22.5 - `makezero`: from 1.2.0 to 2.0.1 - `misspell`: from 0.6.0 to 0.7.0 - `usetesting`: from 0.4.3 to 0.5.0 4. Misc. - exclusions: fix `path-expect` - formatters: write the input to `stdout` when using `stdin` and there are no changes - migration: improve the error message when trying to migrate a migrated config - `typecheck`: deduplicate errors - `typecheck`: stops the analysis after the first error - Deprecate `print-resources-usage` flag - Unique version per custom build 5. Documentation - Improves typecheck FAQ - Adds plugin systems recommendations - Add description for `linters.default` sets
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index c4d8267b55..e4ddce1072 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -21,7 +21,7 @@ env: # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.4" # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose - GOLANGCI_LINT_VERSION: "v2.1.6" + GOLANGCI_LINT_VERSION: "v2.2.1" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" From 87a0137f89eadd514888bcc146a9763f3167e7dc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 2 Jul 2025 16:02:14 +0200 Subject: [PATCH 673/836] fix(deps): update module github.com/masterminds/semver/v3 to v3.4.0 (main) (#7936) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [github.com/Masterminds/semver/v3](https://redirect.github.com/Masterminds/semver) | `v3.3.1` -> `v3.4.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fMasterminds%2fsemver%2fv3/v3.4.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2fMasterminds%2fsemver%2fv3/v3.4.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2fMasterminds%2fsemver%2fv3/v3.3.1/v3.4.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fMasterminds%2fsemver%2fv3/v3.3.1/v3.4.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
Masterminds/semver (github.com/Masterminds/semver/v3) ### [`v3.4.0`](https://redirect.github.com/Masterminds/semver/releases/tag/v3.4.0) [Compare Source](https://redirect.github.com/Masterminds/semver/compare/v3.3.1...v3.4.0) There are a few changes in this release to highlight: 1. `Constraints` now has a property `IncludePrerelease`. When set to true the `Check` and `Validate` methods will include prereleases. 2. When an AND group has one constraint with a prerelease but more than one constraint then prereleases will be included. For example, `>1.0.0-beta.1 < 2`. In the past this would not have included prereleases because each constraint needed to have a prerelease. Now, only one constraint needs to have a prerelease. This is considered a long standing bug fix. Note, this does not carry across OR groups. For example, `>1.0.0-beta.1 < 2 || > 3`. In this case, prereleases will not be included when evaluating against `>3`. 3. `NewVersion` coercion with leading "0"'s is restored. This can be disabled by setting the package level property `CoerceNewVersion` to `false`. #### What's Changed - fix the CodeQL link by [@​dmitris](https://redirect.github.com/dmitris) in [https://github.com/Masterminds/semver/pull/257](https://redirect.github.com/Masterminds/semver/pull/257) - Restore detailed errors when failed to parse with NewVersion by [@​mattfarina](https://redirect.github.com/mattfarina) in [https://github.com/Masterminds/semver/pull/262](https://redirect.github.com/Masterminds/semver/pull/262) - updating go version tested with by [@​mattfarina](https://redirect.github.com/mattfarina) in [https://github.com/Masterminds/semver/pull/263](https://redirect.github.com/Masterminds/semver/pull/263) - Restore the ability to have leading 0's with NewVersion by [@​mattfarina](https://redirect.github.com/mattfarina) in [https://github.com/Masterminds/semver/pull/266](https://redirect.github.com/Masterminds/semver/pull/266) - Handle pre-releases on all in an and group by [@​mattfarina](https://redirect.github.com/mattfarina) in [https://github.com/Masterminds/semver/pull/267](https://redirect.github.com/Masterminds/semver/pull/267) - Add property to include prereleases by [@​mattfarina](https://redirect.github.com/mattfarina) in [https://github.com/Masterminds/semver/pull/268](https://redirect.github.com/Masterminds/semver/pull/268) - Updating the error message handling by [@​mattfarina](https://redirect.github.com/mattfarina) in [https://github.com/Masterminds/semver/pull/269](https://redirect.github.com/Masterminds/semver/pull/269) - Update the release notes and readme for new version by [@​mattfarina](https://redirect.github.com/mattfarina) in [https://github.com/Masterminds/semver/pull/270](https://redirect.github.com/Masterminds/semver/pull/270) #### New Contributors - [@​dmitris](https://redirect.github.com/dmitris) made their first contribution in [https://github.com/Masterminds/semver/pull/257](https://redirect.github.com/Masterminds/semver/pull/257) **Full Changelog**: https://github.com/Masterminds/semver/compare/v3.3.1...v3.4.0
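A minimal sketch of the prerelease-handling changes summarized above (the version and constraint strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	v := semver.MustParse("1.1.0-rc.1")

	// Since v3.4.0, a single prerelease bound in an AND group is enough
	// for the whole group to consider prereleases.
	c1, err := semver.NewConstraint(">1.0.0-beta.1 <2")
	if err != nil {
		panic(err)
	}
	fmt.Println(c1.Check(v)) // true on v3.4.0

	// Prerelease matching can also be requested explicitly through the
	// new IncludePrerelease property, whatever the constraint string.
	c2, _ := semver.NewConstraint("<2.0.0")
	c2.IncludePrerelease = true
	fmt.Println(c2.Check(v)) // true
}
```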
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8a999a7769..d58e2f4b96 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.24.1 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/Masterminds/semver/v3 v3.3.1 + github.com/Masterminds/semver/v3 v3.4.0 github.com/avast/retry-go/v4 v4.6.1 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 diff --git a/go.sum b/go.sum index 4b3d824ab5..cc9063b817 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEK github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= -github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= From 7ce8aa1347c8f8e37d0cf4ab06dbc5a98524892c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 2 Jul 2025 16:37:12 +0200 Subject: [PATCH 674/836] fix(deps): update kubernetes patches to v0.33.2 (main) (#7860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Confidence | |---|---|---|---| | [k8s.io/api](https://redirect.github.com/kubernetes/api) | `v0.33.1` -> `v0.33.2` | [![age](https://developer.mend.io/api/mc/badges/age/go/k8s.io%2fapi/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/k8s.io%2fapi/v0.33.1/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [k8s.io/apiextensions-apiserver](https://redirect.github.com/kubernetes/apiextensions-apiserver) | `v0.33.1` -> `v0.33.2` | [![age](https://developer.mend.io/api/mc/badges/age/go/k8s.io%2fapiextensions-apiserver/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://developer.mend.io/api/mc/badges/confidence/go/k8s.io%2fapiextensions-apiserver/v0.33.1/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [k8s.io/apimachinery](https://redirect.github.com/kubernetes/apimachinery) | `v0.33.1` -> `v0.33.2` | [![age](https://developer.mend.io/api/mc/badges/age/go/k8s.io%2fapimachinery/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/k8s.io%2fapimachinery/v0.33.1/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [k8s.io/cli-runtime](https://redirect.github.com/kubernetes/cli-runtime) | `v0.33.1` -> `v0.33.2` | [![age](https://developer.mend.io/api/mc/badges/age/go/k8s.io%2fcli-runtime/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/k8s.io%2fcli-runtime/v0.33.1/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [k8s.io/client-go](https://redirect.github.com/kubernetes/client-go) | `v0.33.1` -> `v0.33.2` | [![age](https://developer.mend.io/api/mc/badges/age/go/k8s.io%2fclient-go/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/k8s.io%2fclient-go/v0.33.1/v0.33.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
kubernetes/api (k8s.io/api) ### [`v0.33.2`](https://redirect.github.com/kubernetes/api/compare/v0.33.1...v0.33.2) [Compare Source](https://redirect.github.com/kubernetes/api/compare/v0.33.1...v0.33.2)
kubernetes/apiextensions-apiserver (k8s.io/apiextensions-apiserver) ### [`v0.33.2`](https://redirect.github.com/kubernetes/apiextensions-apiserver/compare/v0.33.1...v0.33.2) [Compare Source](https://redirect.github.com/kubernetes/apiextensions-apiserver/compare/v0.33.1...v0.33.2)
kubernetes/apimachinery (k8s.io/apimachinery) ### [`v0.33.2`](https://redirect.github.com/kubernetes/apimachinery/compare/v0.33.1...v0.33.2) [Compare Source](https://redirect.github.com/kubernetes/apimachinery/compare/v0.33.1...v0.33.2)
kubernetes/cli-runtime (k8s.io/cli-runtime) ### [`v0.33.2`](https://redirect.github.com/kubernetes/cli-runtime/compare/v0.33.1...v0.33.2) [Compare Source](https://redirect.github.com/kubernetes/cli-runtime/compare/v0.33.1...v0.33.2)
kubernetes/client-go (k8s.io/client-go) ### [`v0.33.2`](https://redirect.github.com/kubernetes/client-go/compare/v0.33.1...v0.33.2) [Compare Source](https://redirect.github.com/kubernetes/client-go/compare/v0.33.1...v0.33.2)
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index d58e2f4b96..522d8c0cbf 100644 --- a/go.mod +++ b/go.mod @@ -38,11 +38,11 @@ require ( golang.org/x/term v0.32.0 google.golang.org/grpc v1.73.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.33.1 - k8s.io/apiextensions-apiserver v0.33.1 - k8s.io/apimachinery v0.33.1 - k8s.io/cli-runtime v0.33.1 - k8s.io/client-go v0.33.1 + k8s.io/api v0.33.2 + k8s.io/apiextensions-apiserver v0.33.2 + k8s.io/apimachinery v0.33.2 + k8s.io/cli-runtime v0.33.2 + k8s.io/client-go v0.33.2 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/yaml v1.5.0 diff --git a/go.sum b/go.sum index cc9063b817..78748648a2 100644 --- a/go.sum +++ b/go.sum @@ -286,16 +286,16 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= -k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= -k8s.io/apiextensions-apiserver v0.33.1 h1:N7ccbSlRN6I2QBcXevB73PixX2dQNIW0ZRuguEE91zI= -k8s.io/apiextensions-apiserver v0.33.1/go.mod h1:uNQ52z1A1Gu75QSa+pFK5bcXc4hq7lpOXbweZgi4dqA= -k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= -k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/cli-runtime v0.33.1 h1:TvpjEtF71ViFmPeYMj1baZMJR4iWUEplklsUQ7D3quA= -k8s.io/cli-runtime v0.33.1/go.mod h1:9dz5Q4Uh8io4OWCLiEf/217DXwqNgiTS/IOuza99VZE= -k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4= -k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA= +k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY= +k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs= +k8s.io/apiextensions-apiserver v0.33.2 h1:6gnkIbngnaUflR3XwE1mCefN3YS8yTD631JXQhsU6M8= +k8s.io/apiextensions-apiserver v0.33.2/go.mod h1:IvVanieYsEHJImTKXGP6XCOjTwv2LUMos0YWc9O+QP8= +k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY= +k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/cli-runtime v0.33.2 h1:koNYQKSDdq5AExa/RDudXMhhtFasEg48KLS2KSAU74Y= +k8s.io/cli-runtime v0.33.2/go.mod h1:gnhsAWpovqf1Zj5YRRBBU7PFsRc6NkEkwYNQE+mXL88= +k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E= +k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod 
h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= From e224fe22a60ec8d807a92cf3860473de1799b509 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 2 Jul 2025 18:01:00 +0200 Subject: [PATCH 675/836] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.83.0 (main) (#7963) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Confidence | |---|---|---|---| | [github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring](https://redirect.github.com/prometheus-operator/prometheus-operator) | `v0.80.1` -> `v0.83.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fprometheus-operator%2fprometheus-operator%2fpkg%2fapis%2fmonitoring/v0.83.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fprometheus-operator%2fprometheus-operator%2fpkg%2fapis%2fmonitoring/v0.80.1/v0.83.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
prometheus-operator/prometheus-operator (github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring) ### [`v0.83.0`](https://redirect.github.com/prometheus-operator/prometheus-operator/releases/tag/v0.83.0): 0.83.0 / 2025-05-30 [Compare Source](https://redirect.github.com/prometheus-operator/prometheus-operator/compare/v0.82.2...v0.83.0) - \[FEATURE] Add `limits` option for Alertmanager silences. [#​7478](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7478) - \[FEATURE] Add `NoTranslation` as a `translationStrategy` option to OTLP config. [#​7539](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7539) - \[FEATURE] Add `nameEscapingScheme` field to Prometheus CRDs. [#​7538](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7538) - \[FEATURE] Add `convertHistogramsToNHCB` field to OTLPConfig. [#​7533](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7533) - \[FEATURE] Add `convert_classic_histograms_to_nhcb` option to Prometheus global config. [#​7543](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7543) - \[FEATURE] Add new `MetricNameEscapingScheme` and `MetricNameValidationScheme` parameters to ScrapeConfig. [#​7555](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7555) - \[ENHANCEMENT] Do not insert sharding relabeling in scrapeConfigs if already present. [#​7421](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7421) ### [`v0.82.2`](https://redirect.github.com/prometheus-operator/prometheus-operator/releases/tag/v0.82.2): 0.82.2 / 2025-05-12 [Compare Source](https://redirect.github.com/prometheus-operator/prometheus-operator/compare/v0.82.1...v0.82.2) - \[BUGFIX] Fix Alertmanager peer discovery for Alertmanager when using custom service name. [#​7512](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7512) - \[BUGFIX] Fix parsing `role` field in openstackSDConfigs in ScrapeConfig CRD. [#​7516](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7516) ### [`v0.82.1`](https://redirect.github.com/prometheus-operator/prometheus-operator/releases/tag/v0.82.1): 0.82.1 / 2025-05-06 [Compare Source](https://redirect.github.com/prometheus-operator/prometheus-operator/compare/v0.82.0...v0.82.1) - \[BUGFIX] Fix ThanosRuler when no remote-write configuration is defined. [#​7498](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7498) ### [`v0.82.0`](https://redirect.github.com/prometheus-operator/prometheus-operator/releases/tag/v0.82.0): 0.82.0 / 2025-04-17 [Compare Source](https://redirect.github.com/prometheus-operator/prometheus-operator/compare/v0.81.0...v0.82.0) - \[CHANGE] Add more API validations to the ScrapeConfig CRD. [#​7413](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7413) - \[CHANGE] Remove web console arguments if Prometheus version >= 3. [#​7457](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7457) - \[FEATURE] Add RemoteWrite support for ThanosRuler CRD. Note that when remote write is configured, Thanos Ruler runs in stateless mode. [#​7444](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7444) - \[FEATURE] Add `msTeamsV2Configs` receiver to AlertManagerConfig CRD. 
[#​7464](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7464) - \[FEATURE] Add `content`, `username` and `avatarURL` fields to `discordConfigs` receiver in AlertmanagerConfig CRD. [#​7307](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7307) - \[FEATURE] Add `convertClassicHistogramsToNHCB` field to `ServiceMonitor`, `PodMonitor`, `Probe` and `ScrapeConfig` CRDs. [#​7448](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7448) - \[FEATURE] Add `timeout` field to `webhookConfigs` receiver in AlertManagerConfig CRD. [#​7310](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7310) - \[FEATURE] Add `additionalArgs` field to AlertManager CRD. [#​7385](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7385) - \[ENHANCEMENT] Add `maxSamplesPerSend` field to RemoteWrite Metadata Config. [#​7443](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7443) - \[ENHANCEMENT] Add the `terminationGracePeriodSeconds` to the Alertmanager, Prometheus, PrometheusAgent and ThanosRuler CRDs. [#​7439](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7439) - \[ENHANCEMENT] Validate `proxyURL` field in AlertManagerConfig CRD. [#​6876](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/6876) - \[BUGFIX] Fix `nameValidationScheme` support for Prometheus and PrometheusAgent CRDs. [#​7414](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7414) - \[BUGFIX] Support `socks5` scheme in `proxyUrl` field for all CRDs. [#​7460](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7460) [#​7466](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7466) ### [`v0.81.0`](https://redirect.github.com/prometheus-operator/prometheus-operator/releases/tag/v0.81.0): 0.81.0 / 2025-03-11 [Compare Source](https://redirect.github.com/prometheus-operator/prometheus-operator/compare/v0.80.1...v0.81.0) - \[CHANGE] Remove the lower-case `hypervisor` and `instance` roles for the OpenStack Service Discovery from the ScrapeConfig CRD (users are required to use `Hypervisor` and `Instance` instead). [#​7370](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7370) - \[FEATURE] Add `serviceName` field to the ThanosRuler and Alertmanager CRDs. [#​7325](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7325) - \[FEATURE] Add `shardRetentionPolicy` field to the Prometheus CRD (it requires the `PrometheusShardRetentionPolicy` feature gate). [#​7274](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7274) - \[FEATURE] Add support for the cluster mTLS configuration to the Alertmanager CRD. [#​7149](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7149) - \[FEATURE] Add `LoadBalancer` role for the OpenStack Service Discovery to the ScrapeConfig CRD. [#​7356](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7356) - \[ENHANCEMENT] Add `enableServiceLinks` field to the Alertmanager, Prometheus, PrometheusAgent and ThanosRuler CRDs. [#​7384](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7384) - \[BUGFIX] Fix Alertmanager configuration with PushOver duration fields. [#​7249](https://redirect.github.com/prometheus-operator/prometheus-operator/issues/7249)
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 522d8c0cbf..68f441274f 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.83.0 github.com/prometheus/client_golang v1.22.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 @@ -56,7 +56,7 @@ require ( github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/fatih/color v1.18.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.8.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -98,12 +98,12 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.3 // indirect - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/net v0.38.0 // indirect + golang.org/x/crypto v0.38.0 // indirect + golang.org/x/net v0.40.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.13.0 // indirect + golang.org/x/sync v0.14.0 // indirect golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.24.0 // indirect + golang.org/x/text v0.25.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -117,5 +117,5 @@ require ( sigs.k8s.io/kustomize/api v0.19.0 // indirect sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect ) diff --git a/go.sum b/go.sum index 78748648a2..b7247eb4e0 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= +github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= 
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= @@ -155,8 +155,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1 h1:DP+PUNVOc+Bkft8a4QunLzaZ0RspWuD3tBbcPHr2PeE= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.80.1/go.mod h1:6x4x0t9BP35g4XcjkHE9EB3RxhyfxpdpmZKd/Qyk8+M= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.83.0 h1:j9Ce3W6X6Tzi0QnSap+YzGwpqJLJGP/7xV6P9f86jjM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.83.0/go.mod h1:sSxwdmprUfmRfTknPc4KIjUd2ZIc/kirw4UdXNhOauM= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -225,23 +225,23 @@ go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= 
-golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -253,8 +253,8 @@ golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -313,8 +313,8 @@ sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/r sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ= sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4= From bb1c9dbf2d857f061144a2209fac6392cf3f138b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 3 Jul 2025 09:55:24 +0200 Subject: [PATCH 676/836] fix(deps): update module github.com/cloudnative-pg/machinery to v0.3.0 (main) (#7968) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Confidence | |---|---|---|---| | [github.com/cloudnative-pg/machinery](https://redirect.github.com/cloudnative-pg/machinery) | `v0.2.0` -> `v0.3.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fcloudnative-pg%2fmachinery/v0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fcloudnative-pg%2fmachinery/v0.2.0/v0.3.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
cloudnative-pg/machinery (github.com/cloudnative-pg/machinery) ### [`v0.3.0`](https://redirect.github.com/cloudnative-pg/machinery/releases/tag/v0.3.0) [Compare Source](https://redirect.github.com/cloudnative-pg/machinery/compare/v0.2.0...v0.3.0) ##### Features - add `hash` pkg ([#​128](https://redirect.github.com/cloudnative-pg/machinery/issues/128)) ([db1f40a](https://redirect.github.com/cloudnative-pg/machinery/commit/db1f40a60719c93c863f0b47520efc4c6819ed48)) ##### Bug Fixes - **deps:** update all non-major go dependencies ([#​103](https://redirect.github.com/cloudnative-pg/machinery/issues/103)) ([c12fbef](https://redirect.github.com/cloudnative-pg/machinery/commit/c12fbef05a8b605d8a2ad2437469ab162ca889e7)) - **deps:** update module github.com/go-logr/logr to v1.4.3 ([#​125](https://redirect.github.com/cloudnative-pg/machinery/issues/125)) ([10b85e8](https://redirect.github.com/cloudnative-pg/machinery/commit/10b85e80f4fa0e1ac414f4ad9dfd81fa9a22d945)) - **deps:** update module golang.org/x/sys to v0.33.0 ([#​116](https://redirect.github.com/cloudnative-pg/machinery/issues/116)) ([092d91d](https://redirect.github.com/cloudnative-pg/machinery/commit/092d91d775f993a632e8a00d57e554597fc1bc00)) - **deps:** update module k8s.io/apimachinery to v0.33.2 ([#​111](https://redirect.github.com/cloudnative-pg/machinery/issues/111)) ([872299c](https://redirect.github.com/cloudnative-pg/machinery/commit/872299c2f294deafd71a792bc59886989047a1be)) - **deps:** update module sigs.k8s.io/controller-runtime to v0.21.0 ([#​122](https://redirect.github.com/cloudnative-pg/machinery/issues/122)) ([ed57704](https://redirect.github.com/cloudnative-pg/machinery/commit/ed577046d562fdbbbb9de172aeee100f7b4e897f))
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 68f441274f..f64cb5fc69 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.3.1 github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b - github.com/cloudnative-pg/machinery v0.2.0 + github.com/cloudnative-pg/machinery v0.3.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 github.com/go-logr/logr v1.4.3 diff --git a/go.sum b/go.sum index b7247eb4e0..372953941f 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.3.1 h1:kzkY77k2lN/caoyh7ibXDSZjJeSJTNv github.com/cloudnative-pg/barman-cloud v0.3.1/go.mod h1:4HL3AjY9oEl2Ed0HSkyvTZEQPhwyFOaAnuCz9lfVeYQ= github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b h1:B7Ugp5epMIDNPe0bIOcqpErKkiQfuCM3nXoGh4GiPHM= github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b/go.mod h1:FUA8ELMnqHpA2MIOeG425sX7D+u3m8SD/oFd1CnXSEw= -github.com/cloudnative-pg/machinery v0.2.0 h1:x8OAwxdeL/6wkbxqorz+nX6UovTyx7/TBeCfiRebR2o= -github.com/cloudnative-pg/machinery v0.2.0/go.mod h1:Kg8W8Tb/1UFGGtw3hR8S5SytSWddlHaCnJSgBo4x/nc= +github.com/cloudnative-pg/machinery v0.3.0 h1:t1DzXGeK3RUYXS5KWIdIk30oh4EmwxZ+6sWM4wJDBac= +github.com/cloudnative-pg/machinery v0.3.0/go.mod h1:6NhajP3JlioeecYceVuOBLD2lfsJty8qSZsFpSb/vmA= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= From b21f768be78f18cb79790d69363a3af60a79e55b Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 3 Jul 2025 13:14:17 +0200 Subject: [PATCH 677/836] fix(cluster,backup): ensure Backup status is set after failure (#7898) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch ensures that the status stanza of the Backup resource is set after a backup has failed, including the `method` field. As a consequence of this, the `LastBackupSucceeded` condition and the `lastFailedBackup` field will be properly set on the Cluster resource. 
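For illustration, after this change a failed backup should surface a status stanza roughly like the following sketch (the method and the error string below are invented for the example; the exact values depend on the backup and the failure):

```yaml
# Illustrative Backup status after a failure
status:
  phase: failed
  method: barmanObjectStore   # now always populated from spec.method
  error: "while getting cluster cluster-example: not found"
```

On the Cluster side, `status.lastFailedBackup` is stamped with the failure time and the `LastBackupSucceeded` condition transitions to `False`.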
Closes #7892 Signed-off-by: Armando Ruocco Signed-off-by: Niccolò Fei Signed-off-by: Leonardo Cecchi Co-authored-by: Niccolò Fei Co-authored-by: Leonardo Cecchi --- internal/controller/backup_controller.go | 64 +++------ pkg/management/postgres/backup.go | 28 +--- .../postgres/webserver/plugin_backup.go | 27 +--- pkg/resources/status/backup.go | 134 ++++++++++++++++++ pkg/resources/status/backup_test.go | 83 +++++++++++ pkg/resources/status/suite_test.go | 32 +++++ 6 files changed, 267 insertions(+), 101 deletions(-) create mode 100644 pkg/resources/status/backup.go create mode 100644 pkg/resources/status/backup_test.go create mode 100644 pkg/resources/status/suite_test.go diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index d228094922..f2316a10d1 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -132,7 +132,8 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{RequeueAfter: 30 * time.Second}, nil } - tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("while getting cluster %s: %w", clusterName, err)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, nil, + fmt.Errorf("while getting cluster %s: %w", clusterName, err)) r.Recorder.Eventf(&backup, "Warning", "FindingCluster", "Error getting cluster %v, will not retry: %s", clusterName, err.Error()) return ctrl.Result{}, nil @@ -142,7 +143,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr message := "cannot proceed with the backup as the cluster has no plugin configured" contextLogger.Warning(message) r.Recorder.Event(&backup, "Warning", "ClusterHasNoBackupExecutorPlugin", message) - tryFlagBackupAsFailed(ctx, r.Client, &backup, errors.New(message)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) return ctrl.Result{}, nil } @@ -150,7 +151,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr message := "cannot proceed with the backup as the cluster has no backup section" contextLogger.Warning(message) r.Recorder.Event(&backup, "Warning", "ClusterHasBackupConfigured", message) - tryFlagBackupAsFailed(ctx, r.Client, &backup, errors.New(message)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) return ctrl.Result{}, nil } @@ -180,7 +181,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr message := "cannot proceed with the backup as the Kubernetes cluster has no VolumeSnapshot support" contextLogger.Warning(message) r.Recorder.Event(&backup, "Warning", "ClusterHasNoVolumeSnapshotCRD", message) - tryFlagBackupAsFailed(ctx, r.Client, &backup, errors.New(message)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) return ctrl.Result{}, nil } @@ -204,7 +205,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr if backup.Spec.Method == apiv1.BackupMethodBarmanObjectStore { if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { - tryFlagBackupAsFailed(ctx, r.Client, &backup, + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New("no barmanObjectStore section defined on the target cluster")) return ctrl.Result{}, nil } @@ -247,7 +248,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{RequeueAfter: 30 * 
time.Second}, nil } if err != nil { - tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("while getting pod: %w", err)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, fmt.Errorf("while getting pod: %w", err)) r.Recorder.Eventf(&backup, "Warning", "FindingPod", "Error getting target pod: %s", cluster.Status.TargetPrimary) return ctrl.Result{}, nil @@ -272,12 +273,13 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // This backup can be started if err := startInstanceManagerBackup(ctx, r.Client, &backup, pod, &cluster); err != nil { r.Recorder.Eventf(&backup, "Warning", "Error", "Backup exit with error %v", err) - tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("encountered an error while taking the backup: %w", err)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, + fmt.Errorf("encountered an error while taking the backup: %w", err)) return ctrl.Result{}, nil } case apiv1.BackupMethodVolumeSnapshot: if cluster.Spec.Backup == nil || cluster.Spec.Backup.VolumeSnapshot == nil { - tryFlagBackupAsFailed(ctx, r.Client, &backup, + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New("no volumeSnapshot section defined on the target cluster")) return ctrl.Result{}, nil } @@ -403,7 +405,7 @@ func (r *BackupReconciler) reconcileSnapshotBackup( return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil } if err != nil { - tryFlagBackupAsFailed(ctx, r.Client, backup, fmt.Errorf("while getting pod: %w", err)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, backup, cluster, fmt.Errorf("while getting pod: %w", err)) r.Recorder.Eventf(backup, "Warning", "FindingPod", "Error getting target pod: %s", cluster.Status.TargetPrimary) return &ctrl.Result{}, nil @@ -472,18 +474,9 @@ func (r *BackupReconciler) reconcileSnapshotBackup( // Volume Snapshot errors are not retryable, we need to set this backup as failed // and un-fence the Pod contextLogger.Error(err, "while executing snapshot backup") - // Update backup status in cluster conditions - if errCond := resourcestatus.PatchConditionsWithOptimisticLock( - ctx, - r.Client, - cluster, - apiv1.BuildClusterBackupFailedCondition(err), - ); errCond != nil { - contextLogger.Error(errCond, "Error while updating backup condition (backup snapshot failed)") - } - r.Recorder.Eventf(backup, "Warning", "Error", "snapshot backup failed: %v", err) - tryFlagBackupAsFailed(ctx, r.Client, backup, fmt.Errorf("can't execute snapshot backup: %w", err)) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, backup, cluster, + fmt.Errorf("can't execute snapshot backup: %w", err)) return nil, volumesnapshot.EnsurePodIsUnfenced(ctx, r.Client, r.Recorder, cluster, backup, targetPod) } @@ -666,20 +659,10 @@ func startInstanceManagerBackup( }) if err != nil { log.FromContext(ctx).Error(err, "executing backup", "stdout", stdout, "stderr", stderr) - status.SetAsFailed(fmt.Errorf("can't execute backup: %w", err)) - status.CommandError = stderr - status.CommandError = stdout - - // Update backup status in cluster conditions - if errCond := resourcestatus.PatchConditionsWithOptimisticLock( - ctx, - client, - cluster, - apiv1.BuildClusterBackupFailedCondition(err), - ); errCond != nil { - log.FromContext(ctx).Error(errCond, "Error while updating backup condition (backup failed)") + setCommandErr := func(backup *apiv1.Backup) { + backup.Status.CommandError = fmt.Sprintf("with stderr: %s, with stdout: %s", stderr, stdout) } - return 
postgres.PatchBackupStatusAndRetry(ctx, client, backup) + return resourcestatus.FlagBackupAsFailed(ctx, client, backup, cluster, err, setCommandErr) } return nil @@ -724,18 +707,3 @@ func (r *BackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manage controllerBuilder = controllerBuilder.WithOptions(controller.Options{MaxConcurrentReconciles: 1}) return controllerBuilder.Complete(r) } - -func tryFlagBackupAsFailed( - ctx context.Context, - cli client.Client, - backup *apiv1.Backup, - err error, -) { - contextLogger := log.FromContext(ctx) - origBackup := backup.DeepCopy() - backup.Status.SetAsFailed(err) - - if err := cli.Status().Patch(ctx, backup, client.MergeFrom(origBackup)); err != nil { - contextLogger.Error(err, "while flagging backup as failed") - } -} diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go index 46648c4abb..292d670e41 100644 --- a/pkg/management/postgres/backup.go +++ b/pkg/management/postgres/backup.go @@ -35,7 +35,6 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -155,36 +154,11 @@ func (b *BackupCommand) run(ctx context.Context) { ) if err := b.takeBackup(ctx); err != nil { - backupStatus := b.Backup.GetStatus() - // record the failure b.Log.Error(err, "Backup failed") b.Recorder.Event(b.Backup, "Normal", "Failed", "Backup failed") - // update backup status as failed - backupStatus.SetAsFailed(err) - if err := PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil { - b.Log.Error(err, "Can't mark backup as failed") - // We do not terminate here because we still want to do the maintenance - // activity on the backups and to set the condition on the cluster. 
- } - - // add backup failed condition to the cluster - if failErr := b.retryWithRefreshedCluster(ctx, func() error { - return status.PatchWithOptimisticLock( - ctx, - b.Client, - b.Cluster, - func(cluster *apiv1.Cluster) { - meta.SetStatusCondition(&cluster.Status.Conditions, apiv1.BuildClusterBackupFailedCondition(err)) - cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) - }, - ) - }); failErr != nil { - b.Log.Error(failErr, "while setting cluster condition for failed backup") - // We do not terminate here because it's more important to properly handle - // the backup maintenance activity than putting a condition in the cluster - } + _ = status.FlagBackupAsFailed(ctx, b.Client, b.Backup, b.Cluster, err) } b.backupMaintenance(ctx) diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index 98e0415fda..a51d25db89 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -22,12 +22,9 @@ package webserver import ( "context" "fmt" - "time" "github.com/cloudnative-pg/machinery/pkg/log" - pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" "github.com/cloudnative-pg/machinery/pkg/stringset" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" @@ -166,33 +163,11 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { func (b *PluginBackupCommand) markBackupAsFailed(ctx context.Context, failure error) { contextLogger := log.FromContext(ctx) - backupStatus := b.Backup.GetStatus() - // record the failure contextLogger.Error(failure, "Backup failed") b.Recorder.Event(b.Backup, "Normal", "Failed", "Backup failed") - // update backup status as failed - backupStatus.SetAsFailed(failure) - if err := postgres.PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil { - contextLogger.Error(err, "Can't mark backup as failed") - // We do not terminate here because we still want to set the condition on the cluster. - } - - // add backup failed condition to the cluster - if failErr := b.retryWithRefreshedCluster(ctx, func() error { - return status.PatchWithOptimisticLock( - ctx, - b.Client, - b.Cluster, - func(cluster *apiv1.Cluster) { - meta.SetStatusCondition(&cluster.Status.Conditions, apiv1.BuildClusterBackupFailedCondition(failure)) - cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) - }, - ) - }); failErr != nil { - contextLogger.Error(failErr, "while setting cluster condition for failed backup") - } + _ = status.FlagBackupAsFailed(ctx, b.Client, b.Backup, b.Cluster, failure) } func (b *PluginBackupCommand) retryWithRefreshedCluster( diff --git a/pkg/resources/status/backup.go b/pkg/resources/status/backup.go new file mode 100644 index 0000000000..8850e90737 --- /dev/null +++ b/pkg/resources/status/backup.go @@ -0,0 +1,134 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "context" + "fmt" + "time" + + "github.com/cloudnative-pg/machinery/pkg/log" + pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// BackupTransaction is a function that modifies a Backup object. +type BackupTransaction func(*apiv1.Backup) + +type flagBackupErrors struct { + clusterStatusErr error + backupErr error + clusterConditionErr error +} + +func (f flagBackupErrors) Error() string { + var message string + if f.clusterStatusErr != nil { + message += fmt.Sprintf("error patching cluster status: %v; ", f.clusterStatusErr) + } + if f.backupErr != nil { + message += fmt.Sprintf("error patching backup status: %v; ", f.backupErr) + } + if f.clusterConditionErr != nil { + message += fmt.Sprintf("error patching cluster conditions: %v; ", f.clusterConditionErr) + } + + return message +} + +// toError returns the errors encountered or nil +func (f flagBackupErrors) toError() error { + if f.clusterStatusErr != nil || f.backupErr != nil || f.clusterConditionErr != nil { + return f + } + return nil +} + +// FlagBackupAsFailed updates the status of a Backup object to indicate that it has failed. +func FlagBackupAsFailed( + ctx context.Context, + cli client.Client, + backup *apiv1.Backup, + cluster *apiv1.Cluster, + err error, + transactions ...BackupTransaction, +) error { + contextLogger := log.FromContext(ctx) + + var flagErr flagBackupErrors + + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var livingBackup apiv1.Backup + if err := cli.Get(ctx, client.ObjectKeyFromObject(backup), &livingBackup); err != nil { + contextLogger.Error(err, "failed to get backup") + return err + } + origBackup := livingBackup.DeepCopy() + livingBackup.Status.SetAsFailed(err) + livingBackup.Status.Method = livingBackup.Spec.Method + for _, transaction := range transactions { + transaction(&livingBackup) + } + + err := cli.Status().Patch(ctx, &livingBackup, client.MergeFrom(origBackup)) + if err != nil { + contextLogger.Error(err, "while patching backup status") + return err + } + // we mutate the original object + backup.Status = livingBackup.Status + + return nil + }); err != nil { + contextLogger.Error(err, "while flagging backup as failed") + flagErr.backupErr = err + } + + if cluster == nil { + return flagErr.toError() + } + + if err := PatchWithOptimisticLock( + ctx, + cli, + cluster, + func(cluster *apiv1.Cluster) { + cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) + }, + ); err != nil { + contextLogger.Error(err, "while patching cluster status with last failed backup") + flagErr.clusterStatusErr = err + } + + if err := PatchConditionsWithOptimisticLock( + ctx, + cli, + cluster, + apiv1.BuildClusterBackupFailedCondition(err), + ); err != nil { + contextLogger.Error(err, "while patching backup condition in the cluster status (backup failed)") + flagErr.clusterConditionErr = err + } + + return flagErr.toError() +} diff --git a/pkg/resources/status/backup_test.go b/pkg/resources/status/backup_test.go new file mode 100644 index 0000000000..3727b4835a --- /dev/null +++ b/pkg/resources/status/backup_test.go @@ -0,0 +1,83 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("FlagBackupAsFailed", func() { + scheme := schemeBuilder.BuildWithAllKnownScheme() + k8sClient := fake.NewClientBuilder().WithScheme(scheme). + WithStatusSubresource(&apiv1.Cluster{}, &apiv1.Backup{}). + Build() + + It("selects the new target primary right away", func(ctx SpecContext) { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + } + + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name, + Namespace: cluster.Namespace, + }, + Spec: apiv1.BackupSpec{ + Cluster: apiv1.LocalObjectReference{ + Name: cluster.Name, + }, + }, + Status: apiv1.BackupStatus{ + Phase: apiv1.BackupPhaseRunning, + }, + } + Expect(k8sClient.Create(ctx, cluster)).To(Succeed()) + Expect(k8sClient.Create(ctx, backup)).To(Succeed()) + + err := FlagBackupAsFailed(ctx, k8sClient, backup, cluster, errors.New("my sample error")) + Expect(err).NotTo(HaveOccurred()) + + // Backup status assertions + Expect(backup.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseFailed)) + Expect(backup.Status.Error).To(BeEquivalentTo("my sample error")) + + // Cluster status assertions + Expect(cluster.Status.LastFailedBackup).ToNot(BeEmpty()) + for _, condition := range cluster.Status.Conditions { + if condition.Type == string(apiv1.ConditionBackup) { + Expect(condition.Status).To(BeEquivalentTo(metav1.ConditionFalse)) + Expect(condition.Reason).To(BeEquivalentTo(string(apiv1.ConditionReasonLastBackupFailed))) + Expect(condition.Message).To(BeEquivalentTo("my sample error")) + } + } + }) +}) diff --git a/pkg/resources/status/suite_test.go b/pkg/resources/status/suite_test.go new file mode 100644 index 0000000000..7ef3812e04 --- /dev/null +++ b/pkg/resources/status/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package status + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestConfiguration(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Internal Configuration Test Suite") +} From 9eecc6db1c01b5ef3cef829d1bbe307a2ce1b51f Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 4 Jul 2025 10:18:23 +0200 Subject: [PATCH 678/836] fix(docs): remove version 13 from cluster-example.yaml (#7978) Signed-off-by: Gabriele Bartolini --- docs/src/samples/cluster-example.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/src/samples/cluster-example.yaml b/docs/src/samples/cluster-example.yaml index 924f62bf25..2ca0d8d71f 100644 --- a/docs/src/samples/cluster-example.yaml +++ b/docs/src/samples/cluster-example.yaml @@ -4,7 +4,6 @@ metadata: name: cluster-example spec: instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:13 storage: size: 1Gi From b6b00d7fdbfeb50df665c66ed7f4c59327808faa Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 4 Jul 2025 14:55:35 +0200 Subject: [PATCH 679/836] feat(spec,probes): primary isolation check in the liveness probe (#7845) This patch promotes the liveness pinger feature from its previous experimental status (using the `alpha.cnpg.io/livenessPinger` annotation) to a stable, supported configuration within the `Cluster` resource, located in the `.spec.probes.liveness.isolationCheck` section. The feature is now called "isolation check" and is enabled by default in the liveness probe. The values for `requestTimeout` and `connectionTimeout` are set to 1 second by default. Closes #7551 ### Release notes ``` Primary isolation checks for the liveness probe: the liveness pinger, previously configured with the `alpha.cnpg.io/livenessPinger` annotation, is now included in the `Cluster` specification under the `isolationCheck` stanza and is enabled by default, enforcing the shutdown of an isolated primary. 
``` --------- Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 4 + api/v1/cluster_defaults.go | 87 ++++++++++ api/v1/cluster_defaults_test.go | 156 ++++++++++++++++++ api/v1/cluster_types.go | 35 +++- api/v1/zz_generated.deepcopy.go | 43 ++++- .../bases/postgresql.cnpg.io_clusters.yaml | 24 +++ docs/src/cloudnative-pg.v1.md | 80 ++++++++- docs/src/installation_upgrade.md | 25 +++ docs/src/instance_manager.md | 37 ++--- internal/webhook/v1/cluster_webhook.go | 3 +- .../postgres/webserver/probes/liveness.go | 23 ++- .../postgres/webserver/probes/pinger.go | 78 +-------- pkg/management/postgres/webserver/remote.go | 2 +- tests/e2e/probes_test.go | 4 +- 14 files changed, 498 insertions(+), 103 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index db131a2293..1ca2965c38 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -208,6 +208,7 @@ InfoSec Innocenti InstanceID InstanceReportedState +IsolationCheckConfiguration Istio Istio's JSON @@ -698,6 +699,7 @@ conn connectionLimit connectionParameters connectionString +connectionTimeout conninfo containerImage containerPort @@ -910,6 +912,7 @@ ips isPrimary isTemplate isWALArchiver +isolationCheck issuecomment italy jdbc @@ -1193,6 +1196,7 @@ repmgr reportNonRedacted reportRedacted req +requestTimeout requiredDuringSchedulingIgnoredDuringExecution resizeInUseVolumes resizingPVC diff --git a/api/v1/cluster_defaults.go b/api/v1/cluster_defaults.go index d026d70db0..5a5607d8dd 100644 --- a/api/v1/cluster_defaults.go +++ b/api/v1/cluster_defaults.go @@ -20,6 +20,9 @@ SPDX-License-Identifier: Apache-2.0 package v1 import ( + "encoding/json" + "fmt" + "github.com/cloudnative-pg/machinery/pkg/log" "github.com/cloudnative-pg/machinery/pkg/stringset" "k8s.io/utils/ptr" @@ -138,6 +141,7 @@ func (r *Cluster) setDefaults(preserveUserSettings bool) { } r.setDefaultPlugins(configuration.Current) + r.setProbes() } func (r *Cluster) setDefaultPlugins(config *configuration.Data) { @@ -269,3 +273,86 @@ func (r *Cluster) defaultPgBaseBackup() { r.Spec.Bootstrap.PgBaseBackup.Owner = r.Spec.Bootstrap.PgBaseBackup.Database } } + +const ( + // defaultRequestTimeout is the default value of the request timeout + defaultRequestTimeout = 1000 + + // defaultConnectionTimeout is the default value of the connection timeout + defaultConnectionTimeout = 1000 +) + +func (r *Cluster) setProbes() { + if r.Spec.Probes == nil { + r.Spec.Probes = &ProbesConfiguration{} + } + + if r.Spec.Probes.Liveness == nil { + r.Spec.Probes.Liveness = &LivenessProbe{} + } + + // we don't override the isolation check if it is already set + if r.Spec.Probes.Liveness.IsolationCheck != nil { + return + } + + // STEP 1: check if the alpha annotation is present, in that case convert it to spec + r.tryConvertAlphaLivenessPinger() + + if r.Spec.Probes.Liveness.IsolationCheck != nil { + return + } + + // STEP 2: set defaults. 
+ r.Spec.Probes.Liveness.IsolationCheck = &IsolationCheckConfiguration{ + Enabled: ptr.To(true), + RequestTimeout: defaultRequestTimeout, + ConnectionTimeout: defaultConnectionTimeout, + } +} + +func (r *Cluster) tryConvertAlphaLivenessPinger() { + if _, ok := r.Annotations[utils.LivenessPingerAnnotationName]; !ok { + return + } + v, err := NewLivenessPingerConfigFromAnnotations(r.Annotations) + if err != nil || v == nil { + // the error will be raised by the validation webhook + return + } + + r.Spec.Probes.Liveness.IsolationCheck = &IsolationCheckConfiguration{ + Enabled: v.Enabled, + RequestTimeout: v.RequestTimeout, + ConnectionTimeout: v.ConnectionTimeout, + } +} + +// NewLivenessPingerConfigFromAnnotations creates a new pinger configuration from the annotations +// in the cluster definition +func NewLivenessPingerConfigFromAnnotations( + annotations map[string]string, +) (*IsolationCheckConfiguration, error) { + v, ok := annotations[utils.LivenessPingerAnnotationName] + if !ok { + return nil, nil + } + + var cfg IsolationCheckConfiguration + if err := json.Unmarshal([]byte(v), &cfg); err != nil { + return nil, fmt.Errorf("while unmarshalling pinger config: %w", err) + } + + if cfg.Enabled == nil { + return nil, fmt.Errorf("pinger config is missing the enabled field") + } + + if cfg.RequestTimeout == 0 { + cfg.RequestTimeout = defaultRequestTimeout + } + if cfg.ConnectionTimeout == 0 { + cfg.ConnectionTimeout = defaultConnectionTimeout + } + + return &cfg, nil +} diff --git a/api/v1/cluster_defaults_test.go b/api/v1/cluster_defaults_test.go index 4250ef8bd3..608e994003 100644 --- a/api/v1/cluster_defaults_test.go +++ b/api/v1/cluster_defaults_test.go @@ -20,9 +20,11 @@ SPDX-License-Identifier: Apache-2.0 package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" + "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -359,3 +361,157 @@ var _ = Describe("default dataDurability", func() { Expect(cluster.Spec.PostgresConfiguration.Synchronous.DataDurability).To(Equal(DataDurabilityLevelPreferred)) }) }) + +var _ = Describe("NewLivenessPingerConfigFromAnnotations", func() { + It("returns a nil configuration when annotation is not present", func() { + annotations := map[string]string{} + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).To(BeNil()) + }) + + It("returns an error when annotation contains invalid JSON", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: "{invalid_json", + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).To(HaveOccurred()) + Expect(config).To(BeNil()) + }) + + It("applies default values when timeouts are not specified", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": true}`, + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).ToNot(BeNil()) + Expect(config.Enabled).To(HaveValue(BeTrue())) + Expect(config.RequestTimeout).To(Equal(1000)) + Expect(config.ConnectionTimeout).To(Equal(1000)) + }) + + It("preserves values when all fields are specified", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": true, "requestTimeout": 300, "connectionTimeout": 600}`, + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).ToNot(BeNil()) + Expect(config.Enabled).To(HaveValue(BeTrue())) + Expect(config.RequestTimeout).To(Equal(300)) + Expect(config.ConnectionTimeout).To(Equal(600)) + }) + + It("correctly sets enabled to false when specified", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": false, "requestTimeout": 300, "connectionTimeout": 600}`, + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).ToNot(BeNil()) + Expect(config.Enabled).To(HaveValue(BeFalse())) + Expect(config.RequestTimeout).To(Equal(300)) + Expect(config.ConnectionTimeout).To(Equal(600)) + }) + + It("correctly handles zero values for timeouts", func() { + annotations := map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": true, "requestTimeout": 0, "connectionTimeout": 0}`, + } + + config, err := NewLivenessPingerConfigFromAnnotations(annotations) + + Expect(err).ToNot(HaveOccurred()) + Expect(config).ToNot(BeNil()) + Expect(config.RequestTimeout).To(Equal(1000)) + Expect(config.ConnectionTimeout).To(Equal(1000)) + }) +}) + +var _ = Describe("probe defaults", func() { + It("should set isolationCheck probe to true by default when no probes are specified", func() { + cluster := &Cluster{} + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeTrue())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(1000)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(1000)) + }) + + It("should not override isolationCheck probe if already set", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Probes: &ProbesConfiguration{ + Liveness: &LivenessProbe{ + IsolationCheck: &IsolationCheckConfiguration{ + Enabled: 
ptr.To(false), + RequestTimeout: 300, + ConnectionTimeout: 600, + }, + }, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeFalse())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(300)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(600)) + }) + + It("should set isolationCheck probe when it is not set but liveness probe is present", func() { + cluster := &Cluster{ + Spec: ClusterSpec{ + Probes: &ProbesConfiguration{ + Liveness: &LivenessProbe{}, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeTrue())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(1000)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(1000)) + }) + + It("should convert the existing annotations if set to true", func() { + cluster := &Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": true, "requestTimeout": 300, "connectionTimeout": 600}`, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeTrue())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(300)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(600)) + }) + + It("should convert the existing annotations if set to false", func() { + cluster := &Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.LivenessPingerAnnotationName: `{"enabled": false, "requestTimeout": 300, "connectionTimeout": 600}`, + }, + }, + } + cluster.Default() + Expect(cluster.Spec.Probes.Liveness.IsolationCheck).ToNot(BeNil()) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.Enabled).To(HaveValue(BeFalse())) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.RequestTimeout).To(Equal(300)) + Expect(cluster.Spec.Probes.Liveness.IsolationCheck.ConnectionTimeout).To(Equal(600)) + }) +}) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 66c16160e1..b7a781b558 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -490,7 +490,7 @@ type ProbesConfiguration struct { Startup *ProbeWithStrategy `json:"startup,omitempty"` // The liveness probe configuration - Liveness *Probe `json:"liveness,omitempty"` + Liveness *LivenessProbe `json:"liveness,omitempty"` // The readiness probe configuration Readiness *ProbeWithStrategy `json:"readiness,omitempty"` @@ -568,6 +568,39 @@ type Probe struct { TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } +// LivenessProbe is the configuration of the liveness probe +type LivenessProbe struct { + // Probe is the standard probe configuration + Probe `json:",inline"` + + // Configure the feature that extends the liveness probe for a primary + // instance. In addition to the basic checks, this verifies whether the + // primary is isolated from the Kubernetes API server and from its + // replicas, ensuring that it can be safely shut down if network + // partition or API unavailability is detected. Enabled by default. 
+ // +optional + IsolationCheck *IsolationCheckConfiguration `json:"isolationCheck,omitempty"` +} + +// IsolationCheckConfiguration contains the configuration for the isolation check +// functionality in the liveness probe +type IsolationCheckConfiguration struct { + // Whether primary isolation checking is enabled for the liveness probe + // +optional + // +kubebuilder:default:=true + Enabled *bool `json:"enabled,omitempty"` + + // Timeout in milliseconds for requests during the primary isolation check + // +optional + // +kubebuilder:default:=1000 + RequestTimeout int `json:"requestTimeout,omitempty"` + + // Timeout in milliseconds for connections during the primary isolation check + // +optional + // +kubebuilder:default:=1000 + ConnectionTimeout int `json:"connectionTimeout,omitempty"` +} + const ( // PhaseSwitchover when a cluster is changing the primary node PhaseSwitchover = "Switchover in progress" diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index c8b3917c96..042fe56758 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1525,6 +1525,26 @@ func (in *InstanceReportedState) DeepCopy() *InstanceReportedState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsolationCheckConfiguration) DeepCopyInto(out *IsolationCheckConfiguration) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsolationCheckConfiguration. +func (in *IsolationCheckConfiguration) DeepCopy() *IsolationCheckConfiguration { + if in == nil { + return nil + } + out := new(IsolationCheckConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LDAPBindAsAuth) DeepCopyInto(out *LDAPBindAsAuth) { *out = *in @@ -1585,6 +1605,27 @@ func (in *LDAPConfig) DeepCopy() *LDAPConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LivenessProbe) DeepCopyInto(out *LivenessProbe) { + *out = *in + in.Probe.DeepCopyInto(&out.Probe) + if in.IsolationCheck != nil { + in, out := &in.IsolationCheck, &out.IsolationCheck + *out = new(IsolationCheckConfiguration) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LivenessProbe. +func (in *LivenessProbe) DeepCopy() *LivenessProbe { + if in == nil { + return nil + } + out := new(LivenessProbe) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ManagedConfiguration) DeepCopyInto(out *ManagedConfiguration) { *out = *in @@ -2322,7 +2363,7 @@ func (in *ProbesConfiguration) DeepCopyInto(out *ProbesConfiguration) { } if in.Liveness != nil { in, out := &in.Liveness, &out.Liveness - *out = new(Probe) + *out = new(LivenessProbe) (*in).DeepCopyInto(*out) } if in.Readiness != nil { diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index d2236b8354..4be1f4b297 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -4249,6 +4249,30 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer + isolationCheck: + description: |- + Configure the feature that extends the liveness probe for a primary + instance. In addition to the basic checks, this verifies whether the + primary is isolated from the Kubernetes API server and from its + replicas, ensuring that it can be safely shut down if network + partition or API unavailability is detected. Enabled by default. + properties: + connectionTimeout: + default: 1000 + description: Timeout in milliseconds for connections during + the primary isolation check + type: integer + enabled: + default: true + description: Whether primary isolation checking is enabled + for the liveness probe + type: boolean + requestTimeout: + default: 1000 + description: Timeout in milliseconds for requests during + the primary isolation check + type: integer + type: object periodSeconds: description: |- How often (in seconds) to perform the probe. diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 777443256f..86ed453e67 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -3210,6 +3210,45 @@ conflict with the operator's intended functionality or design.
 
+## IsolationCheckConfiguration {#postgresql-cnpg-io-v1-IsolationCheckConfiguration}
+
+**Appears in:**
+
+- [LivenessProbe](#postgresql-cnpg-io-v1-LivenessProbe)
+
+IsolationCheckConfiguration contains the configuration for the isolation check
+functionality in the liveness probe
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `enabled` | `bool` | Whether primary isolation checking is enabled for the liveness probe |
+| `requestTimeout` | `int` | Timeout in milliseconds for requests during the primary isolation check |
+| `connectionTimeout` | `int` | Timeout in milliseconds for connections during the primary isolation check |
+
 ## LDAPBindAsAuth {#postgresql-cnpg-io-v1-LDAPBindAsAuth}
@@ -3368,6 +3407,41 @@ the bind+search LDAP authentication process
 
+## LivenessProbe {#postgresql-cnpg-io-v1-LivenessProbe}
+
+**Appears in:**
+
+- [ProbesConfiguration](#postgresql-cnpg-io-v1-ProbesConfiguration)
+
+LivenessProbe is the configuration of the liveness probe
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `Probe` | [Probe](#postgresql-cnpg-io-v1-Probe) | (Members of `Probe` are embedded into this type.) Probe is the standard probe configuration |
+| `isolationCheck` | [IsolationCheckConfiguration](#postgresql-cnpg-io-v1-IsolationCheckConfiguration) | Configure the feature that extends the liveness probe for a primary instance. In addition to the basic checks, this verifies whether the primary is isolated from the Kubernetes API server and from its replicas, ensuring that it can be safely shut down if network partition or API unavailability is detected. Enabled by default. |
+
 ## ManagedConfiguration {#postgresql-cnpg-io-v1-ManagedConfiguration}
@@ -4412,9 +4486,9 @@ the primary server of the cluster as part of rolling updates
 **Appears in:**
 
-- [ProbeWithStrategy](#postgresql-cnpg-io-v1-ProbeWithStrategy)
+- [LivenessProbe](#postgresql-cnpg-io-v1-LivenessProbe)
 
-- [ProbesConfiguration](#postgresql-cnpg-io-v1-ProbesConfiguration)
+- [ProbeWithStrategy](#postgresql-cnpg-io-v1-ProbeWithStrategy)
 
 Probe describes a health check to be performed against a container to determine whether it is
@@ -4560,7 +4634,7 @@ to be injected in the PostgreSQL Pods
 `liveness` [Required]
-Probe
+LivenessProbe
 
 The liveness probe configuration
 
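Putting the new API surface together, the stanza can be spelled out explicitly in a `Cluster` manifest. A minimal sketch, where the cluster name is illustrative and the values shown are the documented defaults (so omitting the whole stanza is equivalent):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  probes:
    liveness:
      isolationCheck:
        enabled: true           # default
        requestTimeout: 1000    # milliseconds, default
        connectionTimeout: 1000 # milliseconds, default
```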
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index dfedef1b97..6ce978874b 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -254,6 +254,31 @@ removed before installing the new one. This won't affect user data but only the operator itself. + + ### Upgrading to 1.26.0 or 1.25.2 !!! Important diff --git a/docs/src/instance_manager.md b/docs/src/instance_manager.md index a914637b82..e7ac50f1d3 100644 --- a/docs/src/instance_manager.md +++ b/docs/src/instance_manager.md @@ -188,9 +188,9 @@ spec: failureThreshold: 10 ``` -### Primary Isolation (alpha) +### Primary Isolation -CloudNativePG 1.26 introduces an opt-in experimental behavior for the liveness +CloudNativePG 1.27 introduces an additional behavior for the liveness probe of a PostgreSQL primary, which will report a failure if **both** of the following conditions are met: @@ -199,35 +199,34 @@ following conditions are met: The effect of this behavior is to consider an isolated primary to be not alive and subsequently **shut it down** when the liveness probe fails. -It is **disabled by default** and can be enabled by adding the following -annotation to the `Cluster` resource: +It is **enabled by default** and can be disabled by adding the following: ```yaml -metadata: - annotations: - alpha.cnpg.io/livenessPinger: '{"enabled": true}' +spec: + probes: + liveness: + isolationCheck: + enabled: false ``` -!!! Warning - This feature is experimental and will be introduced in a future CloudNativePG - release with a new API. If you decide to use it now, note that the API **will - change**. - !!! Important - If you plan to enable this experimental feature, be aware that the default - liveness probe settings—automatically derived from `livenessProbeTimeout`—might + Be aware that the default liveness probe settings—automatically derived from `livenessProbeTimeout`—might be aggressive (30 seconds). As such, we recommend explicitly setting the liveness probe configuration to suit your environment. -The annotation also accepts two optional network settings: `requestTimeout` -and `connectionTimeout`, both defaulting to `500` (in milliseconds). +The spec also accepts two optional network settings: `requestTimeout` +and `connectionTimeout`, both defaulting to `1000` (in milliseconds). In cloud environments, you may need to increase these values. 
For example:

```yaml
-metadata:
-  annotations:
-    alpha.cnpg.io/livenessPinger: '{"enabled": true,"requestTimeout":1000,"connectionTimeout":1000}'
+spec:
+  probes:
+    liveness:
+      isolationCheck:
+        enabled: true
+        requestTimeout: 2000
+        connectionTimeout: 2000
```

## Readiness Probe
diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go
index 7091457935..ecf19a6b80 100644
--- a/internal/webhook/v1/cluster_webhook.go
+++ b/internal/webhook/v1/cluster_webhook.go
@@ -49,7 +49,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
-	"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/probes"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
 	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
@@ -2508,7 +2507,7 @@ func (v *ClusterCustomValidator) validateLivenessPingerProbe(r *apiv1.Cluster) f
 		return nil
 	}
 
-	_, err := probes.NewLivenessPingerConfigFromAnnotations(context.Background(), r.Annotations)
+	_, err := apiv1.NewLivenessPingerConfigFromAnnotations(r.Annotations)
 	if err != nil {
 		return field.ErrorList{
 			field.Invalid(
diff --git a/pkg/management/postgres/webserver/probes/liveness.go b/pkg/management/postgres/webserver/probes/liveness.go
index bbb07601a6..04e661e2a0 100644
--- a/pkg/management/postgres/webserver/probes/liveness.go
+++ b/pkg/management/postgres/webserver/probes/liveness.go
@@ -21,6 +21,7 @@ package probes
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/http"
 	"time"
@@ -102,7 +103,8 @@ func (e *livenessExecutor) IsHealthy(
 	// quickly as possible.
 	if err := evaluateLivenessPinger(ctx, e.lastestKnownCluster.DeepCopy()); err != nil {
 		contextLogger.Warning(
-			"Instance connectivity error - liveness probe failing but API server is reachable",
+			"Instance connectivity error - liveness probe succeeding because "+
+				"the API server is reachable",
 			"err", err.Error(),
 		)
 
@@ -151,11 +153,20 @@ func evaluateLivenessPinger(
 ) error {
 	contextLogger := log.FromContext(ctx)
 
-	cfg, err := NewLivenessPingerConfigFromAnnotations(ctx, cluster.Annotations)
-	if err != nil {
-		return err
+	var cfg *apiv1.IsolationCheckConfiguration
+	if cluster.Spec.Probes != nil && cluster.Spec.Probes.Liveness != nil {
+		cfg = cluster.Spec.Probes.Liveness.IsolationCheck
+	}
+	if cfg == nil {
+		return nil
 	}
-	if !cfg.isEnabled() {
+
+	// This should never happen given that we set a default value. Fail fast.
+ if cfg.Enabled == nil { + return errors.New("enabled field is not set in the liveness isolation check configuration") + } + + if !*cfg.Enabled { contextLogger.Debug("pinger config not enabled, skipping") return nil } @@ -165,7 +176,7 @@ func evaluateLivenessPinger( return nil } - checker, err := buildInstanceReachabilityChecker(*cfg) + checker, err := buildInstanceReachabilityChecker(cfg) if err != nil { return fmt.Errorf("failed to build instance reachability checker: %w", err) } diff --git a/pkg/management/postgres/webserver/probes/pinger.go b/pkg/management/postgres/webserver/probes/pinger.go index a66470324f..67fc2aaca1 100644 --- a/pkg/management/postgres/webserver/probes/pinger.go +++ b/pkg/management/postgres/webserver/probes/pinger.go @@ -20,9 +20,8 @@ SPDX-License-Identifier: Apache-2.0 package probes import ( - "context" "crypto/x509" - "encoding/json" + "errors" "fmt" "net" "net/http" @@ -30,89 +29,29 @@ import ( "os" "time" - "github.com/cloudnative-pg/machinery/pkg/log" - "k8s.io/utils/ptr" - apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/certs" cnpgUrl "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" postgresSpec "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" - "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) -// LivenessPingerCfg if the configuration of the instance -// reachability checker -type LivenessPingerCfg struct { - Enabled *bool `json:"enabled"` - RequestTimeout int `json:"requestTimeout,omitempty"` - ConnectionTimeout int `json:"connectionTimeout,omitempty"` -} - -func (probe *LivenessPingerCfg) isEnabled() bool { - if probe == nil || probe.Enabled == nil { - return false - } - - return *probe.Enabled -} - -// NewLivenessPingerConfigFromAnnotations creates a new pinger configuration from the annotations -// in the cluster definition -func NewLivenessPingerConfigFromAnnotations( - ctx context.Context, - annotations map[string]string, -) (*LivenessPingerCfg, error) { - const ( - // defaultRequestTimeout is the default value of the request timeout - defaultRequestTimeout = 500 - - // defaultConnectionTimeout is the default value of the connection timeout - defaultConnectionTimeout = 1000 - ) - - contextLogger := log.FromContext(ctx) - - v, ok := annotations[utils.LivenessPingerAnnotationName] - if !ok { - contextLogger.Debug("pinger config not found in the cluster annotations") - return &LivenessPingerCfg{ - Enabled: ptr.To(false), - }, nil - } - - var cfg LivenessPingerCfg - if err := json.Unmarshal([]byte(v), &cfg); err != nil { - contextLogger.Error(err, "failed to unmarshal pinger config") - return nil, fmt.Errorf("while unmarshalling pinger config: %w", err) - } - - if cfg.Enabled == nil { - return nil, fmt.Errorf("pinger config is missing the enabled field") - } - - if cfg.RequestTimeout == 0 { - cfg.RequestTimeout = defaultRequestTimeout - } - if cfg.ConnectionTimeout == 0 { - cfg.ConnectionTimeout = defaultConnectionTimeout - } - - return &cfg, nil -} - // pinger can check if a certain instance is reachable by using // the failsafe REST endpoint type pinger struct { dialer *net.Dialer client *http.Client - config LivenessPingerCfg + config *apiv1.IsolationCheckConfiguration } // buildInstanceReachabilityChecker creates a new instance reachability checker by loading // the server CA certificate from the same location that will be used by PostgreSQL. // In this case, we avoid using the API Server as it may be unreliable. 
-func buildInstanceReachabilityChecker(cfg LivenessPingerCfg) (*pinger, error) { +func buildInstanceReachabilityChecker(cfg *apiv1.IsolationCheckConfiguration) (*pinger, error) { + if cfg == nil { + return nil, errors.New("isolation check configuration is nil") + } + certificateLocation := postgresSpec.ServerCACertificateLocation caCertificate, err := os.ReadFile(certificateLocation) //nolint:gosec if err != nil { @@ -155,6 +94,7 @@ func (e *pinger) ping(host, ip string) error { if res, err = e.client.Get(failsafeURL.String()); err != nil { return &pingError{ host: host, + ip: ip, err: err, config: e.config, } @@ -182,7 +122,7 @@ type pingError struct { host string ip string - config LivenessPingerCfg + config *apiv1.IsolationCheckConfiguration err error } diff --git a/pkg/management/postgres/webserver/remote.go b/pkg/management/postgres/webserver/remote.go index ef76017ba9..13163c3de4 100644 --- a/pkg/management/postgres/webserver/remote.go +++ b/pkg/management/postgres/webserver/remote.go @@ -231,7 +231,7 @@ func (ws *remoteWebserverEndpoints) failSafe(w http.ResponseWriter, _ *http.Requ _, _ = fmt.Fprint(w, "OK") } -// This is the failsafe probe +// This is the liveness probe func (ws *remoteWebserverEndpoints) isServerHealthy(w http.ResponseWriter, req *http.Request) { ws.livenessChecker.IsHealthy(req.Context(), w) } diff --git a/tests/e2e/probes_test.go b/tests/e2e/probes_test.go index b43ed7b950..6d15855da2 100644 --- a/tests/e2e/probes_test.go +++ b/tests/e2e/probes_test.go @@ -62,7 +62,9 @@ var _ = Describe("Probes configuration tests", Label(tests.LabelBasic), func() { Startup: &apiv1.ProbeWithStrategy{ Probe: probeConfiguration, }, - Liveness: probeConfiguration.DeepCopy(), + Liveness: &apiv1.LivenessProbe{ + Probe: probeConfiguration, + }, Readiness: &apiv1.ProbeWithStrategy{ Probe: probeConfiguration, }, From 108b764b509ae3974c632f41c815549051729d2d Mon Sep 17 00:00:00 2001 From: Pierrick <139142330+pchovelon@users.noreply.github.com> Date: Mon, 7 Jul 2025 09:01:51 +0200 Subject: [PATCH 680/836] fix(pg_rewind): typo in an error message (#7980) Signed-off-by: Pierrick Chovelon --- internal/management/controller/instance_startup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index bd170dcba3..c22052bfbe 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -132,7 +132,7 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context err = r.instance.Rewind(ctx) if err != nil { - return fmt.Errorf("while exucuting pg_rewind: %w", err) + return fmt.Errorf("while executing pg_rewind: %w", err) } // Now I can demote myself From fe8349c3b3f9dc777a593f897db746f4d49ab0c3 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 7 Jul 2025 10:33:23 +0200 Subject: [PATCH 681/836] fix(backup,online): fail backup if targetPod becomes unhealthy (#7944) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch ensures that the backup process is immediately aborted if the targetPod becomes unhealthy during an online backup operation. Previously, the backup would continue even if the target instance was no longer in a healthy state, potentially resulting in incomplete or inconsistent backups. By monitoring the pod's health status, the process now fails fast, providing early feedback and increasing the accuracy of the Backup object status. 
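The health check only matters for online backups, so the patch also adds a `GetOnlineOrDefault` helper (see the diff below): `backup.spec.online` wins when set, otherwise the cluster's `spec.backup.volumeSnapshot.online` applies, otherwise the backup is treated as online; methods other than `volumeSnapshot` are always considered online. An illustrative manifest that pins the value explicitly (names are hypothetical):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-example
spec:
  cluster:
    name: cluster-example
  method: volumeSnapshot
  online: true   # overrides the cluster-level volumeSnapshot.online setting
```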
Related #7503 Closes #7905 Signed-off-by: Armando Ruocco Signed-off-by: Leonardo Cecchi Signed-off-by: Niccolò Fei Co-authored-by: Leonardo Cecchi Co-authored-by: Niccolò Fei --- api/v1/backup_funcs.go | 24 ++++++++ internal/controller/backup_controller.go | 73 +++++++++++++++++++++++- 2 files changed, 95 insertions(+), 2 deletions(-) diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go index f53f8e34b0..1e08d97868 100644 --- a/api/v1/backup_funcs.go +++ b/api/v1/backup_funcs.go @@ -236,6 +236,30 @@ func (backup *Backup) GetAssignedInstance(ctx context.Context, cli client.Client return &previouslyElectedPod, nil } +// GetOnlineOrDefault returns the online value for the backup. +func (backup *Backup) GetOnlineOrDefault(cluster *Cluster) bool { + // Offline backups are supported only with the + // volume snapshot backup method. + if backup.Spec.Method != BackupMethodVolumeSnapshot { + return true + } + + if backup.Spec.Online != nil { + return *backup.Spec.Online + } + + if cluster.Spec.Backup == nil || cluster.Spec.Backup.VolumeSnapshot == nil { + return true + } + + config := backup.GetVolumeSnapshotConfiguration(*cluster.Spec.Backup.VolumeSnapshot) + if config.Online != nil { + return *config.Online + } + + return true +} + // GetVolumeSnapshotConfiguration overrides the configuration value with the ones specified // in the backup, if present. func (backup *Backup) GetVolumeSnapshotConfiguration( diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index f2316a10d1..c43f53de44 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "reflect" + "slices" "time" "github.com/cloudnative-pg/machinery/pkg/log" @@ -65,6 +66,16 @@ const backupPhase = ".status.phase" // where the name of the cluster is written const clusterName = ".spec.cluster.name" +// getIsRunningResult gets the result that is returned to periodically +// check for running backups. +// This is particularly important when the target Pod is destroyed +// or stops responding. 
+// +// This result should be used almost always when a backup is running +func getIsRunningResult() ctrl.Result { + return ctrl.Result{RequeueAfter: 10 * time.Minute} +} + // BackupReconciler reconciles a Backup object type BackupReconciler struct { client.Client @@ -203,6 +214,19 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, err } + if isRunning && backup.GetOnlineOrDefault(&cluster) { + if err := r.ensureTargetPodHealthy(ctx, r.Client, &backup, &cluster); err != nil { + contextLogger.Error(err, "while ensuring target pod is healthy") + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, nil, + fmt.Errorf("while ensuring target pod is healthy: %w", err)) + r.Recorder.Eventf(&backup, "Warning", "TargetPodNotHealthy", + "Error ensuring target pod is healthy: %s", err.Error()) + // this ensures that we will retry in case of errors + // if everything was flagged correctly we will not come back again in this state + return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + } + if backup.Spec.Method == apiv1.BackupMethodBarmanObjectStore { if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, @@ -211,7 +235,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } if isRunning { - return ctrl.Result{}, nil + return getIsRunningResult(), nil } r.Recorder.Eventf(&backup, "Normal", "Starting", @@ -220,7 +244,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr if backup.Spec.Method == apiv1.BackupMethodPlugin { if isRunning { - return ctrl.Result{}, nil + return getIsRunningResult(), nil } r.Recorder.Eventf(&backup, "Normal", "Starting", @@ -707,3 +731,48 @@ func (r *BackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manage controllerBuilder = controllerBuilder.WithOptions(controller.Options{MaxConcurrentReconciles: 1}) return controllerBuilder.Complete(r) } + +func (r *BackupReconciler) ensureTargetPodHealthy( + ctx context.Context, + cli client.Client, + backup *apiv1.Backup, + cluster *apiv1.Cluster, +) error { + if backup.Status.InstanceID == nil || len(backup.Status.InstanceID.PodName) == 0 { + return fmt.Errorf("no target pod assigned for backup %s", backup.Name) + } + + podName := backup.Status.InstanceID.PodName + + var pod corev1.Pod + if err := cli.Get(ctx, client.ObjectKey{ + Namespace: backup.Namespace, + Name: podName, + }, &pod); err != nil { + if apierrs.IsNotFound(err) { + return fmt.Errorf("target pod %s not found in namespace %s for backup %s", podName, backup.Namespace, backup.Name) + } + return fmt.Errorf( + "error getting target pod %s in namespace %s for backup %s: %w", podName, backup.Namespace, + backup.Name, + err, + ) + } + + // if the pod is present we evaluate its health status + healthyPods, ok := cluster.Status.InstancesStatus[apiv1.PodHealthy] + if !ok { + return fmt.Errorf("no status found for target pod %s in cluster %s", podName, cluster.Name) + } + + if !slices.Contains(healthyPods, podName) { + return fmt.Errorf("target pod %s is not healthy for backup in cluster %s", podName, cluster.Name) + } + + contextLogger := log.FromContext(ctx) + contextLogger.Debug("Target pod is healthy for backup", + "podName", podName, + "backupName", backup.Name, + ) + return nil +} From b79a1004fcbb95c93b8073014dc29d6dc4a32a63 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 7 Jul 2025 11:57:35 +0200 Subject: [PATCH 682/836] 
chore(backup,logging): log the correct namespace value (#7987) Signed-off-by: Armando Ruocco --- pkg/management/postgres/webserver/plugin_backup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go index a51d25db89..b8d3f05a96 100644 --- a/pkg/management/postgres/webserver/plugin_backup.go +++ b/pkg/management/postgres/webserver/plugin_backup.go @@ -73,7 +73,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { contextLogger := log.FromContext(ctx).WithValues( "pluginConfiguration", b.Backup.Spec.PluginConfiguration, "backupName", b.Backup.Name, - "backupNamespace", b.Backup.Name) + "backupNamespace", b.Backup.Namespace) plugins := repository.New() defer plugins.Close() From 60bda6537a180fd974cfdc2983298f8ca1289f7c Mon Sep 17 00:00:00 2001 From: wvengen Date: Tue, 8 Jul 2025 09:28:25 +0000 Subject: [PATCH 683/836] Add note on WAL recovery and tablespace changes (#7972) Closes #4407 Signed-off-by: wvengen --- docs/src/recovery.md | 6 ++++++ docs/src/tablespaces.md | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/docs/src/recovery.md b/docs/src/recovery.md index 5aba0eecc0..85779fd5df 100644 --- a/docs/src/recovery.md +++ b/docs/src/recovery.md @@ -374,6 +374,12 @@ recovery target. It's your responsibility to ensure that the end time of the base backup in the volume snapshot is before the recovery target timestamp. +!!! Warning + If you added or removed a [tablespace](tablespaces.md) in your cluster + since the last base backup, replaying the WAL will fail. You need a base + backup between the time of the tablespace change and the recovery target + timestamp. + ### Recovery targets Here are the recovery target criteria you can use: diff --git a/docs/src/tablespaces.md b/docs/src/tablespaces.md index cd214d1713..48538995f5 100644 --- a/docs/src/tablespaces.md +++ b/docs/src/tablespaces.md @@ -254,6 +254,10 @@ tablespace map) both on object stores and volume snapshots. backup. The lag will be resolved in a maximum of 5 minutes, with the next reconciliation. +!!! Warning + When you add or remove a tablespace in an existing cluster, recovery + from WAL will fail until you take a new base backup. + Once a cluster with tablespaces has a base backup, you can restore a new cluster from it. When it comes to the recovery side, it's your responsibility to ensure that the `Cluster` definition of the recovered From dc45fe55c2a0daa972c730e305ca052591e8a21d Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Tue, 8 Jul 2025 13:57:06 +0200 Subject: [PATCH 684/836] feat(replication): add `synchronizeLogicalDecoding` for HA logical slots (#7931) Add the `synchronizeLogicalDecoding` field under `spec.replicationSlots.highAvailability` to enable automatic synchronization of logical decoding slots across high-availability clusters. This feature primarily leverages the `sync_replication_slots` server parameter, introduced in PostgreSQL 17, to provide native and future-proof slot synchronization. For clusters running older PostgreSQL versions, the `pg_failover_slots` extension is used as a fallback. A validating admission webhook is included to enforce prerequisites and prevent misconfiguration. 
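As a sketch of the intended usage on PostgreSQL 17 and later, mirroring the
documentation added in this patch (cluster name is a placeholder):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  replicationSlots:
    highAvailability:
      enabled: true
      synchronizeLogicalDecoding: true
  postgresql:
    parameters:
      hot_standby_feedback: 'on'
      sync_replication_slots: 'on'
```

On older PostgreSQL versions, the `pg_failover_slots` extension must be
enabled instead of `sync_replication_slots`.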
Closes #7454 Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 1 + api/v1/cluster_types.go | 10 ++ .../bases/postgresql.cnpg.io_clusters.yaml | 9 ++ docs/src/cloudnative-pg.v1.md | 13 ++ docs/src/index.md | 2 +- docs/src/logical_replication.md | 17 +- docs/src/operator_capability_levels.md | 10 +- docs/src/replication.md | 72 +++++++++ internal/webhook/v1/cluster_webhook.go | 145 ++++++++++++------ internal/webhook/v1/cluster_webhook_test.go | 122 ++++++++++++++- pkg/management/postgres/configuration.go | 18 +++ pkg/postgres/configuration.go | 41 +++++ 12 files changed, 408 insertions(+), 52 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 1ca2965c38..839186d3fe 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1332,6 +1332,7 @@ switchReplicaClusterStatus switchoverDelay switchovers syncReplicaElectionConstraint +synchronizeLogicalDecoding synchronizeReplicas synchronizeReplicasCache sys diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index b7a781b558..b5d2be6ab3 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1210,6 +1210,16 @@ type ReplicationSlotsHAConfiguration struct { // +kubebuilder:validation:Pattern=^[0-9a-z_]*$ // +optional SlotPrefix string `json:"slotPrefix,omitempty"` + + // When enabled, the operator automatically manages synchronization of logical + // decoding (replication) slots across high-availability clusters. + // + // Requires one of the following conditions: + // - PostgreSQL version 17 or later + // - PostgreSQL version < 17 with pg_failover_slots extension enabled + // + // +optional + SynchronizeLogicalDecoding bool `json:"synchronizeLogicalDecoding,omitempty"` } // KubernetesUpgradeStrategy tells the operator if the user want to diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 4be1f4b297..feb6be0621 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -4844,6 +4844,15 @@ spec: This can only be set at creation time. By default set to `_cnpg_`. pattern: ^[0-9a-z_]*$ type: string + synchronizeLogicalDecoding: + description: |- + When enabled, the operator automatically manages synchronization of logical + decoding (replication) slots across high-availability clusters. + + Requires one of the following conditions: + - PostgreSQL version 17 or later + - PostgreSQL version < 17 with pg_failover_slots extension enabled + type: boolean type: object synchronizeReplicas: description: Configures the synchronization of the user defined diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 86ed453e67..08160dba78 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -5108,6 +5108,19 @@ It may only contain lower case letters, numbers, and the underscore character. This can only be set at creation time. By default set to _cnpg_.

+<tr><td><code>synchronizeLogicalDecoding</code><br/>
+<i>bool</i>
+</td>
+<td>
+   <p>When enabled, the operator automatically manages synchronization of logical
+decoding (replication) slots across high-availability clusters.</p>
+<p>Requires one of the following conditions:</p>
+<ul>
+<li>PostgreSQL version 17 or later</li>
+<li>PostgreSQL version < 17 with pg_failover_slots extension enabled</li>
+</ul>
+</td>
+</tr>
+ + diff --git a/docs/src/index.md b/docs/src/index.md index 540274a100..1c89078a7d 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -131,7 +131,7 @@ Additionally, the community provides images for the [PostGIS extension](postgis. - *Offline Import*: Direct restore from existing databases. - *Online Import*: PostgreSQL native logical replication via the `Subscription` resource. - High Availability physical replication slots, including synchronization of - user-defined replication slots. + user-defined replication slots and logical decoding failover. - Parallel WAL archiving and restore, ensuring high-performance data synchronization in high-write environments. - TLS support, including: diff --git a/docs/src/logical_replication.md b/docs/src/logical_replication.md index 5bd36027b9..02dd5fbea8 100644 --- a/docs/src/logical_replication.md +++ b/docs/src/logical_replication.md @@ -12,6 +12,14 @@ connect to publications on a publisher node. Subscribers pull data changes from these publications and can re-publish them, enabling cascading replication and complex topologies. +!!! Important + To protect your logical replication subscribers after a failover of the + publisher cluster in CloudNativePG, ensure that replication slot + synchronization for logical decoding is enabled. Without this, your logical + replication clients may lose data and fail to continue seamlessly after a + failover. For configuration details, see + ["Replication: Logical Decoding Slot Synchronization"](replication.md#logical-decoding-slot-synchronization). + This flexible model is particularly useful for: - Online data migrations @@ -245,7 +253,7 @@ the `Subscription` status will reflect the following: If an error occurs during reconciliation, `status.applied` will be `false`, and an error message will be included in the `status.message` field. -### Removing a subscription +### Removing a Subscription The `subscriptionReclaimPolicy` field controls the behavior when deleting a `Subscription` object: @@ -274,6 +282,13 @@ spec: In this case, deleting the `Subscription` object also removes the `subscriber` subscription from the `app` database of the `king` cluster. +### Resilience to Failovers + +To ensure that your logical replication subscriptions remain operational after +a failover of the publisher, configure CloudNativePG to synchronize logical +decoding slots across the cluster. For detailed instructions, see +[Logical Decoding Slot Synchronization](replication.md#logical-decoding-slot-synchronization). + ## Limitations Logical replication in PostgreSQL has some inherent limitations, as outlined in diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index a98993d7a6..4c4a4ad2e8 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -115,11 +115,11 @@ than `1`, the operator manages `instances -1` replicas, including high availability (HA) through automated failover and rolling updates through switchover operations. -CloudNativePG manages replication slots for all the replicas -in the HA cluster. The implementation is inspired by the previously -proposed patch for PostgreSQL, called -[failover slots](https://wiki.postgresql.org/wiki/Failover_slots), and -also supports user defined physical replication slots on the primary. +CloudNativePG manages replication slots for all replicas in the +high-availability cluster. 
It also supports user-defined physical replication +slots on the primary and enables logical decoding failover—natively for +PostgreSQL 17 and later using `sync_replication_slots`, and through the +`pg_failover_slots` extension for earlier versions. ### Service Configuration diff --git a/docs/src/replication.md b/docs/src/replication.md index f992796c38..6489963276 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -734,6 +734,78 @@ spec: size: 1Gi ``` +### Logical Decoding Slot Synchronization + +CloudNativePG can synchronize logical decoding (replication) slots across all +nodes in a high-availability cluster, ensuring seamless continuation of logical +replication after a failover or switchover. This feature is disabled by +default, and enabling it requires two steps. + +The first step is to enable logical decoding slot synchronization: + +```yaml + # ... + replicationSlots: + highAvailability: + synchronizeLogicalDecoding: true +``` + +The second step involves configuring PostgreSQL parameters: the required +configuration depends on your PostgreSQL version, as explained below. + +When enabled, the operator automatically manages logical decoding slot states +during failover and switchover, preventing slot invalidation and avoiding data +loss for logical replication clients. + +#### Behavior on PostgreSQL 17 and later + +For PostgreSQL 17 and newer, CloudNativePG transparently manages the +[`synchronized_standby_slots` parameter](https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-SYNCHRONIZED-STANDBY-SLOTS). + +You must enable both `sync_replication_slots` and `hot_standby_feedback` in +your PostgreSQL configuration: + +```yaml +# ... +postgresql: + parameters: + # ... + hot_standby_feedback: 'on' + sync_replication_slots: 'on' +``` + +Additionally, you must create the logical replication `Subscription` with the +`failover` option enabled, for example: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Subscription +# ... +spec: +# ... + parameters: + failover: 'true' +# ... +``` + +When configured, logical WAL sender processes send decoded changes to plugins +only after the specified replication slots confirm receiving and flushing the +relevant WAL, ensuring that: + +- logical replication slots do not consume changes until they are safely + received by the replicas of the publisher, and +- logical replication clients can seamlessly reconnect to a promoted standby + without missing data after failover. + +For more details on logical replication slot synchronization, see the +PostgreSQL documentation on [Logical Replication Failover](https://www.postgresql.org/docs/current/logical-replication-failover.html). + +#### Behavior on PostgreSQL 16 and earlier + +For PostgreSQL 16 and older versions, CloudNativePG uses the +[`pg_failover_slots` extension](https://github.com/EnterpriseDB/pg_failover_slots) +to maintain synchronization of logical replication slots across failovers. 
+ ### Capping the WAL size retained for replication slots When replication slots is enabled, you might end up running out of disk space diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index ecf19a6b80..e3ee3a7558 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -209,6 +209,7 @@ func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.Error v.validateSynchronousReplicaConfiguration, v.validateLDAP, v.validateReplicationSlots, + v.validateSynchronizeLogicalDecoding, v.validateEnv, v.validateManagedServices, v.validateManagedRoles, @@ -1038,25 +1039,25 @@ func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.E } } - walLogHintsValue, walLogHintsSet := r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints] - if walLogHintsSet { - walLogHintsActivated, err := postgres.ParsePostgresConfigBoolean(walLogHintsValue) - if err != nil { - result = append( - result, - field.Invalid( - field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints), - walLogHintsValue, - "invalid `wal_log_hints`. Must be a postgres boolean")) - } - if r.Spec.Instances > 1 && !walLogHintsActivated { - result = append( - result, - field.Invalid( - field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints), - r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints], - "`wal_log_hints` must be set to `on` when `instances` > 1")) - } + if _, fieldError := tryParseBooleanPostgresParameter(r, postgres.ParameterHotStandbyFeedback); fieldError != nil { + result = append(result, fieldError) + } + + if _, fieldError := tryParseBooleanPostgresParameter(r, postgres.ParameterSyncReplicationSlots); fieldError != nil { + result = append(result, fieldError) + } + + walLogHintsActivated, fieldError := tryParseBooleanPostgresParameter(r, postgres.ParameterWalLogHints) + if fieldError != nil { + result = append(result, fieldError) + } + if walLogHintsActivated != nil && !*walLogHintsActivated && r.Spec.Instances > 1 { + result = append( + result, + field.Invalid( + field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLogHints), + r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLogHints], + "`wal_log_hints` must be set to `on` when `instances` > 1")) } // verify the postgres setting min_wal_size < max_wal_size < volume size @@ -1072,6 +1073,24 @@ func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.E return result } +// tryParseBooleanPostgresParameter attempts to parse a boolean PostgreSQL parameter +// from the cluster specification. If the parameter is not set, it returns nil. +func tryParseBooleanPostgresParameter(r *apiv1.Cluster, parameterName string) (*bool, *field.Error) { + stringValue, hasParameter := r.Spec.PostgresConfiguration.Parameters[parameterName] + if !hasParameter { + return nil, nil + } + + value, err := postgres.ParsePostgresConfigBoolean(stringValue) + if err != nil { + return nil, field.Invalid( + field.NewPath("spec", "postgresql", "parameters", parameterName), + stringValue, + fmt.Sprintf("invalid `%s` value. 
Must be a postgres boolean", parameterName)) + } + return &value, nil +} + // validateWalSizeConfiguration verifies that min_wal_size < max_wal_size < wal volume size func validateWalSizeConfiguration( postgresConfig apiv1.PostgresConfiguration, walVolumeSize *resource.Quantity, @@ -2070,6 +2089,62 @@ func (v *ClusterCustomValidator) validateReplicationSlots(r *apiv1.Cluster) fiel return nil } +func (v *ClusterCustomValidator) validateSynchronizeLogicalDecoding(r *apiv1.Cluster) field.ErrorList { + replicationSlots := r.Spec.ReplicationSlots + if replicationSlots.HighAvailability == nil || !replicationSlots.HighAvailability.SynchronizeLogicalDecoding { + return nil + } + + if postgres.IsManagedExtensionUsed("pg_failover_slots", r.Spec.PostgresConfiguration.Parameters) { + return nil + } + + pgMajor, err := r.GetPostgresqlMajorVersion() + if err != nil { + return nil + } + + if pgMajor < 17 { + return field.ErrorList{ + field.Invalid( + field.NewPath("spec", "replicationSlots", "highAvailability", "synchronizeLogicalDecoding"), + replicationSlots.HighAvailability.SynchronizeLogicalDecoding, + "pg_failover_slots extension must be enabled to use synchronizeLogicalDecoding with Postgres versions < 17", + ), + } + } + + result := field.ErrorList{} + + hotStandbyFeedback, _ := postgres.ParsePostgresConfigBoolean( + r.Spec.PostgresConfiguration.Parameters[postgres.ParameterHotStandbyFeedback]) + if !hotStandbyFeedback { + result = append( + result, + field.Invalid( + field.NewPath("spec", "postgresql", "parameters", postgres.ParameterHotStandbyFeedback), + hotStandbyFeedback, + fmt.Sprintf("`%s` must be enabled to enable "+ + "`spec.replicationSlots.highAvailability.synchronizeLogicalDecoding`", + postgres.ParameterHotStandbyFeedback))) + } + + const syncReplicationSlotsKey = "sync_replication_slots" + syncReplicationSlots, _ := postgres.ParsePostgresConfigBoolean( + r.Spec.PostgresConfiguration.Parameters[syncReplicationSlotsKey]) + if !syncReplicationSlots { + result = append( + result, + field.Invalid( + field.NewPath("spec", "postgresql", "parameters", syncReplicationSlotsKey), + syncReplicationSlots, + fmt.Sprintf("either `%s` setting or pg_failover_slots extension must be enabled to enable "+ + "`spec.replicationSlots.highAvailability.synchronizeLogicalDecoding`", syncReplicationSlotsKey))) + } + + return result +} + func (v *ClusterCustomValidator) validateReplicationSlotsChange(r, old *apiv1.Cluster) field.ErrorList { newReplicationSlots := r.Spec.ReplicationSlots oldReplicationSlots := old.Spec.ReplicationSlots @@ -2276,38 +2351,20 @@ func (v *ClusterCustomValidator) validatePgFailoverSlots(r *apiv1.Cluster) field var result field.ErrorList var pgFailoverSlots postgres.ManagedExtension - for i, ext := range postgres.ManagedExtensions { - if ext.Name == "pg_failover_slots" { - pgFailoverSlots = postgres.ManagedExtensions[i] - } - } - if !pgFailoverSlots.IsUsed(r.Spec.PostgresConfiguration.Parameters) { + if !postgres.IsManagedExtensionUsed("pg_failover_slots", r.Spec.PostgresConfiguration.Parameters) { return nil } - const hotStandbyFeedbackKey = "hot_standby_feedback" - hotStandbyFeedbackActivated := false - hotStandbyFeedback, hasHotStandbyFeedback := r.Spec.PostgresConfiguration.Parameters[hotStandbyFeedbackKey] - if hasHotStandbyFeedback { - var err error - hotStandbyFeedbackActivated, err = postgres.ParsePostgresConfigBoolean(hotStandbyFeedback) - if err != nil { - result = append( - result, - field.Invalid( - field.NewPath("spec", "postgresql", "parameters", hotStandbyFeedbackKey), 
- hotStandbyFeedback, - fmt.Sprintf("invalid `%s` value. Must be a postgres boolean", hotStandbyFeedbackKey))) - } - } - - if !hotStandbyFeedbackActivated { + hotStandbyFeedback, _ := postgres.ParsePostgresConfigBoolean( + r.Spec.PostgresConfiguration.Parameters[postgres.ParameterHotStandbyFeedback]) + if !hotStandbyFeedback { result = append( result, field.Invalid( - field.NewPath("spec", "postgresql", "parameters", hotStandbyFeedbackKey), + field.NewPath("spec", "postgresql", "parameters", postgres.ParameterHotStandbyFeedback), hotStandbyFeedback, - fmt.Sprintf("`%s` must be enabled to use %s extension", hotStandbyFeedbackKey, pgFailoverSlots.Name))) + fmt.Sprintf("`%s` must be enabled to use %s extension", + postgres.ParameterHotStandbyFeedback, pgFailoverSlots.Name))) } if r.Spec.ReplicationSlots == nil { diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index 2ab0e37a73..5d21839e3a 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -3686,6 +3686,126 @@ var _ = Describe("validation of replication slots configuration", func() { errors := v.validateReplicationSlots(cluster) Expect(errors).To(BeEmpty()) }) + + It("returns no errors when synchronizeLogicalDecoding is disabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: false, + }, + }, + }, + } + + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(BeNil()) + }) + + It("returns no errors when pg_failover_slots is enabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "hot_standby_feedback": "true", + "pg_failover_slots.synchronize_slot_names": "name_like:%", + }, + }, + }, + } + + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(BeNil()) + }) + + It("returns an error when Postgres version is < 17 and pg_failover_slots is not enabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:16", + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + }, + } + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(HaveLen(1)) + Expect(result[0].Error()).To(ContainSubstring("pg_failover_slots extension must be enabled")) + }) + + It("returns an error when Postgres version is 17 and hot_standby_feedback is disabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17", + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "sync_replication_slots": "on", + "hot_standby_feedback": "off", + }, + }, + }, + } + + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(HaveLen(1)) + Expect(result[0].Error()).To(ContainSubstring("`hot_standby_feedback` must be enabled")) + }) + + It("returns an error when Postgres version is 17 and 
sync_replication_slots is disabled", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17", + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "hot_standby_feedback": "on", + "sync_replication_slots": "off", + }, + }, + }, + } + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(HaveLen(1)) + Expect(result[0].Error()).To(ContainSubstring( + "`sync_replication_slots` setting or pg_failover_slots extension must be enabled")) + }) + + It("returns no errors when all conditions are met", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:17", + ReplicationSlots: &apiv1.ReplicationSlotsConfiguration{ + HighAvailability: &apiv1.ReplicationSlotsHAConfiguration{ + SynchronizeLogicalDecoding: true, + }, + }, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "hot_standby_feedback": "on", + "sync_replication_slots": "on", + }, + }, + }, + } + + result := v.validateSynchronizeLogicalDecoding(cluster) + Expect(result).To(BeEmpty()) + }) }) var _ = Describe("Environment variables validation", func() { @@ -3994,7 +4114,7 @@ var _ = Describe("Managed Extensions validation", func() { }, }, } - Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(2)) + Expect(v.validatePgFailoverSlots(cluster)).To(HaveLen(1)) }) It("should succeed if pg_failover_slots and its prerequisites are enabled", func() { diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index da075978c2..1445e08600 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -431,6 +431,17 @@ func createPostgresqlConfiguration( info.RecoveryMinApplyDelay = cluster.Spec.ReplicaCluster.MinApplyDelay.Duration } + if isSynchronizeLogicalDecodingEnabled(cluster) { + slots := make([]string, 0, len(cluster.Status.InstanceNames)-1) + for _, instanceName := range cluster.Status.InstanceNames { + if instanceName == cluster.Status.CurrentPrimary { + continue + } + slots = append(slots, cluster.GetSlotNameFromInstanceName(instanceName)) + } + info.SynchronizedStandbySlots = slots + } + config, err := plugin.CreatePostgresqlConfigurationWithPlugins(ctx, info, operationType) if err != nil { return "", "", err @@ -440,6 +451,13 @@ func createPostgresqlConfiguration( return file, sha, nil } +func isSynchronizeLogicalDecodingEnabled(cluster *apiv1.Cluster) bool { + return cluster.Spec.ReplicationSlots != nil && + cluster.Spec.ReplicationSlots.HighAvailability != nil && + cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() && + cluster.Spec.ReplicationSlots.HighAvailability.SynchronizeLogicalDecoding +} + // configurePostgresForImport configures Postgres to be optimized for the firt import // process, by writing dedicated options the override.conf file just for this phase func configurePostgresForImport(ctx context.Context, pgData string) (changed bool, err error) { diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 66a0c10924..118dcd5cde 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -48,6 +48,12 @@ const ( // ParameterRecoveryMinApplyDelay is the configuration key containing the recovery_min_apply_delay parameter ParameterRecoveryMinApplyDelay = 
"recovery_min_apply_delay" + + // ParameterSyncReplicationSlots the configuration key containing the sync_replication_slots value + ParameterSyncReplicationSlots = "sync_replication_slots" + + // ParameterHotStandbyFeedback the configuration key containing the hot_standby_feedback value + ParameterHotStandbyFeedback = "hot_standby_feedback" ) // An acceptable wal_level value @@ -297,6 +303,9 @@ type ConfigurationInfo struct { // The synchronous_standby_names configuration to be applied SynchronousStandbyNames string + // The synchronized_standby_slots configuration to be applied + SynchronizedStandbySlots []string + // List of additional sharedPreloadLibraries to be loaded AdditionalSharedPreloadLibraries []string @@ -367,6 +376,23 @@ func (e ManagedExtension) IsUsed(userConfigs map[string]string) bool { return false } +// IsManagedExtensionUsed checks whether a configuration namespace in the named extension namespaces list +// is used in the user-provided configuration +func IsManagedExtensionUsed(name string, userConfigs map[string]string) bool { + var extension *ManagedExtension + for _, ext := range ManagedExtensions { + if ext.Name == name { + extension = &ext + break + } + } + if extension == nil { + return false + } + + return extension.IsUsed(userConfigs) +} + var ( // ManagedExtensions contains the list of extensions the operator supports to manage ManagedExtensions = []ManagedExtension{ @@ -443,6 +469,7 @@ var ( "log_rotation_size": blockedConfigurationParameter, "log_truncate_on_rotation": blockedConfigurationParameter, "pg_failover_slots.primary_dsn": fixedConfigurationParameter, + "pg_failover_slots.standby_slot_names": fixedConfigurationParameter, "promote_trigger_file": blockedConfigurationParameter, "recovery_end_command": blockedConfigurationParameter, "recovery_min_apply_delay": blockedConfigurationParameter, @@ -459,6 +486,7 @@ var ( "ssl_prefer_server_ciphers": fixedConfigurationParameter, "stats_temp_directory": blockedConfigurationParameter, "synchronous_standby_names": fixedConfigurationParameter, + "synchronized_standby_slots": fixedConfigurationParameter, "syslog_facility": blockedConfigurationParameter, "syslog_ident": blockedConfigurationParameter, "syslog_sequence_numbers": blockedConfigurationParameter, @@ -665,6 +693,19 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { configuration.OverwriteConfig(SynchronousStandbyNames, syncStandbyNames) } + if len(info.SynchronizedStandbySlots) > 0 { + synchronizedStandbySlots := strings.Join(info.SynchronizedStandbySlots, ",") + if IsManagedExtensionUsed("pg_failover_slots", info.UserSettings) { + configuration.OverwriteConfig("pg_failover_slots.standby_slot_names", synchronizedStandbySlots) + } + + if info.MajorVersion >= 17 { + if isEnabled, _ := ParsePostgresConfigBoolean(info.UserSettings["sync_replication_slots"]); isEnabled { + configuration.OverwriteConfig("synchronized_standby_slots", synchronizedStandbySlots) + } + } + } + if info.ClusterName != "" { configuration.OverwriteConfig("cluster_name", info.ClusterName) } From d4647e8228558aa2a4f66476f0d93ceb33446d9a Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 8 Jul 2025 14:43:42 +0200 Subject: [PATCH 685/836] docs(examples): use synchronizeLogicalDecoding in the examples (#7994) The logical replication examples are now using the new `synchronizeLogicalDecoding` field to configure failover logical replication slots. 
Signed-off-by: Leonardo Cecchi --- .../cluster-example-logical-destination.yaml | 2 ++ .../samples/cluster-example-logical-source.yaml | 14 ++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/docs/src/samples/cluster-example-logical-destination.yaml b/docs/src/samples/cluster-example-logical-destination.yaml index e8a2f574f9..abaf006ecf 100644 --- a/docs/src/samples/cluster-example-logical-destination.yaml +++ b/docs/src/samples/cluster-example-logical-destination.yaml @@ -39,3 +39,5 @@ spec: dbname: app publicationName: pub externalClusterName: cluster-example + parameters: + failover: 'true' diff --git a/docs/src/samples/cluster-example-logical-source.yaml b/docs/src/samples/cluster-example-logical-source.yaml index 95bac8cd82..39eb64bdbc 100644 --- a/docs/src/samples/cluster-example-logical-source.yaml +++ b/docs/src/samples/cluster-example-logical-source.yaml @@ -3,9 +3,9 @@ kind: Cluster metadata: name: cluster-example spec: - instances: 1 + instances: 3 - imageName: ghcr.io/cloudnative-pg/postgresql:16 + imageName: ghcr.io/cloudnative-pg/postgresql:17 storage: size: 1Gi @@ -25,11 +25,21 @@ spec: - INSERT INTO another_schema.numbers_three (m) (SELECT generate_series(1,10000)) - ALTER TABLE another_schema.numbers_three OWNER TO app + replicationSlots: + highAvailability: + synchronizeLogicalDecoding: true + managed: roles: - name: app login: true replication: true + + postgresql: + parameters: + hot_standby_feedback: 'on' + sync_replication_slots: 'on' + --- apiVersion: postgresql.cnpg.io/v1 kind: Publication From d270f7b366ac6de1f0612250e8d0f9f9bb8f66cf Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 8 Jul 2025 16:42:29 +0200 Subject: [PATCH 686/836] feat(user,secret): add `fqdn-uri` and `fqdn-jdbc-uri` fields (#7852) Closes #7807 Signed-off-by: Armando Ruocco Signed-off-by: Jonathan Battiato Signed-off-by: Leonardo Cecchi Co-authored-by: Jonathan Battiato Co-authored-by: Leonardo Cecchi --- .wordlist-en-custom.txt | 1 + docs/src/applications.md | 7 ++++++ pkg/specs/secrets.go | 49 +++++++++++++++++++++++++-------------- pkg/specs/secrets_test.go | 8 +++++++ 4 files changed, 47 insertions(+), 18 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 839186d3fe..4f4ae02071 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -831,6 +831,7 @@ fio fips firstRecoverabilityPoint firstRecoverabilityPointByMethod +fqdn freddie fuzzystrmatch gapped diff --git a/docs/src/applications.md b/docs/src/applications.md index 5828e99e17..f2a7f8dc06 100644 --- a/docs/src/applications.md +++ b/docs/src/applications.md @@ -72,6 +72,13 @@ Each secret contain the following: * a working [`.pgpass file`](https://www.postgresql.org/docs/current/libpq-pgpass.html) * [uri](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) * [jdbc-uri](https://jdbc.postgresql.org/documentation/use/#connecting-to-the-database) +* [fqdn-uri](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) +* [fqdn-jdbc-uri](https://jdbc.postgresql.org/documentation/use/#connecting-to-the-database) + +The FQDN to be used in the URIs is calculated using the Kubernetes cluster +domain specified in the `KUBERNETES_CLUSTER_DOMAIN` configuration parameter. +See [the operator configuration documentation](operator_conf.md) for more information +about that. 
The `-app` credentials are the ones that should be used by applications connecting to the PostgreSQL cluster, and correspond to the user *owning* the diff --git a/pkg/specs/secrets.go b/pkg/specs/secrets.go index 1ec994966e..52888fc6f2 100644 --- a/pkg/specs/secrets.go +++ b/pkg/specs/secrets.go @@ -26,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/cloudnative-pg/cloudnative-pg/internal/configuration" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) @@ -40,7 +41,28 @@ func CreateSecret( password string, usertype utils.UserType, ) *corev1.Secret { - uriBuilder := newConnectionStringBuilder(hostname, dbname, username, password, namespace) + hostWithNamespace := fmt.Sprintf("%s.%s:%d", hostname, namespace, postgres.ServerPort) + hostWithFQDN := fmt.Sprintf( + "%s.%s.svc.%s:%d", + hostname, + namespace, + configuration.Current.KubernetesClusterDomain, + postgres.ServerPort, + ) + + namespacedBuilder := &connectionStringBuilder{ + host: hostWithNamespace, + dbname: dbname, + username: username, + password: password, + } + + fqdnBuilder := &connectionStringBuilder{ + host: hostWithFQDN, + dbname: dbname, + username: username, + password: password, + } return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -66,28 +88,19 @@ func CreateSecret( dbname, username, password), - "uri": uriBuilder.buildPostgres(), - "jdbc-uri": uriBuilder.buildJdbc(), + "uri": namespacedBuilder.buildPostgres(), + "jdbc-uri": namespacedBuilder.buildJdbc(), + "fqdn-uri": fqdnBuilder.buildPostgres(), + "fqdn-jdbc-uri": fqdnBuilder.buildJdbc(), }, } } type connectionStringBuilder struct { - host string - dbname string - username string - password string - namespace string -} - -func newConnectionStringBuilder(hostname, dbname, username, password, namespace string) *connectionStringBuilder { - return &connectionStringBuilder{ - host: fmt.Sprintf("%s.%s:%d", hostname, namespace, postgres.ServerPort), - dbname: dbname, - username: username, - password: password, - namespace: namespace, - } + host string + dbname string + username string + password string } func (c connectionStringBuilder) buildPostgres() string { diff --git a/pkg/specs/secrets_test.go b/pkg/specs/secrets_test.go index 22ab765033..8693fadbf9 100644 --- a/pkg/specs/secrets_test.go +++ b/pkg/specs/secrets_test.go @@ -44,6 +44,14 @@ var _ = Describe("Secret creation", func() { Expect(secret.StringData["jdbc-uri"]).To( Equal("jdbc:postgresql://thishost.namespace:5432/thisdb?password=thispassword&user=thisuser"), ) + + Expect(secret.StringData["fqdn-uri"]).To( + Equal("postgresql://thisuser:thispassword@thishost.namespace.svc.cluster.local:5432/thisdb"), + ) + Expect(secret.StringData["fqdn-jdbc-uri"]).To( + Equal("jdbc:postgresql://thishost.namespace.svc.cluster.local:5432/thisdb?password=thispassword&user=thisuser"), + ) + Expect(secret.Labels).To( HaveKeyWithValue(utils.UserTypeLabelName, string(utils.UserTypeApp))) }) From 3b17eb149321fddcbce14b40a54a606e9eb358ba Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 9 Jul 2025 10:21:10 +0200 Subject: [PATCH 687/836] fix(pvc): support `VolumeAttributesClass` reconciliation (#7885) Add missing support for reconciling `VolumeAttributesClass` in `pvcTemplate`, leveraging the feature promoted to beta in Kubernetes 1.31 (disabled by default). 
A `VolumeAttributesClass` allows administrators (typically cloud providers) to define mutable "classes" of storage, mapping to different quality-of-service levels, while Kubernetes remains unopinionated about their semantics. See: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ This patch also refactors the PVC reconciliation logic to simplify handling additional parameters. Closes #7800 ## Release notes The operator now ensures that the `VolumeAttributesClassName` in PVCs matches the configuration defined in the `pvcTemplate`. Signed-off-by: Armando Ruocco --- internal/controller/cluster_controller.go | 2 +- .../{requests.go => existing.go} | 98 +++++++++--- .../persistentvolumeclaim/reconciler.go | 2 +- .../persistentvolumeclaim/reconciler_test.go | 150 ++++++++++++++++-- 4 files changed, 210 insertions(+), 42 deletions(-) rename pkg/reconciler/persistentvolumeclaim/{requests.go => existing.go} (50%) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 08d235e3cd..9cd46f5719 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -735,7 +735,7 @@ func (r *ClusterReconciler) reconcileResources( cluster, resources.instances.Items, resources.pvcs.Items, - ); !res.IsZero() || err != nil { + ); err != nil || !res.IsZero() { return res, err } diff --git a/pkg/reconciler/persistentvolumeclaim/requests.go b/pkg/reconciler/persistentvolumeclaim/existing.go similarity index 50% rename from pkg/reconciler/persistentvolumeclaim/requests.go rename to pkg/reconciler/persistentvolumeclaim/existing.go index d43d9b9e6c..b104d990c5 100644 --- a/pkg/reconciler/persistentvolumeclaim/requests.go +++ b/pkg/reconciler/persistentvolumeclaim/existing.go @@ -21,6 +21,7 @@ package persistentvolumeclaim import ( "context" + "fmt" "github.com/cloudnative-pg/machinery/pkg/log" corev1 "k8s.io/api/core/v1" @@ -31,52 +32,103 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" ) -// reconcileResourceRequests align the resource requests -func reconcileResourceRequests( +type reconciliationUnit func( + ctx context.Context, + c client.Client, + storageConfiguration *apiv1.StorageConfiguration, + pvc *corev1.PersistentVolumeClaim, +) error + +// reconcileExistingPVCs align the existing pvcs to the desired state +func reconcileExistingPVCs( ctx context.Context, c client.Client, cluster *apiv1.Cluster, pvcs []corev1.PersistentVolumeClaim, ) error { - if !cluster.ShouldResizeInUseVolumes() { + if len(pvcs) == 0 { + return nil + } + + contextLogger := log.FromContext(ctx) + + var reconciliationUnits []reconciliationUnit + + if cluster.ShouldResizeInUseVolumes() { + reconciliationUnits = append(reconciliationUnits, reconcilePVCQuantity) + } + if cluster.Spec.StorageConfiguration.PersistentVolumeClaimTemplate != nil { + reconciliationUnits = append(reconciliationUnits, reconcileVolumeAttributeClass) + } + + if len(reconciliationUnits) == 0 { return nil } for idx := range pvcs { - if err := reconcilePVCQuantity(ctx, c, cluster, &pvcs[idx]); err != nil { + pvc := &pvcs[idx] + + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + if err != nil { + contextLogger.Error(err, + "encountered an error while trying to get pvc role from label", + "role", pvc.Labels[utils.PvcRoleLabelName], + ) return err } + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + if err != nil { + contextLogger.Error(err, + "encountered an error while trying to obtain the storage 
configuration", + "role", pvc.Labels[utils.PvcRoleLabelName], + "pvcName", pvc.Name, + ) + return err + } + + for _, reconciler := range reconciliationUnits { + if err := reconciler(ctx, c, &storageConfiguration, pvc); err != nil { + return err + } + } } return nil } -func reconcilePVCQuantity( +func reconcileVolumeAttributeClass( ctx context.Context, c client.Client, - cluster *apiv1.Cluster, + storageConfiguration *apiv1.StorageConfiguration, pvc *corev1.PersistentVolumeClaim, ) error { - contextLogger := log.FromContext(ctx) - pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) - if err != nil { - contextLogger.Error(err, - "encountered an error while trying to get pvc role from label", - "role", pvc.Labels[utils.PvcRoleLabelName], - ) - return err + if storageConfiguration.PersistentVolumeClaimTemplate == nil { + return nil } - storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) - if err != nil { - contextLogger.Error(err, - "encountered an error while trying to obtain the storage configuration", - "role", pvc.Labels[utils.PvcRoleLabelName], - "pvcName", pvc.Name, - ) - return err + expectedVolumeAttributesClassName := storageConfiguration.PersistentVolumeClaimTemplate.VolumeAttributesClassName + if expectedVolumeAttributesClassName == pvc.Spec.VolumeAttributesClassName { + return nil } + oldPVC := pvc.DeepCopy() + pvc.Spec.VolumeAttributesClassName = expectedVolumeAttributesClassName + if err := c.Patch(ctx, pvc, client.MergeFrom(oldPVC)); err != nil { + return fmt.Errorf("error while changing PVC volume attributes class name: %w", err) + } + + return nil +} + +func reconcilePVCQuantity( + ctx context.Context, + c client.Client, + storageConfiguration *apiv1.StorageConfiguration, + pvc *corev1.PersistentVolumeClaim, +) error { + contextLogger := log.FromContext(ctx) + parsedSize := storageConfiguration.GetSizeOrNil() if parsedSize == nil { return ErrorInvalidSize @@ -105,7 +157,7 @@ func reconcilePVCQuantity( "pvc", pvc, "requests", pvc.Spec.Resources.Requests, "oldRequests", oldPVC.Spec.Resources.Requests) - return err + return fmt.Errorf("error while changing PVC storage requirement: %w", err) } return nil diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler.go b/pkg/reconciler/persistentvolumeclaim/reconciler.go index 111f5614fe..1ccbb37fbb 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler.go @@ -45,7 +45,7 @@ func Reconcile( return res, err } - if err := reconcileResourceRequests(ctx, c, cluster, pvcs); err != nil { + if err := reconcileExistingPVCs(ctx, c, cluster, pvcs); err != nil { if apierrs.IsConflict(err) { contextLogger.Debug("Conflict error while reconciling PVCs", "error", err) return ctrl.Result{Requeue: true}, nil diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go index 33d84a116a..1fd18cd3f6 100644 --- a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go +++ b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go @@ -161,7 +161,7 @@ var _ = Describe("Reconcile resource requests", func() { cluster := &apiv1.Cluster{} It("Reconcile resources with empty PVCs shouldn't fail", func() { - err := reconcileResourceRequests( + err := reconcileExistingPVCs( context.Background(), cli, cluster, @@ -178,7 +178,7 @@ var _ = Describe("Reconcile resource requests", func() { } cli := fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()).WithObjects(cluster).Build() - err := 
reconcileResourceRequests( + err := reconcileExistingPVCs( context.Background(), cli, cluster, @@ -486,11 +486,13 @@ var _ = Describe("Reconcile PVC Quantity", func() { }) It("fail if we dont' have the proper role", func() { - err := reconcilePVCQuantity( - context.Background(), - cli, - cluster, - &pvc) + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity(context.Background(), cli, &storageConfiguration, &pvc) Expect(err).To(HaveOccurred()) }) @@ -499,10 +501,16 @@ var _ = Describe("Reconcile PVC Quantity", func() { utils.PvcRoleLabelName: string(utils.PVCRolePgData), } - err := reconcilePVCQuantity( + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc) Expect(err).To(HaveOccurred()) }) @@ -510,11 +518,17 @@ var _ = Describe("Reconcile PVC Quantity", func() { It("If we don't have the proper storage configuration it should fail", func() { cluster.Spec.StorageConfiguration = apiv1.StorageConfiguration{} + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + // If we don't have a proper storage configuration we should also fail - err := reconcilePVCQuantity( + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc) Expect(err).To(HaveOccurred()) }) @@ -525,10 +539,16 @@ var _ = Describe("Reconcile PVC Quantity", func() { } cluster.Spec.StorageConfiguration.Size = "1Gi" - err := reconcilePVCQuantity( + pvcRole, err := GetExpectedObjectCalculator(pvc.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc) Expect(err).ToNot(HaveOccurred()) }) @@ -544,10 +564,16 @@ var _ = Describe("Reconcile PVC Quantity", func() { }, } - err := reconcilePVCQuantity( + pvcRole, err := GetExpectedObjectCalculator(pvc2.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc2) Expect(err).ToNot(HaveOccurred()) }) @@ -563,11 +589,101 @@ var _ = Describe("Reconcile PVC Quantity", func() { }, } - err := reconcilePVCQuantity( + pvcRole, err := GetExpectedObjectCalculator(pvc2.GetLabels()) + Expect(err).ToNot(HaveOccurred()) + + storageConfiguration, err := pvcRole.GetStorageConfiguration(cluster) + Expect(err).ToNot(HaveOccurred()) + + err = reconcilePVCQuantity( context.Background(), cli, - cluster, + &storageConfiguration, &pvc2) Expect(err).ToNot(HaveOccurred()) }) }) + +var _ = Describe("Reconcile Volume Attribute Class", func() { + var ( + clusterName = "cluster-volume-attr" + cluster *apiv1.Cluster + pvc corev1.PersistentVolumeClaim + cli client.Client + ctx context.Context + ) + + BeforeEach(func() { + ctx = context.Background() + cluster = &apiv1.Cluster{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: clusterName, + }, + } + pvc = makePVC(clusterName, "1", "1", NewPgDataCalculator(), false) + cli = fake.NewClientBuilder(). + WithScheme(scheme.BuildWithAllKnownScheme()). + WithObjects(cluster, &pvc). + Build() + }) + + It("does nothing if PersistentVolumeClaimTemplate is nil", func() { + storage := &apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: nil, + } + + err := reconcileVolumeAttributeClass(ctx, cli, storage, &pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.VolumeAttributesClassName).To(BeNil()) + }) + + It("does nothing if VolumeAttributesClassName is already the expected value", func() { + className := "fast-class" + pvc.Spec.VolumeAttributesClassName = &className + + storage := &apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + VolumeAttributesClassName: &className, + }, + } + + err := reconcileVolumeAttributeClass(ctx, cli, storage, &pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(*pvc.Spec.VolumeAttributesClassName).To(Equal(className)) + }) + + It("updates VolumeAttributesClassName when it differs from the expected value", func() { + currentClassName := "slow-class" + expectedClassName := "fast-class" + pvc.Spec.VolumeAttributesClassName = ¤tClassName + + storage := &apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + VolumeAttributesClassName: &expectedClassName, + }, + } + + err := reconcileVolumeAttributeClass(ctx, cli, storage, &pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(*pvc.Spec.VolumeAttributesClassName).To(Equal(expectedClassName)) + }) + + It("sets VolumeAttributesClassName to nil when template specifies nil", func() { + className := "existing-class" + pvc.Spec.VolumeAttributesClassName = &className + + storage := &apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + VolumeAttributesClassName: nil, + }, + } + + err := reconcileVolumeAttributeClass(ctx, cli, storage, &pvc) + Expect(err).NotTo(HaveOccurred()) + Expect(pvc.Spec.VolumeAttributesClassName).To(BeNil()) + }) +}) From 4dc8a6f027e08f2a0753805f2c366d2585420604 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 9 Jul 2025 16:51:55 +0200 Subject: [PATCH 688/836] chore(deps): update operator framework (main) (#7974) This PR contains the following updates: https://github.com/operator-framework/operator-registry `v1.55.0` -> `v1.56.0` https://github.com/operator-framework/operator-sdk `v1.40.0` -> `v1.41.0` quay.io/operator-framework/scorecard-test `v1.40.0` -> `v1.41.0` --- Makefile | 4 ++-- config/olm-scorecard/patches/basic.config.yaml | 2 +- config/olm-scorecard/patches/olm.config.yaml | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 706957830d..f057bb7324 100644 --- a/Makefile +++ b/Makefile @@ -63,9 +63,9 @@ SPELLCHECK_VERSION ?= 0.51.0 # renovate: datasource=docker depName=getwoke/woke versioning=docker WOKE_VERSION ?= 0.19.0 # renovate: datasource=github-releases depName=operator-framework/operator-sdk versioning=loose -OPERATOR_SDK_VERSION ?= v1.40.0 +OPERATOR_SDK_VERSION ?= v1.41.0 # renovate: datasource=github-tags depName=operator-framework/operator-registry -OPM_VERSION ?= v1.55.0 +OPM_VERSION ?= v1.56.0 # renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight PREFLIGHT_VERSION ?= 1.14.0 OPENSHIFT_VERSIONS 
?= v4.12-v4.19 diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml index f7e0ed492b..22884c5382 100644 --- a/config/olm-scorecard/patches/basic.config.yaml +++ b/config/olm-scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: basic test: basic-check-spec-test diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml index 85895c9e93..d2f829662c 100644 --- a/config/olm-scorecard/patches/olm.config.yaml +++ b/config/olm-scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-status-descriptors-test From 4cd966039690bbc3e845b947de3ceb36b5014c0d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 9 Jul 2025 17:26:23 +0200 Subject: [PATCH 689/836] chore(deps): update dependency golang to v1.24.5 (main) (#7995) --- .github/workflows/backport.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/refresh-licenses.yml | 2 +- .github/workflows/release-publish.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index b08fc29c6a..26209947a3 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -12,7 +12,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.4" + GOLANG_VERSION: "1.24.5" jobs: # Label the source pull request with 'backport-requested' and all supported releases label, the goal is, by default diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 12e2f2fb19..aaa4a9fd10 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -35,7 +35,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.4" + GOLANG_VERSION: "1.24.5" jobs: duplicate_runs: diff --git a/.github/workflows/continuous-delivery.yml 
b/.github/workflows/continuous-delivery.yml index decff8ded9..62141d6e2a 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.4" + GOLANG_VERSION: "1.24.5" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index e4ddce1072..71c537ffc0 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -19,7 +19,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.4" + GOLANG_VERSION: "1.24.5" # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose GOLANGCI_LINT_VERSION: "v2.2.1" KUBEBUILDER_VERSION: "2.3.1" diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index 08de202dc2..a762f7dd6f 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -10,7 +10,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.4" + GOLANG_VERSION: "1.24.5" jobs: licenses: diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 50fb67b8f4..7c3012ff9e 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -11,7 +11,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.4" + GOLANG_VERSION: "1.24.5" REGISTRY: "ghcr.io" jobs: From 5246abb4d0240382a58f3df59a35a1348375348a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 10 Jul 2025 08:57:45 +0200 Subject: [PATCH 690/836] chore(deps): update module github.com/goreleaser/goreleaser to v2.11.0 (main) (#7999) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Confidence | |---|---|---|---| | [github.com/goreleaser/goreleaser](https://redirect.github.com/goreleaser/goreleaser) | `v2.10.2` -> `v2.11.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2fgoreleaser%2fgoreleaser/v2.11.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2fgoreleaser%2fgoreleaser/v2.10.2/v2.11.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
goreleaser/goreleaser (github.com/goreleaser/goreleaser) ### [`v2.11.0`](https://redirect.github.com/goreleaser/goreleaser/releases/tag/v2.11.0) [Compare Source](https://redirect.github.com/goreleaser/goreleaser/compare/v2.10.2...v2.11.0) #### Changelog ##### New Features - [`eaaf0c5`](https://redirect.github.com/goreleaser/goreleaser/commit/eaaf0c50ebd36e19399f443d78ad14551b2d2757): feat(cask): place `custom_block` on top of cask definition ([#​5831](https://redirect.github.com/goreleaser/goreleaser/issues/5831)) ([@​sushichan044](https://redirect.github.com/sushichan044)) - [`b443215`](https://redirect.github.com/goreleaser/goreleaser/commit/b443215a430cd8cd38b14b22acfd8305cdb4f289): feat(cask): pluralize manpages ([#​5839](https://redirect.github.com/goreleaser/goreleaser/issues/5839)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`ee5e42f`](https://redirect.github.com/goreleaser/goreleaser/commit/ee5e42fb48704ef4efb2d953f09035c5c4e5854c): feat(ux): use fang ([#​5842](https://redirect.github.com/goreleaser/goreleaser/issues/5842)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`b3b2a92`](https://redirect.github.com/goreleaser/goreleaser/commit/b3b2a92a59ee81404d747eeb51bad1e4e886f8a3): feat: add signed commits capability ([#​5820](https://redirect.github.com/goreleaser/goreleaser/issues/5820)) ([@​hugodocto](https://redirect.github.com/hugodocto)) - [`4990a80`](https://redirect.github.com/goreleaser/goreleaser/commit/4990a8079139d94cb01777b90e30f330478bca9f): feat: improve error handling ([#​5878](https://redirect.github.com/goreleaser/goreleaser/issues/5878)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`9e466d9`](https://redirect.github.com/goreleaser/goreleaser/commit/9e466d9daea1d4da41f25cef840e663e17b4ee54): feat: log duration of all shell commands ([#​5872](https://redirect.github.com/goreleaser/goreleaser/issues/5872)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`4d9c105`](https://redirect.github.com/goreleaser/goreleaser/commit/4d9c105228d346021514e25cf51336440de79850): feat: run custom publishers last ([@​caarlos0](https://redirect.github.com/caarlos0)) ##### Bug fixes - [`158cbfc`](https://redirect.github.com/goreleaser/goreleaser/commit/158cbfc8457db4528c2e604bd9b4338b8a764ef7): fix(archive): ignore override with no formats ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`988be8f`](https://redirect.github.com/goreleaser/goreleaser/commit/988be8fdad721e2ae03fb1bf41e56fd6a60a46a7): fix(archive): override with no goos ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`0dd4742`](https://redirect.github.com/goreleaser/goreleaser/commit/0dd474227dcfc8c98515df8e1c594e8b2a5caf38): fix(cask): add test and docs for custom URL ([#​5844](https://redirect.github.com/goreleaser/goreleaser/issues/5844)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`5d927a0`](https://redirect.github.com/goreleaser/goreleaser/commit/5d927a048ecdbc1e74658b3174a74bce312acbc8): fix(cask): invalid string quoting in uninstall/zap arrays ([#​5885](https://redirect.github.com/goreleaser/goreleaser/issues/5885)) ([@​stupside](https://redirect.github.com/stupside)) - [`63c4755`](https://redirect.github.com/goreleaser/goreleaser/commit/63c47558e5bf0bb9314a5648cfaa6968e46e7864): fix(casks): binary packages need to rename to binary ([#​5840](https://redirect.github.com/goreleaser/goreleaser/issues/5840)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - 
[`b67fe02`](https://redirect.github.com/goreleaser/goreleaser/commit/b67fe028dd1bec24a1703e576aa3f7e99465ddbc): fix(deps): update to lipgloss/v2 ([#​5877](https://redirect.github.com/goreleaser/goreleaser/issues/5877)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`076452b`](https://redirect.github.com/goreleaser/goreleaser/commit/076452b52472c9674da0d1abde5eb1cdc86b3bc8): fix(http): improve log output ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`735ed6f`](https://redirect.github.com/goreleaser/goreleaser/commit/735ed6f3c5d3db8de1d840de4ea5f8b5f94ef884): fix(nix): improve nix-hash check ([#​5883](https://redirect.github.com/goreleaser/goreleaser/issues/5883)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`d9614b3`](https://redirect.github.com/goreleaser/goreleaser/commit/d9614b3a0a6d495e4b67b3c1f6ee4f3f1ebc3595): fix(rust): properly group binaries by platform ([#​5866](https://redirect.github.com/goreleaser/goreleaser/issues/5866)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`6866c14`](https://redirect.github.com/goreleaser/goreleaser/commit/6866c14acd8a3d2b9ea3fb7dd1e5897424b5a5cb): fix(schema): upload.mode enum ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`f39686b`](https://redirect.github.com/goreleaser/goreleaser/commit/f39686b9acc7cdf0edb7894ca5c880b24b9e5986): fix(semver): skip=validate allows invalid semver ([#​5845](https://redirect.github.com/goreleaser/goreleaser/issues/5845)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`d4b3fd6`](https://redirect.github.com/goreleaser/goreleaser/commit/d4b3fd602a899c20550143b5edbf7455b2a7cf46): fix: artifact.Checksum should set artifact.Extra field ([#​5849](https://redirect.github.com/goreleaser/goreleaser/issues/5849)) ([@​alexandear](https://redirect.github.com/alexandear)) - [`cb8b6f0`](https://redirect.github.com/goreleaser/goreleaser/commit/cb8b6f0c2347033f431bb492b254a47bcaeb9560): fix: keyword style ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`3af52ad`](https://redirect.github.com/goreleaser/goreleaser/commit/3af52ad0368aec5760349ed2db86193b740eecfb): fix: remove opts set by fang ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`1a72d4e`](https://redirect.github.com/goreleaser/goreleaser/commit/1a72d4ead32ed57ee81aa836177bffe2198fab89): fix: skip empty image\_templates in docker\_manifests ([#​5825](https://redirect.github.com/goreleaser/goreleaser/issues/5825)) ([@​zerospiel](https://redirect.github.com/zerospiel)) - [`c057943`](https://redirect.github.com/goreleaser/goreleaser/commit/c0579434a84eecde46cddffa899bde4dc71d5ab2): fix: typo ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`2d1128c`](https://redirect.github.com/goreleaser/goreleaser/commit/2d1128cf35177a888280c94a994419ac640b93cf): fix: warn if cask directory is not Casks ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`f409252`](https://redirect.github.com/goreleaser/goreleaser/commit/f4092525e7c95a81ad648b441435c6fb942a33b8): refactor: improve commitauthor templating ([#​5858](https://redirect.github.com/goreleaser/goreleaser/issues/5858)) ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`27dd9af`](https://redirect.github.com/goreleaser/goreleaser/commit/27dd9af051aafede61687859ce9d50f99726da6e): refactor: rename package ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`39e7191`](https://redirect.github.com/goreleaser/goreleaser/commit/39e7191a35bd8b238c60a973366ebbffcd8c6c18): refactor: simplify tests with require.ErrorContains 
([#​5855](https://redirect.github.com/goreleaser/goreleaser/issues/5855)) ([@​alexandear](https://redirect.github.com/alexandear)) - [`38c4491`](https://redirect.github.com/goreleaser/goreleaser/commit/38c449116c8bdcce573698040d8d75666961bdcb): refactor: using signal.NotifyContext, better context usage ([#​5859](https://redirect.github.com/goreleaser/goreleaser/issues/5859)) ([@​caarlos0](https://redirect.github.com/caarlos0)) ##### Documentation updates - [`f4869f9`](https://redirect.github.com/goreleaser/goreleaser/commit/f4869f96e58c6cebc142cfbf1e003ced62f93872): docs(cask): fix the incorrect Ruby code in the example ([#​5847](https://redirect.github.com/goreleaser/goreleaser/issues/5847)) ([@​sushichan044](https://redirect.github.com/sushichan044)) - [`3fac43e`](https://redirect.github.com/goreleaser/goreleaser/commit/3fac43ee82a1d4272756a57a4d12ea4d4fa44cc2): docs: clarify 'binary' mode usage ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`49b6904`](https://redirect.github.com/goreleaser/goreleaser/commit/49b6904cb7e8eea8be74f22e2268ec3d8663e189): docs: cleanup ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`f6116d0`](https://redirect.github.com/goreleaser/goreleaser/commit/f6116d0e7030d4e5fafa6189237568e938bc8ef8): docs: fix ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`70cad02`](https://redirect.github.com/goreleaser/goreleaser/commit/70cad02d55d2526e68b28455c8ec4986901924fd): docs: improve cask deprecation notice ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`9f6e648`](https://redirect.github.com/goreleaser/goreleaser/commit/9f6e6482fb13f2e4e937a10d1a37b3696e451e08): docs: readme update ([@​caarlos0](https://redirect.github.com/caarlos0)) ##### Other work - [`d043b9b`](https://redirect.github.com/goreleaser/goreleaser/commit/d043b9bf46b93264a36fc0f5cad64767fa7cee67): Revert "chore(deps): bump github.com/charmbracelet/lipgloss/v2 from 2.0.0-beta.2.0.20250707173510-045a87bf1420 to 2.0.0-beta1 ([#​5889](https://redirect.github.com/goreleaser/goreleaser/issues/5889))" ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`fb05318`](https://redirect.github.com/goreleaser/goreleaser/commit/fb05318593ddd47b81bef7de4c6bbbff6412b944): chore: fix lint issues ([@​caarlos0](https://redirect.github.com/caarlos0)) - [`2665e72`](https://redirect.github.com/goreleaser/goreleaser/commit/2665e72a6d90c434f6b5a1b43ff823e06b157a51): chore: lint ([@​caarlos0](https://redirect.github.com/caarlos0)) **Full Changelog**: https://github.com/goreleaser/goreleaser/compare/v2.10.2...v2.11.0 #### Helping out This release is only possible thanks to **all** the support of some **awesome people**! Want to be one of them? You can [sponsor](https://goreleaser.com/sponsors/), get a [Pro License](https://goreleaser.com/pro) or [contribute with code](https://goreleaser.com/contributing). #### Where to go next? - Find examples and commented usage of all options in our [website](https://goreleaser.com/intro/). - Reach out on [Discord](https://discord.gg/RGEBtg8vQ6) and [Twitter](https://twitter.com/goreleaser)! GoReleaser logo
--- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Never, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/cloudnative-pg/cloudnative-pg). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f057bb7324..a197914d75 100644 --- a/Makefile +++ b/Makefile @@ -57,7 +57,7 @@ KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.18.0 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca # renovate: datasource=go depName=github.com/goreleaser/goreleaser -GORELEASER_VERSION ?= v2.10.2 +GORELEASER_VERSION ?= v2.11.0 # renovate: datasource=docker depName=jonasbn/github-action-spellcheck versioning=docker SPELLCHECK_VERSION ?= 0.51.0 # renovate: datasource=docker depName=getwoke/woke versioning=docker From 5986cfd01863d528e4facddc6732819a84888d78 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 14 Jul 2025 10:13:41 +0200 Subject: [PATCH 691/836] chore(deps): update operator framework to v1.41.1 (main) (#8008) --- Makefile | 2 +- config/olm-scorecard/patches/basic.config.yaml | 2 +- config/olm-scorecard/patches/olm.config.yaml | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index a197914d75..91a8ff4485 100644 --- a/Makefile +++ b/Makefile @@ -63,7 +63,7 @@ SPELLCHECK_VERSION ?= 0.51.0 # renovate: datasource=docker depName=getwoke/woke versioning=docker WOKE_VERSION ?= 0.19.0 # renovate: datasource=github-releases depName=operator-framework/operator-sdk versioning=loose -OPERATOR_SDK_VERSION ?= v1.41.0 +OPERATOR_SDK_VERSION ?= v1.41.1 # renovate: datasource=github-tags depName=operator-framework/operator-registry OPM_VERSION ?= v1.56.0 # renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight diff --git a/config/olm-scorecard/patches/basic.config.yaml b/config/olm-scorecard/patches/basic.config.yaml index 22884c5382..8237b70d80 100644 --- a/config/olm-scorecard/patches/basic.config.yaml +++ b/config/olm-scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.41.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: basic test: basic-check-spec-test diff --git a/config/olm-scorecard/patches/olm.config.yaml b/config/olm-scorecard/patches/olm.config.yaml index d2f829662c..416660a77e 100644 --- a/config/olm-scorecard/patches/olm.config.yaml +++ b/config/olm-scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.41.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.41.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: 
olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.41.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.41.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.41.0 + image: quay.io/operator-framework/scorecard-test:v1.41.1 labels: suite: olm test: olm-status-descriptors-test From e52c8406355a2f87698e2eb8d1842798c043d894 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 14 Jul 2025 15:00:47 +0200 Subject: [PATCH 692/836] fix(deps): update module golang.org/x/term to v0.33.0 (main) (#8009) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index f64cb5fc69..751113f4d4 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/term v0.32.0 + golang.org/x/term v0.33.0 google.golang.org/grpc v1.73.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.2 @@ -102,7 +102,7 @@ require ( golang.org/x/net v0.40.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect golang.org/x/sync v0.14.0 // indirect - golang.org/x/sys v0.33.0 // indirect + golang.org/x/sys v0.34.0 // indirect golang.org/x/text v0.25.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.31.0 // indirect diff --git a/go.sum b/go.sum index 372953941f..f5d8615c40 100644 --- a/go.sum +++ b/go.sum @@ -247,10 +247,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= From 0117580f46017e5fbcc3daf5903e86ecc7691ad1 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 14 Jul 2025 16:54:05 +0200 Subject: [PATCH 693/836] fix(webhooks): clarify rejection of multiple bootstrap methods (#8025) Closes #8024 Signed-off-by: Marco Nenciarini --- docs/src/bootstrap.md | 3 +++ internal/webhook/v1/cluster_webhook.go | 5 ++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git 
a/docs/src/bootstrap.md b/docs/src/bootstrap.md index f074ced936..0dd68f2f41 100644 --- a/docs/src/bootstrap.md +++ b/docs/src/bootstrap.md @@ -54,6 +54,9 @@ specification. CloudNativePG currently supports the following bootstrap methods: [`pg_basebackup` subsection](#bootstrap-from-a-live-cluster-pg_basebackup) carefully. +Only one bootstrap method can be specified in the manifest. +Attempting to define multiple bootstrap methods will result in validation errors. + In contrast to the `initdb` method, both `recovery` and `pg_basebackup` create a new cluster based on another one (either offline or online) and can be used to spin up replica clusters. They both rely on the definition of external diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index e3ee3a7558..ca149dc0cf 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -649,10 +649,9 @@ func (v *ClusterCustomValidator) validateBootstrapMethod(r *apiv1.Cluster) field if bootstrapMethods > 1 { result = append( result, - field.Invalid( + field.Forbidden( field.NewPath("spec", "bootstrap"), - "", - "Too many bootstrap types specified")) + "Only one bootstrap method can be specified at a time")) } return result From a65243c09a7866801449eaa80835eea7bd5bfcb0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 14:39:41 +0200 Subject: [PATCH 694/836] chore(deps): update dependency golangci/golangci-lint to v2.2.2 (main) (#8026) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 71c537ffc0..86a4bcc976 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -21,7 +21,7 @@ env: # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.5" # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose - GOLANGCI_LINT_VERSION: "v2.2.1" + GOLANGCI_LINT_VERSION: "v2.2.2" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" From 6239feaa0979dbd4222db12a39ee4f7ac2bc9e7b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 18:55:09 +0200 Subject: [PATCH 695/836] chore(deps): update dependency rook/rook to v1.17.6 (main) (#8027) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 62141d6e2a..77885bed09 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -42,7 +42,7 @@ env: # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" # renovate: datasource=github-releases depName=rook/rook versioning=loose - ROOK_VERSION: "v1.17.5" + ROOK_VERSION: "v1.17.6" EXTERNAL_SNAPSHOTTER_VERSION: "v8.3.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" From 685ec28545cb4f0a79c72faf94a6b7c5941e64ce Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 11:48:49 +0200 Subject: [PATCH 696/836] fix(deps): update module 
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.84.0 (main) (#8037) --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 751113f4d4..e48ec9b3aa 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.83.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0 github.com/prometheus/client_golang v1.22.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 @@ -98,14 +98,14 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.3 // indirect - golang.org/x/crypto v0.38.0 // indirect - golang.org/x/net v0.40.0 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/net v0.41.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.14.0 // indirect + golang.org/x/sync v0.16.0 // indirect golang.org/x/sys v0.34.0 // indirect - golang.org/x/text v0.25.0 // indirect + golang.org/x/text v0.27.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.31.0 // indirect + golang.org/x/tools v0.34.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/protobuf v1.36.6 // indirect diff --git a/go.sum b/go.sum index f5d8615c40..a5130cfb49 100644 --- a/go.sum +++ b/go.sum @@ -155,8 +155,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.83.0 h1:j9Ce3W6X6Tzi0QnSap+YzGwpqJLJGP/7xV6P9f86jjM= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.83.0/go.mod h1:sSxwdmprUfmRfTknPc4KIjUd2ZIc/kirw4UdXNhOauM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0 h1:V/HLst0rSw4BZp8nIqhaTnnW4/EGxEoYbgjcDqzPJ5U= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0/go.mod h1:MruMqbSS9aYrKhBImrO9X9g52hwz3I0B+tcoeAwkmuM= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= @@ -225,23 +225,23 @@ go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod 
h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -253,16 +253,16 @@ golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= 
+golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 504767bcc361e78a6453eaea9a64a42f746e23a2 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Thu, 17 Jul 2025 15:08:00 +0200 Subject: [PATCH 697/836] docs: clarify the defaults for microservice imports (#7903) Make it explicit that if the `import.database` is not set, databases imported via microservice may be renamed to `app`. Signed-off-by: Jaime Silvela Signed-off-by: Gabriele Bartolini Co-authored-by: Anish Bista <108048384+anishbista60@users.noreply.github.com> Co-authored-by: Gabriele Bartolini --- docs/src/database_import.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/docs/src/database_import.md b/docs/src/database_import.md index 5642977fa3..f397a6c06e 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -56,14 +56,14 @@ into the destination cluster: - **monolith approach**: the destination cluster is designed to host multiple databases and different users, imported from the source cluster -The first import method is available via the `microservice` type, while the -latter by the `monolith` type. +The first import method is available via the `microservice` type, the +second via the `monolith` type. !!! Warning It is your responsibility to ensure that the destination cluster can access the source cluster with a superuser or a user having enough privileges to take a logical backup with `pg_dump`. Please refer to the - [PostgreSQL documentation on "SQL Dump"](https://www.postgresql.org/docs/current/app-pgdump.html) + [PostgreSQL documentation on `pg_dump`](https://www.postgresql.org/docs/current/app-pgdump.html) for further information. ## The `microservice` type @@ -82,6 +82,10 @@ performed in 4 steps: database via the `postImportApplicationSQL` parameter - execution of `ANALYZE VERBOSE` on the imported database +In the figure below, a single PostgreSQL cluster containing *N* databases is +imported into separate CloudNativePG clusters, with each cluster using a +microservice import for one of the *N* source databases. + ![Example of microservice import type](./images/microservice-import.png) For example, the YAML below creates a new 3 instance PostgreSQL cluster (latest @@ -135,7 +139,7 @@ spec: and unsupported versions of Postgres too, giving you the chance to move your legacy data to a better system, inside Kubernetes. This is the main reason why we used 9.6 in the examples of this section. - We'd be interested to hear from you should you experience any issues in this area. + We'd be interested to hear from you, should you experience any issues in this area. There are a few things you need to be aware of when using the `microservice` type: @@ -154,6 +158,12 @@ There are a few things you need to be aware of when using the `microservice` typ - Only one database can be specified inside the `initdb.import.databases` array - Roles are not imported - and as such they cannot be specified inside `initdb.import.roles` +!!! Hint + The microservice approach adheres to CloudNativePG conventions and defaults + for the destination cluster. 
If you do not set `initdb.database` or + `initdb.owner` for the destination cluster, both parameters will default to + `app`. + ## The `monolith` type With the monolith approach, you can specify a set of roles and databases you From 48ddea1543777d83b5f59b0d00eb9fb33af05725 Mon Sep 17 00:00:00 2001 From: Julian <374571+l00ptr@users.noreply.github.com> Date: Thu, 17 Jul 2025 15:54:59 +0200 Subject: [PATCH 698/836] docs(upgrades): warn about Postgres restarts when upgrading to 1.26 (#8018) When upgrading from a previous version to 1.26, PostgreSQL clusters will be restarted even with in-place updates enabled, due to changes in the Startup probe definition (PR #6623). Closes #7727 Signed-off-by: Julian Vanden Broeck Signed-off-by: Gabriele Bartolini Co-authored-by: Julian Vanden Broeck Co-authored-by: Gabriele Bartolini --- docs/src/installation_upgrade.md | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 6ce978874b..686a62f2c2 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -279,18 +279,26 @@ spec: ``` --> -### Upgrading to 1.26.0 or 1.25.2 +### Upgrading to 1.26 from a previous minor version !!! Important We strongly recommend that all CloudNativePG users upgrade to version - 1.26.0 or at least to the latest stable version of the minor release you are - currently using (namely 1.25.x). + 1.26.1, or at a minimum, to the latest stable version of your current minor + release (for example, 1.25.x). + +!!! Warning + Due to changes in the startup probe for the manager component + ([#6623](https://github.com/cloudnative-pg/cloudnative-pg/pull/6623)), + upgrading the operator will trigger a restart of your PostgreSQL clusters, + even if in-place updates are enabled (`ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES=true`). + Your applications will need to reconnect to PostgreSQL after the upgrade. In this release, the `cnpg` plugin for `kubectl` transitions from an imperative -to a declarative approach for cluster hibernation. The `hibernate on` and -`hibernate off` commands are now convenient shortcuts that apply declarative -changes to enable or disable hibernation. The `hibernate status` command has -been removed, as its purpose is now fulfilled by the standard `status` command. +to a [declarative approach for cluster hibernation](declarative_hibernation.md). +The `hibernate on` and `hibernate off` commands are now convenient shortcuts +that apply declarative changes to enable or disable hibernation. +The `hibernate status` command has been removed, as its purpose is now +fulfilled by the standard `status` command. ### Upgrading to 1.25 from a previous minor version From 02b124952dd7e94d95541060d90e5b7d0abd6fa2 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 17 Jul 2025 17:56:22 +0200 Subject: [PATCH 699/836] fix(postgres,instance): set postgres workers `oom_score_adj` (#7891) When the quality of service is set to "Guaranteed", CloudNativePG sets the `PG_OOM_ADJUST_VALUE` for the `postmaster` process to `0`. This allows the `postmaster` to retain its low Out-Of-Memory (OOM) score of `-997`, while its child processes run with an OOM score adjustment of `0`. As a result, if the OOM killer is triggered, it will terminate the child processes before the `postmaster`. This behaviour helps keep the PostgreSQL instance alive for as long as possible and enables a clean shutdown procedure in the event of an eviction. 
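For reference, this behaviour only applies when the pod is in the "Guaranteed" QoS class, which Kubernetes assigns only when every container sets limits equal to requests for both CPU and memory. A minimal, hypothetical `Cluster` fragment that satisfies this condition (name and values are illustrative, not part of this patch):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example  # illustrative name
spec:
  instances: 3
  # "Guaranteed" QoS: limits must equal requests for both CPU and memory
  resources:
    requests:
      cpu: "1"
      memory: 2Gi
    limits:
      cpu: "1"
      memory: 2Gi
  storage:
    size: 10Gi
```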
Closes #7132 Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- docs/src/resource_management.md | 16 ++++++++++++++-- pkg/management/postgres/instance.go | 18 +++++++++++++++--- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/docs/src/resource_management.md b/docs/src/resource_management.md index fef4e68bbd..7301a1b8ad 100644 --- a/docs/src/resource_management.md +++ b/docs/src/resource_management.md @@ -40,13 +40,25 @@ section in the Kubernetes documentation. For a PostgreSQL workload it is recommended to set a "Guaranteed" QoS. +!!! Info + When the quality of service is set to "Guaranteed", CloudNativePG sets the + `PG_OOM_ADJUST_VALUE` for the `postmaster` process to `0`, in line with the + [PostgreSQL documentation](https://www.postgresql.org/docs/current/kernel-resources.html#LINUX-MEMORY-OVERCOMMIT). + This allows the `postmaster` to retain its low Out-Of-Memory (OOM) score of + `-997`, while its child processes run with an OOM score adjustment of `0`. As a + result, if the OOM killer is triggered, it will terminate the child processes + before the `postmaster`. This behavior helps keep the PostgreSQL instance + alive for as long as possible and enables a clean shutdown procedure in the + event of an eviction. + To avoid resources related issues in Kubernetes, we can refer to the best practices for "out of resource" handling while creating a cluster: - Specify your required values for memory and CPU in the resources section of the manifest file. - This way, you can avoid the `OOM Killed` (where "OOM" stands for Out Of Memory) and `CPU throttle` or any other + This way, you can avoid the `OOM Killed` and `CPU throttle` or any other resource-related issues on running instances. -- For your cluster's pods to get assigned to the "Guaranteed" QoS class, you must set limits and requests +- For your cluster's pods to get assigned to the "Guaranteed" QoS class, you + must set limits and requests for both memory and CPU to the same value. - Specify your required PostgreSQL memory parameters consistently with the pod resources (as you would do in a VM or physical machine scenario - see below). diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index bada828ec4..58d768f83c 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -487,7 +487,7 @@ func (instance *Instance) Startup() error { } pgCtlCmd := exec.Command(pgCtlName, options...) // #nosec - pgCtlCmd.Env = instance.Env + pgCtlCmd.Env = instance.buildPostgresEnv() err := execlog.RunStreaming(pgCtlCmd, pgCtlName) if err != nil { return fmt.Errorf("error starting PostgreSQL instance: %w", err) @@ -712,7 +712,7 @@ func (instance *Instance) Run() (*execlog.StreamingCmd, error) { } postgresCmd := exec.Command(GetPostgresExecutableName(), options...) 
// #nosec - postgresCmd.Env = instance.Env + postgresCmd.Env = instance.buildPostgresEnv() compatibility.AddInstanceRunCommands(postgresCmd) streamingCmd, err := execlog.RunStreamingNoWait(postgresCmd, GetPostgresExecutableName()) @@ -723,6 +723,18 @@ func (instance *Instance) Run() (*execlog.StreamingCmd, error) { return streamingCmd, nil } +func (instance *Instance) buildPostgresEnv() []string { + env := instance.Env + if env == nil { + env = os.Environ() + } + env = append(env, + "PG_OOM_ADJUST_FILE=/proc/self/oom_score_adj", + "PG_OOM_ADJUST_VALUE=0", + ) + return env +} + // WithActiveInstance execute the internal function while this // PostgreSQL instance is running func (instance *Instance) WithActiveInstance(inner func() error) error { @@ -1071,7 +1083,7 @@ func (instance *Instance) Rewind(ctx context.Context) error { "options", options) pgRewindCmd := exec.Command(pgRewindName, options...) // #nosec - pgRewindCmd.Env = instance.Env + pgRewindCmd.Env = instance.buildPostgresEnv() err = execlog.RunStreaming(pgRewindCmd, pgRewindName) if err != nil { contextLogger.Error(err, "Failed to execute pg_rewind", "options", options) From f802c3f810732c4aeab9537a6d77504beccceee9 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 18 Jul 2025 15:55:34 +0200 Subject: [PATCH 700/836] chore: azurite test failing due to API versions mismatch (#8060) The azurite tests were failing with the following error: ERROR: Can't connect to cloud provider: The API version 2025-07-05 is not supported by Azurite. Please upgrade Azurite to latest version and retry. If you are using Azurite in Visual Studio, please check you have installed latest Visual Studio patch. Azurite command line parameter \"--skipApiVersionCheck\" or Visual Studio Code configuration \"Skip Api Version Check\" can skip this error. Added the `--skipApiVersionCheck` option to the Azurite server we run, which avoids this issue. This problem was already reported in the Azurite community at https://github.com/azure/azurite/issues/2564 Closes #8059 Signed-off-by: Jonathan Gonzalez V. --- tests/utils/backups/azurite.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/utils/backups/azurite.go b/tests/utils/backups/azurite.go index f846901006..5c010b9f3b 100644 --- a/tests/utils/backups/azurite.go +++ b/tests/utils/backups/azurite.go @@ -290,6 +290,7 @@ func getAzuriteDeployment(namespace string) apiv1.Deployment { Name: "azurite", Command: []string{"azurite"}, Args: []string{ + "--skipApiVersionCheck", "-l", "/data", "--cert", "/etc/ssl/certs/azurite.pem", "--key", "/etc/ssl/certs/azurite-key.pem", "--oauth", "basic", "--blobHost", "0.0.0.0", From dcd94e9c50bb7a42f76571e3383852dd5260c65b Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 18 Jul 2025 16:12:13 +0200 Subject: [PATCH 701/836] chore(podlogs): add context.Done logic to `SingleStream` (#8054) This patch makes the function more robust and better handles the flaky test failing on the main branch. 
Signed-off-by: Armando Ruocco --- pkg/podlogs/cluster_writer.go | 8 ++++++-- pkg/podlogs/cluster_writer_test.go | 18 +++++++++++++----- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/pkg/podlogs/cluster_writer.go b/pkg/podlogs/cluster_writer.go index 70941b4fd6..bd235661b3 100644 --- a/pkg/podlogs/cluster_writer.go +++ b/pkg/podlogs/cluster_writer.go @@ -223,8 +223,12 @@ func (csr *ClusterWriter) SingleStream(ctx context.Context, writer io.Writer) er if streamSet.isZero() { return nil } - // wait before looking for new pods to log - time.Sleep(csr.getFollowWaitingTime()) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(csr.getFollowWaitingTime()): + } } } diff --git a/pkg/podlogs/cluster_writer_test.go b/pkg/podlogs/cluster_writer_test.go index 982700dbec..45d2fb292a 100644 --- a/pkg/podlogs/cluster_writer_test.go +++ b/pkg/podlogs/cluster_writer_test.go @@ -22,6 +22,7 @@ package podlogs import ( "bytes" "context" + "strings" "sync" "time" @@ -154,10 +155,14 @@ var _ = Describe("Cluster logging tests", func() { It("should catch extra logs if given the follow option", func(ctx context.Context) { client := fake.NewClientset(pod) + + wg := sync.WaitGroup{} + wg.Add(1) var logBuffer syncBuffer + // let's set a short follow-wait, and keep the cluster streaming for two // cycles - followWaiting := 200 * time.Millisecond + followWaiting := 150 * time.Millisecond ctx2, cancel := context.WithTimeout(ctx, 300*time.Millisecond) go func() { defer GinkgoRecover() @@ -170,12 +175,15 @@ var _ = Describe("Cluster logging tests", func() { Client: client, } err := streamClusterLogs.SingleStream(ctx2, &logBuffer) - Expect(err).NotTo(HaveOccurred()) + Expect(err).To(Equal(context.DeadlineExceeded)) + wg.Done() }() - // give the stream call time to do a new search for pods + time.Sleep(350 * time.Millisecond) cancel() - // the fake pod will be seen twice - Expect(logBuffer.String()).To(BeEquivalentTo("fake logs\nfake logs\n")) + wg.Wait() + + fakeLogCount := strings.Count(logBuffer.String(), "fake logs\n") + Expect(fakeLogCount).To(BeNumerically(">=", 2)) }) }) From e532269d24561e980faaba8db0ea1a78dabdf244 Mon Sep 17 00:00:00 2001 From: Matthias Riegler Date: Mon, 21 Jul 2025 02:38:37 -0700 Subject: [PATCH 702/836] fix(connectionPool): add mutex to protect concurrent access to connections map (#7804) This patch introduces a mutex to ensure concurrency-safe access to the map of connections in the connection pool. Without this safeguard, concurrent access has the potential to cause sporadic crashes of the PostgreSQL instance manager. 
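The fix follows the common mutex-guarded-map idiom. A minimal, self-contained sketch of the pattern under the same assumptions (type and method names are illustrative, not the actual pool API):

```go
package pool

import (
	"database/sql"
	"sync"

	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" driver
)

// connCache guards a lazily-populated map of connections so that
// concurrent callers cannot race on reads and writes of the map.
type connCache struct {
	mu    sync.Mutex
	conns map[string]*sql.DB
}

func (c *connCache) connection(dsn string) (*sql.DB, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Reuse an existing connection if one was already opened.
	if db, ok := c.conns[dsn]; ok {
		return db, nil
	}
	db, err := sql.Open("pgx", dsn)
	if err != nil {
		return nil, err
	}
	if c.conns == nil {
		c.conns = make(map[string]*sql.DB)
	}
	c.conns[dsn] = db
	return db, nil
}
```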
--- pkg/management/postgres/pool/pool.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/pkg/management/postgres/pool/pool.go b/pkg/management/postgres/pool/pool.go index eda3064b4e..5125b23910 100644 --- a/pkg/management/postgres/pool/pool.go +++ b/pkg/management/postgres/pool/pool.go @@ -24,6 +24,7 @@ package pool import ( "database/sql" "fmt" + "sync" // this is needed to correctly open the sql connection with the pgx driver _ "github.com/jackc/pgx/v5/stdlib" @@ -51,7 +52,8 @@ type ConnectionPool struct { connectionProfile ConnectionProfile // A map of connection for every used database - connectionMap map[string]*sql.DB + connectionMap map[string]*sql.DB + connectionMapMutex sync.Mutex } // NewPostgresqlConnectionPool creates a new connectionMap of connections given @@ -78,6 +80,8 @@ func newConnectionPool(baseConnectionString string, connectionProfile Connection // Connection gets the connection for the given database func (pool *ConnectionPool) Connection(dbname string) (*sql.DB, error) { + pool.connectionMapMutex.Lock() + defer pool.connectionMapMutex.Unlock() if result, ok := pool.connectionMap[dbname]; ok { return result, nil } @@ -93,6 +97,9 @@ func (pool *ConnectionPool) Connection(dbname string) (*sql.DB, error) { // ShutdownConnections closes every database connection func (pool *ConnectionPool) ShutdownConnections() { + pool.connectionMapMutex.Lock() + defer pool.connectionMapMutex.Unlock() + for _, db := range pool.connectionMap { _ = db.Close() } @@ -100,7 +107,7 @@ func (pool *ConnectionPool) ShutdownConnections() { pool.connectionMap = make(map[string]*sql.DB) } -// newConnection creates a database connection connectionMap, connecting via +// newConnection creates a database connection, connecting via // Unix domain socket to a database with a certain name func (pool *ConnectionPool) newConnection(dbname string) (*sql.DB, error) { dsn := pool.GetDsn(dbname) From 8e5552f57aa3bf54e3d09d16b0fae914be3d18e7 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 21 Jul 2025 11:56:04 +0200 Subject: [PATCH 703/836] feat(backup): make `Backup.Spec` immutable after creation (#7904) Add an XValidation rule to prevent modification of `Backup.spec` fields after initial creation, ensuring the backup specification remains consistent throughout its lifecycle. Signed-off-by: Armando Ruocco --- api/v1/backup_types.go | 1 + config/crd/bases/postgresql.cnpg.io_backups.yaml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go index c39ede578d..e5b9ea2b36 100644 --- a/api/v1/backup_types.go +++ b/api/v1/backup_types.go @@ -119,6 +119,7 @@ const ( ) // BackupSpec defines the desired state of Backup +// +kubebuilder:validation:XValidation:rule="oldSelf == self",message="BackupSpec is immutable once set" type BackupSpec struct { // The cluster to backup Cluster LocalObjectReference `json:"cluster"` diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index 2a46956dd2..2cffcd3aea 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -142,6 +142,9 @@ spec: required: - cluster type: object + x-kubernetes-validations: + - message: BackupSpec is immutable once set + rule: oldSelf == self status: description: |- Most recently observed status of the backup. 
This data may not be up to From e711b32dffc42585ad11fabc4036ffbca9967915 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 14:46:41 +0200 Subject: [PATCH 704/836] fix(deps): update kubernetes patches to v0.33.3 (main) (#8046) This PR contains the following updates: https://github.com/kubernetes/api `v0.33.2` -> `v0.33.3` https://github.com/kubernetes/apiextensions-apiserver `v0.33.2` -> `v0.33.3` https://github.com/kubernetes/apimachinery `v0.33.2` -> `v0.33.3` https://github.com/kubernetes/cli-runtime `v0.33.2` -> `v0.33.3` https://github.com/kubernetes/client-go `v0.33.2` -> `v0.33.3` --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index e48ec9b3aa..e40b7adf54 100644 --- a/go.mod +++ b/go.mod @@ -38,11 +38,11 @@ require ( golang.org/x/term v0.33.0 google.golang.org/grpc v1.73.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.33.2 - k8s.io/apiextensions-apiserver v0.33.2 - k8s.io/apimachinery v0.33.2 - k8s.io/cli-runtime v0.33.2 - k8s.io/client-go v0.33.2 + k8s.io/api v0.33.3 + k8s.io/apiextensions-apiserver v0.33.3 + k8s.io/apimachinery v0.33.3 + k8s.io/cli-runtime v0.33.3 + k8s.io/client-go v0.33.3 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/yaml v1.5.0 diff --git a/go.sum b/go.sum index a5130cfb49..669da485c8 100644 --- a/go.sum +++ b/go.sum @@ -286,16 +286,16 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY= -k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs= -k8s.io/apiextensions-apiserver v0.33.2 h1:6gnkIbngnaUflR3XwE1mCefN3YS8yTD631JXQhsU6M8= -k8s.io/apiextensions-apiserver v0.33.2/go.mod h1:IvVanieYsEHJImTKXGP6XCOjTwv2LUMos0YWc9O+QP8= -k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY= -k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/cli-runtime v0.33.2 h1:koNYQKSDdq5AExa/RDudXMhhtFasEg48KLS2KSAU74Y= -k8s.io/cli-runtime v0.33.2/go.mod h1:gnhsAWpovqf1Zj5YRRBBU7PFsRc6NkEkwYNQE+mXL88= -k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E= -k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo= +k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= +k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= +k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= +k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= +k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= +k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA= +k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo= +k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= +k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod 
h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= From 0512d90ca0c309ebfd8ab9b19905ea67ce40ff7b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 15:58:39 +0200 Subject: [PATCH 705/836] chore(deps): update github/codeql-action digest to d6bbdef (main) (#8073) --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index aaa4a9fd10..6687311009 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3 + uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3 + uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 86a4bcc976..7afdacdd94 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3 + uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 10b1a8c287..ae153a1caf 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3 + uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 50853e5665..73be8334fa 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3 + uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3 + uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 with: sarif_file: snyk-test.sarif From c40d6f89c72ec369529df4856eb909c42ece0e49 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 17:24:17 +0200 Subject: [PATCH 706/836] chore(deps): update backup test tools (main) (#8074) This PR contains the following updates: minio/mc | minor | `RELEASE.2025-05-21T01-59-54Z` -> `RELEASE.2025-07-16T15-35-03Z` minio/minio | minor | `RELEASE.2025-06-13T11-33-47Z` -> `RELEASE.2025-07-18T21-56-31Z` --- tests/utils/minio/minio.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go index edcb997c9a..8de344ab4b 100644 --- a/tests/utils/minio/minio.go +++ b/tests/utils/minio/minio.go @@ -50,9 +50,9 @@ import ( const ( // minioImage is the image used to run a MinIO server - minioImage = "minio/minio:RELEASE.2025-06-13T11-33-47Z" + minioImage = "minio/minio:RELEASE.2025-07-18T21-56-31Z" // minioClientImage is the image used to run a MinIO client - minioClientImage = "minio/mc:RELEASE.2025-05-21T01-59-54Z" + minioClientImage = "minio/mc:RELEASE.2025-07-16T15-35-03Z" ) // Env contains all the information related or required by MinIO deployment and From 04276672259e11c4fe95dda31d680b382905550b Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 21 Jul 2025 17:49:27 +0200 Subject: [PATCH 707/836] fix(webserver): make routines not blocking (#8071) Signed-off-by: Armando Ruocco --- pkg/management/postgres/webserver/webserver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/management/postgres/webserver/webserver.go b/pkg/management/postgres/webserver/webserver.go index 5a68a30e3f..f69c5db605 100644 --- a/pkg/management/postgres/webserver/webserver.go +++ b/pkg/management/postgres/webserver/webserver.go @@ -105,7 +105,7 @@ func (ws *Webserver) Start(ctx context.Context) error { defer cancel() for _, routine := range ws.routines { - routine(subCtx) + go routine(subCtx) } select { From b97531ee025884e754110c4768caa5b41f70829d Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 21 Jul 2025 17:52:58 +0200 Subject: [PATCH 708/836] fix(pooler): apply `resources` to the `bootstrap-controller` (#7922) The resources specified in the `Pooler` template now correctly apply to the init `bootstrap-controller` container. 
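As an illustration of the fixed behaviour, a minimal `Pooler` manifest along
these lines (all names and values here are hypothetical) now propagates the
`resources` block to the `bootstrap-controller` init container as well as to
the `pgbouncer` container:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: pooler-example            # hypothetical name
spec:
  cluster:
    name: cluster-example         # hypothetical target cluster
  instances: 1
  type: rw
  pgbouncer:
    poolMode: session
  template:
    spec:
      # Before this fix, these requirements were not applied to the
      # bootstrap-controller init container.
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
        limits:
          cpu: 200m
          memory: 256Mi
```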
Closes #7822 Signed-off-by: Armando Ruocco --- api/v1/pooler_funcs.go | 15 ++++++++++ internal/controller/pooler_status_test.go | 34 +++++++++++++++++++++++ pkg/podspec/builder.go | 22 +++++++++++++++ pkg/specs/pgbouncer/deployments.go | 1 + 4 files changed, 72 insertions(+) diff --git a/api/v1/pooler_funcs.go b/api/v1/pooler_funcs.go index 807e9da8f2..f1ca476e1a 100644 --- a/api/v1/pooler_funcs.go +++ b/api/v1/pooler_funcs.go @@ -19,6 +19,8 @@ SPDX-License-Identifier: Apache-2.0 package v1 +import corev1 "k8s.io/api/core/v1" + // IsPaused returns whether all database should be paused or not. func (in PgBouncerSpec) IsPaused() bool { return in.Paused != nil && *in.Paused @@ -58,3 +60,16 @@ func (in *Pooler) IsAutomatedIntegration() bool { } return true } + +// GetResourcesRequirements returns the resource requirements for the Pooler +func (in *Pooler) GetResourcesRequirements() corev1.ResourceRequirements { + if in.Spec.Template == nil { + return corev1.ResourceRequirements{} + } + + if in.Spec.Template.Spec.Resources == nil { + return corev1.ResourceRequirements{} + } + + return *in.Spec.Template.Spec.Resources +} diff --git a/internal/controller/pooler_status_test.go b/internal/controller/pooler_status_test.go index bf8434cfed..9bbba948fd 100644 --- a/internal/controller/pooler_status_test.go +++ b/internal/controller/pooler_status_test.go @@ -23,6 +23,7 @@ import ( "context" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -95,6 +96,39 @@ var _ = Describe("pooler_status unit tests", func() { Expect(pooler.Status.Instances).To(Equal(dep.Status.Replicas)) }) + It("should correctly set pod resources to the bootstrap init container", func() { + cluster := newFakeCNPGCluster(env.client, "test-namespace") + + pooler := &v1.Pooler{ + Spec: v1.PoolerSpec{ + Template: &v1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("256Mi"), + }, + }, + }, + }, + }, + } + + dep, err := pgbouncer.Deployment(pooler, cluster) + Expect(err).ToNot(HaveOccurred()) + // check that the init container has the correct resources + Expect(dep.Spec.Template.Spec.InitContainers).To(HaveLen(1)) + initResources := dep.Spec.Template.Spec.InitContainers[0].Resources + Expect(initResources.Requests).To(HaveKeyWithValue(corev1.ResourceCPU, resource.MustParse("100m"))) + Expect(initResources.Requests).To(HaveKeyWithValue(corev1.ResourceMemory, resource.MustParse("128Mi"))) + Expect(initResources.Limits).To(HaveKeyWithValue(corev1.ResourceCPU, resource.MustParse("200m"))) + Expect(initResources.Limits).To(HaveKeyWithValue(corev1.ResourceMemory, resource.MustParse("256Mi"))) + }) + It("should correctly interact with the api server", func() { ctx := context.Background() namespace := newFakeNamespace(env.client) diff --git a/pkg/podspec/builder.go b/pkg/podspec/builder.go index b4402cf31a..b38ed71d20 100644 --- a/pkg/podspec/builder.go +++ b/pkg/podspec/builder.go @@ -386,6 +386,28 @@ func (builder *Builder) WithInitContainerSecurityContext( return builder } +// WithInitContainerResources ensures that, if in the current status there is +// an init container with the passed name and the resources are empty, the resources will be +// set 
to the ones passed. +// If `overwrite` is true the resources are overwritten even when they're not empty +func (builder *Builder) WithInitContainerResources( + name string, + resources corev1.ResourceRequirements, + overwrite bool, +) *Builder { + builder.WithInitContainer(name) + + for idx, value := range builder.status.Spec.InitContainers { + if value.Name == name { + if overwrite || value.Resources.Limits == nil && value.Resources.Requests == nil { + builder.status.Spec.InitContainers[idx].Resources = resources + } + } + } + + return builder +} + // Build gets the final Pod template func (builder *Builder) Build() *apiv1.PodTemplateSpec { return &builder.status diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go index bc0993f8bd..2df9f7d71a 100644 --- a/pkg/specs/pgbouncer/deployments.go +++ b/pkg/specs/pgbouncer/deployments.go @@ -94,6 +94,7 @@ func Deployment(pooler *apiv1.Pooler, cluster *apiv1.Cluster) (*appsv1.Deploymen WithInitContainerCommand(specs.BootstrapControllerContainerName, []string{"/manager", "bootstrap", "/controller/manager"}, true). + WithInitContainerResources(specs.BootstrapControllerContainerName, pooler.GetResourcesRequirements(), true). WithInitContainerSecurityContext(specs.BootstrapControllerContainerName, specs.CreateContainerSecurityContext(cluster.GetSeccompProfile()), true). From 6bb208626eeb422824528f6d082c0fd6b165d72f Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Mon, 21 Jul 2025 18:46:26 +0200 Subject: [PATCH 709/836] fix(upgrade): stop testing `max_slot_wal_keep_size` (#8082) The behaviour is due to a PostgreSQL bug solved in https://github.com/postgres/postgres/commit/f36e5774. We kept the workaround for PostgreSQL 17 as the upgrade target, and documented which versions contain the bug. Closes #7283 Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- docs/src/postgres_upgrades.md | 9 +++++++++ internal/cmd/manager/instance/upgrade/execute/cmd.go | 4 +++- tests/e2e/cluster_major_upgrade_test.go | 1 - 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/src/postgres_upgrades.md b/docs/src/postgres_upgrades.md index 4f8af744fe..35a17057cf 100644 --- a/docs/src/postgres_upgrades.md +++ b/docs/src/postgres_upgrades.md @@ -59,6 +59,15 @@ requested for a cluster. operating system distribution. For example, if your previous version uses a `bullseye` image, you cannot upgrade to a `bookworm` image. +!!! Warning + There is a bug in PostgreSQL 17.0 through 17.5 that prevents successful upgrades + if the `max_slot_wal_keep_size` parameter is set to any value other than `-1`. + The upgrade process will fail with an error related to replication slot configuration. + This issue has been [fixed in PostgreSQL 17.6 and 18beta2 or later versions](https://github.com/postgres/postgres/commit/f36e5774). + If you are using PostgreSQL 17.0 through 17.5, ensure that you upgrade to at least + PostgreSQL 17.6 before attempting a major upgrade, or make sure to temporarily set + the `max_slot_wal_keep_size` parameter to `-1` in your cluster configuration. 
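As a sketch of that last workaround (cluster name, image, and sizes are
illustrative; the `max_slot_wal_keep_size` line is the relevant part), the
parameter can be reset declaratively before triggering the upgrade:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example           # hypothetical name
spec:
  imageName: ghcr.io/cloudnative-pg/postgresql:17.5-standard-bookworm
  instances: 3
  postgresql:
    parameters:
      # back to the default while pg_upgrade runs on 17.0 through 17.5
      max_slot_wal_keep_size: "-1"
  storage:
    size: 1Gi
```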
+ You can trigger the upgrade in one of two ways: - By updating the major version in the image tag via the `.spec.imageName` diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index 43c309738d..418ce4f332 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -393,7 +393,9 @@ func prepareConfigurationFiles(ctx context.Context, cluster apiv1.Cluster, destD return fmt.Errorf("appending inclusion directives to postgresql.conf file resulted in an error: %w", err) } - // Set `max_slot_wal_keep_size` to the default value because any other value it is not supported in pg_upgrade + // Set `max_slot_wal_keep_size` to the default value because any other value causes an error + // during pg_upgrade in PostgreSQL 17 before 17.6. The bug has been fixed with the commit + // https://github.com/postgres/postgres/commit/f36e5774 tmpCluster := cluster.DeepCopy() tmpCluster.Spec.PostgresConfiguration.Parameters["max_slot_wal_keep_size"] = "-1" diff --git a/tests/e2e/cluster_major_upgrade_test.go b/tests/e2e/cluster_major_upgrade_test.go index 819e574e64..17b94df2fb 100644 --- a/tests/e2e/cluster_major_upgrade_test.go +++ b/tests/e2e/cluster_major_upgrade_test.go @@ -103,7 +103,6 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade "log_temp_files": "1024", "log_autovacuum_min_duration": "1000", "log_replication_commands": "on", - "max_slot_wal_keep_size": "1GB", }, }, }, From 5ddd3e620f3f80d35344d11589006fffb030bb3a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 22:43:58 +0200 Subject: [PATCH 710/836] chore(deps): update dependency golangci/golangci-lint to v2.3.0 (main) (#8083) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 7afdacdd94..43152113f3 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -21,7 +21,7 @@ env: # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.5" # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose - GOLANGCI_LINT_VERSION: "v2.2.2" + GOLANGCI_LINT_VERSION: "v2.3.0" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" From c168e9dddbb6c43bbee4d5e6f7cd8c8fc2b66428 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Tue, 22 Jul 2025 11:13:30 +0200 Subject: [PATCH 711/836] fix(subscriptions): restrict subscription parameter updates to valid PostgreSQL options (#7844) This commit fixes a bug related to updating PostgreSQL subscription parameters in CloudNativePG. Previously, the operator attempted to update parameters such as `copy_data` via `ALTER SUBSCRIPTION`, which is not permitted by PostgreSQL and led to reconciliation failures. With this patch, the operator restricts updates to only those subscription parameters allowed by PostgreSQL, preventing invalid SQL statements. 
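As a short SQL sketch of the constraint being enforced (subscription name
hypothetical), PostgreSQL accepts only a restricted set of options in
`ALTER SUBSCRIPTION ... SET`:

```sql
-- Options in the allowed set (for example streaming, disable_on_error,
-- origin, failover) can be changed after the subscription exists:
ALTER SUBSCRIPTION app_sub SET (streaming = 'on', disable_on_error = 'true');

-- copy_data is only meaningful at CREATE SUBSCRIPTION time; altering it is
-- rejected by PostgreSQL, which is what previously broke reconciliation:
-- ALTER SUBSCRIPTION app_sub SET (copy_data = 'true');  -- fails
```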
Closes #7585 Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Gabriele Bartolini --- api/v1/subscription_types.go | 7 +++- .../postgresql.cnpg.io_subscriptions.yaml | 7 +++- docs/src/cloudnative-pg.v1.md | 7 +++- .../controller/subscription_controller.go | 11 +++-- .../controller/subscription_controller_sql.go | 36 +++++++++++++++-- .../subscription_controller_sql_test.go | 40 +++++++++++++++---- .../subscription_controller_test.go | 5 +++ 7 files changed, 94 insertions(+), 19 deletions(-) diff --git a/api/v1/subscription_types.go b/api/v1/subscription_types.go index 2a4f4f7185..adf9f70f18 100644 --- a/api/v1/subscription_types.go +++ b/api/v1/subscription_types.go @@ -52,8 +52,11 @@ type SubscriptionSpec struct { // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="dbname is immutable" DBName string `json:"dbname"` - // Subscription parameters part of the `WITH` clause as expected by - // PostgreSQL `CREATE SUBSCRIPTION` command + // Subscription parameters included in the `WITH` clause of the PostgreSQL + // `CREATE SUBSCRIPTION` command. Most parameters cannot be changed + // after the subscription is created and will be ignored if modified + // later, except for a limited set documented at: + // https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET // +optional Parameters map[string]string `json:"parameters,omitempty"` diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml index 37fdac9121..72d7b72fe5 100644 --- a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml +++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml @@ -93,8 +93,11 @@ spec: additionalProperties: type: string description: |- - Subscription parameters part of the `WITH` clause as expected by - PostgreSQL `CREATE SUBSCRIPTION` command + Subscription parameters included in the `WITH` clause of the PostgreSQL + `CREATE SUBSCRIPTION` command. Most parameters cannot be changed + after the subscription is created and will be ignored if modified + later, except for a limited set documented at: + https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET type: object publicationDBName: description: |- diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 08160dba78..d3853641a2 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -5838,8 +5838,11 @@ the "subscriber" cluster

 [HTML table markup omitted]
 map[string]string
-Subscription parameters part of the WITH clause as expected by
-PostgreSQL CREATE SUBSCRIPTION command
+Subscription parameters included in the WITH clause of the PostgreSQL
+CREATE SUBSCRIPTION command. Most parameters cannot be changed
+after the subscription is created and will be ignored if modified
+later, except for a limited set documented at:
+https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET
 publicationName [Required]
diff --git a/internal/management/controller/subscription_controller.go b/internal/management/controller/subscription_controller.go index 16450f0772..10f06a7192 100644 --- a/internal/management/controller/subscription_controller.go +++ b/internal/management/controller/subscription_controller.go @@ -42,9 +42,10 @@ type SubscriptionReconciler struct { client.Client Scheme *runtime.Scheme - instance *postgres.Instance - finalizerReconciler *finalizerReconciler[*apiv1.Subscription] - getDB func(name string) (*sql.DB, error) + instance *postgres.Instance + finalizerReconciler *finalizerReconciler[*apiv1.Subscription] + getDB func(name string) (*sql.DB, error) + getPostgresMajorVersion func() (int, error) } // subscriptionReconciliationInterval is the time between the @@ -190,6 +191,10 @@ func NewSubscriptionReconciler( getDB: func(name string) (*sql.DB, error) { return instance.ConnectionPool().Connection(name) }, + getPostgresMajorVersion: func() (int, error) { + version, err := instance.GetPgVersion() + return int(version.Major), err //nolint:gosec + }, } sr.finalizerReconciler = newFinalizerReconciler( mgr.GetClient(), diff --git a/internal/management/controller/subscription_controller_sql.go b/internal/management/controller/subscription_controller_sql.go index c81a0f5a9d..89b255fc6d 100644 --- a/internal/management/controller/subscription_controller_sql.go +++ b/internal/management/controller/subscription_controller_sql.go @@ -77,7 +77,11 @@ func (r *SubscriptionReconciler) patchSubscription( obj *apiv1.Subscription, connString string, ) error { - sqls := toSubscriptionAlterSQL(obj, connString) + version, err := r.getPostgresMajorVersion() + if err != nil { + return fmt.Errorf("while getting the PostgreSQL major version: %w", err) + } + sqls := toSubscriptionAlterSQL(obj, connString, version) for _, sqlQuery := range sqls { if _, err := db.ExecContext(ctx, sqlQuery); err != nil { return err @@ -112,7 +116,7 @@ func toSubscriptionCreateSQL(obj *apiv1.Subscription, connString string) string return createQuery } -func toSubscriptionAlterSQL(obj *apiv1.Subscription, connString string) []string { +func toSubscriptionAlterSQL(obj *apiv1.Subscription, connString string, pgMajorVersion int) []string { result := make([]string, 0, 3) setPublicationSQL := fmt.Sprintf( @@ -133,7 +137,7 @@ func toSubscriptionAlterSQL(obj *apiv1.Subscription, connString string) []string fmt.Sprintf( "ALTER SUBSCRIPTION %s SET (%s)", pgx.Identifier{obj.Spec.Name}.Sanitize(), - toPostgresParameters(obj.Spec.Parameters), + toPostgresParameters(filterSubscriptionUpdatableParameters(obj.Spec.Parameters, pgMajorVersion)), ), ) } @@ -141,6 +145,32 @@ func toSubscriptionAlterSQL(obj *apiv1.Subscription, connString string) []string return result } +func filterSubscriptionUpdatableParameters(parameters map[string]string, pgMajorVersion int) map[string]string { + // Only a limited set of the parameters can be updated + // see https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + allowedParameters := []string{ + "slot_name", + "synchronous_commit", + "binary", + "streaming", + "disable_on_error", + "password_required", + "run_as_owner", + "origin", + "failover", + } + if pgMajorVersion >= 18 { + allowedParameters = append(allowedParameters, "two_phase") + } + filteredParameters := make(map[string]string, len(parameters)) + for _, key := range allowedParameters { + if _, present := parameters[key]; present { + filteredParameters[key] = parameters[key] + } + } + return filteredParameters 
+} + func executeDropSubscription(ctx context.Context, db *sql.DB, name string) error { if _, err := db.ExecContext( ctx, diff --git a/internal/management/controller/subscription_controller_sql_test.go b/internal/management/controller/subscription_controller_sql_test.go index d3628858b2..2678e75c54 100644 --- a/internal/management/controller/subscription_controller_sql_test.go +++ b/internal/management/controller/subscription_controller_sql_test.go @@ -35,6 +35,8 @@ import ( // nolint: dupl var _ = Describe("subscription sql", func() { + const defaultPostgresMajorVersion = 17 + var ( dbMock sqlmock.Sqlmock db *sql.DB @@ -132,28 +134,52 @@ var _ = Describe("subscription sql", func() { } connString := "host=localhost user=test dbname=test" - sqls := toSubscriptionAlterSQL(obj, connString) + sqls := toSubscriptionAlterSQL(obj, connString, defaultPostgresMajorVersion) Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) }) - It("generates correct SQL for altering subscription with parameters", func() { + It("generates correct SQL for altering subscription with parameters for PostgreSQL 17", func() { obj := &apiv1.Subscription{ Spec: apiv1.SubscriptionSpec{ Name: "test_sub", PublicationName: "test_pub", Parameters: map[string]string{ - "param1": "value1", - "param2": "value2", + "copy_data": "true", + "origin": "none", + "failover": "true", + "two_phase": "true", + }, + }, + } + connString := "host=localhost user=test dbname=test" + + sqls := toSubscriptionAlterSQL(obj, connString, 17) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) + Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET ("failover" = 'true', "origin" = 'none')`)) + }) + + It("generates correct SQL for altering subscription with parameters for PostgreSQL 18", func() { + obj := &apiv1.Subscription{ + Spec: apiv1.SubscriptionSpec{ + Name: "test_sub", + PublicationName: "test_pub", + Parameters: map[string]string{ + "copy_data": "true", + "origin": "none", + "failover": "true", + "two_phase": "true", }, }, } connString := "host=localhost user=test dbname=test" - sqls := toSubscriptionAlterSQL(obj, connString) + sqls := toSubscriptionAlterSQL(obj, connString, 18) Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) - Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET ("param1" = 'value1', "param2" = 'value2')`)) + Expect(sqls).To(ContainElement( + `ALTER SUBSCRIPTION "test_sub" SET ("failover" = 'true', "origin" = 'none', "two_phase" = 'true')`)) }) It("returns correct SQL for altering subscription with no owner or parameters", func() { @@ -165,7 +191,7 @@ var _ = Describe("subscription sql", func() { } connString := "host=localhost user=test dbname=test" - sqls := toSubscriptionAlterSQL(obj, connString) + sqls := toSubscriptionAlterSQL(obj, connString, defaultPostgresMajorVersion) Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" SET PUBLICATION "test_pub"`)) Expect(sqls).To(ContainElement(`ALTER SUBSCRIPTION "test_sub" CONNECTION 'host=localhost user=test dbname=test'`)) }) diff --git 
a/internal/management/controller/subscription_controller_test.go b/internal/management/controller/subscription_controller_test.go index 99a2dd1f81..343dd4e4b8 100644 --- a/internal/management/controller/subscription_controller_test.go +++ b/internal/management/controller/subscription_controller_test.go @@ -50,6 +50,8 @@ const subscriptionDetectionQuery = `SELECT count(*) WHERE subname = $1` var _ = Describe("Managed subscription controller tests", func() { + const defaultPostgresMajorVersion = 17 + var ( dbMock sqlmock.Sqlmock db *sql.DB @@ -123,6 +125,9 @@ var _ = Describe("Managed subscription controller tests", func() { getDB: func(_ string) (*sql.DB, error) { return db, nil }, + getPostgresMajorVersion: func() (int, error) { + return defaultPostgresMajorVersion, nil + }, } r.finalizerReconciler = newFinalizerReconciler( fakeClient, From e0c09688d12788e20d2c4e7a1f2afe886e2158c9 Mon Sep 17 00:00:00 2001 From: Vitor Floriano <107767584+vitorfloriano@users.noreply.github.com> Date: Tue, 22 Jul 2025 08:28:27 -0300 Subject: [PATCH 712/836] docs(contributing): clarify instructions for external contributors (#8064) Closes #8017 Signed-off-by: Vitor Floriano <107767584+vitorfloriano@users.noreply.github.com> --- CONTRIBUTING.md | 8 ++++++++ contribute/README.md | 18 +++++++++++------- contribute/e2e_testing_environment/README.md | 9 ++++++++- 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 39738a451f..94bafbd5c2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -35,6 +35,14 @@ We welcome many types of contributions including: For development contributions, please refer to the separate section called ["Contributing to the source code"](contribute/README.md). +## External Contributors vs Maintainers + +**External Contributors:** If you're contributing from outside the core team, please note that some instructions in our detailed development docs apply only to maintainers. See the [development contribution guide](contribute/README.md) for complete details, but note: + +- **Issue Assignment**: Comment "I'd like to work on this" instead of self-assigning +- **Testing**: Run local unit tests and basic e2e tests (see [testing guide](contribute/e2e_testing_environment/README.md)); maintainers will handle comprehensive cloud-based E2E testing +- **Project Boards**: Maintainers will move tickets through project phases + ## Ask for Help The best way to reach us with a question when contributing is to drop a line in diff --git a/contribute/README.md b/contribute/README.md index 2f66de58bb..d5c9d2a266 100644 --- a/contribute/README.md +++ b/contribute/README.md @@ -94,15 +94,19 @@ If you have written code for an improvement to CloudNativePG or a bug fix, please follow this procedure to submit a pull request: 1. [Create a fork](development_environment/README.md#forking-the-repository) of CloudNativePG -2. Self-assign the ticket and begin working on it in the forked project. Move - the ticket to `Analysis` or `In Development` phase of +2. **External contributors**: Comment on the issue with "I'd like to work on this" and wait for assignment. + **Maintainers**: Self-assign the ticket and move it to `Analysis` or `In Development` phase of [CloudNativePG operator development](https://github.com/orgs/cloudnative-pg/projects/2) -3. [Run the e2e tests in the forked repository](e2e_testing_environment/README.md#running-e2e-tests-on-a-fork-of-the-repository) +3. 
**External contributors**: Run local unit tests and basic e2e tests using `FEATURE_TYPE=smoke,basic make e2e-test-kind` or `TEST_DEPTH=0 make e2e-test-kind` for critical tests only. + **Maintainers**: [Run the comprehensive e2e tests in the forked repository](e2e_testing_environment/README.md#running-e2e-tests-on-a-fork-of-the-repository) 4. Once development is finished, create a pull request from your forked project - to the CloudNativePG project and move the ticket to the `Waiting for First Review` - phase. Please make sure the pull request title and message follow - [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) - + to the CloudNativePG project. **Maintainers** will move the ticket to the `Waiting for First Review` + phase. + + > Please make sure the pull request title and message follow + > [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) + + > To facilitate collaboration, always [allow edits by maintainers](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/allowing-changes-to-a-pull-request-branch-created-from-a-fork) One of the maintainers will then proceed with the first review and approve the CI workflow to run in the CloudNativePG project. The second reviewer will run diff --git a/contribute/e2e_testing_environment/README.md b/contribute/e2e_testing_environment/README.md index 45ed0e5937..cd70269c4e 100644 --- a/contribute/e2e_testing_environment/README.md +++ b/contribute/e2e_testing_environment/README.md @@ -273,11 +273,18 @@ the `kind` engine. ### Running E2E tests on a fork of the repository -Additionally, if you fork the repository and want to run the tests on your fork, you can do so +**For maintainers and organization members:** If you fork the repository and want to run the tests on your fork, you can do so by running the `/test` command in a Pull Request opened in your forked repository. `/test` is used to trigger a run of the end-to-end tests in the GitHub Actions. Only users who have `write` permission to the repository can use this command. +**For external contributors:** You can run local e2e tests using: +- `FEATURE_TYPE=smoke,basic make e2e-test-kind` for smoke and basic tests +- `TEST_DEPTH=0 make e2e-test-kind` for critical tests only +- `TEST_DEPTH=1 make e2e-test-kind` for critical and high priority tests + +Maintainers will handle comprehensive cloud-based E2E testing during the pull request review process. 
+ Options supported are: - test_level (`level` or `tl` for short) From 1b0926fec879ac9aa61f189dbaac77d4322e76f3 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 22 Jul 2025 13:43:15 +0200 Subject: [PATCH 713/836] fix: add the correct secretName to `ensureReplicationClientLeafCertificate` not found error (#8086) Signed-off-by: Armando Ruocco --- internal/controller/cluster_pki.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/controller/cluster_pki.go b/internal/controller/cluster_pki.go index 38b93adfda..ab8575cd1f 100644 --- a/internal/controller/cluster_pki.go +++ b/internal/controller/cluster_pki.go @@ -268,7 +268,7 @@ func (r *ClusterReconciler) ensureReplicationClientLeafCertificate( caSecret *v1.Secret, ) error { // Generating postgres client certificate - replicationSecretName := client.ObjectKey{ + replicationSecretObjectKey := client.ObjectKey{ Namespace: cluster.GetNamespace(), Name: cluster.GetReplicationSecretName(), } @@ -278,7 +278,7 @@ func (r *ClusterReconciler) ensureReplicationClientLeafCertificate( return r.ensureLeafCertificate( ctx, cluster, - replicationSecretName, + replicationSecretObjectKey, apiv1.StreamingReplicationUser, caSecret, certs.CertTypeClient, @@ -288,9 +288,9 @@ func (r *ClusterReconciler) ensureReplicationClientLeafCertificate( } var replicationClientSecret v1.Secret - if err := r.Get(ctx, replicationSecretName, &replicationClientSecret); apierrors.IsNotFound(err) { + if err := r.Get(ctx, replicationSecretObjectKey, &replicationClientSecret); apierrors.IsNotFound(err) { return fmt.Errorf("missing specified replication TLS secret %s: %w", - cluster.Status.Certificates.ServerTLSSecret, err) + replicationSecretObjectKey.Name, err) } else if err != nil { return err } From 8cb490e8cfe11b042d3a5607471d9b856be07f45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niccol=C3=B2=20Fei?= Date: Tue, 22 Jul 2025 14:36:39 +0200 Subject: [PATCH 714/836] test: support beta versions in major upgrade E2Es (#8036) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #8035 Signed-off-by: Niccolò Fei Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Marco Nenciarini Signed-off-by: Francesco Canovai Co-authored-by: Jonathan Gonzalez V. 
Co-authored-by: Marco Nenciarini Co-authored-by: Francesco Canovai --- .github/pg_versions.json | 4 ++ tests/e2e/cluster_major_upgrade_test.go | 96 +++++++++++++++++-------- 2 files changed, 69 insertions(+), 31 deletions(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 828288a6cb..85a0a03c92 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,4 +1,8 @@ { + "18": [ + "18beta1", + "18beta1-7" + ], "17": [ "17.5", "17.4" diff --git a/tests/e2e/cluster_major_upgrade_test.go b/tests/e2e/cluster_major_upgrade_test.go index 17b94df2fb..178dd7e063 100644 --- a/tests/e2e/cluster_major_upgrade_test.go +++ b/tests/e2e/cluster_major_upgrade_test.go @@ -24,6 +24,7 @@ import ( "fmt" "os" "strconv" + "strings" "time" "github.com/cloudnative-pg/machinery/pkg/image/reference" @@ -109,9 +110,9 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade } } - generatePostgreSQLCluster := func(namespace string, storageClass string, majorVersion int) *v1.Cluster { + generatePostgreSQLCluster := func(namespace string, storageClass string, tagVersion string) *v1.Cluster { cluster := generateBaseCluster(namespace, storageClass) - cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%d-standard-bookworm", majorVersion) + cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-standard-bookworm", tagVersion) cluster.Spec.Bootstrap.InitDB.PostInitSQL = []string{ "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;", "CREATE EXTENSION IF NOT EXISTS pg_trgm;", @@ -119,15 +120,15 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade cluster.Spec.PostgresConfiguration.Parameters["pg_stat_statements.track"] = "top" return cluster } - generatePostgreSQLMinimalCluster := func(namespace string, storageClass string, majorVersion int) *v1.Cluster { - cluster := generatePostgreSQLCluster(namespace, storageClass, majorVersion) - cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%d-minimal-bookworm", majorVersion) + generatePostgreSQLMinimalCluster := func(namespace string, storageClass string, tagVersion string) *v1.Cluster { + cluster := generatePostgreSQLCluster(namespace, storageClass, tagVersion) + cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s-minimal-bookworm", tagVersion) return cluster } - generatePostGISCluster := func(namespace string, storageClass string, majorVersion int) *v1.Cluster { + generatePostGISCluster := func(namespace string, storageClass string, tagVersion string) *v1.Cluster { cluster := generateBaseCluster(namespace, storageClass) - cluster.Spec.ImageName = "ghcr.io/cloudnative-pg/postgis:" + strconv.Itoa(majorVersion) + cluster.Spec.ImageName = fmt.Sprintf("ghcr.io/cloudnative-pg/postgis:%s", tagVersion) cluster.Spec.Bootstrap.InitDB.PostInitApplicationSQL = []string{ "CREATE EXTENSION postgis", "CREATE EXTENSION postgis_raster", @@ -148,17 +149,26 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade return cluster } - determineVersionsForTesting := func() (uint64, uint64) { + type versionInfo struct { + currentMajor uint64 + currentTag string + targetMajor uint64 + targetTag string + } + + determineVersionsForTesting := func() versionInfo { currentImage := os.Getenv("POSTGRES_IMG") Expect(currentImage).ToNot(BeEmpty()) currentVersion, err := version.FromTag(reference.New(currentImage).Tag) Expect(err).NotTo(HaveOccurred()) currentMajor := currentVersion.Major() + currentTag := 
strconv.FormatUint(currentMajor, 10) targetVersion, err := version.FromTag(reference.New(versions.DefaultImageName).Tag) Expect(err).ToNot(HaveOccurred()) targetMajor := targetVersion.Major() + targetTag := strconv.FormatUint(targetMajor, 10) // If same version, choose a previous one for testing if currentMajor == targetMajor { @@ -166,12 +176,28 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade GinkgoWriter.Printf("Using %v as the current major version instead.\n", currentMajor) } - return currentMajor, targetMajor + // This means we are on a beta version, so we can just invert the versions + if currentMajor > targetMajor { + currentMajor, targetMajor = targetMajor, currentMajor + currentTag = targetTag + // Beta images don't have a major version only tag yet, and + // are most likely in the following format: "18beta1", "18rc2" + // So, we split at the first `-` and use that prefix to build the target image. + targetTag = strings.Split(reference.New(currentImage).Tag, "-")[0] + GinkgoWriter.Printf("Using %v as the current major and upgrading to %v.\n", currentMajor, targetMajor) + } + + return versionInfo{ + currentMajor: currentMajor, + currentTag: currentTag, + targetMajor: targetMajor, + targetTag: targetTag, + } } // generateTargetImages, given a targetMajor, generates a target image for each buildScenario. // MAJOR_UPGRADE_IMAGE_REPO env allows to customize the target image repository. - generateTargetImages := func(targetMajor uint64) map[string]string { + generateTargetImages := func(targetTag string) map[string]string { const ( // ImageRepository is the default repository for Postgres container images ImageRepository = "ghcr.io/cloudnative-pg/postgresql" @@ -182,43 +208,45 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade // Default target Images targetImages := map[string]string{ - postgisEntry: fmt.Sprintf("%v:%v", PostgisImageRepository, targetMajor), - postgresqlEntry: fmt.Sprintf("%v:%v-standard-bookworm", ImageRepository, targetMajor), - postgresqlMinimalEntry: fmt.Sprintf("%v:%v-minimal-bookworm", ImageRepository, targetMajor), + postgisEntry: fmt.Sprintf("%v:%v", PostgisImageRepository, targetTag), + postgresqlEntry: fmt.Sprintf("%v:%v-standard-bookworm", ImageRepository, targetTag), + postgresqlMinimalEntry: fmt.Sprintf("%v:%v-minimal-bookworm", ImageRepository, targetTag), } // Set custom targets when detecting a given env variable if envValue := os.Getenv(customImageRegistryEnvVar); envValue != "" { - targetImages[postgisEntry] = fmt.Sprintf("%v:%v-postgis-bookworm", envValue, targetMajor) - targetImages[postgresqlEntry] = fmt.Sprintf("%v:%v-standard-bookworm", envValue, targetMajor) - targetImages[postgresqlMinimalEntry] = fmt.Sprintf("%v:%v-minimal-bookworm", envValue, targetMajor) + targetImages[postgisEntry] = fmt.Sprintf("%v:%v-postgis-bookworm", envValue, targetTag) + targetImages[postgresqlEntry] = fmt.Sprintf("%v:%v-standard-bookworm", envValue, targetTag) + targetImages[postgresqlMinimalEntry] = fmt.Sprintf("%v:%v-minimal-bookworm", envValue, targetTag) } return targetImages } buildScenarios := func( - namespace string, storageClass string, currentMajor, targetMajor uint64, + namespace string, storageClass string, info versionInfo, ) map[string]*scenario { - targetImages := generateTargetImages(targetMajor) + targetImages := generateTargetImages(info.targetTag) return map[string]*scenario{ postgisEntry: { - startingCluster: generatePostGISCluster(namespace, storageClass, int(currentMajor)), - 
startingMajor: int(currentMajor), + startingCluster: generatePostGISCluster(namespace, storageClass, strconv.FormatUint(info.currentMajor, 10)), + startingMajor: int(info.currentMajor), targetImage: targetImages[postgisEntry], - targetMajor: int(targetMajor), + targetMajor: int(info.targetMajor), }, postgresqlEntry: { - startingCluster: generatePostgreSQLCluster(namespace, storageClass, int(currentMajor)), - startingMajor: int(currentMajor), - targetImage: targetImages[postgresqlEntry], - targetMajor: int(targetMajor), + startingCluster: generatePostgreSQLCluster(namespace, storageClass, + strconv.FormatUint(info.currentMajor, 10)), + startingMajor: int(info.currentMajor), + targetImage: targetImages[postgresqlEntry], + targetMajor: int(info.targetMajor), }, postgresqlMinimalEntry: { - startingCluster: generatePostgreSQLMinimalCluster(namespace, storageClass, int(currentMajor)), - startingMajor: int(currentMajor), - targetImage: targetImages[postgresqlMinimalEntry], - targetMajor: int(targetMajor), + startingCluster: generatePostgreSQLMinimalCluster(namespace, storageClass, + strconv.FormatUint(info.currentMajor, 10)), + startingMajor: int(info.currentMajor), + targetImage: targetImages[postgresqlMinimalEntry], + targetMajor: int(info.targetMajor), }, } } @@ -290,7 +318,7 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade Skip("Test depth is lower than the amount requested for this test") } - currentMajor, targetMajor := determineVersionsForTesting() + versionInfo := determineVersionsForTesting() var err error namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) Expect(err).ToNot(HaveOccurred()) @@ -301,7 +329,7 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade // We cannot use generated entries in the DescribeTable, so we use the scenario key as a constant, but // define the actual content here. 
// See https://onsi.github.io/ginkgo/#mental-model-table-specs-are-just-syntactic-sugar - scenarios = buildScenarios(namespace, storageClass, currentMajor, targetMajor) + scenarios = buildScenarios(namespace, storageClass, versionInfo) }) DescribeTable("can upgrade a Cluster to a newer major version", func(scenarioName string) { @@ -315,6 +343,12 @@ var _ = Describe("Postgres Major Upgrade", Label(tests.LabelPostgresMajorUpgrade } scenario := scenarios[scenarioName] + + if scenarioName == postgisEntry && scenario.targetMajor > 17 { + // PostGIS images are not available for Postgres versions greater than 17 + Skip("Skipping major upgrades on PostGIS images for Postgres versions greater than 17") + } + cluster := scenario.startingCluster err := env.Client.Create(env.Ctx, cluster) Expect(err).NotTo(HaveOccurred()) From 801825b2ddf5f1a9f2b9714f5bfca75f745452da Mon Sep 17 00:00:00 2001 From: Peggie Date: Tue, 22 Jul 2025 15:33:00 +0200 Subject: [PATCH 715/836] test: Updated Postgres versions used in E2E tests (#8099) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 85a0a03c92..77dee9750e 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "18": [ - "18beta1", - "18beta1-7" + "18beta2", + "18beta2-1" ], "17": [ "17.5", From 278677ae1991513663b7eb9aa85b2f1f8b90f047 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 22 Jul 2025 16:38:50 +0200 Subject: [PATCH 716/836] docs(cnpg-i): deprecate backup metrics and fields in `Cluster` status (#8091) Due to the transition to a plugin-based, backup-agnostic architecture in CloudNativePG, we are deprecating: - `firstRecoverabilityPoint`, `firstRecoverabilityPointByMethod`, `lastSuccessfulBackup`, `lastSuccessfulBackupByMethod`, and `lastFailedBackup` in `Cluster.status` - `cnpg_collector_first_recoverability_point`, `cnpg_collector_last_failed_backup_timestamp`, `cnpg_collector_last_available_backup_timestamp` metrics If you have migrated to a plugin-based backup and recovery solution such as Barman Cloud, these fields/metrics are no longer synchronized and will not be updated. Users still relying on the in-core support for Barman Cloud and volume snapshots can continue to use these fields for the time being. 
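For operators still consuming these fields, a `jsonpath` query such as the
hedged sketch below (cluster name and namespace are hypothetical) shows what
the cluster currently reports; once a plugin manages backups, the reported
values simply stop being refreshed:

```sh
kubectl get cluster cluster-example -n default -o \
  jsonpath='{.status.firstRecoverabilityPoint}{"\n"}{.status.lastSuccessfulBackup}{"\n"}{.status.lastFailedBackup}{"\n"}'
```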
Closes #8051 Signed-off-by: Gabriele Bartolini Signed-off-by: Marco Nenciarini Signed-off-by: Francesco Canovai Co-authored-by: Marco Nenciarini Co-authored-by: Francesco Canovai --- api/v1/cluster_types.go | 22 +++++++--- .../bases/postgresql.cnpg.io_clusters.yaml | 27 ++++++++---- docs/src/cloudnative-pg.v1.md | 17 +++++--- docs/src/installation_upgrade.md | 41 +++++++++++++++++++ docs/src/monitoring.md | 19 ++++++--- docs/src/troubleshooting.md | 11 ----- internal/cmd/plugin/status/status.go | 2 +- internal/controller/backup_controller_test.go | 30 ++++++++++++++ pkg/management/postgres/backup_test.go | 2 +- .../webserver/metricserver/pg_collector.go | 13 +++--- pkg/resources/status/backup.go | 2 +- pkg/resources/status/backup_test.go | 2 +- tests/e2e/backup_restore_azure_test.go | 4 +- tests/e2e/backup_restore_azurite_test.go | 4 +- tests/e2e/backup_restore_minio_test.go | 16 ++++---- tests/e2e/tablespaces_test.go | 8 ++-- 16 files changed, 157 insertions(+), 63 deletions(-) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index b5d2be6ab3..0fd9cd3c49 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -905,24 +905,34 @@ type ClusterStatus struct { Certificates CertificatesStatus `json:"certificates,omitempty"` // The first recoverability point, stored as a date in RFC3339 format. - // This field is calculated from the content of FirstRecoverabilityPointByMethod + // This field is calculated from the content of FirstRecoverabilityPointByMethod. + // + // Deprecated: the field is not set for backup plugins. // +optional FirstRecoverabilityPoint string `json:"firstRecoverabilityPoint,omitempty"` - // The first recoverability point, stored as a date in RFC3339 format, per backup method type + // The first recoverability point, stored as a date in RFC3339 format, per backup method type. + // + // Deprecated: the field is not set for backup plugins. // +optional FirstRecoverabilityPointByMethod map[BackupMethod]metav1.Time `json:"firstRecoverabilityPointByMethod,omitempty"` - // Last successful backup, stored as a date in RFC3339 format - // This field is calculated from the content of LastSuccessfulBackupByMethod + // Last successful backup, stored as a date in RFC3339 format. + // This field is calculated from the content of LastSuccessfulBackupByMethod. + // + // Deprecated: the field is not set for backup plugins. // +optional LastSuccessfulBackup string `json:"lastSuccessfulBackup,omitempty"` - // Last successful backup, stored as a date in RFC3339 format, per backup method type + // Last successful backup, stored as a date in RFC3339 format, per backup method type. + // + // Deprecated: the field is not set for backup plugins. // +optional LastSuccessfulBackupByMethod map[BackupMethod]metav1.Time `json:"lastSuccessfulBackupByMethod,omitempty"` - // Stored as a date in RFC3339 format + // Last failed backup, stored as a date in RFC3339 format. + // + // Deprecated: the field is not set for backup plugins. // +optional LastFailedBackup string `json:"lastFailedBackup,omitempty"` diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index feb6be0621..f1a78bc165 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -6110,14 +6110,18 @@ spec: firstRecoverabilityPoint: description: |- The first recoverability point, stored as a date in RFC3339 format. 
- This field is calculated from the content of FirstRecoverabilityPointByMethod + This field is calculated from the content of FirstRecoverabilityPointByMethod. + + Deprecated: the field is not set for backup plugins. type: string firstRecoverabilityPointByMethod: additionalProperties: format: date-time type: string - description: The first recoverability point, stored as a date in RFC3339 - format, per backup method type + description: |- + The first recoverability point, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. type: object healthyPVC: description: List of all the PVCs not dangling nor initializing @@ -6175,7 +6179,10 @@ spec: format: int32 type: integer lastFailedBackup: - description: Stored as a date in RFC3339 format + description: |- + Last failed backup, stored as a date in RFC3339 format. + + Deprecated: the field is not set for backup plugins. type: string lastPromotionToken: description: |- @@ -6184,15 +6191,19 @@ spec: type: string lastSuccessfulBackup: description: |- - Last successful backup, stored as a date in RFC3339 format - This field is calculated from the content of LastSuccessfulBackupByMethod + Last successful backup, stored as a date in RFC3339 format. + This field is calculated from the content of LastSuccessfulBackupByMethod. + + Deprecated: the field is not set for backup plugins. type: string lastSuccessfulBackupByMethod: additionalProperties: format: date-time type: string - description: Last successful backup, stored as a date in RFC3339 format, - per backup method type + description: |- + Last successful backup, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. type: object latestGeneratedNode: description: ID of the latest generated node (used to avoid node name diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index d3853641a2..2b9e6d5116 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2148,36 +2148,41 @@ configmap data

 [HTML table markup omitted]
 firstRecoverabilityPoint
 string
 The first recoverability point, stored as a date in RFC3339 format.
-This field is calculated from the content of FirstRecoverabilityPointByMethod
+This field is calculated from the content of FirstRecoverabilityPointByMethod.
+
+Deprecated: the field is not set for backup plugins.
 firstRecoverabilityPointByMethod
 map[BackupMethod]meta/v1.Time
-The first recoverability point, stored as a date in RFC3339 format, per backup method type
+The first recoverability point, stored as a date in RFC3339 format, per backup method type.
+
+Deprecated: the field is not set for backup plugins.
 lastSuccessfulBackup
 string
-Last successful backup, stored as a date in RFC3339 format
-This field is calculated from the content of LastSuccessfulBackupByMethod
+Last successful backup, stored as a date in RFC3339 format.
+This field is calculated from the content of LastSuccessfulBackupByMethod.
+
+Deprecated: the field is not set for backup plugins.
 lastSuccessfulBackupByMethod
 map[BackupMethod]meta/v1.Time
-Last successful backup, stored as a date in RFC3339 format, per backup method type
+Last successful backup, stored as a date in RFC3339 format, per backup method type.
+
+Deprecated: the field is not set for backup plugins.
 lastFailedBackup
 string
-Stored as a date in RFC3339 format
+Last failed backup, stored as a date in RFC3339 format.
+
+Deprecated: the field is not set for backup plugins.
 cloudNativePGCommitHash
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 686a62f2c2..7191be7e68 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -293,6 +293,47 @@ spec: even if in-place updates are enabled (`ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES=true`). Your applications will need to reconnect to PostgreSQL after the upgrade. +#### Deprecation of backup metrics and fields in the `Cluster` `.status` + +With the transition to a backup and recovery agnostic approach based on CNPG-I +plugins in CloudNativePG, which began with version 1.26.0 for Barman Cloud, we +are starting the deprecation period for the following fields in the `.status` +section of the `Cluster` resource: + +- `firstRecoverabilityPoint` +- `firstRecoverabilityPointByMethod` +- `lastSuccessfulBackup` +- `lastSuccessfulBackupByMethod` +- `lastFailedBackup` + +The following Prometheus metrics are also deprecated: + +- `cnpg_collector_first_recoverability_point` +- `cnpg_collector_last_failed_backup_timestamp` +- `cnpg_collector_last_available_backup_timestamp` + +!!! Warning + If you have migrated to a plugin-based backup and recovery solution such as + Barman Cloud, these fields and metrics are no longer synchronized and will + not be updated. Users still relying on the in-core support for Barman Cloud + and volume snapshots can continue to use these fields for the time being. + +Under the new plugin-based approach, multiple backup methods can operate +simultaneously, each with its own timeline for backup and recovery. For +example, some plugins may provide snapshots without WAL archiving, while others +support continuous archiving. + +Because of this flexibility, maintaining centralized status fields in the +`Cluster` resource could be misleading or confusing, as they would not +accurately represent the state across all configured backup methods. +For this reason, these fields are being deprecated. + +Instead, each plugin is responsible for exposing its own backup status +information and providing metrics back to the instance manager for monitoring +and operational awareness. + +#### Declarative Hibernation in the `cnpg` plugin + In this release, the `cnpg` plugin for `kubectl` transitions from an imperative to a [declarative approach for cluster hibernation](declarative_hibernation.md). 
The `hibernate on` and `hibernate off` commands are now convenient shortcuts diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md index dddf3b135b..a63120d34e 100644 --- a/docs/src/monitoring.md +++ b/docs/src/monitoring.md @@ -220,15 +220,15 @@ cnpg_collector_up{cluster="cluster-example"} 1 # TYPE cnpg_collector_postgres_version gauge cnpg_collector_postgres_version{cluster="cluster-example",full="17.5"} 17.5 -# HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp +# HELP cnpg_collector_last_failed_backup_timestamp The last failed backup as a unix timestamp (Deprecated) # TYPE cnpg_collector_last_failed_backup_timestamp gauge cnpg_collector_last_failed_backup_timestamp 0 -# HELP cnpg_collector_last_available_backup_timestamp The last available backup as a unix timestamp +# HELP cnpg_collector_last_available_backup_timestamp The last available backup as a unix timestamp (Deprecated) # TYPE cnpg_collector_last_available_backup_timestamp gauge cnpg_collector_last_available_backup_timestamp 1.63238406e+09 -# HELP cnpg_collector_first_recoverability_point The first point of recoverability for the cluster as a unix timestamp +# HELP cnpg_collector_first_recoverability_point The first point of recoverability for the cluster as a unix timestamp (Deprecated) # TYPE cnpg_collector_first_recoverability_point gauge cnpg_collector_first_recoverability_point 1.63238406e+09 @@ -398,9 +398,16 @@ go_threads 18 `Major.Minor.Patch` can be found inside one of its label field named `full`. -!!! Note - `cnpg_collector_first_recoverability_point` and `cnpg_collector_last_available_backup_timestamp` - will be zero until your first backup to the object store. This is separate from the WAL archival. +!!! Warning + The metrics `cnpg_collector_last_failed_backup_timestamp`, + `cnpg_collector_last_available_backup_timestamp`, and + `cnpg_collector_first_recoverability_point` have been deprecated starting + from version 1.26. These metrics will continue to function with native backup + solutions such as in-core Barman Cloud (deprecated) and volume snapshots. Note + that for these cases, `cnpg_collector_first_recoverability_point` and + `cnpg_collector_last_available_backup_timestamp` will remain zero until the + first backup is completed to the object store. This is separate from WAL + archiving. ### User defined metrics diff --git a/docs/src/troubleshooting.md b/docs/src/troubleshooting.md index 745a703fe3..5aa521235f 100644 --- a/docs/src/troubleshooting.md +++ b/docs/src/troubleshooting.md @@ -60,17 +60,6 @@ Please refer to the [plugin document](kubectl-plugin.md) for complete instructio After getting the cluster manifest with the plugin, you should verify if backups are set up and working. -In a cluster with backups set up, you will find, in the cluster Status, the fields -`lastSuccessfulBackup` and `firstRecoverabilityPoint`. You should make sure -there is a recent `lastSuccessfulBackup`. - -A cluster lacking the `.spec.backup` stanza won't have backups. -An insistent message will appear in the PostgreSQL logs: - -``` -Backup not configured, skip WAL archiving. -``` - Before proceeding with troubleshooting operations, it may be advisable to perform an emergency backup depending on your findings regarding backups. Refer to the following section for instructions. 
diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 91db0ba383..50ded9a1d1 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -520,7 +520,7 @@ func (fullStatus *PostgresqlStatus) printBackupStatus() { return } status := tabby.New() - FPoR := cluster.Status.FirstRecoverabilityPoint + FPoR := cluster.Status.FirstRecoverabilityPoint //nolint:staticcheck if FPoR == "" { FPoR = "Not Available" } diff --git a/internal/controller/backup_controller_test.go b/internal/controller/backup_controller_test.go index 543bb00668..b5615a8958 100644 --- a/internal/controller/backup_controller_test.go +++ b/internal/controller/backup_controller_test.go @@ -336,9 +336,13 @@ var _ = Describe("update snapshot backup metadata", func() { }) It("should update cluster with no metadata", func(ctx context.Context) { + //nolint:staticcheck Expect(cluster.Status.FirstRecoverabilityPoint).To(BeEmpty()) + //nolint:staticcheck Expect(cluster.Status.FirstRecoverabilityPointByMethod).To(BeEmpty()) + //nolint:staticcheck Expect(cluster.Status.LastSuccessfulBackup).To(BeEmpty()) + //nolint:staticcheck Expect(cluster.Status.LastSuccessfulBackupByMethod).To(BeEmpty()) fakeClient := fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()). WithObjects(cluster). @@ -354,24 +358,34 @@ var _ = Describe("update snapshot backup metadata", func() { Name: cluster.Name, }, &updatedCluster) Expect(err).ToNot(HaveOccurred()) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPoint).To(Equal(twoHoursAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod). ToNot(HaveKey(apiv1.BackupMethodBarmanObjectStore)) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(twoHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackup).To(Equal(oneHourAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod). ToNot(HaveKey(apiv1.BackupMethodBarmanObjectStore)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(oneHourAgo)) }) It("should consider other methods when update the metadata", func(ctx context.Context) { + //nolint:staticcheck cluster.Status.FirstRecoverabilityPoint = threeHoursAgo.Format(time.RFC3339) + //nolint:staticcheck cluster.Status.FirstRecoverabilityPointByMethod = map[apiv1.BackupMethod]metav1.Time{ apiv1.BackupMethodBarmanObjectStore: threeHoursAgo, } + //nolint:staticcheck cluster.Status.LastSuccessfulBackup = now.Format(time.RFC3339) + //nolint:staticcheck cluster.Status.LastSuccessfulBackupByMethod = map[apiv1.BackupMethod]metav1.Time{ apiv1.BackupMethodBarmanObjectStore: now, } @@ -389,25 +403,35 @@ var _ = Describe("update snapshot backup metadata", func() { Name: cluster.Name, }, &updatedCluster) Expect(err).ToNot(HaveOccurred()) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPoint).To(Equal(threeHoursAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodBarmanObjectStore]). To(Equal(threeHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodVolumeSnapshot]). 
To(Equal(twoHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackup).To(Equal(now.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodBarmanObjectStore]). To(Equal(now)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(oneHourAgo)) }) It("should override other method metadata when appropriate", func(ctx context.Context) { + //nolint:staticcheck cluster.Status.FirstRecoverabilityPoint = oneHourAgo.Format(time.RFC3339) + //nolint:staticcheck cluster.Status.FirstRecoverabilityPointByMethod = map[apiv1.BackupMethod]metav1.Time{ apiv1.BackupMethodBarmanObjectStore: oneHourAgo, apiv1.BackupMethodVolumeSnapshot: now, } + //nolint:staticcheck cluster.Status.LastSuccessfulBackup = oneHourAgo.Format(time.RFC3339) + //nolint:staticcheck cluster.Status.LastSuccessfulBackupByMethod = map[apiv1.BackupMethod]metav1.Time{ apiv1.BackupMethodBarmanObjectStore: twoHoursAgo, apiv1.BackupMethodVolumeSnapshot: threeHoursAgo, @@ -426,14 +450,20 @@ var _ = Describe("update snapshot backup metadata", func() { Name: cluster.Name, }, &updatedCluster) Expect(err).ToNot(HaveOccurred()) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPoint).To(Equal(twoHoursAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodBarmanObjectStore]). To(Equal(oneHourAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.FirstRecoverabilityPointByMethod[apiv1.BackupMethodVolumeSnapshot]). To(Equal(twoHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackup).To(Equal(oneHourAgo.Format(time.RFC3339))) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodBarmanObjectStore]). To(Equal(twoHoursAgo)) + //nolint:staticcheck Expect(updatedCluster.Status.LastSuccessfulBackupByMethod[apiv1.BackupMethodVolumeSnapshot]). 
To(Equal(oneHourAgo)) }) diff --git a/pkg/management/postgres/backup_test.go b/pkg/management/postgres/backup_test.go index 87d654c81c..e521da2620 100644 --- a/pkg/management/postgres/backup_test.go +++ b/pkg/management/postgres/backup_test.go @@ -136,7 +136,7 @@ var _ = Describe("testing backup command", func() { It("should fail and update cluster and backup resource", func() { backupCommand.run(context.Background()) - Expect(cluster.Status.LastFailedBackup).ToNot(BeEmpty()) + Expect(cluster.Status.LastFailedBackup).ToNot(BeEmpty()) //nolint:staticcheck clusterCond := meta.FindStatusCondition(cluster.Status.Conditions, string(apiv1.ConditionBackup)) Expect(clusterCond.Status).To(Equal(metav1.ConditionFalse)) diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index efef347dc1..3950770992 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -176,19 +176,20 @@ func newMetrics() *metrics { Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "first_recoverability_point", - Help: "The first point of recoverability for the cluster as a unix timestamp", + Help: "The first point of recoverability for the cluster as a unix timestamp" + + " (Deprecated)", }), LastAvailableBackupTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "last_available_backup_timestamp", - Help: "The last available backup as a unix timestamp", + Help: "The last available backup as a unix timestamp (Deprecated)", }), LastFailedBackupTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, Subsystem: subsystem, Name: "last_failed_backup_timestamp", - Help: "The last failed backup as a unix timestamp", + Help: "The last failed backup as a unix timestamp (Deprecated)", }), FencingOn: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: PrometheusNamespace, @@ -506,21 +507,21 @@ func (e *Exporter) collectNodesUsed() { func (e *Exporter) collectFromPrimaryLastFailedBackupTimestamp() { const errorLabel = "Collect.LastFailedBackupTimestamp" e.setTimestampMetric(e.Metrics.LastFailedBackupTimestamp, errorLabel, func(cluster *apiv1.Cluster) string { - return cluster.Status.LastFailedBackup + return cluster.Status.LastFailedBackup //nolint:staticcheck }) } func (e *Exporter) collectFromPrimaryLastAvailableBackupTimestamp() { const errorLabel = "Collect.LastAvailableBackupTimestamp" e.setTimestampMetric(e.Metrics.LastAvailableBackupTimestamp, errorLabel, func(cluster *apiv1.Cluster) string { - return cluster.Status.LastSuccessfulBackup + return cluster.Status.LastSuccessfulBackup //nolint:staticcheck }) } func (e *Exporter) collectFromPrimaryFirstPointOnTimeRecovery() { const errorLabel = "Collect.FirstRecoverabilityPoint" e.setTimestampMetric(e.Metrics.FirstRecoverabilityPoint, errorLabel, func(cluster *apiv1.Cluster) string { - return cluster.Status.FirstRecoverabilityPoint + return cluster.Status.FirstRecoverabilityPoint //nolint:staticcheck }) } diff --git a/pkg/resources/status/backup.go b/pkg/resources/status/backup.go index 8850e90737..01f594b533 100644 --- a/pkg/resources/status/backup.go +++ b/pkg/resources/status/backup.go @@ -113,7 +113,7 @@ func FlagBackupAsFailed( cli, cluster, func(cluster *apiv1.Cluster) { - cluster.Status.LastFailedBackup = pgTime.GetCurrentTimestampWithFormat(time.RFC3339) + cluster.Status.LastFailedBackup = 
pgTime.GetCurrentTimestampWithFormat(time.RFC3339) //nolint:staticcheck }, ); err != nil { contextLogger.Error(err, "while patching cluster status with last failed backup") diff --git a/pkg/resources/status/backup_test.go b/pkg/resources/status/backup_test.go index 3727b4835a..3a4172c795 100644 --- a/pkg/resources/status/backup_test.go +++ b/pkg/resources/status/backup_test.go @@ -71,7 +71,7 @@ var _ = Describe("FlagBackupAsFailed", func() { Expect(backup.Status.Error).To(BeEquivalentTo("my sample error")) // Cluster status assertions - Expect(cluster.Status.LastFailedBackup).ToNot(BeEmpty()) + Expect(cluster.Status.LastFailedBackup).ToNot(BeEmpty()) //nolint:staticcheck for _, condition := range cluster.Status.Conditions { if condition.Type == string(apiv1.ConditionBackup) { Expect(condition.Status).To(BeEquivalentTo(metav1.ConditionFalse)) diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go index a8b538c653..0e7ce0c196 100644 --- a/tests/e2e/backup_restore_azure_test.go +++ b/tests/e2e/backup_restore_azure_test.go @@ -118,7 +118,7 @@ var _ = Describe("Azure - Backup and restore", Label(tests.LabelBackupRestore), }, 30).Should(BeNumerically(">=", 1)) Eventually(func() (string, error) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) }) @@ -485,7 +485,7 @@ func prepareClusterForPITROnAzureBlob( Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) //nolint:staticcheck }, 30).Should(Succeed()) }) diff --git a/tests/e2e/backup_restore_azurite_test.go b/tests/e2e/backup_restore_azurite_test.go index c4d6f9ddf1..645ef88ac8 100644 --- a/tests/e2e/backup_restore_azurite_test.go +++ b/tests/e2e/backup_restore_azurite_test.go @@ -298,7 +298,7 @@ func prepareClusterBackupOnAzurite( Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) //nolint:staticcheck }, 30).Should(Succeed()) }) backups.AssertBackupConditionInClusterStatus(env.Ctx, env.Client, namespace, clusterName) @@ -324,7 +324,7 @@ func prepareClusterForPITROnAzurite( Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) //nolint:staticcheck }, 30).Should(Succeed()) }) diff --git a/tests/e2e/backup_restore_minio_test.go b/tests/e2e/backup_restore_minio_test.go index 96664129ca..bf56b9a6ff 100644 --- a/tests/e2e/backup_restore_minio_test.go +++ b/tests/e2e/backup_restore_minio_test.go @@ -160,21 +160,21 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), if err != nil { return "", err } - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { cluster, err := clusterutils.Get(env.Ctx, 
env.Client, namespace, clusterName) if err != nil { return "", err } - return cluster.Status.LastSuccessfulBackup, err + return cluster.Status.LastSuccessfulBackup, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } - return cluster.Status.LastFailedBackup, err + return cluster.Status.LastFailedBackup, err //nolint:staticcheck }, 30).Should(BeEmpty()) }) @@ -321,7 +321,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), }, 60).Should(BeEquivalentTo(1)) Eventually(func() (string, error) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, targetClusterName) - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) }) }) @@ -375,7 +375,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), }, 60).Should(BeEquivalentTo(1)) Eventually(func() (string, error) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, targetClusterName) - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) }) @@ -439,7 +439,7 @@ var _ = Describe("MinIO - Backup and restore", Label(tests.LabelBackupRestore), // this is the second backup we take on the bucket Eventually(func() (string, error) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, customClusterName) - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) }) @@ -660,7 +660,7 @@ var _ = Describe("MinIO - Clusters Recovery from Barman Object Store", Label(tes if err != nil { return "", err } - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) }) @@ -806,7 +806,7 @@ func prepareClusterForPITROnMinio( Eventually(func(g Gomega) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) + g.Expect(cluster.Status.FirstRecoverabilityPoint).ToNot(BeEmpty()) //nolint:staticcheck }, 30).Should(Succeed()) }) diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go index 4c3b4c04eb..00b1cbd672 100644 --- a/tests/e2e/tablespaces_test.go +++ b/tests/e2e/tablespaces_test.go @@ -224,7 +224,7 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, if err != nil { return "", err } - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) }) }) @@ -316,21 +316,21 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces, if err != nil { return "", err } - return cluster.Status.FirstRecoverabilityPoint, err + return cluster.Status.FirstRecoverabilityPoint, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } - return cluster.Status.LastSuccessfulBackup, err + return cluster.Status.LastSuccessfulBackup, err //nolint:staticcheck }, 30).ShouldNot(BeEmpty()) Eventually(func() (string, error) { cluster, err := 
clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) if err != nil { return "", err } - return cluster.Status.LastFailedBackup, err + return cluster.Status.LastFailedBackup, err //nolint:staticcheck }, 30).Should(BeEmpty()) }) }) From 24bf64ca22f3ccc46340f5f5945be71cad149a3d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 16:41:55 +0200 Subject: [PATCH 717/836] chore(deps): update kindest/node docker tag to v1.33.2 (main) (#8092) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 7217b30390..767b1fb70b 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -33,7 +33,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.33.1 +KIND_NODE_DEFAULT_VERSION=v1.33.2 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 254baeaca8..138c9115f7 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -27,7 +27,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.33.1 +KIND_NODE_DEFAULT_VERSION=v1.33.2 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.17.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.3.0 EXTERNAL_PROVISIONER_VERSION=v5.3.0 From df29ff68a0e0b81635b83bf6c52112b9032e7ead Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 22 Jul 2025 17:26:35 +0200 Subject: [PATCH 718/836] test(logpipe): improve extra logs test reliability (#8088) This should address the flaky unit tests in main. Signed-off-by: Armando Ruocco --- pkg/podlogs/cluster_writer_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/podlogs/cluster_writer_test.go b/pkg/podlogs/cluster_writer_test.go index 45d2fb292a..432b053882 100644 --- a/pkg/podlogs/cluster_writer_test.go +++ b/pkg/podlogs/cluster_writer_test.go @@ -165,6 +165,8 @@ var _ = Describe("Cluster logging tests", func() { followWaiting := 150 * time.Millisecond ctx2, cancel := context.WithTimeout(ctx, 300*time.Millisecond) go func() { + // we always invoke Done, no matter what happens + defer wg.Done() defer GinkgoRecover() streamClusterLogs := ClusterWriter{ Cluster: cluster, @@ -175,8 +177,9 @@ var _ = Describe("Cluster logging tests", func() { Client: client, } err := streamClusterLogs.SingleStream(ctx2, &logBuffer) - Expect(err).To(Equal(context.DeadlineExceeded)) - wg.Done() + // we cannot reliably know whether the function will return before the context + // deadline, so we accept both nil and context.DeadlineExceeded + Expect(err).To(Or(BeNil(), Equal(context.DeadlineExceeded))) }() time.Sleep(350 * time.Millisecond) From 6e67bd436149c48888e7705b653dfe2443bf4050 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Tue, 22 Jul 2025 17:37:59 +0200 Subject: [PATCH 719/836] feat(cluster,status): add `PhaseFailurePlugin` phase (#7988) Previously, ambiguous or generic error conditions during interaction with plugins led to unclear user feedback, making troubleshooting difficult. This change introduces a dedicated failure phase, making the operator's behaviour more transparent.
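The mechanism is deliberately small: every public entry point of the plugin client wraps whatever error it returns in a private `pluginError` type, and the reconciler branches on `ContainsPluginError` to surface the dedicated phase. Condensed from the `cluster_controller.go` hunk below, the caller side looks roughly like this (`reconcileWithPlugins` is a hypothetical stand-in for the plugin-touching part of `Reconcile`):

```go
// Sketch of the detection flow; the full wiring is in the diff below.
// ContainsPluginError(nil) is false, so it is safe to call before the nil check.
res, err := reconcileWithPlugins(ctx, cluster)
if cnpgiClient.ContainsPluginError(err) {
	// Register a dedicated, user-visible phase instead of failing generically,
	// then requeue rather than bubbling the error up.
	if regErr := r.RegisterPhase(ctx, cluster, apiv1.PhaseFailurePlugin,
		fmt.Sprintf("Encountered an error while interacting with plugins: %s", err.Error()),
	); regErr != nil {
		contextLogger.Error(regErr, "unable to register phase", "outerErr", err.Error())
	}
	return ctrl.Result{RequeueAfter: 15 * time.Second}, nil
}
if err != nil {
	return ctrl.Result{}, err
}
return res, nil
```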
Closes #7867 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Leonardo Cecchi Co-authored-by: Marco Nenciarini Co-authored-by: Leonardo Cecchi --- api/v1/cluster_types.go | 3 + internal/cnpi/plugin/client/backup.go | 26 ++-- internal/cnpi/plugin/client/cluster.go | 32 ++++- internal/cnpi/plugin/client/create.go | 5 + internal/cnpi/plugin/client/errors.go | 73 +++++++++++ internal/cnpi/plugin/client/errors_test.go | 122 +++++++++++++++++++ internal/cnpi/plugin/client/lifecycle.go | 10 ++ internal/cnpi/plugin/client/postgres.go | 10 ++ internal/cnpi/plugin/client/postgres_test.go | 3 +- internal/cnpi/plugin/client/reconciler.go | 2 +- internal/cnpi/plugin/client/restore_job.go | 8 ++ internal/cnpi/plugin/client/wal.go | 18 +++ internal/controller/cluster_controller.go | 24 ++++ 13 files changed, 315 insertions(+), 21 deletions(-) create mode 100644 internal/cnpi/plugin/client/errors.go create mode 100644 internal/cnpi/plugin/client/errors_test.go diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 0fd9cd3c49..987a813d1d 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -640,6 +640,9 @@ const ( // loaded still PhaseUnknownPlugin = "Cluster cannot proceed to reconciliation due to an unknown plugin being required" + // PhaseFailurePlugin is triggered when the cluster cannot proceed to reconciliation due to an interaction failure + PhaseFailurePlugin = "Cluster cannot proceed to reconciliation due to an error while interacting with plugins" + // PhaseImageCatalogError is triggered when the cluster cannot select the image to // apply because of an invalid or incomplete catalog PhaseImageCatalogError = "Cluster has incomplete or invalid image catalog" diff --git a/internal/cnpi/plugin/client/backup.go b/internal/cnpi/plugin/client/backup.go index 5178aa3a53..6103462380 100644 --- a/internal/cnpi/plugin/client/backup.go +++ b/internal/cnpi/plugin/client/backup.go @@ -22,7 +22,6 @@ package client import ( "context" "encoding/json" - "errors" "fmt" "slices" "time" @@ -33,20 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var ( - // ErrPluginNotLoaded is raised when the plugin that should manage the backup - // have not been loaded inside the cluster - ErrPluginNotLoaded = errors.New("plugin not loaded") - - // ErrPluginNotSupportBackup is raised when the plugin that should manage the backup - // doesn't support the Backup service - ErrPluginNotSupportBackup = errors.New("plugin does not support Backup service") - - // ErrPluginNotSupportBackupEndpoint is raised when the plugin that should manage the backup - // doesn't support the Backup RPC endpoint - ErrPluginNotSupportBackupEndpoint = errors.New("plugin does not support the Backup RPC call") -) - // BackupResponse is the status of a newly created backup. 
This is used as a return // type for the Backup RPC Call type BackupResponse struct { @@ -100,6 +85,17 @@ func (data *data) Backup( backupObject client.Object, pluginName string, parameters map[string]string, +) (*BackupResponse, error) { + b, err := data.innerBackup(ctx, cluster, backupObject, pluginName, parameters) + return b, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerBackup( + ctx context.Context, + cluster client.Object, + backupObject client.Object, + pluginName string, + parameters map[string]string, ) (*BackupResponse, error) { contextLogger := log.FromContext(ctx) diff --git a/internal/cnpi/plugin/client/cluster.go b/internal/cnpi/plugin/client/cluster.go index 0742dba8ca..6429cb8ba7 100644 --- a/internal/cnpi/plugin/client/cluster.go +++ b/internal/cnpi/plugin/client/cluster.go @@ -22,7 +22,6 @@ package client import ( "context" "encoding/json" - "errors" "fmt" "slices" @@ -34,6 +33,11 @@ import ( ) func (data *data) MutateCluster(ctx context.Context, object client.Object, mutatedObject client.Object) error { + err := data.innerMutateCluster(ctx, object, mutatedObject) + return wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerMutateCluster(ctx context.Context, object client.Object, mutatedObject client.Object) error { contextLogger := log.FromContext(ctx) serializedObject, err := json.Marshal(object) @@ -96,11 +100,16 @@ func (data *data) MutateCluster(ctx context.Context, object client.Object, mutat } var ( - errInvalidJSON = errors.New("invalid json") - errSetStatusInCluster = errors.New("SetStatusInCluster invocation failed") + errInvalidJSON = newPluginError("invalid json") + errSetStatusInCluster = newPluginError("SetStatusInCluster invocation failed") ) func (data *data) SetStatusInCluster(ctx context.Context, cluster client.Object) (map[string]string, error) { + m, err := data.innerSetStatusInCluster(ctx, cluster) + return m, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerSetStatusInCluster(ctx context.Context, cluster client.Object) (map[string]string, error) { contextLogger := log.FromContext(ctx) serializedObject, err := json.Marshal(cluster) if err != nil { @@ -150,6 +159,14 @@ func (data *data) SetStatusInCluster(ctx context.Context, cluster client.Object) func (data *data) ValidateClusterCreate( ctx context.Context, object client.Object, +) (field.ErrorList, error) { + result, err := data.innerValidateClusterCreate(ctx, object) + return result, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerValidateClusterCreate( + ctx context.Context, + object client.Object, ) (field.ErrorList, error) { contextLogger := log.FromContext(ctx) @@ -192,6 +209,15 @@ func (data *data) ValidateClusterUpdate( ctx context.Context, oldObject client.Object, newObject client.Object, +) (field.ErrorList, error) { + result, err := data.innerValidateClusterUpdate(ctx, oldObject, newObject) + return result, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerValidateClusterUpdate( + ctx context.Context, + oldObject client.Object, + newObject client.Object, ) (field.ErrorList, error) { contextLogger := log.FromContext(ctx) diff --git a/internal/cnpi/plugin/client/create.go b/internal/cnpi/plugin/client/create.go index a17c3a1cdb..fc08a0f707 100644 --- a/internal/cnpi/plugin/client/create.go +++ b/internal/cnpi/plugin/client/create.go @@ -31,6 +31,11 @@ import ( // NewClient creates a new CNPI client func NewClient(ctx context.Context, enabledPlugin *stringset.Data) (Client, error) { + cli, err := innerNewClient(ctx, enabledPlugin) + 
return cli, wrapAsPluginErrorIfNeeded(err) +} + +func innerNewClient(ctx context.Context, enabledPlugin *stringset.Data) (Client, error) { contextLogger := log.FromContext(ctx) plugins := repository.New() diff --git a/internal/cnpi/plugin/client/errors.go b/internal/cnpi/plugin/client/errors.go new file mode 100644 index 0000000000..ad99ed2bca --- /dev/null +++ b/internal/cnpi/plugin/client/errors.go @@ -0,0 +1,73 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import "errors" + +var ( + // ErrPluginNotLoaded is raised when the plugin that should manage the backup + // have not been loaded inside the cluster + ErrPluginNotLoaded = newPluginError("plugin not loaded") + + // ErrPluginNotSupportBackup is raised when the plugin that should manage the backup + // doesn't support the Backup service + ErrPluginNotSupportBackup = newPluginError("plugin does not support Backup service") + + // ErrPluginNotSupportBackupEndpoint is raised when the plugin that should manage the backup + // doesn't support the Backup RPC endpoint + ErrPluginNotSupportBackupEndpoint = newPluginError("plugin does not support the Backup RPC call") +) + +type pluginError struct { + innerErr error +} + +func (e *pluginError) Error() string { + return e.innerErr.Error() +} + +func (e *pluginError) Unwrap() error { + return e.innerErr +} + +func newPluginError(msg string) error { + return &pluginError{innerErr: errors.New(msg)} +} + +// ContainsPluginError checks if the provided error chain contains a plugin error. +func ContainsPluginError(err error) bool { + if err == nil { + return false + } + + var pluginErr *pluginError + return errors.As(err, &pluginErr) +} + +func wrapAsPluginErrorIfNeeded(err error) error { + if err == nil { + return nil + } + if ContainsPluginError(err) { + return err + } + + return &pluginError{innerErr: err} +} diff --git a/internal/cnpi/plugin/client/errors_test.go b/internal/cnpi/plugin/client/errors_test.go new file mode 100644 index 0000000000..9c68a3da19 --- /dev/null +++ b/internal/cnpi/plugin/client/errors_test.go @@ -0,0 +1,122 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "errors" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("wrapAsPluginErrorIfNeeded", func() { + It("should return nil when err is nil", func() { + result := wrapAsPluginErrorIfNeeded(nil) + Expect(result).ToNot(HaveOccurred()) + }) + + It("should return the same error when it already contains a plugin error", func() { + originalErr := newPluginError("original plugin error") + wrappedErr := fmt.Errorf("wrapped: %w", originalErr) + + result := wrapAsPluginErrorIfNeeded(wrappedErr) + Expect(result).To(Equal(wrappedErr)) + Expect(ContainsPluginError(result)).To(BeTrue()) + }) + + It("should wrap a non-plugin error as a plugin error", func() { + originalErr := errors.New("some regular error") + + result := wrapAsPluginErrorIfNeeded(originalErr) + Expect(result).ToNot(Equal(originalErr)) + Expect(ContainsPluginError(result)).To(BeTrue()) + Expect(result.Error()).To(Equal(originalErr.Error())) + }) + + It("should wrap a nested non-plugin error as a plugin error", func() { + originalErr := errors.New("base error") + wrappedErr := fmt.Errorf("context: %w", originalErr) + + result := wrapAsPluginErrorIfNeeded(wrappedErr) + Expect(result).ToNot(Equal(wrappedErr)) + Expect(ContainsPluginError(result)).To(BeTrue()) + Expect(result.Error()).To(Equal(wrappedErr.Error())) + Expect(errors.Is(result, originalErr)).To(BeTrue()) + }) + + It("should not double-wrap an already wrapped plugin error", func() { + originalErr := errors.New("base error") + pluginErr := &pluginError{innerErr: originalErr} + wrappedPluginErr := fmt.Errorf("additional context: %w", pluginErr) + + result := wrapAsPluginErrorIfNeeded(wrappedPluginErr) + Expect(result).To(Equal(wrappedPluginErr)) + Expect(ContainsPluginError(result)).To(BeTrue()) + }) +}) + +var _ = Describe("ContainsPluginError", func() { + It("should return false when err is nil", func() { + result := ContainsPluginError(nil) + Expect(result).To(BeFalse()) + }) + + It("should return true when error is a direct plugin error", func() { + pluginErr := newPluginError("test plugin error") + + result := ContainsPluginError(pluginErr) + Expect(result).To(BeTrue()) + }) + + It("should return true when plugin error is wrapped with fmt.Errorf", func() { + pluginErr := newPluginError("original plugin error") + wrappedErr := fmt.Errorf("context: %w", pluginErr) + + result := ContainsPluginError(wrappedErr) + Expect(result).To(BeTrue()) + }) + + It("should return true when plugin error is deeply nested", func() { + pluginErr := newPluginError("base plugin error") + wrappedOnce := fmt.Errorf("level 1: %w", pluginErr) + wrappedTwice := fmt.Errorf("level 2: %w", wrappedOnce) + + result := ContainsPluginError(wrappedTwice) + Expect(result).To(BeTrue()) + }) + + It("should return false when error chain contains no plugin error", func() { + baseErr := errors.New("base error") + wrappedErr := fmt.Errorf("wrapped: %w", baseErr) + + result := ContainsPluginError(wrappedErr) + Expect(result).To(BeFalse()) + }) + + It("should return true when error chain contains plugin error mixed with other errors", func() { + baseErr := errors.New("base error") + pluginErr := &pluginError{innerErr: baseErr} + wrappedErr := fmt.Errorf("additional context: %w", pluginErr) + + result := ContainsPluginError(wrappedErr) + Expect(result).To(BeTrue()) + }) +}) diff --git a/internal/cnpi/plugin/client/lifecycle.go b/internal/cnpi/plugin/client/lifecycle.go index 98e3a0ddd7..1ba289e890 100644 --- a/internal/cnpi/plugin/client/lifecycle.go +++ b/internal/cnpi/plugin/client/lifecycle.go @@ -49,6 +49,16 @@ func (data *data) 
LifecycleHook( operationType plugin.OperationVerb, cluster client.Object, object client.Object, +) (client.Object, error) { + obj, err := data.innerLifecycleHook(ctx, operationType, cluster, object) + return obj, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerLifecycleHook( + ctx context.Context, + operationType plugin.OperationVerb, + cluster client.Object, + object client.Object, ) (client.Object, error) { contextLogger := log.FromContext(ctx).WithName("lifecycle_hook") diff --git a/internal/cnpi/plugin/client/postgres.go b/internal/cnpi/plugin/client/postgres.go index a3259a666c..f25d2b6674 100644 --- a/internal/cnpi/plugin/client/postgres.go +++ b/internal/cnpi/plugin/client/postgres.go @@ -34,6 +34,16 @@ func (data *data) EnrichConfiguration( cluster client.Object, config map[string]string, operationType postgresClient.OperationType_Type, +) (map[string]string, error) { + m, err := data.innerEnrichConfiguration(ctx, cluster, config, operationType) + return m, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerEnrichConfiguration( + ctx context.Context, + cluster client.Object, + config map[string]string, + operationType postgresClient.OperationType_Type, ) (map[string]string, error) { tempConfig := config diff --git a/internal/cnpi/plugin/client/postgres_test.go b/internal/cnpi/plugin/client/postgres_test.go index 80f426cd86..187320fcf4 100644 --- a/internal/cnpi/plugin/client/postgres_test.go +++ b/internal/cnpi/plugin/client/postgres_test.go @@ -21,7 +21,6 @@ package client import ( "context" - "errors" "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "google.golang.org/grpc" @@ -119,7 +118,7 @@ var _ = Describe("EnrichConfiguration", func() { }) It("should return error when plugin returns error", func(ctx SpecContext) { - expectedErr := errors.New("plugin error") + expectedErr := newPluginError("plugin error") postgresClient := &fakePostgresClient{ enrichConfigError: expectedErr, diff --git a/internal/cnpi/plugin/client/reconciler.go b/internal/cnpi/plugin/client/reconciler.go index b234ef48fb..cc0eded125 100644 --- a/internal/cnpi/plugin/client/reconciler.go +++ b/internal/cnpi/plugin/client/reconciler.go @@ -61,7 +61,7 @@ func newReconcilerRequeueResult(identifier string, after int64) ReconcilerHookRe // newReconcilerErrorResult creates a new result from an error func newReconcilerErrorResult(identifier string, err error) ReconcilerHookResult { - return ReconcilerHookResult{Err: err, StopReconciliation: true, Identifier: identifier} + return ReconcilerHookResult{Err: wrapAsPluginErrorIfNeeded(err), StopReconciliation: true, Identifier: identifier} } func (data *data) PreReconcile(ctx context.Context, cluster client.Object, object client.Object) ReconcilerHookResult { diff --git a/internal/cnpi/plugin/client/restore_job.go b/internal/cnpi/plugin/client/restore_job.go index 86fc0751ae..2ea986d120 100644 --- a/internal/cnpi/plugin/client/restore_job.go +++ b/internal/cnpi/plugin/client/restore_job.go @@ -40,6 +40,14 @@ type gvkEnsurer interface { func (data *data) Restore( ctx context.Context, cluster gvkEnsurer, +) (*restore.RestoreResponse, error) { + r, err := data.innerRestore(ctx, cluster) + return r, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerRestore( + ctx context.Context, + cluster gvkEnsurer, ) (*restore.RestoreResponse, error) { cluster.EnsureGVKIsPresent() diff --git a/internal/cnpi/plugin/client/wal.go b/internal/cnpi/plugin/client/wal.go index e9d36b55b8..7e86ebd5be 100644 --- a/internal/cnpi/plugin/client/wal.go +++ 
b/internal/cnpi/plugin/client/wal.go @@ -35,6 +35,14 @@ func (data *data) ArchiveWAL( ctx context.Context, cluster client.Object, sourceFileName string, +) error { + return wrapAsPluginErrorIfNeeded(data.innerArchiveWAL(ctx, cluster, sourceFileName)) +} + +func (data *data) innerArchiveWAL( + ctx context.Context, + cluster client.Object, + sourceFileName string, ) error { contextLogger := log.FromContext(ctx) @@ -79,6 +87,16 @@ func (data *data) RestoreWAL( cluster client.Object, sourceWALName string, destinationFileName string, +) (bool, error) { + b, err := data.innerRestoreWAL(ctx, cluster, sourceWALName, destinationFileName) + return b, wrapAsPluginErrorIfNeeded(err) +} + +func (data *data) innerRestoreWAL( + ctx context.Context, + cluster client.Object, + sourceWALName string, + destinationFileName string, ) (bool, error) { var errorCollector error diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 9cd46f5719..08556196b4 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -207,6 +207,15 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct ) } + if regErr := r.RegisterPhase( + ctx, + cluster, + apiv1.PhaseFailurePlugin, + fmt.Sprintf("Error while discovering plugins: %s", err.Error()), + ); regErr != nil { + contextLogger.Error(regErr, "unable to register phase", "outerErr", err.Error()) + } + contextLogger.Error(err, "Error loading plugins, retrying") return ctrl.Result{}, err } @@ -224,6 +233,21 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct if errors.Is(err, utils.ErrTerminateLoop) { return ctrl.Result{}, nil } + + // This code assumes that we always end the reconciliation loop if we encounter an error. + // If that assumption is false, this code could overwrite an error phase. + if cnpgiClient.ContainsPluginError(err) { + if regErr := r.RegisterPhase( + ctx, + cluster, + apiv1.PhaseFailurePlugin, + fmt.Sprintf("Encountered an error while interacting with plugins: %s", err.Error()), + ); regErr != nil { + contextLogger.Error(regErr, "unable to register phase", "outerErr", err.Error()) + } + return ctrl.Result{RequeueAfter: 15 * time.Second}, nil + } + if err != nil { return ctrl.Result{}, err } From 978cd66f8d7c98e9fd77e90928150d4ff6d866dc Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 23 Jul 2025 14:22:44 +0200 Subject: [PATCH 720/836] feat(instance): add cnpgi `metrics` capabilities to the instance webserver (#8033) This commit introduces support for plugins to expose their metrics through the instance metrics web server. By doing so, the metrics collected or generated by a plugin become directly accessible alongside other operator metrics, improving observability and operational integration.
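At a high level, the bridge added here works in two RPC steps per scrape: the instance asks every plugin advertising the metrics capability to define its metric descriptors, then to collect the current samples, and each sample is matched back to its descriptor by fully qualified name. Condensed from the `pluginCollector` and `sendPluginMetrics` code below (`cli` is the plugin client, `ch` the channel handed in by the Prometheus exporter):

```go
// Sketch of the collect path; error handling shortened for clarity.
defs, err := cli.GetMetricsDefinitions(ctx, cluster) // Define RPC, once per plugin
if err != nil {
	return err
}
samples, err := cli.CollectMetrics(ctx, cluster) // Collect RPC, once per plugin
if err != nil {
	return err
}
for _, sample := range samples {
	def := defs.Get(sample.FqName) // match sample to descriptor by fqName
	if def == nil {
		return fmt.Errorf("metric definition not found for fqName: %s", sample.FqName)
	}
	m, err := prometheus.NewConstMetric(def.Desc, def.ValueType, sample.Value, sample.VariableLabels...)
	if err != nil {
		return fmt.Errorf("failed to create metric %s: %w", sample.FqName, err)
	}
	ch <- m
}
return nil
```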
Relates #7812 Signed-off-by: Armando Ruocco --- go.mod | 8 +- go.sum | 36 ++--- internal/cmd/manager/instance/run/cmd.go | 3 +- internal/cnpi/plugin/client/contracts.go | 1 + internal/cnpi/plugin/client/interfaces.go | 39 ----- internal/cnpi/plugin/client/metrics.go | 145 ++++++++++++++++++ internal/cnpi/plugin/client/postgres.go | 12 ++ internal/cnpi/plugin/client/suite_test.go | 17 ++ internal/cnpi/plugin/connection/connection.go | 43 ++++++ pkg/management/postgres/metrics/collector.go | 108 +++++++++++++ .../postgres/metrics/collector_test.go | 126 +++++++++++++++ .../webserver/metricserver/pg_collector.go | 26 +++- .../metricserver/pg_collector_test.go | 12 +- 13 files changed, 509 insertions(+), 67 deletions(-) delete mode 100644 internal/cnpi/plugin/client/interfaces.go create mode 100644 internal/cnpi/plugin/client/metrics.go diff --git a/go.mod b/go.mod index e40b7adf54..cf292975f1 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.3.1 - github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b + github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250723093238-963c368523c2 github.com/cloudnative-pg/machinery v0.3.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 @@ -36,7 +36,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/term v0.33.0 - google.golang.org/grpc v1.73.0 + google.golang.org/grpc v1.74.2 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.3 k8s.io/apiextensions-apiserver v0.33.3 @@ -100,14 +100,14 @@ require ( go.yaml.in/yaml/v3 v3.0.3 // indirect golang.org/x/crypto v0.39.0 // indirect golang.org/x/net v0.41.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect golang.org/x/sys v0.34.0 // indirect golang.org/x/text v0.27.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.34.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 669da485c8..30fc7f3c1d 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4Yr github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= github.com/cloudnative-pg/barman-cloud v0.3.1 h1:kzkY77k2lN/caoyh7ibXDSZjJeSJTNvnVt6Gfa8Iq5M= github.com/cloudnative-pg/barman-cloud v0.3.1/go.mod h1:4HL3AjY9oEl2Ed0HSkyvTZEQPhwyFOaAnuCz9lfVeYQ= -github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b h1:B7Ugp5epMIDNPe0bIOcqpErKkiQfuCM3nXoGh4GiPHM= -github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250609075931-7cb57628933b/go.mod h1:FUA8ELMnqHpA2MIOeG425sX7D+u3m8SD/oFd1CnXSEw= +github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250723093238-963c368523c2 h1:uLooqDE54OE0tBMdwHws1CwD3X4098K9oZyNt3xdQuE= +github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250723093238-963c368523c2/go.mod h1:pJaTIy0d6Yd3CA554AHZD81CJM7/jiDNmk7BFTMb3Fk= github.com/cloudnative-pg/machinery v0.3.0 h1:t1DzXGeK3RUYXS5KWIdIk30oh4EmwxZ+6sWM4wJDBac= github.com/cloudnative-pg/machinery v0.3.0/go.mod 
h1:6NhajP3JlioeecYceVuOBLD2lfsJty8qSZsFpSb/vmA= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -198,16 +198,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -235,8 +235,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -269,10 +269,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index 82b118f726..a34f9410f4 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -55,6 +55,7 @@ import ( "github.com/cloudnative-pg/cloudnative-pg/pkg/management" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/logpipe" + "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/metrics" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/webserver/metricserver" pg "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" @@ -218,7 +219,7 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { } defer pluginRepository.Close() - metricsExporter := metricserver.NewExporter(instance) + metricsExporter := metricserver.NewExporter(instance, metrics.NewPluginCollector(pluginRepository)) reconciler := controller.NewInstanceReconciler(instance, mgr.GetClient(), metricsExporter, pluginRepository) err = ctrl.NewControllerManagedBy(mgr). For(&apiv1.Cluster{}). 
diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go index 22594ea65f..03bf7fe32d 100644 --- a/internal/cnpi/plugin/client/contracts.go +++ b/internal/cnpi/plugin/client/contracts.go @@ -42,6 +42,7 @@ type Client interface { BackupCapabilities RestoreJobHooksCapabilities PostgresConfigurationCapabilities + MetricsCapabilities } // SetPluginClientInContext records the plugin client in the given context diff --git a/internal/cnpi/plugin/client/interfaces.go b/internal/cnpi/plugin/client/interfaces.go deleted file mode 100644 index 162d341ec6..0000000000 --- a/internal/cnpi/plugin/client/interfaces.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright © contributors to CloudNativePG, established as -CloudNativePG a Series of LF Projects, LLC. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package client - -import ( - "context" - - "github.com/cloudnative-pg/cnpg-i/pkg/postgres" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// PostgresConfigurationCapabilities is the interface that defines the -// capabilities of interacting with PostgreSQL. -type PostgresConfigurationCapabilities interface { - // EnrichConfiguration is the method that enriches the PostgreSQL configuration - EnrichConfiguration( - ctx context.Context, - cluster client.Object, - config map[string]string, - operationType postgres.OperationType_Type, - ) (map[string]string, error) -} diff --git a/internal/cnpi/plugin/client/metrics.go b/internal/cnpi/plugin/client/metrics.go new file mode 100644 index 0000000000..765c221f6b --- /dev/null +++ b/internal/cnpi/plugin/client/metrics.go @@ -0,0 +1,145 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "encoding/json" + "errors" + "slices" + + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" + "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MetricsCapabilities defines the interface for plugins that can provide metrics capabilities. +type MetricsCapabilities interface { + // GetMetricsDefinitions retrieves the definitions of the metrics that will be collected from the plugins. + GetMetricsDefinitions(ctx context.Context, cluster client.Object) (PluginMetricDefinitions, error) + // CollectMetrics collects the metrics from the plugins. 
+ CollectMetrics(ctx context.Context, cluster client.Object) ([]*metrics.CollectMetric, error) +} + +// PluginMetricDefinitions is a slice of PluginMetricDefinition, representing the metrics definitions returned +// by plugins. +type PluginMetricDefinitions []PluginMetricDefinition + +// Get returns the PluginMetricDefinition with the given fully qualified name (FqName), returns nil if not found. +func (p PluginMetricDefinitions) Get(fqName string) *PluginMetricDefinition { + for _, metric := range p { + if metric.FqName == fqName { + return &metric + } + } + + return nil +} + +// PluginMetricDefinition represents a metric definition returned by a plugin. +type PluginMetricDefinition struct { + FqName string + ValueType prometheus.ValueType + Desc *prometheus.Desc +} + +func (data *data) GetMetricsDefinitions( + ctx context.Context, + cluster client.Object, +) (PluginMetricDefinitions, error) { + contextLogger := log.FromContext(ctx).WithName("plugin_metrics_definitions") + + clusterDefinition, marshalErr := json.Marshal(cluster) + if marshalErr != nil { + return nil, marshalErr + } + + var results PluginMetricDefinitions + + for idx := range data.plugins { + plugin := data.plugins[idx] + if !slices.Contains(plugin.MetricsCapabilities(), metrics.MetricsCapability_RPC_TYPE_METRICS) { + contextLogger.Debug("skipping plugin", "plugin", plugin.Name()) + continue + } + + res, err := plugin.MetricsClient().Define(ctx, &metrics.DefineMetricsRequest{ClusterDefinition: clusterDefinition}) + if err != nil { + contextLogger.Error(err, "failed to get metrics definitions from plugin", "plugin", plugin.Name()) + return nil, err + } + if res == nil { + err := errors.New("plugin returned nil metrics definitions while having metrics capability") + contextLogger.Error(err, "while invoking metrics definitions", "plugin", plugin.Name()) + return nil, err + } + + contextLogger.Debug("plugin returned metrics definitions", "plugin", plugin.Name(), "metrics", res.Metrics) + for _, element := range res.Metrics { + desc := prometheus.NewDesc(element.FqName, element.Help, element.VariableLabels, element.ConstLabels) + results = append(results, PluginMetricDefinition{ + FqName: element.FqName, + Desc: desc, + ValueType: prometheus.ValueType(element.ValueType.Type), + }) + } + } + + return results, nil +} + +func (data *data) CollectMetrics( + ctx context.Context, + cluster client.Object, +) ([]*metrics.CollectMetric, error) { + contextLogger := log.FromContext(ctx).WithName("plugin_metrics_collect") + + clusterDefinition, marshalErr := json.Marshal(cluster) + if marshalErr != nil { + return nil, marshalErr + } + + var results []*metrics.CollectMetric + + for idx := range data.plugins { + plugin := data.plugins[idx] + if !slices.Contains(plugin.MetricsCapabilities(), metrics.MetricsCapability_RPC_TYPE_METRICS) { + contextLogger.Debug("skipping plugin", "plugin", plugin.Name()) + continue + } + + res, err := plugin.MetricsClient().Collect(ctx, &metrics.CollectMetricsRequest{ClusterDefinition: clusterDefinition}) + if err != nil { + contextLogger.Error(err, "failed to collect metrics from plugin", "plugin", plugin.Name()) + return nil, err + } + if res == nil { + err := errors.New("plugin returned nil metrics while having metrics capability") + contextLogger.Error(err, "while invoking metrics collection", "plugin", plugin.Name()) + return nil, err + } + + contextLogger.Debug("plugin returned metrics", "plugin", plugin.Name(), "metrics", res.Metrics) + results = append(results, res.Metrics...) 
+ } + + return results, nil +} diff --git a/internal/cnpi/plugin/client/postgres.go b/internal/cnpi/plugin/client/postgres.go index f25d2b6674..4679cfc4ba 100644 --- a/internal/cnpi/plugin/client/postgres.go +++ b/internal/cnpi/plugin/client/postgres.go @@ -29,6 +29,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +// PostgresConfigurationCapabilities is the interface that defines the +// capabilities of interacting with PostgreSQL. +type PostgresConfigurationCapabilities interface { + // EnrichConfiguration is the method that enriches the PostgreSQL configuration + EnrichConfiguration( + ctx context.Context, + cluster client.Object, + config map[string]string, + operationType postgresClient.OperationType_Type, + ) (map[string]string, error) +} + func (data *data) EnrichConfiguration( ctx context.Context, cluster client.Object, diff --git a/internal/cnpi/plugin/client/suite_test.go b/internal/cnpi/plugin/client/suite_test.go index 6773c09b15..bb6f18a801 100644 --- a/internal/cnpi/plugin/client/suite_test.go +++ b/internal/cnpi/plugin/client/suite_test.go @@ -26,6 +26,7 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/backup" "github.com/cloudnative-pg/cnpg-i/pkg/identity" "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" "github.com/cloudnative-pg/cnpg-i/pkg/operator" postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/cnpg-i/pkg/reconciler" @@ -109,6 +110,22 @@ type fakeConnection struct { operatorClient *fakeOperatorClient } +func (f *fakeConnection) MetricsClient() metrics.MetricsClient { + panic("implement me") +} + +func (f *fakeConnection) MetricsCapabilities() []metrics.MetricsCapability_RPC_Type { + panic("implement me") +} + +func (f *fakeConnection) GetMetricsDefinitions(context.Context, k8client.Object) (PluginMetricDefinitions, error) { + panic("implement me") +} + +func (f *fakeConnection) CollectMetrics(context.Context, k8client.Object) ([]*metrics.CollectMetric, error) { + panic("implement me") +} + func (f *fakeConnection) PostgresClient() postgresClient.PostgresClient { panic("implement me") } diff --git a/internal/cnpi/plugin/connection/connection.go b/internal/cnpi/plugin/connection/connection.go index 44a096d409..f03dcab1d5 100644 --- a/internal/cnpi/plugin/connection/connection.go +++ b/internal/cnpi/plugin/connection/connection.go @@ -29,6 +29,7 @@ import ( "github.com/cloudnative-pg/cnpg-i/pkg/backup" "github.com/cloudnative-pg/cnpg-i/pkg/identity" "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle" + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" "github.com/cloudnative-pg/cnpg-i/pkg/operator" postgresClient "github.com/cloudnative-pg/cnpg-i/pkg/postgres" "github.com/cloudnative-pg/cnpg-i/pkg/reconciler" @@ -65,6 +66,7 @@ type Interface interface { ReconcilerHooksClient() reconciler.ReconcilerHooksClient RestoreJobHooksClient() restore.RestoreJobHooksClient PostgresClient() postgresClient.PostgresClient + MetricsClient() metrics.MetricsClient PluginCapabilities() []identity.PluginCapability_Service_Type OperatorCapabilities() []operator.OperatorCapability_RPC_Type @@ -74,6 +76,7 @@ type Interface interface { ReconcilerCapabilities() []reconciler.ReconcilerHooksCapability_Kind RestoreJobHooksCapabilities() []restore.RestoreJobHooksCapability_Kind PostgresCapabilities() []postgresClient.PostgresCapability_RPC_Type + MetricsCapabilities() []metrics.MetricsCapability_RPC_Type Ping(ctx context.Context) error Close() error @@ -89,6 +92,7 @@ type data struct { 
reconcilerHooksClient reconciler.ReconcilerHooksClient restoreJobHooksClient restore.RestoreJobHooksClient postgresClient postgresClient.PostgresClient + metricsClient metrics.MetricsClient name string version string @@ -100,6 +104,7 @@ type data struct { reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind restoreJobHooksCapabilities []restore.RestoreJobHooksCapability_Kind postgresCapabilities []postgresClient.PostgresCapability_RPC_Type + metricsCapabilities []metrics.MetricsCapability_RPC_Type } func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, error) { @@ -128,6 +133,7 @@ func newPluginDataFromConnection(ctx context.Context, connection Handler) (data, reconcilerHooksClient: reconciler.NewReconcilerHooksClient(connection), restoreJobHooksClient: restore.NewRestoreJobHooksClient(connection), postgresClient: postgresClient.NewPostgresClient(connection), + metricsClient: metrics.NewMetricsClient(connection), } return result, err @@ -291,6 +297,27 @@ func (pluginData *data) loadPostgresCapabilities(ctx context.Context) error { return nil } +func (pluginData *data) loadMetricsCapabilities(ctx context.Context) error { + var metricsCapabilitiesResponse *metrics.MetricsCapabilitiesResult + var err error + + if metricsCapabilitiesResponse, err = pluginData.metricsClient.GetCapabilities( + ctx, + &metrics.MetricsCapabilitiesRequest{}, + ); err != nil { + return fmt.Errorf("while querying plugin metrics capabilities: %w", err) + } + + pluginData.metricsCapabilities = make( + []metrics.MetricsCapability_RPC_Type, + len(metricsCapabilitiesResponse.Capabilities)) + for i := range pluginData.metricsCapabilities { + pluginData.metricsCapabilities[i] = metricsCapabilitiesResponse.Capabilities[i].GetRpc().Type + } + + return nil +} + // Metadata extracts the plugin metadata reading from // the internal metadata func (pluginData *data) Metadata() Metadata { @@ -365,6 +392,10 @@ func (pluginData *data) PostgresClient() postgresClient.PostgresClient { return pluginData.postgresClient } +func (pluginData *data) MetricsClient() metrics.MetricsClient { + return pluginData.metricsClient +} + func (pluginData *data) PluginCapabilities() []identity.PluginCapability_Service_Type { return pluginData.capabilities } @@ -397,6 +428,10 @@ func (pluginData *data) PostgresCapabilities() []postgresClient.PostgresCapabili return pluginData.postgresCapabilities } +func (pluginData *data) MetricsCapabilities() []metrics.MetricsCapability_RPC_Type { + return pluginData.metricsCapabilities +} + func (pluginData *data) Ping(ctx context.Context) error { _, err := pluginData.identityClient.Probe(ctx, &identity.ProbeRequest{}) return err @@ -471,5 +506,13 @@ func LoadPlugin(ctx context.Context, handler Handler) (Interface, error) { } } + // If the plugin implements the metrics service, load its + // capabilities + if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_METRICS) { + if err = result.loadMetricsCapabilities(ctx); err != nil { + return nil, err + } + } + return &result, nil } diff --git a/pkg/management/postgres/metrics/collector.go b/pkg/management/postgres/metrics/collector.go index e6910f2282..fe4ee6c4a1 100644 --- a/pkg/management/postgres/metrics/collector.go +++ b/pkg/management/postgres/metrics/collector.go @@ -27,11 +27,16 @@ import ( "fmt" "path" "regexp" + "time" "github.com/blang/semver" + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/prometheus/client_golang/prometheus" + apiv1 
"github.com/cloudnative-pg/cloudnative-pg/api/v1" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/repository" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/metrics/histogram" postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils" @@ -546,3 +551,106 @@ func (c QueryCollector) collectHistogramMetric( } ch <- metric } + +// PluginCollector is the interface for collecting metrics from plugins +type PluginCollector interface { + // Collect collects the metrics from the plugins + Collect(ctx context.Context, ch chan<- prometheus.Metric, cluster *apiv1.Cluster) error + // Describe describes the metrics from the plugins + Describe(ctx context.Context, ch chan<- *prometheus.Desc, cluster *apiv1.Cluster) +} + +type pluginCollector struct { + pluginRepository repository.Interface +} + +// NewPluginCollector creates a new PluginCollector that collects metrics from plugins +func NewPluginCollector( + pluginRepository repository.Interface, +) PluginCollector { + return &pluginCollector{pluginRepository: pluginRepository} +} + +func (p *pluginCollector) Describe(ctx context.Context, ch chan<- *prometheus.Desc, cluster *apiv1.Cluster) { + contextLogger := log.FromContext(ctx).WithName("plugin_metrics_describe") + + if len(cluster.GetInstanceEnabledPluginNames()) == 0 { + contextLogger.Trace("No plugins enabled for metrics collection") + return + } + + cli, err := p.getClient(ctx, cluster) + if err != nil { + contextLogger.Error(err, "failed to get plugin client") + return + } + defer cli.Close(ctx) + + pluginsMetrics, err := cli.GetMetricsDefinitions(ctx, cluster) + if err != nil { + contextLogger.Error(err, "failed to get plugin metrics") + return + } + + for _, metric := range pluginsMetrics { + ch <- metric.Desc + } +} + +func (p *pluginCollector) Collect(ctx context.Context, ch chan<- prometheus.Metric, cluster *apiv1.Cluster) error { + contextLogger := log.FromContext(ctx).WithName("plugin_metrics_collect") + + if len(cluster.GetInstanceEnabledPluginNames()) == 0 { + contextLogger.Trace("No plugins enabled for metrics collection") + return nil + } + + cli, err := p.getClient(ctx, cluster) + if err != nil { + return fmt.Errorf("failed to get plugin client: %w", err) + } + defer cli.Close(ctx) + + definitions, err := cli.GetMetricsDefinitions(ctx, cluster) + if err != nil { + return fmt.Errorf("failed to get plugin metrics during collect: %w", err) + } + + res, err := cli.CollectMetrics(ctx, cluster) + if err != nil { + return fmt.Errorf("failed to collect metrics from plugins: %w", err) + } + + return sendPluginMetrics(definitions, res, ch) +} + +func sendPluginMetrics( + definitions pluginClient.PluginMetricDefinitions, + metrics []*metrics.CollectMetric, + ch chan<- prometheus.Metric, +) error { + for _, metric := range metrics { + definition := definitions.Get(metric.FqName) + if definition == nil { + return fmt.Errorf("metric definition not found for fqName: %s", metric.FqName) + } + + m, err := prometheus.NewConstMetric(definition.Desc, definition.ValueType, metric.Value, metric.VariableLabels...) 
+ if err != nil { + return fmt.Errorf("failed to create metric %s: %w", metric.FqName, err) + } + ch <- m + } + return nil +} + +func (p *pluginCollector) getClient(ctx context.Context, cluster *apiv1.Cluster) (pluginClient.Client, error) { + pluginLoadingContext, cancelPluginLoading := context.WithTimeout(ctx, 5*time.Second) + defer cancelPluginLoading() + + return pluginClient.WithPlugins( + pluginLoadingContext, + p.pluginRepository, + cluster.GetInstanceEnabledPluginNames()..., + ) +} diff --git a/pkg/management/postgres/metrics/collector_test.go b/pkg/management/postgres/metrics/collector_test.go index 0a29fb7a48..acc2a71ab7 100644 --- a/pkg/management/postgres/metrics/collector_test.go +++ b/pkg/management/postgres/metrics/collector_test.go @@ -20,8 +20,11 @@ SPDX-License-Identifier: Apache-2.0 package metrics import ( + "github.com/cloudnative-pg/cnpg-i/pkg/metrics" "github.com/prometheus/client_golang/prometheus" + pluginClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -150,3 +153,126 @@ var _ = Describe("QueryCollector tests", func() { }) }) }) + +var _ = Describe("sendPluginMetrics tests", func() { + It("should successfully send metrics when definitions and metrics match", func() { + ch := make(chan prometheus.Metric, 10) + desc := prometheus.NewDesc("test_metric", "test description", []string{"label1"}, nil) + definitions := pluginClient.PluginMetricDefinitions{ + pluginClient.PluginMetricDefinition{ + FqName: "test_metric", + Desc: desc, + ValueType: prometheus.CounterValue, + }, + } + + testMetrics := []*metrics.CollectMetric{ + { + FqName: "test_metric", + Value: 42.0, + VariableLabels: []string{"value1"}, + }, + } + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).ToNot(HaveOccurred()) + Expect(ch).To(HaveLen(1)) + + // Verify the metric was sent + metric := <-ch + Expect(metric.Desc()).To(Equal(desc)) + }) + + It("should return error when metric definition is not found", func() { + ch := make(chan prometheus.Metric, 10) + definitions := pluginClient.PluginMetricDefinitions{} + testMetrics := []*metrics.CollectMetric{ + { + FqName: "missing_metric", + Value: 42.0, + VariableLabels: []string{"value1"}, + }, + } + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("metric definition not found for fqName: missing_metric")) + Expect(ch).To(BeEmpty()) + }) + + It("should return error when prometheus metric creation fails", func() { + ch := make(chan prometheus.Metric, 10) + desc := prometheus.NewDesc("test_metric", "test description", []string{"label1", "label2"}, nil) + definitions := pluginClient.PluginMetricDefinitions{ + pluginClient.PluginMetricDefinition{ + FqName: "test_metric", + Desc: desc, + ValueType: prometheus.CounterValue, + }, + } + + // Create metric with wrong number of labels (should cause NewConstMetric to fail) + testMetrics := []*metrics.CollectMetric{ + { + FqName: "test_metric", + Value: 42.0, + VariableLabels: []string{"value1"}, // Only one label, but desc expects two + }, + } + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create metric test_metric")) + Expect(ch).To(BeEmpty()) + }) + + It("should handle multiple metrics successfully", func() { + ch := make(chan prometheus.Metric, 10) + desc1 := prometheus.NewDesc("metric_one", "first metric", 
[]string{"label1"}, nil) + desc2 := prometheus.NewDesc("metric_two", "second metric", []string{"label2"}, nil) + definitions := pluginClient.PluginMetricDefinitions{ + pluginClient.PluginMetricDefinition{ + FqName: "metric_one", + Desc: desc1, + ValueType: prometheus.CounterValue, + }, + pluginClient.PluginMetricDefinition{ + FqName: "metric_two", + Desc: desc2, + ValueType: prometheus.GaugeValue, + }, + } + + testMetrics := []*metrics.CollectMetric{ + { + FqName: "metric_one", + Value: 10.0, + VariableLabels: []string{"value1"}, + }, + { + FqName: "metric_two", + Value: 20.0, + VariableLabels: []string{"value2"}, + }, + } + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).ToNot(HaveOccurred()) + Expect(ch).To(HaveLen(2)) + }) + + It("should handle empty metrics slice", func() { + ch := make(chan prometheus.Metric, 10) + definitions := pluginClient.PluginMetricDefinitions{} + var testMetrics []*metrics.CollectMetric + + err := sendPluginMetrics(definitions, testMetrics, ch) + + Expect(err).ToNot(HaveOccurred()) + Expect(ch).To(BeEmpty()) + }) +}) diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector.go b/pkg/management/postgres/webserver/metricserver/pg_collector.go index 3950770992..876b26bd06 100644 --- a/pkg/management/postgres/webserver/metricserver/pg_collector.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector.go @@ -20,6 +20,7 @@ SPDX-License-Identifier: Apache-2.0 package metricserver import ( + "context" "database/sql" "errors" "fmt" @@ -54,6 +55,9 @@ type Exporter struct { // - to ensure we are able to unit test // - to make the struct adhere to the composition pattern instead of hardcoding dependencies inside the functions getCluster func() (*apiv1.Cluster, error) + + // pluginCollector is used to collect metrics from plugins + pluginCollector m.PluginCollector } // metrics here are related to the exporter itself, which is instrumented to @@ -91,11 +95,13 @@ type PgStatWalMetrics struct { } // NewExporter creates an exporter -func NewExporter(instance *postgres.Instance) *Exporter { +func NewExporter(instance *postgres.Instance, pluginCollector m.PluginCollector) *Exporter { + clusterGetter := local.NewClient().Cache().GetCluster return &Exporter{ - instance: instance, - Metrics: newMetrics(), - getCluster: local.NewClient().Cache().GetCluster, + instance: instance, + Metrics: newMetrics(), + getCluster: clusterGetter, + pluginCollector: pluginCollector, } } @@ -303,6 +309,10 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) { e.Metrics.PgStatWalMetrics.WalSyncTime.Describe(ch) } } + + if cluster, _ := e.getCluster(); cluster != nil { + e.pluginCollector.Describe(context.Background(), ch, cluster) + } } // Collect implements prometheus.Collector, collecting the Metrics values to @@ -339,6 +349,14 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { e.Metrics.PgStatWalMetrics.WalSyncTime.Collect(ch) } } + + if cluster, _ := e.getCluster(); cluster != nil { + if err := e.pluginCollector.Collect(context.Background(), ch, cluster); err != nil { + log.Error(err, "error while collecting plugin metrics") + e.Metrics.Error.Set(1) + e.Metrics.PgCollectionErrors.WithLabelValues("Collect.PluginMetrics").Inc() + } + } } func (e *Exporter) collectPgMetrics(ch chan<- prometheus.Metric) { diff --git a/pkg/management/postgres/webserver/metricserver/pg_collector_test.go b/pkg/management/postgres/webserver/metricserver/pg_collector_test.go index 6bb9c5cb48..dd5c24eef9 100644 --- 
a/pkg/management/postgres/webserver/metricserver/pg_collector_test.go +++ b/pkg/management/postgres/webserver/metricserver/pg_collector_test.go @@ -20,6 +20,7 @@ SPDX-License-Identifier: Apache-2.0 package metricserver import ( + "context" "fmt" "time" @@ -36,13 +37,22 @@ import ( . "github.com/onsi/gomega" ) +type fakePluginCollector struct{} + +func (f fakePluginCollector) Collect(context.Context, chan<- prometheus.Metric, *apiv1.Cluster) error { + return nil +} + +func (f fakePluginCollector) Describe(context.Context, chan<- *prometheus.Desc, *apiv1.Cluster) { +} + var _ = Describe("test metrics parsing", func() { var exporter *Exporter BeforeEach(func() { cache.Delete(cache.ClusterKey) instance := postgres.NewInstance() - exporter = NewExporter(instance) + exporter = NewExporter(instance, fakePluginCollector{}) }) It("fails if there's no cluster in the cache", func() { From b2ea2fb0c3bf269e4ee97751f11cfc215efc250f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 23 Jul 2025 16:17:11 +0200 Subject: [PATCH 721/836] fix(deps): update module github.com/onsi/gomega to v1.38.0 (main) (#8047) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cf292975f1..9b0eefa646 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.23.4 - github.com/onsi/gomega v1.37.0 + github.com/onsi/gomega v1.38.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0 github.com/prometheus/client_golang v1.22.0 github.com/robfig/cron v1.2.0 diff --git a/go.sum b/go.sum index 30fc7f3c1d..1ea508d7a8 100644 --- a/go.sum +++ b/go.sum @@ -145,8 +145,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= +github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= From 7e2ea8bb10309e01899ce044f37016cf3982179b Mon Sep 17 00:00:00 2001 From: Zekiye Aydemir Date: Wed, 23 Jul 2025 18:19:27 +0300 Subject: [PATCH 722/836] docs: add guide on using Cilium to secure CloudNativePG traffic (#7703) This contribution adds a new documentation section on securing CloudNativePG-managed PostgreSQL clusters using Cilium, a CNCF Sandbox project that provides eBPF-powered networking and security. It includes an introduction to Cilium, an example CiliumNetworkPolicy manifest to restrict access to PostgreSQL. Closes #7691 Signed-off-by: Zekiye Aydemir Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Jonathan Battiato Co-authored-by: Jonathan Gonzalez V. 
Co-authored-by: Jonathan Battiato --- .wordlist-en-custom.txt | 4 + docs/mkdocs.yml | 1 + docs/src/cncf-projects/cilium.md | 210 +++++++++++++++++++++++++++++++ 3 files changed, 215 insertions(+) create mode 100644 docs/src/cncf-projects/cilium.md diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 4f4ae02071..b8f7d56809 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -75,6 +75,7 @@ Ceph CertificatesConfiguration CertificatesStatus Certmanager +CiliumNetworkPolicy ClassName ClientCASecret ClientCertsCASecret @@ -209,6 +210,7 @@ Innocenti InstanceID InstanceReportedState IsolationCheckConfiguration +Isovalent Istio Istio's JSON @@ -268,6 +270,7 @@ NOCREATEROLE NOSUPERUSER Namespaces Nenciarini +NetworkPolicy Niccolò NodeAffinity NodeMaintenanceWindow @@ -783,6 +786,7 @@ downtimes dvcmQ dwm dx +eBPF ecdsa edb eks diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index fe3de716f7..bdc8413311 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -75,6 +75,7 @@ nav: - release_notes.md - CNCF Projects Integrations: - cncf-projects/external-secrets.md + - cncf-projects/cilium.md - Appendixes: - appendixes/backup_volumesnapshot.md - appendixes/backup_barmanobjectstore.md diff --git a/docs/src/cncf-projects/cilium.md b/docs/src/cncf-projects/cilium.md new file mode 100644 index 0000000000..6f1933ec92 --- /dev/null +++ b/docs/src/cncf-projects/cilium.md @@ -0,0 +1,210 @@ +# Cilium + +## About + +[Cilium](https://cilium.io/) is a CNCF Graduated project that was accepted as an Incubating project in 2021 and graduated in 2023 under +the sponsorship of Isovalent. It is an advanced networking, security, and observability solution for cloud-native +environments, built on top of eBPF (Extended Berkeley Packet Filter) technology. Cilium manages network traffic in +Kubernetes clusters by dynamically injecting eBPF programs into the Linux Kernel, enabling low-latency, +high-performance communication and enforcing fine-grained security policies. + +Key features of Cilium: + +- Advanced L3-L7 security policies for fine-grained network traffic control +- Efficient, kernel-level traffic management via eBPF +- Service Mesh integration (Cilium Service Mesh) +- Support for both NetworkPolicy and CiliumNetworkPolicy +- Built-in observability and monitoring with Hubble + +To install Cilium in your environment, follow the instructions in the documentation: +[https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/) + +## Pod-to-Pod Network Security with CloudNativePG and Cilium + +Kubernetes’ default behavior is to allow traffic between any two Pods in the cluster network. +Cilium provides advanced L3/L4 network security using the `CiliumNetworkPolicy` resource. This +enables fine-grained control over network traffic between Pods within a Kubernetes cluster. It is +especially useful for securing communication between application workloads and backend +services. + +In the following examples, we demonstrate how Cilium can be used to secure a CloudNativePG PostgreSQL instance by +restricting ingress traffic to only authorized Pods. + +!!! Important + Before proceeding, ensure that the `cluster-example` Postgres cluster is up and running in your environment. + +## Making Cilium Network Policies work with CloudNativePG Operator + +When working with a network policy, Cilium or not, the first step is to make sure that the operator can reach the Pods +in the target namespace. 
This is important because the operator needs to be able to perform checks and actions on the
+Pods, and one of those actions requires access to port `8000` on the Pods to get the current status of the PostgreSQL
+instance running inside.
+
+The following `CiliumNetworkPolicy` allows the operator to access the Pods in the target `default` namespace:
+
+```yaml
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+  name: cnpg-operator-policy
+  namespace: default
+spec:
+  description: "Allow CloudNativePG operator access to any pod in the target namespace"
+  endpointSelector: {}
+  ingress:
+    - fromEndpoints:
+        - matchLabels:
+            io.kubernetes.pod.namespace: cnpg-system
+      toPorts:
+        - ports:
+            - port: "8000"
+              protocol: TCP
+```
+
+!!! Important
+    The `cnpg-system` namespace is the default namespace for the operator when using the YAML manifests. If the operator
+    was installed using a different process (Helm, OLM, etc.), the namespace may be different. Make sure to adjust the
+    namespace accordingly.
+
+## Allowing access between cluster Pods
+
+Since the default policy is "deny all", we need to explicitly allow access between the cluster Pods in the same namespace.
+We will improve our previous policy by adding the required ingress rule:
+
+```yaml
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+  name: cnpg-policy
+  namespace: default
+spec:
+  description: "Allow CloudNativePG operator access and connection between pods in the same namespace"
+  endpointSelector: {}
+  ingress:
+    - fromEndpoints:
+        - matchLabels:
+            io.kubernetes.pod.namespace: cnpg-system
+        - matchLabels:
+            io.kubernetes.pod.namespace: default
+            cnpg.io/cluster: cluster-example
+      toPorts:
+        - ports:
+            - port: "8000"
+              protocol: TCP
+            - port: "5432"
+              protocol: TCP
+```
+
+The policy allows access from `cnpg-system` Pods and from `default` namespace Pods that also belong to `cluster-example`.
+The `matchLabels` selector requires Pods to have the complete set of listed labels. Missing even one label means the Pod
+will not match.
+
+## Restricting Access to PostgreSQL with Cilium
+
+In this example, we define a `CiliumNetworkPolicy` that allows only Pods labeled `role=backend` in the `default` namespace
+to connect to a PostgreSQL cluster named `cluster-example`. All other ingress traffic is blocked by default.
+
+```yaml
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+  name: postgres-policy
+  namespace: default
+spec:
+  description: "Allow PostgreSQL access on port 5432 from Pods with role=backend"
+  endpointSelector:
+    matchLabels:
+      cnpg.io/cluster: cluster-example
+  ingress:
+    - fromEndpoints:
+        - matchLabels:
+            role: backend
+      toPorts:
+        - ports:
+            - port: "5432"
+              protocol: TCP
+```
+
+This `CiliumNetworkPolicy` ensures that only Pods labeled with `role=backend` can access the
+PostgreSQL instance managed by CloudNativePG via port 5432 in the `default` namespace.
+
+In the following policy, we demonstrate how to allow ingress traffic to port 5432 of a PostgreSQL cluster named
+`cluster-example`, only from Pods with the label `role=backend` in any namespace.
+
+```yaml
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+  name: postgres-policy
+  namespace: default
+spec:
+  description: "Allow PostgreSQL access on port 5432 from Pods with role=backend in any namespace"
+  endpointSelector:
+    matchLabels:
+      cnpg.io/cluster: cluster-example
+  ingress:
+    - fromEndpoints:
+        - matchLabels:
+            role: backend
+          matchExpressions:
+            - key: io.kubernetes.pod.namespace
+              operator: Exists
+      toPorts:
+        - ports:
+            - port: "5432"
+              protocol: TCP
+```
+
+The following example allows ingress traffic to port 5432 of the `cluster-example` cluster (located in the
+`default` namespace) from any Pod in the `backend` namespace.
+
+```yaml
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+  name: postgres-policy
+  namespace: default
+spec:
+  description: "Allow PostgreSQL access on port 5432 from any Pods in the backend namespace"
+  endpointSelector:
+    matchLabels:
+      cnpg.io/cluster: cluster-example
+  ingress:
+    - fromEndpoints:
+        - matchLabels:
+            io.kubernetes.pod.namespace: backend
+      toPorts:
+        - ports:
+            - port: "5432"
+              protocol: TCP
+```
+
+Using Cilium’s L3/L4 policy model, we define a `CiliumNetworkPolicy` that explicitly allows ingress
+traffic to cluster Pods only from application Pods in the `backend` namespace. All other
+traffic is implicitly denied unless explicitly permitted by additional policies.
+
+The following example allows ingress traffic to port 5432 of the `cluster-example` cluster (located in the
+`default` namespace) from any source within the Kubernetes cluster.
+
+```yaml
+apiVersion: cilium.io/v2
+kind: CiliumNetworkPolicy
+metadata:
+  name: postgres-policy
+  namespace: default
+spec:
+  description: "Allow ingress traffic to port 5432 of the cluster-example from any pods within the Kubernetes cluster"
+  endpointSelector:
+    matchLabels:
+      cnpg.io/cluster: cluster-example
+  ingress:
+    - fromEntities:
+        - cluster
+      toPorts:
+        - ports:
+            - port: "5432"
+              protocol: TCP
+```
+
+You may consider using [editor.networkpolicy.io](https://editor.networkpolicy.io/), a visual and interactive tool that simplifies the creation and
+validation of Cilium Network Policies. It’s especially helpful for avoiding misconfigurations and understanding traffic
+rules more clearly by presenting them visually.
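+
+To double-check that the policies above behave as intended, you can deploy a
+disposable client Pod and test the connection from inside it. The following
+manifest is an illustrative sketch: the Pod name and image are placeholders,
+and any image that ships the `psql` client will work.
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: psql-client          # hypothetical test Pod
+  namespace: default
+  labels:
+    role: backend            # the label the policies above allow
+spec:
+  containers:
+    - name: psql
+      # Placeholder image: any image providing psql works here
+      image: ghcr.io/cloudnative-pg/postgresql:17
+      command: ["sleep", "infinity"]
+```
+
+With the `role=backend` label in place, running
+`kubectl exec -ti psql-client -- psql -h cluster-example-rw -U app app` should
+reach the cluster through the `cluster-example-rw` service, while the same
+attempt from a Pod without that label should time out.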
From cdda88f557bcee6da4a61d6114c0834c3ef4d761 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Wed, 23 Jul 2025 17:44:06 +0200 Subject: [PATCH 723/836] fix(validation): consider HugePages validating shared_buffers (#7864) Closes #7862 Signed-off-by: Marco Nenciarini --- internal/webhook/v1/cluster_webhook.go | 96 ++++++++++++++++----- internal/webhook/v1/cluster_webhook_test.go | 48 +++++++++++ 2 files changed, 123 insertions(+), 21 deletions(-) diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index ca149dc0cf..e9fd767866 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -859,45 +859,55 @@ func (v *ClusterCustomValidator) validateImagePullPolicy(r *apiv1.Cluster) field func (v *ClusterCustomValidator) validateResources(r *apiv1.Cluster) field.ErrorList { var result field.ErrorList - cpuRequest := r.Spec.Resources.Requests.Cpu() + cpuRequests := r.Spec.Resources.Requests.Cpu() cpuLimits := r.Spec.Resources.Limits.Cpu() - if !cpuRequest.IsZero() && !cpuLimits.IsZero() { - cpuRequestGtThanLimit := cpuRequest.Cmp(*cpuLimits) > 0 + if !cpuRequests.IsZero() && !cpuLimits.IsZero() { + cpuRequestGtThanLimit := cpuRequests.Cmp(*cpuLimits) > 0 if cpuRequestGtThanLimit { result = append(result, field.Invalid( field.NewPath("spec", "resources", "requests", "cpu"), - cpuRequest.String(), + cpuRequests.String(), "CPU request is greater than the limit", )) } } - memoryRequest := r.Spec.Resources.Requests.Memory() - rawSharedBuffer := r.Spec.PostgresConfiguration.Parameters[sharedBuffersParameter] - if !memoryRequest.IsZero() && rawSharedBuffer != "" { - if sharedBuffers, err := parsePostgresQuantityValue(rawSharedBuffer); err == nil { - if memoryRequest.Cmp(sharedBuffers) < 0 { - result = append(result, field.Invalid( - field.NewPath("spec", "resources", "requests", "memory"), - memoryRequest.String(), - "Memory request is lower than PostgreSQL `shared_buffers` value", - )) - } - } - } - + memoryRequests := r.Spec.Resources.Requests.Memory() memoryLimits := r.Spec.Resources.Limits.Memory() - if !memoryRequest.IsZero() && !memoryLimits.IsZero() { - memoryRequestGtThanLimit := memoryRequest.Cmp(*memoryLimits) > 0 + if !memoryRequests.IsZero() && !memoryLimits.IsZero() { + memoryRequestGtThanLimit := memoryRequests.Cmp(*memoryLimits) > 0 if memoryRequestGtThanLimit { result = append(result, field.Invalid( field.NewPath("spec", "resources", "requests", "memory"), - memoryRequest.String(), + memoryRequests.String(), "Memory request is greater than the limit", )) } } + hugePages, hugePagesErrors := validateHugePagesResources(r) + result = append(result, hugePagesErrors...) 
+ if cpuRequests.IsZero() && cpuLimits.IsZero() && memoryRequests.IsZero() && memoryLimits.IsZero() && + len(hugePages) > 0 { + result = append(result, field.Forbidden( + field.NewPath("spec", "resources"), + "HugePages require cpu or memory", + )) + } + + rawSharedBuffer := r.Spec.PostgresConfiguration.Parameters[sharedBuffersParameter] + if rawSharedBuffer != "" { + if sharedBuffers, err := parsePostgresQuantityValue(rawSharedBuffer); err == nil { + if !hasEnoughMemoryForSharedBuffers(sharedBuffers, memoryRequests, hugePages) { + result = append(result, field.Invalid( + field.NewPath("spec", "resources", "requests"), + memoryRequests.String(), + "Memory request is lower than PostgreSQL `shared_buffers` value", + )) + } + } + } + ephemeralStorageRequest := r.Spec.Resources.Requests.StorageEphemeral() ephemeralStorageLimits := r.Spec.Resources.Limits.StorageEphemeral() if !ephemeralStorageRequest.IsZero() && !ephemeralStorageLimits.IsZero() { @@ -914,6 +924,50 @@ func (v *ClusterCustomValidator) validateResources(r *apiv1.Cluster) field.Error return result } +func validateHugePagesResources(r *apiv1.Cluster) (map[corev1.ResourceName]resource.Quantity, field.ErrorList) { + var result field.ErrorList + hugepages := make(map[corev1.ResourceName]resource.Quantity) + for name, quantity := range r.Spec.Resources.Limits { + if strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix) { + hugepages[name] = quantity + } + } + for name, quantity := range r.Spec.Resources.Requests { + if strings.HasPrefix(string(name), corev1.ResourceHugePagesPrefix) { + if existingQuantity, exists := hugepages[name]; exists { + if existingQuantity.Cmp(quantity) != 0 { + result = append(result, field.Invalid( + field.NewPath("spec", "resources", "requests", string(name)), + quantity.String(), + "HugePages requests must equal the limits", + )) + } + continue + } + hugepages[name] = quantity + } + } + return hugepages, result +} + +func hasEnoughMemoryForSharedBuffers( + sharedBuffers resource.Quantity, + memoryRequest *resource.Quantity, + hugePages map[corev1.ResourceName]resource.Quantity, +) bool { + if memoryRequest.IsZero() || sharedBuffers.Cmp(*memoryRequest) <= 0 { + return true + } + + for _, quantity := range hugePages { + if sharedBuffers.Cmp(quantity) <= 0 { + return true + } + } + + return false +} + func (v *ClusterCustomValidator) validateSynchronousReplicaConfiguration(r *apiv1.Cluster) field.ErrorList { if r.Spec.PostgresConfiguration.Synchronous == nil { return nil diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index 5d21839e3a..3c62c70019 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -4508,6 +4508,12 @@ var _ = Describe("validateResources", func() { Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value")) }) + It("returns no errors when no memoryRequest is set", func() { + cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB" + errors := v.validateResources(cluster) + Expect(errors).To(BeEmpty()) + }) + It("returns no errors when memoryRequest is greater than or equal to shared_buffers in GB", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi") cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB" @@ -4515,6 +4521,48 @@ var _ = Describe("validateResources", func() { Expect(errors).To(BeEmpty()) }) + It("returns an error when hugepages request is different than hugepages 
limits", func() { + cluster.Spec.Resources.Requests["memory"] = resource.MustParse("2Gi") + cluster.Spec.Resources.Requests["hugepages-1Gi"] = resource.MustParse("1Gi") + cluster.Spec.Resources.Limits["hugepages-1Gi"] = resource.MustParse("2Gi") + errors := v.validateResources(cluster) + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Detail).To(Equal("HugePages requests must equal the limits")) + }) + + It("returns an error when hugepages request is present but no CPU or memory are", func() { + cluster.Spec.Resources.Requests["hugepages-1Gi"] = resource.MustParse("1Gi") + errors := v.validateResources(cluster) + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Detail).To(Equal("HugePages require cpu or memory")) + }) + + It("returns an error when no request is enough to contain shared_buffers, even if the sum is", func() { + cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi") + cluster.Spec.Resources.Requests["ugepages-1Gi"] = resource.MustParse("1Gi") + cluster.Spec.Resources.Requests["hugepages-2Mi"] = resource.MustParse("1Gi") + cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "2000000kB" + errors := v.validateResources(cluster) + Expect(errors).To(HaveLen(1)) + Expect(errors[0].Detail).To(Equal("Memory request is lower than PostgreSQL `shared_buffers` value")) + }) + + It("returns no errors when hugepages-1Gi request is greater than or equal to shared_buffers in GB", func() { + cluster.Spec.Resources.Requests["memory"] = resource.MustParse("256Mi") + cluster.Spec.Resources.Requests["hugepages-1Gi"] = resource.MustParse("1Gi") + cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB" + errors := v.validateResources(cluster) + Expect(errors).To(BeEmpty()) + }) + + It("returns no errors when hugepages-2Mi request is greater than or equal to shared_buffers in GB", func() { + cluster.Spec.Resources.Requests["memory"] = resource.MustParse("256Mi") + cluster.Spec.Resources.Limits["hugepages-2Mi"] = resource.MustParse("1Gi") + cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "1GB" + errors := v.validateResources(cluster) + Expect(errors).To(BeEmpty()) + }) + It("returns no errors when shared_buffers is in a format that can't be parsed", func() { cluster.Spec.Resources.Requests["memory"] = resource.MustParse("1Gi") cluster.Spec.PostgresConfiguration.Parameters["shared_buffers"] = "invalid_value" From 747e841971eef886678ad48643f9d72721f60357 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 24 Jul 2025 14:06:05 +0200 Subject: [PATCH 724/836] fix(replica_cluster): detect designated primary while ordering instances (#8108) Closes #7416 Signed-off-by: Armando Ruocco --- internal/cmd/plugin/status/status.go | 1 + internal/controller/backup_controller.go | 1 + internal/controller/replicas.go | 5 ++++- internal/plugin/resources/instance.go | 6 +++++- .../postgres/webserver/client/remote/instance.go | 8 ++++++++ pkg/postgres/status.go | 14 +++++++++++++- 6 files changed, 32 insertions(+), 3 deletions(-) diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 50ded9a1d1..463f34d299 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -178,6 +178,7 @@ func extractPostgresqlStatus(ctx context.Context, cluster apiv1.Cluster) *Postgr // Get the list of Pods created by this Cluster instancesStatus, errList := resources.ExtractInstancesStatus( ctx, + &cluster, plugin.Config, managedPods, ) diff --git a/internal/controller/backup_controller.go 
b/internal/controller/backup_controller.go index c43f53de44..00a9fcf366 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -181,6 +181,7 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr }() ctx = cnpgiClient.SetPluginClientInContext(ctx, pluginClient) + ctx = cluster.SetInContext(ctx) // Plugin pre-hooks if hookResult := preReconcilePluginHooks(ctx, &cluster, &backup); hookResult.StopReconciliation { diff --git a/internal/controller/replicas.go b/internal/controller/replicas.go index 5ab1ab6a08..74117dc5c2 100644 --- a/internal/controller/replicas.go +++ b/internal/controller/replicas.go @@ -330,7 +330,10 @@ func GetPodsNotOnPrimaryNode( status postgres.PostgresqlStatusList, primaryPod *postgres.PostgresqlStatus, ) postgres.PostgresqlStatusList { - podsOnOtherNodes := postgres.PostgresqlStatusList{} + podsOnOtherNodes := postgres.PostgresqlStatusList{ + IsReplicaCluster: status.IsReplicaCluster, + CurrentPrimary: status.CurrentPrimary, + } if primaryPod == nil { return podsOnOtherNodes } diff --git a/internal/plugin/resources/instance.go b/internal/plugin/resources/instance.go index a483dd53ba..9fdab1378d 100644 --- a/internal/plugin/resources/instance.go +++ b/internal/plugin/resources/instance.go @@ -77,10 +77,14 @@ func GetInstancePods(ctx context.Context, clusterName string) ([]corev1.Pod, cor // ExtractInstancesStatus extracts the instance status from the given pod list func ExtractInstancesStatus( ctx context.Context, + cluster *apiv1.Cluster, config *rest.Config, filteredPods []corev1.Pod, ) (postgres.PostgresqlStatusList, []error) { - var result postgres.PostgresqlStatusList + result := postgres.PostgresqlStatusList{ + IsReplicaCluster: cluster.IsReplica(), + CurrentPrimary: cluster.Status.CurrentPrimary, + } var errs []error for idx := range filteredPods { diff --git a/pkg/management/postgres/webserver/client/remote/instance.go b/pkg/management/postgres/webserver/client/remote/instance.go index da29fa4242..913ca19d9f 100644 --- a/pkg/management/postgres/webserver/client/remote/instance.go +++ b/pkg/management/postgres/webserver/client/remote/instance.go @@ -37,10 +37,12 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/management/url" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" + contextutils "github.com/cloudnative-pg/cloudnative-pg/pkg/utils/context" ) const ( @@ -108,6 +110,12 @@ func (r instanceClientImpl) extractInstancesStatus( ) postgres.PostgresqlStatusList { var result postgres.PostgresqlStatusList + cluster, ok := ctx.Value(contextutils.ContextKeyCluster).(*apiv1.Cluster) + if ok && cluster != nil { + result.IsReplicaCluster = cluster.IsReplica() + result.CurrentPrimary = cluster.Status.CurrentPrimary + } + for idx := range activePods { instanceStatus := r.getReplicaStatusFromPodViaHTTP(ctx, activePods[idx]) result.Items = append(result.Items, instanceStatus) diff --git a/pkg/postgres/status.go b/pkg/postgres/status.go index 332807ce09..d1b63ab6dd 100644 --- a/pkg/postgres/status.go +++ b/pkg/postgres/status.go @@ -220,7 +220,9 @@ func (list PgStatReplicationList) Less(i, j int) bool { // PostgresqlStatusList is a list of PostgreSQL status received from the Pods // that can be sorted considering the replication status type 
PostgresqlStatusList struct { - Items []PostgresqlStatus `json:"items"` + Items []PostgresqlStatus `json:"items"` + IsReplicaCluster bool `json:"-"` + CurrentPrimary string `json:"-"` } // GetNames returns a list of names of Pods @@ -304,6 +306,16 @@ func (list *PostgresqlStatusList) Less(i, j int) bool { return !list.Items[i].ReplayLsn.Less(list.Items[j].ReplayLsn) } + // In a replica cluster, all instances are standbys of an external primary. + // Therefore, `IsPrimary` is always false for every item in the list. + // We rely on the `CurrentPrimary` field to identify the designated primary + // instance that is replicating from the external cluster, ensuring it is + // sorted first among the standbys. + if list.IsReplicaCluster && + (list.Items[i].Pod.Name == list.CurrentPrimary && list.Items[j].Pod.Name != list.CurrentPrimary) { + return true + } + return list.Items[i].Pod.Name < list.Items[j].Pod.Name } From a23b0af7150bc66ae880bfab5154caacf56a58da Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 24 Jul 2025 14:23:01 +0200 Subject: [PATCH 725/836] chore(ci): use independent names for recovery backups (#8124) In Azure tests, we were using the name `cluster-backup` multiple times, which caused issues with the tests since we can no longer modify the backup objects. Closes #8120 Signed-off-by: Jonathan Gonzalez V. --- .../backup/recovery_external_clusters/backup-azure-blob-02.yaml | 2 +- .../recovery_external_clusters/backup-azure-blob-pitr-sas.yaml | 2 +- .../recovery_external_clusters/backup-azure-blob-pitr.yaml | 2 +- .../recovery_external_clusters/backup-azure-blob-sas.yaml | 2 +- .../backup/recovery_external_clusters/backup-azurite-02.yaml | 2 +- .../backup/recovery_external_clusters/backup-minio-02.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-02.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-02.yaml index d59a7a38e3..be4373f4ad 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-02.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-02.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-02 spec: cluster: name: source-cluster-azure diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr-sas.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr-sas.yaml index 240b9dd72e..6c6a213162 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr-sas.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr-sas.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-pitr-sas spec: cluster: name: pg-backup-azure-blob-sas diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml index d856776eaa..a8f134f745 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml @@ -1,7 +1,7 @@ apiVersion: postgresql.cnpg.io/v1 kind: Backup metadata: - name: cluster-backup + name: cluster-backup-pitr spec: cluster: name: external-cluster-azure diff --git 
a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-sas.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-sas.yaml
index 240b9dd72e..2b10ab726f 100644
--- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-sas.yaml
+++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-sas.yaml
@@ -1,7 +1,7 @@
 apiVersion: postgresql.cnpg.io/v1
 kind: Backup
 metadata:
-  name: cluster-backup
+  name: cluster-backup-sas
 spec:
   cluster:
     name: pg-backup-azure-blob-sas
diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azurite-02.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azurite-02.yaml
index b05e52e1fc..8838291d02 100644
--- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azurite-02.yaml
+++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azurite-02.yaml
@@ -1,7 +1,7 @@
 apiVersion: postgresql.cnpg.io/v1
 kind: Backup
 metadata:
-  name: cluster-backup
+  name: cluster-backup-azurite-02
 spec:
   cluster:
     name: pg-backup-azurite
diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-minio-02.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-minio-02.yaml
index 0cf59f9ef1..c4226fc92b 100644
--- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-minio-02.yaml
+++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-minio-02.yaml
@@ -1,7 +1,7 @@
 apiVersion: postgresql.cnpg.io/v1
 kind: Backup
 metadata:
-  name: cluster-backup
+  name: cluster-backup-02
 spec:
   cluster:
     name: source-cluster-minio

From c1d7fc8a9bc1c078cba86000f1c7fe3fcc7a0162 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Niccol=C3=B2=20Fei?=
Date: Thu, 24 Jul 2025 14:59:17 +0200
Subject: [PATCH 726/836] feat: dynamic loading of PostgreSQL extensions
 (#7991)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This feature allows you to mount a PostgreSQL extension, packaged as an
OCI-compliant container image, as a read-only, immutable volume inside
an instance pod.

The new `.spec.postgresql.extensions` stanza of a `Cluster` resource
accepts an ordered list of extension images to be added to the
PostgreSQL cluster.

Each item in the `extensions` stanza is identified by `name` and
provides the following options:

- `extension_control_path`: A list of relative paths within the
  container image to be appended to PostgreSQL’s
  `extension_control_path`, allowing it to locate extension control
  files (default: `share`).
- `dynamic_library_path`: A list of relative paths within the container
  image to be appended to PostgreSQL’s `dynamic_library_path`, enabling
  it to locate shared library files for extensions (default: `lib`).
- `ld_library_path`: A list of relative paths within the container
  image to be appended to the `LD_LIBRARY_PATH` environment variable of
  the instance manager process, allowing PostgreSQL to locate required
  system libraries at runtime (default: empty).

Each image volume is mounted at `/extensions/<name>`.

By default, CloudNativePG automatically manages the relevant PostgreSQL
GUCs, setting:

- `extension_control_path` to `/extensions/<name>/share`, allowing
  PostgreSQL to locate any extension control file within
  `/extensions/<name>/share/extension`
- `dynamic_library_path` to `/extensions/<name>/lib`

These values are appended in the order in which the extensions are
defined in the `extensions` list, ensuring deterministic path
resolution within PostgreSQL.
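
As an illustrative sketch (the extension name and image reference below
are placeholders), a minimal `Cluster` using the new stanza could look
like this:

    apiVersion: postgresql.cnpg.io/v1
    kind: Cluster
    metadata:
      name: cluster-example
    spec:
      instances: 3
      storage:
        size: 1Gi
      postgresql:
        extensions:
          # Mounted at /extensions/my-extension; with the defaults above,
          # /extensions/my-extension/share is appended to
          # extension_control_path and /extensions/my-extension/lib to
          # dynamic_library_path.
          - name: my-extension
            image:
              reference: registry.example.com/my-extension:1.0  # placeholder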
Requirements: - PostgreSQL 18 or higher (for the `extension_control_path` option) - Kubernetes 1.33+ with the `ImageVolume` feature enabled Closes #7188 Signed-off-by: Niccolò Fei Signed-off-by: Gabriele Bartolini Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini Co-authored-by: Leonardo Cecchi Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 8 + api/v1/cluster_types.go | 31 ++ api/v1/zz_generated.deepcopy.go | 38 ++ .../bases/postgresql.cnpg.io_clusters.yaml | 61 ++++ docs/mkdocs.yml | 1 + docs/src/cloudnative-pg.v1.md | 62 ++++ docs/src/imagevolume_extensions.md | 343 ++++++++++++++++++ hack/setup-cluster.sh | 9 + internal/cmd/manager/instance/join/cmd.go | 1 + .../manager/instance/upgrade/execute/cmd.go | 1 + .../controller/instance_controller.go | 1 + internal/webhook/v1/cluster_webhook.go | 65 ++++ internal/webhook/v1/cluster_webhook_test.go | 226 +++++++++++- pkg/management/postgres/configuration.go | 12 + pkg/management/postgres/initdb.go | 7 +- pkg/management/postgres/instance.go | 54 ++- pkg/management/postgres/instance_test.go | 95 +++++ pkg/management/postgres/restore.go | 12 +- pkg/postgres/configuration.go | 139 ++++++- pkg/postgres/configuration_test.go | 102 ++++++ pkg/specs/volumes.go | 37 ++ pkg/specs/volumes_test.go | 77 ++++ .../cluster-with-extensions.yaml.template | 36 ++ .../database.yaml.template | 26 ++ tests/e2e/imagevolume_extensions_test.go | 234 ++++++++++++ 25 files changed, 1652 insertions(+), 26 deletions(-) create mode 100644 docs/src/imagevolume_extensions.md create mode 100644 tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template create mode 100644 tests/e2e/fixtures/imagevolume_extensions/database.yaml.template create mode 100644 tests/e2e/imagevolume_extensions_test.go diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index b8f7d56809..e0f5f2f5de 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -160,6 +160,7 @@ EnvVar EphemeralVolumeSource EphemeralVolumesSizeLimit EphemeralVolumesSizeLimitConfiguration +ExtensionConfiguration ExtensionSpec ExtensionStatus ExternalCluster @@ -180,6 +181,7 @@ GUC GUCs Gabriele GaugeVec +GeoSpatial Gi GitOps GoArch @@ -204,6 +206,8 @@ ImageCatalog ImageCatalogRef ImageCatalogSpec ImageInfo +ImageVolume +ImageVolumeSource ImportSource InfoSec Innocenti @@ -771,6 +775,7 @@ dir disableDefaultQueries disablePassword disabledDefaultServices +discoverable displayName displayName distro @@ -885,6 +890,7 @@ imageCatalogRef imageName imagePullPolicy imagePullSecrets +imageVolume imagecatalogs img immediateCheckpoint @@ -951,6 +957,7 @@ lastSuccessfulBackupByMethod latestGeneratedNode latn lc +ld ldap ldapBindPassword ldaps @@ -1093,6 +1100,7 @@ pgBouncerSecrets pgDataImageInfo pgDumpExtraOptions pgRestoreExtraOptions +pgRouting pgSQL pgadmin pgaudit diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index 987a813d1d..a342f58e41 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -1429,6 +1429,37 @@ type PostgresConfiguration struct { // Defaults to false. // +optional EnableAlterSystem bool `json:"enableAlterSystem,omitempty"` + + // The configuration of the extensions to be added + // +optional + Extensions []ExtensionConfiguration `json:"extensions,omitempty"` +} + +// ExtensionConfiguration is the configuration used to add +// PostgreSQL extensions to the Cluster. 
+type ExtensionConfiguration struct { + // The name of the extension, required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$` + Name string `json:"name"` + + // The image containing the extension, required + // +kubebuilder:validation:XValidation:rule="has(self.reference)",message="An image reference is required" + ImageVolumeSource corev1.ImageVolumeSource `json:"image"` + + // The list of directories inside the image which should be added to extension_control_path. + // If not defined, defaults to "/share". + // +optional + ExtensionControlPath []string `json:"extension_control_path,omitempty"` + + // The list of directories inside the image which should be added to dynamic_library_path. + // If not defined, defaults to "/lib". + // +optional + DynamicLibraryPath []string `json:"dynamic_library_path,omitempty"` + + // The list of directories inside the image which should be added to ld_library_path. + // +optional + LdLibraryPath []string `json:"ld_library_path,omitempty"` } // BootstrapConfiguration contains information about how to create the PostgreSQL diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 042fe56758..c6805684d7 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1262,6 +1262,37 @@ func (in *EphemeralVolumesSizeLimitConfiguration) DeepCopy() *EphemeralVolumesSi return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionConfiguration) DeepCopyInto(out *ExtensionConfiguration) { + *out = *in + out.ImageVolumeSource = in.ImageVolumeSource + if in.ExtensionControlPath != nil { + in, out := &in.ExtensionControlPath, &out.ExtensionControlPath + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DynamicLibraryPath != nil { + in, out := &in.DynamicLibraryPath, &out.DynamicLibraryPath + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LdLibraryPath != nil { + in, out := &in.LdLibraryPath, &out.LdLibraryPath + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionConfiguration. +func (in *ExtensionConfiguration) DeepCopy() *ExtensionConfiguration { + if in == nil { + return nil + } + out := new(ExtensionConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExtensionSpec) DeepCopyInto(out *ExtensionSpec) { *out = *in @@ -2300,6 +2331,13 @@ func (in *PostgresConfiguration) DeepCopyInto(out *PostgresConfiguration) { *out = new(LDAPConfig) (*in).DeepCopyInto(*out) } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]ExtensionConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresConfiguration. diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index f1a78bc165..57da62e77c 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -4011,6 +4011,67 @@ spec: This should only be used for debugging and troubleshooting. Defaults to false. 
type: boolean + extensions: + description: The configuration of the extensions to be added + items: + description: |- + ExtensionConfiguration is the configuration used to add + PostgreSQL extensions to the Cluster. + properties: + dynamic_library_path: + description: |- + The list of directories inside the image which should be added to dynamic_library_path. + If not defined, defaults to "/lib". + items: + type: string + type: array + extension_control_path: + description: |- + The list of directories inside the image which should be added to extension_control_path. + If not defined, defaults to "/share". + items: + type: string + type: array + image: + description: The image containing the extension, required + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + x-kubernetes-validations: + - message: An image reference is required + rule: has(self.reference) + ld_library_path: + description: The list of directories inside the image which + should be added to ld_library_path. + items: + type: string + type: array + name: + description: The name of the extension, required + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - image + - name + type: object + type: array ldap: description: Options to specify LDAP configuration properties: diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index bdc8413311..b76cfb334b 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -63,6 +63,7 @@ nav: - postgis.md - e2e.md - container_images.md + - imagevolume_extensions.md - operator_capability_levels.md - controller.md - samples.md diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 2b9e6d5116..92ca69e434 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2825,6 +2825,61 @@ storage

+## ExtensionConfiguration {#postgresql-cnpg-io-v1-ExtensionConfiguration}
+
+
+**Appears in:**
+
+- [PostgresConfiguration](#postgresql-cnpg-io-v1-PostgresConfiguration)
+
+
+<p>ExtensionConfiguration is the configuration used to add
+PostgreSQL extensions to the Cluster.</p>
+
+<table class="table">
+<thead><tr><th width="30%">Field</th><th>Description</th></tr></thead>
+<tbody>
+<tr><td><code>name</code> <B>[Required]</B><br/>
+<code>string</code>
+</td>
+<td>
+   <p>The name of the extension, required</p>
+</td>
+</tr>
+<tr><td><code>image</code> <B>[Required]</B><br/>
+<code>core/v1.ImageVolumeSource</code>
+</td>
+<td>
+   <p>The image containing the extension, required</p>
+</td>
+</tr>
+<tr><td><code>extension_control_path</code><br/>
+<code>[]string</code>
+</td>
+<td>
+   <p>The list of directories inside the image which should be added to extension_control_path.
+If not defined, defaults to &quot;/share&quot;.</p>
+</td>
+</tr>
+<tr><td><code>dynamic_library_path</code><br/>
+<code>[]string</code>
+</td>
+<td>
+   <p>The list of directories inside the image which should be added to dynamic_library_path.
+If not defined, defaults to &quot;/lib&quot;.</p>
+</td>
+</tr>
+<tr><td><code>ld_library_path</code><br/>
+<code>[]string</code>
+</td>
+<td>
+   <p>The list of directories inside the image which should be added to ld_library_path.</p>
+</td>
+</tr>
+</tbody>
+</table>
+
 ## ExtensionSpec {#postgresql-cnpg-io-v1-ExtensionSpec}
@@ -4453,6 +4508,13 @@ This should only be used for debugging and troubleshooting. Defaults to false.
+<tr><td><code>extensions</code><br/>
+<a href="#postgresql-cnpg-io-v1-ExtensionConfiguration"><code>[]ExtensionConfiguration</code></a>
+</td>
+<td>
+   <p>The configuration of the extensions to be added</p>
+</td>
+</tr>
diff --git a/docs/src/imagevolume_extensions.md b/docs/src/imagevolume_extensions.md
new file mode 100644
index 0000000000..bc89d3b6eb
--- /dev/null
+++ b/docs/src/imagevolume_extensions.md
@@ -0,0 +1,343 @@
+# Image Volume Extensions
+
+
+CloudNativePG supports the **dynamic loading of PostgreSQL extensions** into a
+`Cluster` at Pod startup using the [Kubernetes `ImageVolume` feature](https://kubernetes.io/docs/tasks/configure-pod-container/image-volumes/)
+and the `extension_control_path` GUC introduced in PostgreSQL 18, to which this
+project contributed.
+
+This feature allows you to mount a [PostgreSQL extension](https://www.postgresql.org/docs/current/extend-extensions.html),
+packaged as an OCI-compliant container image, as a read-only and immutable
+volume inside a running pod at a known filesystem path.
+
+You can make the extension available either globally, using the
+[`shared_preload_libraries` option](postgresql_conf.md#shared-preload-libraries),
+or at the database level through the `CREATE EXTENSION` command. For the
+latter, you can use the [`Database` resource’s declarative extension management](declarative_database_management.md/#managing-extensions-in-a-database)
+to ensure consistent, automated extension setup within your PostgreSQL
+databases.
+
+## Benefits
+
+Image volume extensions decouple the distribution of PostgreSQL operand
+container images from the distribution of extensions. This eliminates the
+need to define and embed extensions at build time within your PostgreSQL
+images—a major adoption blocker for PostgreSQL as a containerized workload,
+including from a security and supply chain perspective.
+
+As a result, you can:
+
+- Use the [official PostgreSQL `minimal` operand images](https://github.com/cloudnative-pg/postgres-containers?tab=readme-ov-file#minimal-images)
+  provided by CloudNativePG.
+- Dynamically add the extensions you need to your `Cluster` definitions,
+  without rebuilding or maintaining custom PostgreSQL images.
+- Reduce your operational surface by using immutable, minimal, and secure base
+  images while adding only the extensions required for each workload.
+
+Extension images must be built according to the
+[documented specifications](#image-specifications).
+
+## Requirements
+
+To use image volume extensions with CloudNativePG, you need:
+
+- **PostgreSQL 18 or later**, with support for `extension_control_path`.
+- **Kubernetes 1.33**, with the `ImageVolume` feature gate enabled.
+- **CloudNativePG-compatible extension container images**, ensuring:
+    - Matching PostgreSQL major version of the `Cluster` resource.
+    - Compatible operating system distribution of the `Cluster` resource.
+    - Matching CPU architecture of the `Cluster` resource.
+
+## How it works
+
+Extension images are defined in the `.spec.postgresql.extensions` stanza of a
+`Cluster` resource, which accepts an ordered list of extensions to be added to
+the PostgreSQL cluster.
+
+!!! Info
+    For field-level details, see the
+    [API reference for `ExtensionConfiguration`](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ExtensionConfiguration).
+
+Each image volume is mounted at `/extensions/<name>`.
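+
+For reference, an extension image that relies on the defaults described below
+is expected to ship its files under `lib` and `share` relative to the image
+root. For a hypothetical extension `foo`, the layout would look roughly like
+this:
+
+```
+/            # image root, mounted at /extensions/foo
+├── lib/
+│   └── foo.so
+└── share/
+    └── extension/
+        ├── foo.control
+        └── foo--1.0.sql   # typical SQL install script
+```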
+
+By default, CloudNativePG automatically manages the relevant GUCs, setting:
+
+- `extension_control_path` to `/extensions/<name>/share`, allowing
+  PostgreSQL to locate any extension control file within `/extensions/<name>/share/extension`
+- `dynamic_library_path` to `/extensions/<name>/lib`
+
+These values are appended in the order in which the extensions are defined in
+the `extensions` list, ensuring deterministic path resolution within
+PostgreSQL. This allows PostgreSQL to discover and load the extension without
+requiring manual configuration inside the pod.
+
+!!! Info
+    Depending on how your extension container images are built and their layout,
+    you may need to adjust the default `extension_control_path` and
+    `dynamic_library_path` values to match the image structure.
+
+!!! Important
+    If the extension image includes shared libraries, they must be compiled
+    with the same PostgreSQL major version, operating system distribution, and CPU
+    architecture as the PostgreSQL container image used by your cluster, to ensure
+    compatibility and prevent runtime issues.
+
+## How to add a new extension
+
+Adding an extension to a database in CloudNativePG involves a few steps:
+
+1. Define the extension image in the `Cluster` resource so that PostgreSQL can
+   discover and load it.
+2. Add the library to [`shared_preload_libraries`](postgresql_conf.md#shared-preload-libraries)
+   if the extension requires it.
+3. Declare the extension in the `Database` resource where you want it
+   installed, if the extension supports `CREATE EXTENSION`.
+
+For illustration purposes, this guide uses a simple, fictitious extension named
+`foo` that supports `CREATE EXTENSION`.
+
+### Adding a new extension to a `Cluster` resource
+
+You can add an `ImageVolume`-based extension to a `Cluster` using the
+`.spec.postgresql.extensions` stanza. For example:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: foo-18
+spec:
+  # ...
+  postgresql:
+    extensions:
+      - name: foo
+        image:
+          reference: # registry path for your extension image
+  # ...
+```
+
+The `name` field is **mandatory** and **must be unique within the cluster**, as
+it determines the mount path (`/extensions/foo` in this example). It must
+consist of *lowercase alphanumeric characters or hyphens (`-`)* and must start
+and end with an alphanumeric character.
+
+The `image` stanza follows the [Kubernetes `ImageVolume` API](https://kubernetes.io/docs/tasks/configure-pod-container/image-volumes/).
+The `reference` must point to a valid container registry path for the extension
+image.
+
+!!! Important
+    When a new extension is added to a running `Cluster`, CloudNativePG will
+    automatically trigger a [rolling update](rolling_update.md) to attach the new
+    image volume to each pod. Before adding a new extension in production,
+    ensure you have thoroughly tested it in a staging environment to prevent
+    configuration issues that could leave your PostgreSQL cluster in an unhealthy
+    state.
+
+Once mounted, CloudNativePG will automatically configure PostgreSQL by appending:
+
+- `/extensions/foo/share` to `extension_control_path`
+- `/extensions/foo/lib` to `dynamic_library_path`
+
+This ensures that the PostgreSQL container is ready to serve the `foo`
+extension when requested by a database, as described in the next section.
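+
+For illustration, assuming `foo` is the only extension defined and neither of
+these GUCs is set manually through `postgresql.parameters`, you can verify the
+resulting paths from a `psql` session. This is a sketch: the exact values
+depend on your cluster specification.
+
+```sql
+SHOW extension_control_path;
+-- expected: $system:/extensions/foo/share
+SHOW dynamic_library_path;
+-- expected: $libdir:/extensions/foo/lib
+```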
+The `CREATE EXTENSION foo` command, triggered automatically during the
+[reconciliation of the `Database` resource](declarative_database_management.md#managing-extensions-in-a-database),
+will work without additional configuration, as PostgreSQL will locate:
+
+- the extension control file at `/extensions/foo/share/extension/foo.control`
+- the shared library at `/extensions/foo/lib/foo.so`
+
+### Adding a new extension to a `Database` resource
+
+Once the extension is available in the PostgreSQL instance, you can leverage
+declarative databases to [manage the lifecycle of your extensions](declarative_database_management.md#managing-extensions-in-a-database)
+within the target database.
+
+Continuing with the `foo` example, you can request the installation of the
+`foo` extension in the `app` database of the `foo-18` cluster using the
+following resource definition:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Database
+metadata:
+  name: foo-app
+spec:
+  name: app
+  owner: app
+  cluster:
+    name: foo-18
+  extensions:
+    - name: foo
+      version: 1.0
+```
+
+CloudNativePG will automatically reconcile this resource, executing the
+`CREATE EXTENSION foo` command inside the `app` database if it is not
+already installed, ensuring your desired state is maintained without manual
+intervention.
+
+## Advanced Topics
+
+In some cases, the default expected structure may be insufficient for your
+extension image, particularly when:
+
+- The extension requires additional system libraries.
+- Multiple extensions are bundled in the same image.
+- The image uses a custom directory structure.
+
+Following the *"convention over configuration"* paradigm, CloudNativePG allows
+you to finely control the configuration of each extension image through the
+following fields:
+
+- `extension_control_path`: A list of relative paths within the container image
+  to be appended to PostgreSQL’s `extension_control_path`, allowing it to
+  locate extension control files.
+- `dynamic_library_path`: A list of relative paths within the container image
+  to be appended to PostgreSQL’s `dynamic_library_path`, enabling it to locate
+  shared library files for extensions.
+- `ld_library_path`: A list of relative paths within the container image to be
+  appended to the `LD_LIBRARY_PATH` environment variable of the instance
+  manager process, allowing PostgreSQL to locate required system libraries at
+  runtime.
+
+This flexibility enables you to support complex or non-standard extension
+images while maintaining clarity and predictability.
+
+### Setting Custom Paths
+
+If your extension image does not use the default `lib` and `share` directories
+for its libraries and control files, you can override the defaults by
+explicitly setting `extension_control_path` and `dynamic_library_path`.
+
+For example:
+
+```yaml
+spec:
+  postgresql:
+    extensions:
+      - name: my-extension
+        extension_control_path:
+          - my/share/path
+        dynamic_library_path:
+          - my/lib/path
+        image:
+          reference: # registry path for your extension image
+```
+
+CloudNativePG will configure PostgreSQL with:
+
+- `/extensions/my-extension/my/share/path` appended to `extension_control_path`
+- `/extensions/my-extension/my/lib/path` appended to `dynamic_library_path`
+
+This allows PostgreSQL to discover your extension’s control files and shared
+libraries correctly, even with a non-standard layout.
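+
+As a cross-check, and assuming `my-extension` is the only extension defined
+with neither GUC set through `postgresql.parameters`, the merged values would
+be equivalent to the following sketch:
+
+```sql
+SHOW extension_control_path;
+-- expected: $system:/extensions/my-extension/my/share/path
+SHOW dynamic_library_path;
+-- expected: $libdir:/extensions/my-extension/my/lib/path
+```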
+
+### Multi-extension Images
+
+You may need to include multiple extensions within the same container image,
+adopting a structure where each extension’s files reside in their own
+subdirectory.
+
+For example, to package PostGIS and pgRouting together in a single image, each
+in its own subdirectory:
+
+```yaml
+# ...
+spec:
+  # ...
+  postgresql:
+    extensions:
+      - name: geospatial
+        extension_control_path:
+          - postgis/share
+          - pgrouting/share
+        dynamic_library_path:
+          - postgis/lib
+          - pgrouting/lib
+        # ...
+        image:
+          reference: # registry path for your geospatial image
+        # ...
+      # ...
+  # ...
+```
+
+### Including System Libraries
+
+Some extensions, such as PostGIS, require system libraries that may not be
+present in the base PostgreSQL image. To support these requirements, you can
+package the necessary libraries within your extension container image and make
+them available to PostgreSQL using the `ld_library_path` field.
+
+For example, if your extension image includes a `system` directory with the
+required libraries:
+
+```yaml
+# ...
+spec:
+  # ...
+  postgresql:
+    extensions:
+      - name: postgis
+        # ...
+        ld_library_path:
+          - system
+        image:
+          reference: # registry path for your PostGIS image
+        # ...
+      # ...
+  # ...
+```
+
+CloudNativePG will set the `LD_LIBRARY_PATH` environment variable to include
+`/extensions/postgis/system`, allowing PostgreSQL to locate and load these
+system libraries at runtime.
+
+!!! Important
+    Since `ld_library_path` must be set when the PostgreSQL process starts,
+    changing this value requires a **cluster restart** for the new value to take effect.
+    CloudNativePG does not currently trigger this restart automatically; you will need to
+    manually restart the cluster (e.g., using `cnpg restart`) after modifying `ld_library_path`.
+
+## Image Specifications
+
+A standard extension container image for CloudNativePG includes two
+required directories at its root:
+
+- `share`: contains the extension control file (e.g., `<extension>.control`)
+  and any SQL files.
+- `lib`: contains the extension's shared library (e.g., `<extension>.so`) and
+  any additional required libraries.
+
+Following this structure ensures that the extension will be automatically
+discoverable and usable by PostgreSQL within CloudNativePG without requiring
+manual configuration.
+
+!!! Important
+    We encourage PostgreSQL extension developers to publish OCI-compliant extension
+    images following this layout as part of their artifact distribution, making
+    their extensions easily consumable within Kubernetes environments.
+    Ideally, extension images should target a specific operating system
+    distribution and architecture, be tied to a particular PostgreSQL version, and
+    be built using the distribution’s native packaging system (for example, using
+    Debian or RPM packages). This approach ensures consistency, security, and
+    compatibility with the PostgreSQL images used in your clusters.
+
+## Caveats
+
+Currently, adding, removing, or updating an extension image triggers a
+restart of the PostgreSQL pods. This behavior is inherited from how
+[image volumes](https://kubernetes.io/docs/tasks/configure-pod-container/image-volumes/)
+work in Kubernetes.
+
+Before performing an extension update, ensure you have:
+
+- Thoroughly tested the update process in a staging environment.
+- Verified that the extension image contains the required upgrade path between
+  the currently installed version and the target version.
+- Updated the `version` field for the extension in the relevant `Database` + resource definition to align with the new version in the image. + +These steps help prevent downtime or data inconsistencies in your PostgreSQL +clusters during extension updates. diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 138c9115f7..52e22b1144 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -174,6 +174,15 @@ EOF done fi + # Enable ImageVolume support from kindest/node v1.33.1 + if [[ "$(printf '%s\n' "1.33.1" "${k8s_version#v}" | sort -V | head -n1)" == "1.33.1" ]]; then + cat >>"${config_file}" <<-EOF + +featureGates: + ImageVolume: true +EOF + fi + # Add containerdConfigPatches section cat >>"${config_file}" <<-EOF diff --git a/internal/cmd/manager/instance/join/cmd.go b/internal/cmd/manager/instance/join/cmd.go index a82779e4d4..dbea010f2c 100644 --- a/internal/cmd/manager/instance/join/cmd.go +++ b/internal/cmd/manager/instance/join/cmd.go @@ -116,6 +116,7 @@ func joinSubCommand(ctx context.Context, instance *postgres.Instance, info postg contextLogger.Error(err, "Error while getting cluster") return err } + instance.Cluster = &cluster if _, err := instancecertificate.NewReconciler(client, instance).RefreshSecrets(ctx, &cluster); err != nil { contextLogger.Error(err, "Error while refreshing secrets") diff --git a/internal/cmd/manager/instance/upgrade/execute/cmd.go b/internal/cmd/manager/instance/upgrade/execute/cmd.go index 418ce4f332..3ecb21482f 100644 --- a/internal/cmd/manager/instance/upgrade/execute/cmd.go +++ b/internal/cmd/manager/instance/upgrade/execute/cmd.go @@ -160,6 +160,7 @@ func (ui upgradeInfo) upgradeSubCommand(ctx context.Context, instance *postgres. contextLogger.Error(err, "Error while getting cluster") return err } + instance.Cluster = &cluster if _, err := instancecertificate.NewReconciler(client, instance).RefreshSecrets(ctx, &cluster); err != nil { return fmt.Errorf("error while downloading secrets: %w", err) diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 085c0c3406..454b9247ba 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -942,6 +942,7 @@ func (r *InstanceReconciler) reconcileInstance(cluster *apiv1.Cluster) { r.instance.MaxStopDelay = cluster.GetMaxStopDelay() r.instance.SmartStopDelay = cluster.GetSmartShutdownTimeout() r.instance.RequiresDesignatedPrimaryTransition = detectRequiresDesignatedPrimaryTransition() + r.instance.Cluster = cluster } // PostgreSQLAutoConfWritable reconciles the permissions bit of `postgresql.auto.conf` diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index e9fd767866..72d1009ebf 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -220,6 +220,7 @@ func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.Error v.validatePromotionToken, v.validatePluginConfiguration, v.validateLivenessPingerProbe, + v.validateExtensions, } for _, validate := range validations { @@ -2630,3 +2631,67 @@ func (v *ClusterCustomValidator) validateLivenessPingerProbe(r *apiv1.Cluster) f return nil } + +func (v *ClusterCustomValidator) validateExtensions(r *apiv1.Cluster) field.ErrorList { + ensureNotEmptyOrDuplicate := func(path *field.Path, list *stringset.Data, value string) *field.Error { + if value == "" { + return field.Invalid( + path, + value, + "value cannot be 
empty", + ) + } + + if list.Has(value) { + return field.Duplicate( + path, + value, + ) + } + return nil + } + + if len(r.Spec.PostgresConfiguration.Extensions) == 0 { + return nil + } + + var result field.ErrorList + + extensionNames := stringset.New() + + for i, v := range r.Spec.PostgresConfiguration.Extensions { + basePath := field.NewPath("spec", "postgresql", "extensions").Index(i) + if nameErr := ensureNotEmptyOrDuplicate(basePath.Child("name"), extensionNames, v.Name); nameErr != nil { + result = append(result, nameErr) + } + extensionNames.Put(v.Name) + + controlPaths := stringset.New() + for j, path := range v.ExtensionControlPath { + if validateErr := ensureNotEmptyOrDuplicate( + basePath.Child("extension_control_path").Index(j), + controlPaths, + path, + ); validateErr != nil { + result = append(result, validateErr) + } + + controlPaths.Put(path) + } + + libraryPaths := stringset.New() + for j, path := range v.DynamicLibraryPath { + if validateErr := ensureNotEmptyOrDuplicate( + basePath.Child("dynamic_library_path").Index(j), + libraryPaths, + path, + ); validateErr != nil { + result = append(result, validateErr) + } + + libraryPaths.Put(path) + } + } + + return result +} diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index 3c62c70019..f858f112ae 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -5258,7 +5258,7 @@ var _ = Describe("validatePluginConfiguration", func() { }) }) -var _ = Describe("", func() { +var _ = Describe("liveness probe validation", func() { var v *ClusterCustomValidator BeforeEach(func() { v = &ClusterCustomValidator{} @@ -5298,6 +5298,230 @@ var _ = Describe("", func() { }) }) +var _ = Describe("validateExtensions", func() { + var v *ClusterCustomValidator + BeforeEach(func() { + v = &ClusterCustomValidator{} + }) + + It("returns no error when extensions are not specified", func() { + cluster := &apiv1.Cluster{} + Expect(v.validateExtensions(cluster)).To(BeEmpty()) + }) + + It("returns no error if the specified extensions are unique", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "extOne", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + }, + { + Name: "extTwo", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + }, + }, + }, + }, + } + + Expect(v.validateExtensions(cluster)).To(BeEmpty()) + }) + + It("returns an error per duplicate extension name", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "extOne", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + }, + { + Name: "extTwo", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + }, + { + Name: "extTwo", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extTwo:1", + }, + }, + { + Name: "extOne", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne:1", + }, + }, + }, + }, + }, + } + + err := v.validateExtensions(cluster) + Expect(err).To(HaveLen(2)) + Expect(err[0].BadValue).To(Equal("extTwo")) + Expect(err[1].BadValue).To(Equal("extOne")) + }) + + It("returns multiple errors for both invalid ExtensionControlPath and DynamicLibraryPath", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + 
PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "extOne", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + ExtensionControlPath: []string{ + "/valid/path", + "", + }, + DynamicLibraryPath: []string{ + "", + "/valid/lib/path", + }, + }, + }, + }, + }, + } + + err := v.validateExtensions(cluster) + Expect(err).To(HaveLen(2)) + Expect(err[0].Field).To(ContainSubstring("extensions[0].extension_control_path[1]")) + Expect(err[1].Field).To(ContainSubstring("extensions[0].dynamic_library_path[0]")) + }) + + It("returns no error when ExtensionControlPath and DynamicLibraryPath are valid", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "extOne", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + ExtensionControlPath: []string{ + "/usr/share/postgresql/extension", + "/opt/custom/extensions", + }, + DynamicLibraryPath: []string{ + "/usr/lib/postgresql/lib", + "/opt/custom/lib", + }, + }, + }, + }, + }, + } + + Expect(v.validateExtensions(cluster)).To(BeEmpty()) + }) + + It("returns errors for duplicate ExtensionControlPath entries", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "extOne", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + ExtensionControlPath: []string{ + "/usr/share/postgresql/extension", + "/opt/custom/extensions", + "/usr/share/postgresql/extension", // duplicate + }, + }, + }, + }, + }, + } + + err := v.validateExtensions(cluster) + Expect(err).To(HaveLen(1)) + Expect(err[0].Type).To(Equal(field.ErrorTypeDuplicate)) + Expect(err[0].Field).To(ContainSubstring("extensions[0].extension_control_path[2]")) + Expect(err[0].BadValue).To(Equal("/usr/share/postgresql/extension")) + }) + + It("returns errors for duplicate DynamicLibraryPath entries", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "extOne", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + DynamicLibraryPath: []string{ + "/usr/lib/postgresql/lib", + "/opt/custom/lib", + "/usr/lib/postgresql/lib", + }, + }, + }, + }, + }, + } + + err := v.validateExtensions(cluster) + Expect(err).To(HaveLen(1)) + Expect(err[0].Type).To(Equal(field.ErrorTypeDuplicate)) + Expect(err[0].Field).To(ContainSubstring("extensions[0].dynamic_library_path[2]")) + Expect(err[0].BadValue).To(Equal("/usr/lib/postgresql/lib")) + }) + + It("returns errors for duplicates in both ExtensionControlPath and DynamicLibraryPath", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "extOne", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "extOne", + }, + ExtensionControlPath: []string{ + "/usr/share/postgresql/extension", + "/usr/share/postgresql/extension", + }, + DynamicLibraryPath: []string{ + "/usr/lib/postgresql/lib", + "/usr/lib/postgresql/lib", + }, + }, + }, + }, + }, + } + + err := v.validateExtensions(cluster) + Expect(err).To(HaveLen(2)) + + Expect(err[0].Type).To(Equal(field.ErrorTypeDuplicate)) + Expect(err[0].BadValue).To(Equal("/usr/share/postgresql/extension")) + + 
Expect(err[1].Type).To(Equal(field.ErrorTypeDuplicate)) + Expect(err[1].BadValue).To(Equal("/usr/lib/postgresql/lib")) + }) +}) + var _ = Describe("getInTreeBarmanWarnings", func() { It("returns no warnings when BarmanObjectStore is not configured", func() { cluster := &apiv1.Cluster{ diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 1445e08600..3d5775884a 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -426,6 +426,18 @@ func createPostgresqlConfiguration( } sort.Strings(info.TemporaryTablespaces) + // Set additional extensions + for _, extension := range cluster.Spec.PostgresConfiguration.Extensions { + info.AdditionalExtensions = append( + info.AdditionalExtensions, + postgres.AdditionalExtensionConfiguration{ + Name: extension.Name, + ExtensionControlPath: extension.ExtensionControlPath, + DynamicLibraryPath: extension.DynamicLibraryPath, + }, + ) + } + // Setup minimum replay delay if we're on a replica cluster if cluster.IsReplica() && cluster.Spec.ReplicaCluster.MinApplyDelay != nil { info.RecoveryMinApplyDelay = cluster.Spec.ReplicaCluster.MinApplyDelay.Duration diff --git a/pkg/management/postgres/initdb.go b/pkg/management/postgres/initdb.go index 21d250fd68..5951e2c50f 100644 --- a/pkg/management/postgres/initdb.go +++ b/pkg/management/postgres/initdb.go @@ -168,7 +168,7 @@ func (info InitInfo) EnsureTargetDirectoriesDoNotExist(ctx context.Context) erro return nil } - out, err := info.GetInstance().GetPgControldata() + out, err := info.GetInstance(nil).GetPgControldata() if err == nil { contextLogger.Info("pg_controldata check on existing directory succeeded, renaming the folders", "out", out) return info.renameExistingTargetDataDirectories(ctx, pgWalExists) @@ -295,10 +295,11 @@ func (info InitInfo) CreateDataDirectory() error { } // GetInstance gets the PostgreSQL instance which correspond to these init information -func (info InitInfo) GetInstance() *Instance { +func (info InitInfo) GetInstance(cluster *apiv1.Cluster) *Instance { postgresInstance := NewInstance() postgresInstance.PgData = info.PgData postgresInstance.StartupOptions = []string{"listen_addresses='127.0.0.1'"} + postgresInstance.Cluster = cluster return postgresInstance } @@ -477,7 +478,7 @@ func (info InitInfo) Bootstrap(ctx context.Context) error { return err } - instance := info.GetInstance() + instance := info.GetInstance(cluster) // Detect an initdb bootstrap with import isImportBootstrap := cluster.Spec.Bootstrap != nil && diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 58d768f83c..0011a67672 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -38,6 +38,7 @@ import ( "time" "github.com/blang/semver" + "github.com/cloudnative-pg/machinery/pkg/envmap" "github.com/cloudnative-pg/machinery/pkg/execlog" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/fileutils/compatibility" @@ -223,6 +224,9 @@ type Instance struct { MetricsPortTLS bool serverCertificateHandler serverCertificateHandler + + // Cluster is the cluster this instance belongs to + Cluster *apiv1.Cluster } type serverCertificateHandler struct { @@ -723,16 +727,56 @@ func (instance *Instance) Run() (*execlog.StreamingCmd, error) { return streamingCmd, nil } +// buildPostgresEnv builds the environment variables that should be used by PostgreSQL +// to run the main process, taking care of adding any library path that 
is needed for +// extensions. func (instance *Instance) buildPostgresEnv() []string { env := instance.Env if env == nil { env = os.Environ() } - env = append(env, - "PG_OOM_ADJUST_FILE=/proc/self/oom_score_adj", - "PG_OOM_ADJUST_VALUE=0", - ) - return env + envMap, _ := envmap.Parse(env) + envMap["PG_OOM_ADJUST_FILE"] = "/proc/self/oom_score_adj" + envMap["PG_OOM_ADJUST_VALUE"] = "0" + + if instance.Cluster == nil { + return envMap.StringSlice() + } + + // If there are no additional library paths, we use the environment variables + // of the current process + additionalLibraryPaths := collectLibraryPaths(instance.Cluster.Spec.PostgresConfiguration.Extensions) + if len(additionalLibraryPaths) == 0 { + return envMap.StringSlice() + } + + // We add the additional library paths after the entries that are already + // available. + currentLibraryPath := envMap["LD_LIBRARY_PATH"] + if currentLibraryPath != "" { + currentLibraryPath += ":" + } + currentLibraryPath += strings.Join(additionalLibraryPaths, ":") + envMap["LD_LIBRARY_PATH"] = currentLibraryPath + + return envMap.StringSlice() +} + +// collectLibraryPaths returns a list of PATHS which should be added to LD_LIBRARY_PATH +// given an extension +func collectLibraryPaths(extensionList []apiv1.ExtensionConfiguration) []string { + result := make([]string, 0, len(extensionList)) + + for _, extension := range extensionList { + for _, libraryPath := range extension.LdLibraryPath { + result = append( + result, + filepath.Join(postgres.ExtensionsBaseDirectory, extension.Name, libraryPath), + ) + } + } + + return result } // WithActiveInstance execute the internal function while this diff --git a/pkg/management/postgres/instance_test.go b/pkg/management/postgres/instance_test.go index 8aade97331..5801cc3c4c 100644 --- a/pkg/management/postgres/instance_test.go +++ b/pkg/management/postgres/instance_test.go @@ -24,8 +24,11 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/cloudnative-pg/machinery/pkg/fileutils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" @@ -266,3 +269,95 @@ var _ = Describe("ALTER SYSTEM enable and disable in PostgreSQL <17", func() { Expect(info.Mode()).To(BeEquivalentTo(0o400)) }) }) + +var _ = Describe("buildPostgresEnv", func() { + var cluster apiv1.Cluster + var instance Instance + + BeforeEach(func() { + err := os.Unsetenv("LD_LIBRARY_PATH") + Expect(err).ToNot(HaveOccurred()) + + cluster = apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "foo", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "foo:dev", + }, + }, + { + Name: "bar", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "bar:dev", + }, + }, + }, + }, + }, + } + instance.Cluster = &cluster + }) + + Context("Extensions enabled, LD_LIBRARY_PATH undefined", func() { + It("should be empty by default", func() { + ldLibraryPath := getLibraryPathFromEnv(instance.buildPostgresEnv()) + Expect(ldLibraryPath).To(BeEmpty()) + }) + }) + + Context("Extensions enabled, LD_LIBRARY_PATH defined", func() { + const ( + path1 = postgres.ExtensionsBaseDirectory + "/foo/syslib" + path2 = postgres.ExtensionsBaseDirectory + "/foo/sample" + path3 = postgres.ExtensionsBaseDirectory + "/bar/syslib" + path4 = 
postgres.ExtensionsBaseDirectory + "/bar/sample" + ) + finalPaths := strings.Join([]string{path1, path2, path3, path4}, ":") + + BeforeEach(func() { + cluster.Spec.PostgresConfiguration.Extensions[0].LdLibraryPath = []string{"/syslib", "sample/"} + cluster.Spec.PostgresConfiguration.Extensions[1].LdLibraryPath = []string{"./syslib", "./sample/"} + }) + + It("should be defined", func() { + ldLibraryPath := getLibraryPathFromEnv(instance.buildPostgresEnv()) + Expect(ldLibraryPath).To(Equal(fmt.Sprintf("LD_LIBRARY_PATH=%s", finalPaths))) + }) + It("should retain existing values", func() { + GinkgoT().Setenv("LD_LIBRARY_PATH", "/my/library/path") + + ldLibraryPath := getLibraryPathFromEnv(instance.buildPostgresEnv()) + Expect(ldLibraryPath).To(BeEquivalentTo(fmt.Sprintf("LD_LIBRARY_PATH=/my/library/path:%s", finalPaths))) + }) + }) + + Context("Extensions disabled", func() { + BeforeEach(func() { + cluster.Spec.PostgresConfiguration.Extensions = []apiv1.ExtensionConfiguration{} + }) + It("LD_LIBRARY_PATH should be empty", func() { + ldLibraryPath := getLibraryPathFromEnv(instance.buildPostgresEnv()) + Expect(ldLibraryPath).To(BeEmpty()) + }) + }) +}) + +func getLibraryPathFromEnv(envs []string) string { + var ldLibraryPath string + + for i := len(envs) - 1; i >= 0; i-- { + if strings.HasPrefix(envs[i], "LD_LIBRARY_PATH=") { + ldLibraryPath = envs[i] + break + } + } + + return ldLibraryPath +} diff --git a/pkg/management/postgres/restore.go b/pkg/management/postgres/restore.go index cf053adbdc..affdeb6197 100644 --- a/pkg/management/postgres/restore.go +++ b/pkg/management/postgres/restore.go @@ -108,10 +108,10 @@ func (info InitInfo) RestoreSnapshot(ctx context.Context, cli client.Client, imm // We're creating a new replica of an existing cluster, and the PVCs // have been initialized by a set of VolumeSnapshots. if immediate { - // If the instance will start as a primary, we will enter in the + // If the instance starts as a primary, we will enter in the // same logic attaching an old primary back after a failover. // We don't need that as this instance has never diverged. - if err := info.GetInstance().Demote(ctx, cluster); err != nil { + if err := info.GetInstance(nil).Demote(ctx, cluster); err != nil { return fmt.Errorf("error while demoting the instance: %w", err) } return nil @@ -183,7 +183,7 @@ func (info InitInfo) concludeRestore( // we recover from a base which has postgresql.auto.conf // the override.conf and include statement is present, what we need to do is to // migrate the content - if _, err := info.GetInstance().migratePostgresAutoConfFile(ctx); err != nil { + if _, err := info.GetInstance(cluster).migratePostgresAutoConfFile(ctx); err != nil { return err } if cluster.IsReplica() { @@ -836,7 +836,7 @@ func (info InitInfo) WriteInitialPostgresqlConf(ctx context.Context, cluster *ap return fmt.Errorf("while creating a temporary data directory: %w", err) } - temporaryInstance := temporaryInitInfo.GetInstance(). + temporaryInstance := temporaryInitInfo.GetInstance(cluster). WithNamespace(info.Namespace). 
WithClusterName(info.ClusterName) @@ -903,7 +903,7 @@ func (info InitInfo) WriteRestoreHbaConf(ctx context.Context) error { } // Create only the local map referred in the HBA configuration - _, err = info.GetInstance().RefreshPGIdent(ctx, nil) + _, err = info.GetInstance(nil).RefreshPGIdent(ctx, nil) return err } @@ -914,7 +914,7 @@ func (info InitInfo) WriteRestoreHbaConf(ctx context.Context) error { func (info InitInfo) ConfigureInstanceAfterRestore(ctx context.Context, cluster *apiv1.Cluster, env []string) error { contextLogger := log.FromContext(ctx) - instance := info.GetInstance() + instance := info.GetInstance(cluster) instance.Env = env if err := instance.VerifyPgDataCoherence(ctx); err != nil { diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 118dcd5cde..b5259c2616 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -23,7 +23,10 @@ import ( "bytes" "crypto/sha256" "fmt" + "iter" "math" + "path/filepath" + "slices" "sort" "strings" "text/template" @@ -245,6 +248,15 @@ local {{.Username}} postgres // SynchronousStandbyNames is the postgresql parameter key for synchronous standbys SynchronousStandbyNames = "synchronous_standby_names" + + // ExtensionControlPath is the postgresql parameter key for extension_control_path + ExtensionControlPath = "extension_control_path" + + // DynamicLibraryPath is the postgresql parameter key dynamic_library_path + DynamicLibraryPath = "dynamic_library_path" + + // ExtensionsBaseDirectory is the base directory to store ImageVolume Extensions + ExtensionsBaseDirectory = "/extensions" ) // hbaTemplate is the template used to create the HBA configuration @@ -337,6 +349,9 @@ type ConfigurationInfo struct { // Minimum apply delay of transaction RecoveryMinApplyDelay time.Duration + + // The list of additional extensions to be loaded into the PostgreSQL configuration + AdditionalExtensions []AdditionalExtensionConfiguration } // getAlterSystemEnabledValue returns a config compatible value for IsAlterSystemEnabled @@ -652,7 +667,7 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { ignoreFixedSettingsFromUser := info.IncludingMandatory || !info.PreserveFixedSettingsFromUser // Set all the default settings - setDefaultConfigurations(info, configuration) + configuration.setDefaultConfigurations(info) // Apply all the values from the user, overriding defaults, // ignoring those which are fixed if ignoreFixedSettingsFromUser is true @@ -728,10 +743,10 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { if info.IncludingSharedPreloadLibraries { // Set all managed shared preload libraries - setManagedSharedPreloadLibraries(info, configuration) + configuration.setManagedSharedPreloadLibraries(info) // Set all user provided shared preload libraries - setUserSharedPreloadLibraries(info, configuration) + configuration.setUserSharedPreloadLibraries(info) } // Apply the list of temporary tablespaces @@ -739,33 +754,39 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration { configuration.OverwriteConfig("temp_tablespaces", strings.Join(info.TemporaryTablespaces, ",")) } + // Setup additional extensions + if len(info.AdditionalExtensions) > 0 { + configuration.setExtensionControlPath(info) + configuration.setDynamicLibraryPath(info) + } + return configuration } // setDefaultConfigurations sets all default configurations into the configuration map // from the provided info -func setDefaultConfigurations(info ConfigurationInfo, 
configuration *PgConfiguration) { +func (p *PgConfiguration) setDefaultConfigurations(info ConfigurationInfo) { // start from the global default settings for key, value := range info.Settings.GlobalDefaultSettings { - configuration.OverwriteConfig(key, value) + p.OverwriteConfig(key, value) } // apply settings relative to a certain PostgreSQL version for constraints, settings := range info.Settings.DefaultSettings { if constraints.Min <= info.MajorVersion && info.MajorVersion < constraints.Max { for key, value := range settings { - configuration.OverwriteConfig(key, value) + p.OverwriteConfig(key, value) } } } } // setManagedSharedPreloadLibraries sets all additional preloaded libraries -func setManagedSharedPreloadLibraries(info ConfigurationInfo, configuration *PgConfiguration) { +func (p *PgConfiguration) setManagedSharedPreloadLibraries(info ConfigurationInfo) { for _, extension := range ManagedExtensions { if extension.IsUsed(info.UserSettings) { for _, library := range extension.SharedPreloadLibraries { - configuration.AddSharedPreloadLibrary(library) + p.AddSharedPreloadLibrary(library) } } } @@ -775,8 +796,8 @@ func setManagedSharedPreloadLibraries(info ConfigurationInfo, configuration *PgC // The resulting list will have all the user provided libraries, followed by all the ones managed // by the operator, removing any duplicate and keeping the first occurrence in case of duplicates. // Therefore the user provided order is preserved, if an overlap (with the ones already present) happens -func setUserSharedPreloadLibraries(info ConfigurationInfo, configuration *PgConfiguration) { - oldLibraries := strings.Split(configuration.GetConfig(SharedPreloadLibraries), ",") +func (p *PgConfiguration) setUserSharedPreloadLibraries(info ConfigurationInfo) { + oldLibraries := strings.Split(p.GetConfig(SharedPreloadLibraries), ",") dedupedLibraries := make(map[string]bool, len(oldLibraries)+len(info.AdditionalSharedPreloadLibraries)) var libraries []string for _, library := range append(info.AdditionalSharedPreloadLibraries, oldLibraries...) { @@ -790,7 +811,7 @@ func setUserSharedPreloadLibraries(info ConfigurationInfo, configuration *PgConf } } if len(libraries) > 0 { - configuration.OverwriteConfig(SharedPreloadLibraries, strings.Join(libraries, ",")) + p.OverwriteConfig(SharedPreloadLibraries, strings.Join(libraries, ",")) } } @@ -821,3 +842,99 @@ func CreatePostgresqlConfFile(configuration *PgConfiguration) (string, string) { func escapePostgresConfValue(value string) string { return fmt.Sprintf("'%v'", strings.ReplaceAll(value, "'", "''")) } + +// AdditionalExtensionConfiguration is the configuration for an Extension added via ImageVolume +type AdditionalExtensionConfiguration struct { + // The name of the Extension + Name string + + // The list of directories that should be added to ExtensionControlPath. + ExtensionControlPath []string + + // The list of directories that should be added to DynamicLibraryPath. 
+ DynamicLibraryPath []string +} + +// absolutizePaths returns an iterator over the passed paths, absolutized +// using the name of the extension +func (ext *AdditionalExtensionConfiguration) absolutizePaths(paths []string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, path := range paths { + if !yield(filepath.Join(ExtensionsBaseDirectory, ext.Name, path)) { + break + } + } + } +} + +// getRuntimeExtensionControlPath collects the absolute directories to be put +// into the `extension_control_path` GUC to support this additional extension +func (ext *AdditionalExtensionConfiguration) getRuntimeExtensionControlPath() iter.Seq[string] { + paths := []string{"share"} + if len(ext.ExtensionControlPath) > 0 { + paths = ext.ExtensionControlPath + } + + return ext.absolutizePaths(paths) +} + +// getDynamicLibraryPath collects the absolute directories to be put +// into the `dynamic_library_path` GUC to support this additional extension +func (ext *AdditionalExtensionConfiguration) getDynamicLibraryPath() iter.Seq[string] { + paths := []string{"lib"} + if len(ext.DynamicLibraryPath) > 0 { + paths = ext.DynamicLibraryPath + } + + return ext.absolutizePaths(paths) +} + +// setExtensionControlPath manages the `extension_control_path` GUC, merging +// the paths defined by the user with the ones provided by the +// `.spec.postgresql.extensions` stanza +func (p *PgConfiguration) setExtensionControlPath(info ConfigurationInfo) { + extensionControlPath := []string{"$system"} + + for _, extension := range info.AdditionalExtensions { + extensionControlPath = slices.AppendSeq( + extensionControlPath, + extension.getRuntimeExtensionControlPath(), + ) + } + + extensionControlPath = slices.AppendSeq( + extensionControlPath, + strings.SplitSeq(p.GetConfig(ExtensionControlPath), ":"), + ) + + extensionControlPath = slices.DeleteFunc( + extensionControlPath, + func(s string) bool { return s == "" }, + ) + + p.OverwriteConfig(ExtensionControlPath, strings.Join(extensionControlPath, ":")) +} + +// setDynamicLibraryPath manages the `dynamic_library_path` GUC, merging the +// paths defined by the user with the ones provided by the +// `.spec.postgresql.extensions` stanza +func (p *PgConfiguration) setDynamicLibraryPath(info ConfigurationInfo) { + dynamicLibraryPath := []string{"$libdir"} + + for _, extension := range info.AdditionalExtensions { + dynamicLibraryPath = slices.AppendSeq( + dynamicLibraryPath, + extension.getDynamicLibraryPath()) + } + + dynamicLibraryPath = slices.AppendSeq( + dynamicLibraryPath, + strings.SplitSeq(p.GetConfig(DynamicLibraryPath), ":")) + + dynamicLibraryPath = slices.DeleteFunc( + dynamicLibraryPath, + func(s string) bool { return s == "" }, + ) + + p.OverwriteConfig(DynamicLibraryPath, strings.Join(dynamicLibraryPath, ":")) +} diff --git a/pkg/postgres/configuration_test.go b/pkg/postgres/configuration_test.go index 415c896de7..e6366d292e 100644 --- a/pkg/postgres/configuration_test.go +++ b/pkg/postgres/configuration_test.go @@ -391,3 +391,105 @@ var _ = Describe("recovery_min_apply_delay", func() { Expect(config.GetConfig(ParameterRecoveryMinApplyDelay)).To(Equal("3600s")) }) }) + +var _ = Describe("PostgreSQL Extensions", func() { + Context("configuring extension_control_path and dynamic_library_path", func() { + const ( + share1 = ExtensionsBaseDirectory + "/postgis/share" + share2 = ExtensionsBaseDirectory + "/pgvector/share" + lib1 = ExtensionsBaseDirectory + "/postgis/lib" + lib2 = ExtensionsBaseDirectory + "/pgvector/lib" + ) + sharePaths := 
strings.Join([]string{share1, share2}, ":") + libPaths := strings.Join([]string{lib1, lib2}, ":") + + It("both empty when there are no Extensions defined", func() { + info := ConfigurationInfo{ + Settings: CnpgConfigurationSettings, + MajorVersion: 18, + IncludingMandatory: true, + } + config := CreatePostgresqlConfiguration(info) + Expect(config.GetConfig(ExtensionControlPath)).To(BeEmpty()) + Expect(config.GetConfig(DynamicLibraryPath)).To(BeEmpty()) + }) + + It("configures them when an Extension is defined", func() { + info := ConfigurationInfo{ + Settings: CnpgConfigurationSettings, + MajorVersion: 18, + IncludingMandatory: true, + AdditionalExtensions: []AdditionalExtensionConfiguration{ + { + Name: "postgis", + }, + { + Name: "pgvector", + }, + }, + } + config := CreatePostgresqlConfiguration(info) + Expect(config.GetConfig(ExtensionControlPath)).To(BeEquivalentTo("$system:" + sharePaths)) + Expect(config.GetConfig(DynamicLibraryPath)).To(BeEquivalentTo("$libdir:" + libPaths)) + }) + + It("correctly merges the configuration with UserSettings", func() { + info := ConfigurationInfo{ + Settings: CnpgConfigurationSettings, + MajorVersion: 18, + IncludingMandatory: true, + UserSettings: map[string]string{ + ExtensionControlPath: "/my/extension/path", + DynamicLibraryPath: "/my/library/path", + }, + AdditionalExtensions: []AdditionalExtensionConfiguration{ + { + Name: "postgis", + }, + { + Name: "pgvector", + }, + }, + } + config := CreatePostgresqlConfiguration(info) + Expect(config.GetConfig(ExtensionControlPath)).To(BeEquivalentTo("$system:" + sharePaths + ":/my/extension/path")) + Expect(config.GetConfig(DynamicLibraryPath)).To(BeEquivalentTo("$libdir:" + libPaths + ":/my/library/path")) + }) + + It("when custom paths are provided (multi-extension)", func() { + const ( + geoShare1 = ExtensionsBaseDirectory + "/geo/postgis/share" + geoShare2 = ExtensionsBaseDirectory + "/geo/pgrouting/share" + geoLib1 = ExtensionsBaseDirectory + "/geo/postgis/lib" + geoLib2 = ExtensionsBaseDirectory + "/geo/pgrouting/lib" + utilityShare1 = ExtensionsBaseDirectory + "/utility/pgaudit/share" + utilityShare2 = ExtensionsBaseDirectory + "/utility/pg-failover-slots/share" + utilityLib1 = ExtensionsBaseDirectory + "/utility/pgaudit/lib" + utilityLib2 = ExtensionsBaseDirectory + "/utility/pg-failover-slots/lib" + ) + sharePaths = strings.Join([]string{geoShare1, geoShare2, utilityShare1, utilityShare2}, ":") + libPaths = strings.Join([]string{geoLib1, geoLib2, utilityLib1, utilityLib2}, ":") + + info := ConfigurationInfo{ + Settings: CnpgConfigurationSettings, + MajorVersion: 18, + IncludingMandatory: true, + AdditionalExtensions: []AdditionalExtensionConfiguration{ + { + Name: "geo", + ExtensionControlPath: []string{"postgis/share", "./pgrouting/share"}, + DynamicLibraryPath: []string{"postgis/lib/", "/pgrouting/lib/"}, + }, + { + Name: "utility", + ExtensionControlPath: []string{"pgaudit/share", "./pg-failover-slots/share"}, + DynamicLibraryPath: []string{"pgaudit/lib/", "/pg-failover-slots/lib/"}, + }, + }, + } + config := CreatePostgresqlConfiguration(info) + Expect(config.GetConfig(ExtensionControlPath)).To(BeEquivalentTo("$system:" + sharePaths)) + Expect(config.GetConfig(DynamicLibraryPath)).To(BeEquivalentTo("$libdir:" + libPaths)) + }) + }) +}) diff --git a/pkg/specs/volumes.go b/pkg/specs/volumes.go index 4ac7dbddb9..f77ff6c023 100644 --- a/pkg/specs/volumes.go +++ b/pkg/specs/volumes.go @@ -22,6 +22,7 @@ package specs import ( "fmt" "path" + "path/filepath" "sort" "strings" @@ -147,6 +148,9 @@ func 
createPostgresVolumes(cluster *apiv1.Cluster, podName string) []corev1.Volu if cluster.ShouldCreateProjectedVolume() { result = append(result, createProjectedVolume(cluster)) } + + result = append(result, createExtensionVolumes(cluster)...) + return result } @@ -275,6 +279,9 @@ func CreatePostgresVolumeMounts(cluster apiv1.Cluster) []corev1.VolumeMount { ) } } + + volumeMounts = append(volumeMounts, createExtensionVolumeMounts(&cluster)...) + return volumeMounts } @@ -313,3 +320,33 @@ func createProjectedVolume(cluster *apiv1.Cluster) corev1.Volume { }, } } + +func createExtensionVolumes(cluster *apiv1.Cluster) []corev1.Volume { + extensionVolumes := make([]corev1.Volume, 0, len(cluster.Spec.PostgresConfiguration.Extensions)) + for _, extension := range cluster.Spec.PostgresConfiguration.Extensions { + extensionVolumes = append(extensionVolumes, + corev1.Volume{ + Name: extension.Name, + VolumeSource: corev1.VolumeSource{ + Image: &extension.ImageVolumeSource, + }, + }, + ) + } + + return extensionVolumes +} + +func createExtensionVolumeMounts(cluster *apiv1.Cluster) []corev1.VolumeMount { + extensionVolumeMounts := make([]corev1.VolumeMount, 0, len(cluster.Spec.PostgresConfiguration.Extensions)) + for _, extension := range cluster.Spec.PostgresConfiguration.Extensions { + extensionVolumeMounts = append(extensionVolumeMounts, + corev1.VolumeMount{ + Name: extension.Name, + MountPath: filepath.Join(postgres.ExtensionsBaseDirectory, extension.Name), + }, + ) + } + + return extensionVolumeMounts +} diff --git a/pkg/specs/volumes_test.go b/pkg/specs/volumes_test.go index 6370da9368..1cce8d6917 100644 --- a/pkg/specs/volumes_test.go +++ b/pkg/specs/volumes_test.go @@ -22,9 +22,11 @@ package specs import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -521,3 +523,78 @@ var _ = Describe("createEphemeralVolume", func() { Expect(*ephemeralVolume.VolumeSource.EmptyDir.SizeLimit).To(Equal(quantity)) }) }) + +var _ = Describe("ImageVolume Extensions", func() { + var cluster apiv1.Cluster + + BeforeEach(func() { + cluster = apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-example", + Namespace: "default", + }, + Spec: apiv1.ClusterSpec{ + PostgresConfiguration: apiv1.PostgresConfiguration{ + Extensions: []apiv1.ExtensionConfiguration{ + { + Name: "foo", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "foo:dev", + }, + }, + { + Name: "bar", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "bar:dev", + }, + }, + }, + }, + }, + } + }) + + Context("createExtensionVolumes", func() { + When("Extensions are disabled", func() { + It("shouldn't create Volumes", func() { + cluster.Spec.PostgresConfiguration.Extensions = []apiv1.ExtensionConfiguration{} + extensionVolumes := createExtensionVolumes(&cluster) + Expect(extensionVolumes).To(BeEmpty()) + }) + }) + When("Extensions are enabled", func() { + It("should create a Volume for each Extension", func() { + extensionVolumes := createExtensionVolumes(&cluster) + Expect(len(extensionVolumes)).To(BeEquivalentTo(2)) + Expect(extensionVolumes[0].Name).To(Equal("foo")) + Expect(extensionVolumes[0].VolumeSource.Image.Reference).To(Equal("foo:dev")) + Expect(extensionVolumes[1].Name).To(Equal("bar")) + Expect(extensionVolumes[1].VolumeSource.Image.Reference).To(Equal("bar:dev")) + }) + }) + }) + + Context("createExtensionVolumeMounts", func() { + When("Extensions are disabled", func() { + It("shouldn't create VolumeMounts", func() { + cluster.Spec.PostgresConfiguration.Extensions = []apiv1.ExtensionConfiguration{} + extensionVolumeMounts := createExtensionVolumeMounts(&cluster) + Expect(extensionVolumeMounts).To(BeEmpty()) + }) + }) + When("Extensions are enabled", func() { + It("should create a VolumeMount for each Extension", func() { + const ( + fooMountPath = postgres.ExtensionsBaseDirectory + "/foo" + barMountPath = postgres.ExtensionsBaseDirectory + "/bar" + ) + extensionVolumeMounts := createExtensionVolumeMounts(&cluster) + Expect(len(extensionVolumeMounts)).To(BeEquivalentTo(2)) + Expect(extensionVolumeMounts[0].Name).To(Equal("foo")) + Expect(extensionVolumeMounts[0].MountPath).To(Equal(fooMountPath)) + Expect(extensionVolumeMounts[1].Name).To(Equal("bar")) + Expect(extensionVolumeMounts[1].MountPath).To(Equal(barMountPath)) + }) + }) + }) +}) diff --git a/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template b/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template new file mode 100644 index 0000000000..332cc3ef3c --- /dev/null +++ b/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template @@ -0,0 +1,36 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postgresql-with-extensions +spec: + instances: 3 + + postgresql: + parameters: + max_connections: "110" + log_checkpoints: "on" + log_lock_waits: "on" + log_min_duration_statement: '1000' + log_statement: 'ddl' + log_temp_files: '1024' + log_autovacuum_min_duration: '1s' + log_replication_commands: 'on' + extensions: + - name: postgis + ld_library_path: + - system + image: + reference: ghcr.io/niccolofei/postgis:18beta1-master-bullseye # wokeignore:rule=master + + bootstrap: + initdb: + database: app + owner: app + + # Persistent storage configuration + storage: + storageClass: 
${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi + walStorage: + storageClass: ${E2E_DEFAULT_STORAGE_CLASS} + size: 1Gi diff --git a/tests/e2e/fixtures/imagevolume_extensions/database.yaml.template b/tests/e2e/fixtures/imagevolume_extensions/database.yaml.template new file mode 100644 index 0000000000..af3929cb07 --- /dev/null +++ b/tests/e2e/fixtures/imagevolume_extensions/database.yaml.template @@ -0,0 +1,26 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Database +metadata: + name: app +spec: + name: app + owner: app + cluster: + name: postgresql-with-extensions + extensions: + - name: postgis + ensure: present + - name: postgis_raster + ensure: present + - name: postgis_sfcgal + ensure: present + - name: fuzzystrmatch + ensure: present + - name: address_standardizer + ensure: present + - name: address_standardizer_data_us + ensure: present + - name: postgis_tiger_geocoder + ensure: present + - name: postgis_topology + ensure: present diff --git a/tests/e2e/imagevolume_extensions_test.go b/tests/e2e/imagevolume_extensions_test.go new file mode 100644 index 0000000000..51c4c32f4e --- /dev/null +++ b/tests/e2e/imagevolume_extensions_test.go @@ -0,0 +1,234 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package e2e + +import ( + "path/filepath" + "strings" + "time" + + "github.com/blang/semver" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/clusterutils" + postgresutils "github.com/cloudnative-pg/cloudnative-pg/tests/utils/postgres" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/timeouts" + "github.com/cloudnative-pg/cloudnative-pg/tests/utils/yaml" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ImageVolume Extensions", Label(tests.LabelPostgresConfiguration), func() { + const ( + clusterManifest = fixturesDir + "/imagevolume_extensions/cluster-with-extensions.yaml.template" + databaseManifest = fixturesDir + "/imagevolume_extensions/database.yaml.template" + namespacePrefix = "cluster-imagevolume-extensions" + level = tests.Low + ) + + BeforeEach(func() { + if testLevelEnv.Depth < int(level) { + Skip("Test depth is lower than the amount requested for this test") + } + if !IsLocal() { + Skip("This test is only run on local cluster") + } + if env.PostgresVersion < 18 { + Skip("This test is only run on PostgreSQL v18 or greater") + } + // Require K8S 1.33 or greater + versionInfo, err := env.Interface.Discovery().ServerVersion() + Expect(err).NotTo(HaveOccurred()) + currentVersion, err := semver.Parse(strings.TrimPrefix(versionInfo.String(), "v")) + Expect(err).NotTo(HaveOccurred()) + k8s133, err := semver.Parse("1.33.0") + Expect(err).NotTo(HaveOccurred()) + if currentVersion.LT(k8s133) { + Skip("This test runs only on Kubernetes 1.33 or greater") + } + }) + var namespace, clusterName, databaseName string + var err error + + assertVolumeMounts := func(podList *corev1.PodList, imageVolumeExtension string) { + found := false + mountPath := filepath.Join(postgres.ExtensionsBaseDirectory, imageVolumeExtension) + for _, pod := range podList.Items { + for _, volumeMount := range pod.Spec.Containers[0].VolumeMounts { + if volumeMount.Name == imageVolumeExtension && volumeMount.MountPath == mountPath { + found = true + } + } + } + Expect(found).To(BeTrue()) + } + + assertVolumes := func(podList *corev1.PodList, imageVolumeExtension string) { + found := false + for _, pod := range podList.Items { + for _, volume := range pod.Spec.Volumes { + if volume.Name == imageVolumeExtension && volume.Image.Reference != "" { + found = true + } + } + } + Expect(found).To(BeTrue()) + } + + assertExtensions := func(namespace, databaseName string) { + database := &apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseName, + } + Eventually(func(g Gomega) { + err := env.Client.Get(env.Ctx, databaseNamespacedName, database) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(database.Status.Applied).Should(HaveValue(BeTrue())) + g.Expect(database.Status.Message).Should(BeEmpty()) + for _, extension := range database.Status.Extensions { + Expect(extension.Applied).Should(HaveValue(BeTrue())) + Expect(extension.Message).Should(BeEmpty()) + } + }, 60).WithPolling(10 * time.Second).Should(Succeed()) + } + + assertPostgis := func(namespace, clusterName string) { + row, err := postgresutils.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, clusterName, postgresutils.AppDBName, + apiv1.ApplicationUserSecretSuffix, + "SELECT ST_AsText(geom) AS wkt, ST_Area(geom) AS area"+ + " FROM (SELECT ST_GeomFromText('POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))', 4326) AS geom) AS subquery;") + Expect(err).ToNot(HaveOccurred()) + + var wkt, area string + err = row.Scan(&wkt, &area) + Expect(err).ToNot(HaveOccurred()) + Expect(wkt).To(BeEquivalentTo("POLYGON((0 0,0 10,10 10,10 0,0 0))")) + Expect(area).To(BeEquivalentTo("100")) + } + + assertVector := func(namespace, clusterName string) { + row, err := postgresutils.RunQueryRowOverForward( + env.Ctx, + env.Client, + env.Interface, + env.RestClientConfig, + namespace, clusterName, postgresutils.AppDBName, + 
apiv1.ApplicationUserSecretSuffix, + "SELECT"+ + " '[1, 2, 3]'::vector AS vec1,"+ + " '[4, 5, 6]'::vector AS vec2,"+ + " cosine_distance('[1, 2, 3]'::vector, '[4, 5, 6]'::vector) AS cosine_sim,"+ + " l2_distance('[1, 2, 3]'::vector, '[4, 5, 6]'::vector) AS l2_dist,"+ + " inner_product('[1, 2, 3]'::vector, '[4, 5, 6]'::vector) AS dot_product;") + Expect(err).ToNot(HaveOccurred()) + + var vec1, vec2, cosineDist, distance, dotProduct string + err = row.Scan(&vec1, &vec2, &cosineDist, &distance, &dotProduct) + Expect(err).ToNot(HaveOccurred()) + Expect(vec1).To(BeEquivalentTo("[1,2,3]")) + Expect(vec2).To(BeEquivalentTo("[4,5,6]")) + Expect(cosineDist).To(BeEquivalentTo("0.025368153802923787")) + Expect(distance).To(BeEquivalentTo("5.196152422706632")) + Expect(dotProduct).To(BeEquivalentTo("32")) + } + + It("can use ImageVolume extensions", func() { + By("creating the cluster", func() { + namespace, err = env.CreateUniqueTestNamespace(env.Ctx, env.Client, namespacePrefix) + Expect(err).ToNot(HaveOccurred()) + clusterName, err = yaml.GetResourceNameFromYAML(env.Scheme, clusterManifest) + Expect(err).ToNot(HaveOccurred()) + databaseName, err = yaml.GetResourceNameFromYAML(env.Scheme, databaseManifest) + Expect(err).NotTo(HaveOccurred()) + AssertCreateCluster(namespace, clusterName, clusterManifest, env) + CreateResourceFromFile(namespace, databaseManifest) + }) + + By("checking volumes and volumeMounts", func() { + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + assertVolumeMounts(podList, "postgis") + assertVolumes(podList, "postgis") + }) + + By("checking extensions have been created", func() { + assertExtensions(namespace, databaseName) + }) + + By("adding a new extension to an existing Cluster", func() { + database := &apiv1.Database{} + databaseNamespacedName := types.NamespacedName{ + Namespace: namespace, + Name: databaseName, + } + + Eventually(func(g Gomega) { + // Updating the Cluster + cluster, err := clusterutils.Get(env.Ctx, env.Client, namespace, clusterName) + g.Expect(err).NotTo(HaveOccurred()) + cluster.Spec.PostgresConfiguration.Extensions = append(cluster.Spec.PostgresConfiguration.Extensions, + apiv1.ExtensionConfiguration{ + Name: "pgvector", + ImageVolumeSource: corev1.ImageVolumeSource{ + Reference: "ghcr.io/niccolofei/pgvector:18beta1-master-bullseye", // wokeignore:rule=master + }, + }) + g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) + + // Updating the Database + err = env.Client.Get(env.Ctx, databaseNamespacedName, database) + g.Expect(err).ToNot(HaveOccurred()) + database.Spec.Extensions = append(database.Spec.Extensions, apiv1.ExtensionSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "vector", + Ensure: apiv1.EnsurePresent, + }, + }) + g.Expect(env.Client.Update(env.Ctx, database)).To(Succeed()) + }, 60, 5).Should(Succeed()) + + AssertClusterEventuallyReachesPhase(namespace, clusterName, + []string{apiv1.PhaseUpgrade, apiv1.PhaseWaitingForInstancesToBeActive}, 30) + AssertClusterIsReady(namespace, clusterName, testTimeouts[timeouts.ClusterIsReadyQuick], env) + + podList, err := clusterutils.ListPods(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + assertVolumeMounts(podList, "pgvector") + assertVolumes(podList, "pgvector") + assertExtensions(namespace, databaseName) + }) + + By("verifying the extension's usage ", func() { + assertPostgis(namespace, clusterName) + assertVector(namespace, clusterName) + }) + }) +}) From 
1a64cbb6a326ab707c37824a45649dfde2d05b3f Mon Sep 17 00:00:00 2001 From: Jordi Teterissa Date: Thu, 24 Jul 2025 16:46:51 +0200 Subject: [PATCH 727/836] docs: update Operands image signing context (#8117) This change makes it clear in the documentation that _only_ `minimal` and `standard` Operand images are signed. `System` images built with the old process were never signed to begin with - this old process is being deprecated. For details, see https://github.com/cloudnative-pg/postgres-containers/issues/245 Signed-off-by: Jordi Teterissa Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- docs/src/index.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/src/index.md b/docs/src/index.md index 1c89078a7d..7b8ebfac25 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -74,7 +74,9 @@ The PostgreSQL operand container images are available for all across multiple architectures, directly from the [`postgres-containers` project's GitHub Container Registry](https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql). -All container images are signed and include SBOM and provenance attestations, +The [`minimal`](https://github.com/cloudnative-pg/postgres-containers#minimal-images) +and [`standard`](https://github.com/cloudnative-pg/postgres-containers#standard-images) +container images are signed and include SBOM and provenance attestations, provided separately for each architecture. Weekly jobs ensure that critical vulnerabilities (CVEs) in the entire stack are From 735528aa8d8ff5674fd16abebdf4fdc9f807e8c9 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 25 Jul 2025 10:12:38 +0200 Subject: [PATCH 728/836] docs: release notes for 1.26.1 and 1.25.3 (#8129) Closes #8128 Signed-off-by: Gabriele Bartolini Signed-off-by: Jonathan Gonzalez V. Signed-off-by: Marco Nenciarini Co-authored-by: Jonathan Gonzalez V. Co-authored-by: Marco Nenciarini --- .wordlist-en-custom.txt | 1 + docs/src/release_notes/v1.25.md | 58 +++++++++++++++++++++ docs/src/release_notes/v1.26.md | 92 +++++++++++++++++++++++++++++++++ docs/src/supported_releases.md | 2 +- 4 files changed, 152 insertions(+), 1 deletion(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index e0f5f2f5de..be82ff8c26 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -1028,6 +1028,7 @@ msg mspan multinamespace mutatingwebhookconfigurations +mutex myAKSCluster myResourceGroup namespace diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md index 5c0ba24b1d..5a88a9d87b 100644 --- a/docs/src/release_notes/v1.25.md +++ b/docs/src/release_notes/v1.25.md @@ -7,6 +7,64 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.25) on the release branch in GitHub. +## Version 1.25.3 + +**Release date:** Jul 25, 2025 + +*In memory of [DJ Walker-Morgan](https://www.linkedin.com/in/codepope/).* + +### Enhancements + +- Improved validation of `shared_buffers` by correctly considering `HugePages` + settings, ensuring accurate memory configuration checks + ([#7864](https://github.com/cloudnative-pg/cloudnative-pg/pull/7864)). + +- Set `oom_score_adj` for PostgreSQL worker processes to improve prioritization + during out-of-memory situations + ([#7891](https://github.com/cloudnative-pg/cloudnative-pg/pull/7891)). 
+ +- Added `fqdn-uri` and `fqdn-jdbc-uri` fields in user secrets to simplify + application connection string management and align with DNS-based connection + best practices ([#7852](https://github.com/cloudnative-pg/cloudnative-pg/pull/7852)). + +### Fixes + +- Added a mutex in the connection pooler to protect concurrent access to the + connections map, improving stability in high-concurrency environments + ([#7804](https://github.com/cloudnative-pg/cloudnative-pg/pull/7804)). + +- Fixed replica cluster instance ordering by correctly detecting the designated + primary, improving replica cluster stability and switchover operations + ([#8108](https://github.com/cloudnative-pg/cloudnative-pg/pull/8108)). + +- Added support for reconciling `VolumeAttributesClass` for PVCs, enhancing + storage compatibility and lifecycle management + ([#7885](https://github.com/cloudnative-pg/cloudnative-pg/pull/7885)). + +- Made the internal webserver routines non-blocking to improve responsiveness + under load ([#8071](https://github.com/cloudnative-pg/cloudnative-pg/pull/8071)). + +- Fixed an issue where the `ensureReplicationClientLeafCertificate` error did + not display the correct `secretName` in the not found message + ([#8086](https://github.com/cloudnative-pg/cloudnative-pg/pull/8086)). + +- Prevented invalid `ALTER SUBSCRIPTION` statements by updating only + PostgreSQL‑supported parameters; unsupported options like `copy_data` are + ignored to avoid reconciliation failures + ([7844](https://github.com/cloudnative-pg/cloudnative-pg/pull/7844)). + +- Fixed an issue where the `bootstrap-controller` in the connection pooler did + not apply `resources` settings correctly + ([#7922](https://github.com/cloudnative-pg/cloudnative-pg/pull/7922)). + +- Ensured online backups fail cleanly if the `targetPod` becomes unhealthy + during backup, preventing partial or misleading backups + ([#7944](https://github.com/cloudnative-pg/cloudnative-pg/pull/7944)). + +- Ensured the Backup resource status is set properly after a failure, improving + observability and scripting reliability + ([#7898](https://github.com/cloudnative-pg/cloudnative-pg/pull/7898)). + ## Version 1.25.2 **Release date:** May 23, 2025 diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 6e54b07411..9b3cb7958a 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -7,6 +7,98 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.26) on the release branch in GitHub. +## Version 1.26.1 + +**Release date:** Jul 25, 2025 + +*In memory of [DJ Walker-Morgan](https://www.linkedin.com/in/codepope/).* + +### Important Changes + +CloudNativePG is moving toward a plugin-based, backup and recovery agnostic +architecture (initiated in 1.26.0 with Barman Cloud). As part of this +transition, the following fields in the `.status` section of the `Cluster` +resource are now deprecated: + +- `firstRecoverabilityPoint` +- `firstRecoverabilityPointByMethod` +- `lastSuccessfulBackup` +- `lastSuccessfulBackupByMethod` +- `lastFailedBackup` + +Additionally, the following Prometheus metrics are deprecated: + +- `cnpg_collector_first_recoverability_point` +- `cnpg_collector_last_available_backup_timestamp` +- `cnpg_collector_last_failed_backup_timestamp` + +These fields and metrics will no longer update when using plugin-based backups +(e.g., Barman Cloud via CNPG-I). 
They remain functional for users still using +in-core Barman Cloud and volume snapshot backups. + +> **Note:** We, as maintainers, are sorry for any inconvenience caused by not +> highlighting this change during the 1.26.0 release. As we transition to a +> plugin-based backup and recovery architecture, we encourage you to **test +> your chosen plugin thoroughly in a staging environment before moving to +> production** to ensure your workflows and observability integration continue +> to meet your needs. Thank you for your understanding and for working with us +> as CloudNativePG evolves to provide a more modular and robust experience. + +### Enhancements + +- Improved validation of `shared_buffers` by correctly considering `HugePages` + settings, ensuring accurate memory configuration checks + ([#7864](https://github.com/cloudnative-pg/cloudnative-pg/pull/7864)). + +- Set `oom_score_adj` for PostgreSQL worker processes to improve prioritization + during out-of-memory situations + ([#7891](https://github.com/cloudnative-pg/cloudnative-pg/pull/7891)). + +- Added `fqdn-uri` and `fqdn-jdbc-uri` fields in user secrets to simplify + application connection string management and align with DNS-based connection + best practices ([#7852](https://github.com/cloudnative-pg/cloudnative-pg/pull/7852)). + +### Fixes + +- Added a mutex in the connection pooler to protect concurrent access to the + connections map, improving stability in high-concurrency environments + ([#7804](https://github.com/cloudnative-pg/cloudnative-pg/pull/7804)). + +- Fixed replica cluster instance ordering by correctly detecting the designated + primary, improving replica cluster stability and switchover operations + ([#8108](https://github.com/cloudnative-pg/cloudnative-pg/pull/8108)). + +- Added support for reconciling `VolumeAttributesClass` for PVCs, enhancing + storage compatibility and lifecycle management + ([#7885](https://github.com/cloudnative-pg/cloudnative-pg/pull/7885)). + +- Made the internal webserver routines non-blocking to improve responsiveness + under load ([#8071](https://github.com/cloudnative-pg/cloudnative-pg/pull/8071)). + +- Fixed an issue where the `ensureReplicationClientLeafCertificate` error did + not display the correct `secretName` in the not found message + ([#8086](https://github.com/cloudnative-pg/cloudnative-pg/pull/8086)). + +- Prevented invalid `ALTER SUBSCRIPTION` statements by updating only + PostgreSQL‑supported parameters; unsupported options like `copy_data` are + ignored to avoid reconciliation failures + ([7844](https://github.com/cloudnative-pg/cloudnative-pg/pull/7844)). + +- Fixed an issue where the `bootstrap-controller` in the connection pooler did + not apply `resources` settings correctly + ([#7922](https://github.com/cloudnative-pg/cloudnative-pg/pull/7922)). + +- Ensured online backups fail cleanly if the `targetPod` becomes unhealthy + during backup, preventing partial or misleading backups + ([#7944](https://github.com/cloudnative-pg/cloudnative-pg/pull/7944)). + +- Ensured the Backup resource status is set properly after a failure, improving + observability and scripting reliability + ([#7898](https://github.com/cloudnative-pg/cloudnative-pg/pull/7898)). + +- Improved liveness probe handling to avoid unnecessary timeouts when they are + not required ([#7902](https://github.com/cloudnative-pg/cloudnative-pg/pull/7902)). 
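
For clusters that still rely on in-core Barman Cloud or volume snapshot backups, the deprecated status fields listed above remain populated and can be inspected directly. A minimal sketch (assuming a cluster named `cluster-example` in the current namespace):

```sh
# Deprecated, but still updated for in-core Barman Cloud and
# volume snapshot backups:
kubectl get cluster cluster-example \
  -o jsonpath='{.status.firstRecoverabilityPoint}{"\n"}{.status.lastSuccessfulBackup}{"\n"}'
```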
+ ## Version 1.26.0 **Release date:** May 23, 2025 diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index be7e97dd6d..90940f680e 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -87,7 +87,7 @@ Git tags for versions are prefixed with `v`. | 1.25.x | Yes | Dec 23, 2024 | 22 Aug 2025 | 1.29, 1.30, 1.31, 1.32 | 1.33 | 13 - 17 | | main | No, development only | | | | | 13 - 17 | - +1 _PostgreSQL 13 will be supported until November 13, 2025. The list of supported Kubernetes versions in the table depends on what the CloudNativePG maintainers think is reasonable to support and to test. From e78ab0449752313519ad7f9938124640cd45fa4c Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 25 Jul 2025 11:10:22 +0200 Subject: [PATCH 729/836] fix(docs): release notes for 1.26.1 and 1.25.3 (#8131) Signed-off-by: Marco Nenciarini --- docs/src/release_notes/v1.25.md | 10 ++++++++++ docs/src/release_notes/v1.26.md | 10 ++++++++++ docs/src/supported_releases.md | 2 +- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/docs/src/release_notes/v1.25.md b/docs/src/release_notes/v1.25.md index 5a88a9d87b..e52b749c84 100644 --- a/docs/src/release_notes/v1.25.md +++ b/docs/src/release_notes/v1.25.md @@ -13,6 +13,12 @@ on the release branch in GitHub. *In memory of [DJ Walker-Morgan](https://www.linkedin.com/in/codepope/).* +### Changes + +- Removed `386` and ARM (v5/v6/v7) architectures from the `cnpg` plugin build + matrix, reducing the number of published binaries + ([#7648](https://github.com/cloudnative-pg/cloudnative-pg/pull/7648)). + ### Enhancements - Improved validation of `shared_buffers` by correctly considering `HugePages` @@ -27,6 +33,10 @@ on the release branch in GitHub. application connection string management and align with DNS-based connection best practices ([#7852](https://github.com/cloudnative-pg/cloudnative-pg/pull/7852)). +- Added the `systemID` field and related condition in the `Cluster` status to track + the PostgreSQL system identifier. + ([#7717](https://github.com/cloudnative-pg/cloudnative-pg/pull/7717)). + ### Fixes - Added a mutex in the connection pooler to protect concurrent access to the diff --git a/docs/src/release_notes/v1.26.md b/docs/src/release_notes/v1.26.md index 9b3cb7958a..16067cf9a4 100644 --- a/docs/src/release_notes/v1.26.md +++ b/docs/src/release_notes/v1.26.md @@ -44,6 +44,12 @@ in-core Barman Cloud and volume snapshot backups. > to meet your needs. Thank you for your understanding and for working with us > as CloudNativePG evolves to provide a more modular and robust experience. +### Changes + +- Removed `386` and ARM (v5/v6/v7) architectures from the `cnpg` plugin build + matrix, reducing the number of published binaries + ([#7648](https://github.com/cloudnative-pg/cloudnative-pg/pull/7648)). + ### Enhancements - Improved validation of `shared_buffers` by correctly considering `HugePages` @@ -58,6 +64,10 @@ in-core Barman Cloud and volume snapshot backups. application connection string management and align with DNS-based connection best practices ([#7852](https://github.com/cloudnative-pg/cloudnative-pg/pull/7852)). +- Added the `systemID` field and related condition in the `Cluster` status to track + the PostgreSQL system identifier. + ([#7717](https://github.com/cloudnative-pg/cloudnative-pg/pull/7717)). 
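
The new field can be read directly from the cluster status once an instance reports it. A sketch, assuming the identifier is exposed as `.status.systemID` as the note above suggests:

```sh
kubectl get cluster cluster-example -o jsonpath='{.status.systemID}{"\n"}'
```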
+ ### Fixes - Added a mutex in the connection pooler to protect concurrent access to the diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 90940f680e..51ff33a0c9 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -87,7 +87,7 @@ Git tags for versions are prefixed with `v`. | 1.25.x | Yes | Dec 23, 2024 | 22 Aug 2025 | 1.29, 1.30, 1.31, 1.32 | 1.33 | 13 - 17 | | main | No, development only | | | | | 13 - 17 | -1 _PostgreSQL 13 will be supported until November 13, 2025. +1 _PostgreSQL 13 will be supported until November 13, 2025._ The list of supported Kubernetes versions in the table depends on what the CloudNativePG maintainers think is reasonable to support and to test. From 3336cbd3bff87e31a5763f8f2c5461b858cf4760 Mon Sep 17 00:00:00 2001 From: Bill Mulligan Date: Fri, 25 Jul 2025 11:22:23 +0200 Subject: [PATCH 730/836] docs: Cilium grammar and yaml fixes (#8130) A few grammar and YAML fixes and adding an example of how default deny works with Cilium. Signed-off-by: Bill Mulligan Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- docs/src/cncf-projects/cilium.md | 148 +++++++++++++++++++++---------- 1 file changed, 99 insertions(+), 49 deletions(-) diff --git a/docs/src/cncf-projects/cilium.md b/docs/src/cncf-projects/cilium.md index 6f1933ec92..909a365e7d 100644 --- a/docs/src/cncf-projects/cilium.md +++ b/docs/src/cncf-projects/cilium.md @@ -2,18 +2,21 @@ ## About -[Cilium](https://cilium.io/) is a CNCF Graduated project that was accepted as an Incubating project in 2021 and graduated in 2023 under -the sponsorship of Isovalent. It is an advanced networking, security, and observability solution for cloud-native -environments, built on top of eBPF (Extended Berkeley Packet Filter) technology. Cilium manages network traffic in -Kubernetes clusters by dynamically injecting eBPF programs into the Linux Kernel, enabling low-latency, -high-performance communication and enforcing fine-grained security policies. +[Cilium](https://cilium.io/) is a CNCF Graduated project that was accepted as +an Incubating project in 2021 and graduated in 2023. It was originally created +by Isovalent. It is an advanced networking, security, and observability +solution for cloud native environments, built on top of +[eBPF](https://ebpf.io/) technology. Cilium manages network traffic in +Kubernetes clusters by dynamically injecting eBPF programs into the Linux +Kernel, enabling low-latency, high-performance communication, and enforcing +fine-grained security policies. Key features of Cilium: - Advanced L3-L7 security policies for fine-grained network traffic control - Efficient, kernel-level traffic management via eBPF - Service Mesh integration (Cilium Service Mesh) -- Support for both NetworkPolicy and CiliumNetworkPolicy +- Support for both Kubernetes NetworkPolicy and CiliumNetworkPolicy - Built-in observability and monitoring with Hubble To install Cilium in your environment, follow the instructions in the documentation: @@ -27,26 +30,57 @@ enables fine-grained control over network traffic between Pods within a Kubernet especially useful for securing communication between application workloads and backend services. -In the following examples, we demonstrate how Cilium can be used to secure a CloudNativePG PostgreSQL instance by -restricting ingress traffic to only authorized Pods. 
+In the following examples, we demonstrate how Cilium can be used to secure a +CloudNativePG PostgreSQL instance by restricting ingress traffic to only +authorized Pods. !!! Important - Before proceeding, ensure that the `cluster-example` Postgres cluster is up and running in your environment. + Before proceeding, ensure that the `cluster-example` Postgres cluster is up + and running in your environment. + +## Default Deny Behavior in Cilium + +By default, Cilium does **not** deny all traffic unless explicitly configured +to do so. In contrast to Kubernetes NetworkPolicy, which uses a deny-by-default +model once a policy is present in a namespace, Cilium provides more flexible +control over default deny behavior. + +To enforce a default deny posture with Cilium, you need to explicitly create a +policy that denies all traffic to a set of Pods unless otherwise allowed. This +is commonly achieved by using an **empty `ingress` section** in combination +with `endpointSelector`, or by enabling **`--enable-default-deny`** at the +Cilium agent level for broader enforcement. + +A minimal example of a default deny policy: + +```yaml +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: default-deny + namespace: default +spec: + description: "Default deny all ingress traffic to all Pods in this namespace" + endpointSelector: {} + ingress: [] +``` ## Making Cilium Network Policies work with CloudNativePG Operator -When working with a network policy, Cilium or not, the first step is to make sure that the operator can reach the Pods -in the target namespace. This is important because the operator needs to be able to perform checks and actions on the -Pods, and one of those actions requires to access the port `8000` on the Pods to get the current status of the PostgreSQL -instance running inside. +When working with a network policy, Cilium or not, the first step is to make +sure that the operator can reach the Pods in the target namespace. This is +important because the operator needs to be able to perform checks and actions +on the Pods, and one of those actions requires access to the port `8000` on the +Pods to get the current status of the PostgreSQL instance running inside. -The following `CiliumNetworkPolicy` allows the operator to access the Pods in the target `default` namespace +The following `CiliumNetworkPolicy` allows the operator to access the Pods in +the target `default` namespace: ```yaml apiVersion: cilium.io/v2 kind: CiliumNetworkPolicy metadata: - name: cnpg-operator-policy + name: cnpg-operator-access namespace: default spec: description: "Allow CloudNativePG operator access to any pod in the target namespace" @@ -61,20 +95,22 @@ spec: protocol: TCP ``` !!! Important - The `cnpg-system` namespace is the default namespace for the operator when using the YAML manifests, if the operator - was installed using a different process(Helm, OLM, etc.), the namespace may be different. Make sure to adjust the - namespace properly. + The `cnpg-system` namespace is the default namespace for the operator when + using the YAML manifests. If the operator was installed using a different + process (Helm, OLM, etc.), the namespace may be different. Make sure to adjust + the namespace properly. ## Allowing access between cluster Pods -Since the default policy is "deny all", we need to explicitly allow access between the cluster Pods in the same namespace. 
-We will improve our previous policy by adding the required ingress rule: +Since the default policy is "deny all", we need to explicitly allow access +between the cluster Pods in the same namespace. We will improve our previous +policy by adding the required ingress rule: ```yaml apiVersion: cilium.io/v2 kind: CiliumNetworkPolicy metadata: - name: cnpg-policy + name: cnpg-cluster-internal-access namespace: default spec: description: "Allow CloudNativePG operator access and connection between pods in the same namespace" @@ -94,20 +130,23 @@ spec: protocol: TCP ``` -The policy allows access from `cnpg-system` Pods and from `default` namespace Pods that also belong to `cluster-example`. -The `matchLabels` selector requires Pods to have the complete set of listed labels. Missing even one label means the Pod -will not match. +The policy allows access from `cnpg-system` Pods and from `default` namespace +Pods that also belong to `cluster-example`. The `matchLabels` selector requires +Pods to have the complete set of listed labels. Missing even one label means +the Pod will not match. ## Restricting Access to PostgreSQL with Cilium -In this example, we define a `CiliumNetworkPolicy` that allows only Pods labeled `role=backend` in the `default` namespace -to connect to a PostgreSQL cluster named `cluster-example`. All other ingress traffic is blocked by default. +In this example, we define a `CiliumNetworkPolicy` that allows only Pods +labeled `role=backend` in the `default` namespace to connect to a PostgreSQL +cluster named `cluster-example`. All other ingress traffic is blocked by +default. ```yaml apiVersion: cilium.io/v2 kind: CiliumNetworkPolicy metadata: - name: postgres-policy + name: postgres-access-backend-label namespace: default spec: description: "Allow PostgreSQL access on port 5432 from Pods with role=backend" @@ -124,17 +163,19 @@ spec: protocol: TCP ``` -This `CiliumNetworkPolicy` ensures that only Pods labeled with `role=backend` can access the -PostgreSQL instance managed by CloudNativePG via port 5432 in the `default` namespace. +This `CiliumNetworkPolicy` ensures that only Pods labeled with `role=backend` +can access the PostgreSQL instance managed by CloudNativePG via port 5432 in +the `default` namespace. -In the following policy, we demonstrate how to allow ingress traffic to port 5432 of a PostgreSQL cluster named -`cluster-example`, only from Pods with the label `role=backend` in any namespace. +In the following policy, we demonstrate how to allow ingress traffic to port +5432 of a PostgreSQL cluster named `cluster-example`, only from Pods with the +label `role=backend` in any namespace. ```yaml apiVersion: cilium.io/v2 kind: CiliumNetworkPolicy metadata: - name: postgres-policy + name: postgres-access-backend-any-ns namespace: default spec: description: "Allow PostgreSQL access on port 5432 from Pods with role=backend in any namespace" @@ -143,25 +184,27 @@ spec: cnpg.io/cluster: cluster-example ingress: - fromEndpoints: - - matchLabels: - role: backend - matchExpressions: - - key: io.kubernetes.pod.namespace - operator: Exists + - labelSelector: + matchLabels: + role: backend + matchExpressions: + - key: io.kubernetes.pod.namespace + operator: Exists toPorts: - ports: - port: "5432" protocol: TCP ``` -The following example allows ingress traffic to port 5432 of the `cluster-example` cluster (located in the -`default` namespace) from any Pods in the `backend` namespace. 
+The following example allows ingress traffic to port 5432 of the +`cluster-example` cluster (located in the `default` namespace) from any Pods in +the `backend` namespace. ```yaml apiVersion: cilium.io/v2 kind: CiliumNetworkPolicy metadata: - name: postgres-policy + name: postgres-access-backend-namespace namespace: default spec: description: "Allow PostgreSQL access on port 5432 from any Pods in the backend namespace" @@ -178,18 +221,20 @@ spec: protocol: TCP ``` -Using Cilium’s L3/L4 policy model, we define a `CiliumNetworkPolicy` that explicitly allows ingress -traffic to cluster Pods only from application Pods in the `backend` namespace. All other -traffic is implicitly denied unless explicitly permitted by additional policies. +Using Cilium’s L3/L4 policy model, we define a `CiliumNetworkPolicy` that +explicitly allows ingress traffic to cluster Pods only from application Pods in +the `backend` namespace. All other traffic is implicitly denied unless +explicitly permitted by additional policies. -The following example allows ingress traffic to port 5432 of the `cluster-example` cluster (located in the -`default` namespace) from any source within the Kubernetes cluster. +The following example allows ingress traffic to port 5432 of the +`cluster-example` cluster (located in the `default` namespace) from any source +within the Kubernetes cluster. ```yaml apiVersion: cilium.io/v2 kind: CiliumNetworkPolicy metadata: - name: postgres-policy + name: postgres-access-cluster-wide namespace: default spec: description: "Allow ingress traffic to port 5432 of the cluster-example from any pods within the Kubernetes cluster" @@ -205,6 +250,11 @@ spec: protocol: TCP ``` -You may consider using [editor.networkpolicy.io](https://editor.networkpolicy.io/), a visual and interactive tool that simplifies the creation and -validation of Cilium Network Policies. It’s especially helpful for avoiding misconfigurations and understanding traffic -rules more clearly by presenting in a visual way. +You may consider using [editor.networkpolicy.io](https://editor.networkpolicy.io/), +a visual and interactive tool that simplifies the creation and validation of +Cilium Network Policies. It’s especially helpful for avoiding misconfigurations +and understanding traffic rules more clearly by presenting in a visual way. + +With these policies, you've established baseline access controls for +PostgreSQL. You can layer additional egress or audit rules using Cilium's +policy language or extend to L7 enforcement with Envoy. From 25db589427009bcd998d2d72dd6cc501298bced9 Mon Sep 17 00:00:00 2001 From: Jaime Silvela Date: Fri, 25 Jul 2025 11:47:37 +0200 Subject: [PATCH 731/836] fix(import): do not create default user and database in monolith mode (#7801) When importing in monolith mode, avoid automatically creating a default user and database. This approach ensures that the import process accurately mirrors the source environment, preventing the creation of unnecessary or unintended objects. Additionally, the documentation has been updated to clarify the import behavior and usage in monolith scenarios. 
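
A quick way to confirm the new behavior on the target cluster (a sketch, assuming `psql` access to the imported instance; it mirrors the check added to the e2e test below):

```sql
-- After a monolithic import, no default `app` database or owner is created:
SELECT count(*) FROM pg_catalog.pg_database WHERE datname = 'app';  -- expect 0
SELECT count(*) FROM pg_catalog.pg_user     WHERE usename = 'app';  -- expect 0
```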
Closes #7731 Closes #7447 Signed-off-by: Jaime Silvela Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- api/v1/cluster_defaults.go | 6 +- docs/src/database_import.md | 115 ++++++++++++++++++++++++++- tests/e2e/cluster_monolithic_test.go | 14 ++++ 3 files changed, 132 insertions(+), 3 deletions(-) diff --git a/api/v1/cluster_defaults.go b/api/v1/cluster_defaults.go index 5a5607d8dd..707b3f4287 100644 --- a/api/v1/cluster_defaults.go +++ b/api/v1/cluster_defaults.go @@ -238,7 +238,11 @@ func (r *Cluster) defaultInitDB() { } if r.Spec.Bootstrap.InitDB.Database == "" { - r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName + // Set the default only if not executing a monolithic import + if r.Spec.Bootstrap.InitDB.Import == nil || + r.Spec.Bootstrap.InitDB.Import.Type != MonolithSnapshotType { + r.Spec.Bootstrap.InitDB.Database = DefaultApplicationDatabaseName + } } if r.Spec.Bootstrap.InitDB.Owner == "" { r.Spec.Bootstrap.InitDB.Owner = r.Spec.Bootstrap.InitDB.Database diff --git a/docs/src/database_import.md b/docs/src/database_import.md index f397a6c06e..593ea430e0 100644 --- a/docs/src/database_import.md +++ b/docs/src/database_import.md @@ -234,7 +234,8 @@ There are a few things you need to be aware of when using the `monolith` type: that needs to run `pg_dump` and retrieve roles information (*superuser* is OK) - Currently, the `pg_dump -Fd` result is stored temporarily inside the `dumps` - folder in the `PGDATA` volume, so there should be enough available space to + folder in the `PGDATA` volume of the destination cluster's instances, so + there should be enough available space to temporarily contain the dump result on the assigned node, as well as the restored data and indexes. Once the import operation is completed, this folder is automatically deleted by the operator. @@ -250,7 +251,117 @@ There are a few things you need to be aware of when using the `monolith` type: and those databases not allowing connections - After the clone procedure is done, `ANALYZE VERBOSE` is executed for every database. -- `postImportApplicationSQL` field is not supported +- The `postImportApplicationSQL` field is not supported + +!!! Hint + The databases and their owners are preserved exactly as they exist in the + source cluster—no `app` database or user will be created during import. If your + `bootstrap.initdb` stanza specifies custom `database` and `owner` values that + do not match any of the databases or users being imported, the instance + manager will create a new, empty application database and owner role with those + specified names, while leaving the imported databases and owners unchanged. + +## A practical example + +There is nothing to stop you from using the `monolith` approach to import a +single database. It is interesting to see how the results of doing so would +differ from using the `microservice` approach. 
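
A minimal sketch of the defaulting change in `cluster_defaults.go` above (a hypothetical test snippet; it assumes it runs inside the `api/v1` package so it can call the unexported `defaultInitDB`, and uses Gomega as the surrounding tests do):

```go
cluster := &Cluster{
	Spec: ClusterSpec{
		Bootstrap: &BootstrapConfiguration{
			InitDB: &BootstrapInitDB{
				Import: &Import{Type: MonolithSnapshotType},
			},
		},
	},
}
cluster.defaultInitDB()

// With a monolithic import, no default application database (or owner)
// is injected; the imported databases and owners are preserved as-is.
Expect(cluster.Spec.Bootstrap.InitDB.Database).To(BeEmpty())
Expect(cluster.Spec.Bootstrap.InitDB.Owner).To(BeEmpty())
```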
+ +Given a source cluster, for example the following, with a database named +`mydb` owned by role `me`: + +``` yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + instances: 1 + + postgresql: + pg_hba: + - host all all all trust + + storage: + size: 1Gi + + bootstrap: + initdb: + database: mydb + owner: me +``` + +We can import it via `microservice`: + +``` yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example-microservice +spec: + instances: 1 + + storage: + size: 1Gi + + bootstrap: + initdb: + import: + type: microservice + databases: + - mydb + source: + externalCluster: cluster-example + + externalClusters: + - name: cluster-example + connectionParameters: + host: cluster-example-rw + dbname: postgres +``` + +as well as via monolith: + +``` yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example-monolith +spec: + instances: 1 + + storage: + size: 1Gi + + bootstrap: + initdb: + import: + type: monolith + databases: + - mydb + roles: + - me + source: + externalCluster: cluster-example + + externalClusters: + - name: cluster-example + connectionParameters: + host: cluster-example-rw + dbname: postgres +``` + +In both cases, the database's contents will be imported, but: + +- In the microservice case, the imported database's name and owner both become + `app`, or whichever configuration for the fields `database` and `owner` are + set in the `bootstrap.initdb` stanza. +- In the monolith case, the database and owner are kept exactly as in the source + cluster, i.e. `mydb` and `me` respectively. No `app` database nor user will be + created. If there are custom settings for `database` and `owner` in the + `bootstrap.initdb` stanza that don't match the source databases/owners to + import, the instance manager will create a new empty application database and + owner role, but will leave the imported databases/owners intact. 
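
To see the difference in practice, list the databases on each target (a sketch using the `cnpg` plugin; cluster names as in the examples above, with arguments after `--` passed through to `psql`):

```sh
# Microservice import: contents land in the configured database (`app` by default)
kubectl cnpg psql cluster-example-microservice -- -c '\l'

# Monolith import: `mydb` owned by `me` is preserved verbatim
kubectl cnpg psql cluster-example-monolith -- -c '\l'
```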
## Import optimizations diff --git a/tests/e2e/cluster_monolithic_test.go b/tests/e2e/cluster_monolithic_test.go index 2351278503..e12a7407ca 100644 --- a/tests/e2e/cluster_monolithic_test.go +++ b/tests/e2e/cluster_monolithic_test.go @@ -185,6 +185,20 @@ var _ = Describe("Imports with Monolithic Approach", Label(tests.LabelImportingD } }) + By("verifying that no extra application database or owner were created", func() { + stmt, err := connTarget.Prepare("SELECT count(*) FROM pg_catalog.pg_database WHERE datname = $1") + Expect(err).ToNot(HaveOccurred()) + var matchCount int + err = stmt.QueryRowContext(env.Ctx, "app").Scan(&matchCount) + Expect(err).ToNot(HaveOccurred()) + Expect(matchCount).To(BeZero(), "app database should not exist") + stmt, err = connTarget.Prepare("SELECT count(*) from pg_catalog.pg_user WHERE usename = $1") + Expect(err).ToNot(HaveOccurred()) + err = stmt.QueryRowContext(env.Ctx, "app").Scan(&matchCount) + Expect(err).ToNot(HaveOccurred()) + Expect(matchCount).To(BeZero(), "app user should not exist") + }) + By(fmt.Sprintf("verifying that the source superuser '%s' became a normal user in target", databaseSuperUser), func() { row := connTarget.QueryRow(fmt.Sprintf( From a3976cd9986ecce4a1d8a7e32f2c03f3f79a043b Mon Sep 17 00:00:00 2001 From: Agalin <6164461+Agalin@users.noreply.github.com> Date: Fri, 25 Jul 2025 12:13:06 +0200 Subject: [PATCH 732/836] feat(auth): support user maps for predefined users to enable self-managed certificates (#7725) Add support for default user maps for predefined users, allowing administrators to remap certificate identities and enable the `streaming_replica` user to authenticate with certificates using different Common Names. This improves compatibility and flexibility when using self-managed PKI infrastructures. Closes #7697 Signed-off-by: Szymon Soloch Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- docs/src/certificates.md | 30 ++++++++++++++++++++++++++++++ docs/src/postgresql_conf.md | 11 ++++++----- docs/src/replication.md | 4 ++-- pkg/postgres/configuration.go | 12 +++++++++--- 4 files changed, 47 insertions(+), 10 deletions(-) diff --git a/docs/src/certificates.md b/docs/src/certificates.md index 96e764bbf3..28585e8191 100644 --- a/docs/src/certificates.md +++ b/docs/src/certificates.md @@ -263,6 +263,36 @@ the following parameters: instances, you can add a label with the key `cnpg.io/reload` to it. Otherwise, you must reload the instances using the `kubectl cnpg reload` subcommand. +#### Customizing the `streaming_replica` client certificate + +In some environments, it may not be possible to generate a certificate with the +common name `streaming_replica` due to company policies or other security +concerns, such as a CA shared across multiple clusters. In such cases, the user +mapping feature can be used to allow authentication as the `streaming_replica` +user with certificates containing different common names. + +To configure this setup, add a `pg_ident.conf` entry for the predefined map +named `cnpg_streaming_replica`. 
+ +For example, to enable `streaming_replica` authentication using a certificate +with the common name `streaming-replica.cnpg.svc.cluster.local`, add the +following to your cluster definition: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example +spec: + postgresql: + pg_ident: + - cnpg_streaming_replica streaming-replica.cnpg.svc.cluster.local streaming_replica +``` + +For further details on how `pg_ident.conf` is managed by the operator, see the +["PostgreSQL Configuration" page](postgresql_conf.md#the-pg_ident-section) in +the documentation. + #### Cert-manager example This simple example shows how to use [cert-manager](https://cert-manager.io/) diff --git a/docs/src/postgresql_conf.md b/docs/src/postgresql_conf.md index 614ccd3a7a..368815cac4 100644 --- a/docs/src/postgresql_conf.md +++ b/docs/src/postgresql_conf.md @@ -373,9 +373,9 @@ Fixed rules: ```text local all all peer -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert -hostssl all cnpg_pooler_pgbouncer all cert +hostssl postgres streaming_replica all cert map=cnpg_streaming_replica +hostssl replication streaming_replica all cert map=cnpg_streaming_replica +hostssl all cnpg_pooler_pgbouncer all cert map=cnpg_pooler_pgbouncer ``` Default rules: @@ -397,8 +397,9 @@ The resulting `pg_hba.conf` will look like this: ```text local all all peer -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert +hostssl postgres streaming_replica all cert map=cnpg_streaming_replica +hostssl replication streaming_replica all cert map=cnpg_streaming_replica +hostssl all cnpg_pooler_pgbouncer all cert map=cnpg_pooler_pgbouncer diff --git a/docs/src/replication.md b/docs/src/replication.md index 6489963276..09503efb38 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -81,8 +81,8 @@ following excerpt taken from `pg_hba.conf`: ``` # Require client certificate authentication for the streaming_replica user -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert +hostssl postgres streaming_replica all cert map=cnpg_streaming_replica +hostssl replication streaming_replica all cert map=cnpg_streaming_replica ``` !!! 
Seealso "Certificates" diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index b5259c2616..7e96024041 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -99,9 +99,9 @@ const ( local all all peer map=local # Require client certificate authentication for the streaming_replica user -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert -hostssl all cnpg_pooler_pgbouncer all cert +hostssl postgres streaming_replica all cert map=cnpg_streaming_replica +hostssl replication streaming_replica all cert map=cnpg_streaming_replica +hostssl all cnpg_pooler_pgbouncer all cert map=cnpg_pooler_pgbouncer # # USER-DEFINED RULES @@ -134,6 +134,12 @@ host all all all {{.DefaultAuthenticationMethod}} # Grant local access ('local' user map) local {{.Username}} postgres +# Grant streaming_replica access ('cnpg_streaming_replica' user map) +cnpg_streaming_replica streaming_replica streaming_replica + +# Grant cnpg_pooler_pgbouncer access ('cnpg_pooler_pgbouncer' user map) +cnpg_pooler_pgbouncer cnpg_pooler_pgbouncer cnpg_pooler_pgbouncer + # # USER-DEFINED RULES # From fc7d1e30b5df3797df1fe81717d7ec7ea042d98b Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 25 Jul 2025 13:51:52 +0200 Subject: [PATCH 733/836] feat(rollout): support changing image and PostgreSQL settings simultaneously (#8115) Allow updating the container image (including PostgreSQL version or extensions) and PostgreSQL configuration settings in a single operation. The image change will trigger the first rollout, followed by the configuration changes in a subsequent rollout. Closes: #2530 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini Co-authored-by: Gabriele Bartolini --- docs/src/rolling_update.md | 39 ++++++++------- .../controller/instance_controller.go | 40 ++++++++++++++++ internal/management/controller/manager.go | 6 ++- internal/webhook/v1/cluster_webhook.go | 26 ---------- internal/webhook/v1/cluster_webhook_test.go | 48 ------------------- 5 files changed, 67 insertions(+), 92 deletions(-) diff --git a/docs/src/rolling_update.md b/docs/src/rolling_update.md index 1d04a1e2ee..6dda17e990 100644 --- a/docs/src/rolling_update.md +++ b/docs/src/rolling_update.md @@ -1,32 +1,39 @@ # Rolling Updates -The operator allows changing the PostgreSQL version used in a cluster while -applications are running against it. +The operator allows you to change the PostgreSQL version used in a cluster +while applications continue running against it. -!!! Important - Only upgrades for PostgreSQL minor releases are supported. 
+Rolling upgrades are triggered when: -Rolling upgrades are started when: +- you change the `imageName` attribute in the cluster specification; -- the user changes the `imageName` attribute of the cluster specification; +- you change the list of extension images in the `.spec.postgresql.extensions` + stanza of the cluster specification; -- the [image catalog](image_catalog.md) is updated with a new image for the major used by the cluster; +- the [image catalog](image_catalog.md) is updated with a new image for the + major version used by the cluster; -- a change in the PostgreSQL configuration requires a restart to be - applied; +- a change in the PostgreSQL configuration requires a restart to apply; -- a change on the `Cluster` `.spec.resources` values +- you change the `Cluster` `.spec.resources` values; -- a change in size of the persistent volume claim on AKS +- you resize the persistent volume claim on AKS; -- after the operator is updated, to ensure the Pods run the latest instance - manager (unless [in-place updates are enabled](installation_upgrade.md#in-place-updates-of-the-instance-manager)). +- the operator is updated, ensuring Pods run the latest instance manager + (unless [in-place updates are enabled](installation_upgrade.md#in-place-updates-of-the-instance-manager)). -The operator starts upgrading all the replicas, one Pod at a time, and begins -from the one with the highest serial. +!!! Warning + Any change to container images (including extensions) takes precedence over + all other changes and will trigger a rollout first. For example, if you update + both the PostgreSQL configuration and the PostgreSQL version at the same time, + the container image change will take priority, and the configuration change + will be applied in a subsequent rollout. -The primary is the last node to be upgraded. +During a rolling upgrade, the operator upgrades all replicas one Pod at a time, +starting from the one with the highest serial. + +The primary is always the last node to be upgraded. Rolling updates are configurable and can be either entirely automated (`unsupervised`) or requiring human intervention (`supervised`). diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index 454b9247ba..ee04eac8cb 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -34,6 +34,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" + "github.com/cloudnative-pg/machinery/pkg/stringset" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -381,6 +382,11 @@ func (r *InstanceReconciler) refreshConfigurationFiles( } reloadNeeded = reloadNeeded || reloadIdent + // We give priority to images changes before applying the configuration ones. 
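+	// For example, when a user changes both `imageName` and a PostgreSQL
+	// parameter in the same edit, the image drift detected by
+	// requiresImagesRollout below makes us skip the configuration reload;
+	// the settings are applied in a subsequent rollout, once the new
+	// images are running.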
+ if r.requiresImagesRollout(ctx, cluster) { + return reloadNeeded, nil + } + // Reconcile PostgreSQL configuration // This doesn't need the PG connection, but it needs to reload it in case of changes reloadConfig, err := r.instance.RefreshConfigurationFilesFromCluster( @@ -402,6 +408,40 @@ func (r *InstanceReconciler) refreshConfigurationFiles( return reloadNeeded, nil } +func (r *InstanceReconciler) requiresImagesRollout(ctx context.Context, cluster *apiv1.Cluster) bool { + contextLogger := log.FromContext(ctx) + + latestImages := stringset.New() + latestImages.Put(cluster.Spec.ImageName) + for _, extension := range cluster.Spec.PostgresConfiguration.Extensions { + latestImages.Put(extension.ImageVolumeSource.Reference) + } + + if r.runningImages == nil { + r.runningImages = latestImages + contextLogger.Info("Detected running images", "runningImages", r.runningImages.ToSortedList()) + + return false + } + + contextLogger.Trace( + "Calculated image requirements", + "latestImages", latestImages.ToSortedList(), + "runningImages", r.runningImages.ToSortedList()) + + if latestImages.Eq(r.runningImages) { + return false + } + + contextLogger.Info( + "Detected drift between the bootstrap images and the configuration. Skipping configuration reload", + "runningImages", r.runningImages.ToSortedList(), + "latestImages", latestImages.ToSortedList(), + ) + + return true +} + func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv1.Cluster) *reconcile.Result { contextLogger := log.FromContext(ctx) diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go index 76929fef62..e756afa7d6 100644 --- a/internal/management/controller/manager.go +++ b/internal/management/controller/manager.go @@ -25,6 +25,7 @@ import ( "context" "fmt" + "github.com/cloudnative-pg/machinery/pkg/stringset" "go.uber.org/atomic" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -42,8 +43,9 @@ import ( // the one of this PostgreSQL instance. 
Also, the configuration in the // ConfigMap is applied when needed type InstanceReconciler struct { - client ctrl.Client - instance *postgres.Instance + client ctrl.Client + instance *postgres.Instance + runningImages *stringset.Data secretVersions map[string]string extensionStatus map[string]bool diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 72d1009ebf..6a022877f6 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -21,7 +21,6 @@ package v1 import ( "context" - "encoding/json" "fmt" "slices" "strconv" @@ -241,7 +240,6 @@ func (v *ClusterCustomValidator) validateClusterChanges(r, old *apiv1.Cluster) ( type validationFunc func(*apiv1.Cluster, *apiv1.Cluster) field.ErrorList validations := []validationFunc{ v.validateImageChange, - v.validateConfigurationChange, v.validateStorageChange, v.validateWalStorageChange, v.validateTablespacesChange, @@ -1261,30 +1259,6 @@ func parsePostgresQuantityValue(value string) (resource.Quantity, error) { return resource.ParseQuantity(value) } -// validateConfigurationChange determines whether a PostgreSQL configuration -// change can be applied -func (v *ClusterCustomValidator) validateConfigurationChange(r, old *apiv1.Cluster) field.ErrorList { - var result field.ErrorList - - if old.Spec.ImageName != r.Spec.ImageName { - diff := utils.CollectDifferencesFromMaps(old.Spec.PostgresConfiguration.Parameters, - r.Spec.PostgresConfiguration.Parameters) - if len(diff) > 0 { - jsonDiff, _ := json.Marshal(diff) - result = append( - result, - field.Invalid( - field.NewPath("spec", "imageName"), - r.Spec.ImageName, - fmt.Sprintf("Can't change image name and configuration at the same time. "+ - "There are differences in PostgreSQL configuration parameters: %s", jsonDiff))) - return result - } - } - - return result -} - func validateSyncReplicaElectionConstraint(constraints apiv1.SyncReplicaElectionConstraints) *field.Error { if !constraints.Enabled { return nil diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index f858f112ae..8ce24826dd 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -514,54 +514,6 @@ var _ = Describe("configuration change validation", func() { v = &ClusterCustomValidator{} }) - It("doesn't complain when the configuration is exactly the same", func() { - clusterOld := &apiv1.Cluster{ - Spec: apiv1.ClusterSpec{ - ImageName: "postgres:10.4", - }, - } - clusterNew := clusterOld.DeepCopy() - Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty()) - }) - - It("doesn't complain when we change a setting which is not fixed", func() { - clusterOld := &apiv1.Cluster{ - Spec: apiv1.ClusterSpec{ - ImageName: "postgres:10.4", - }, - } - clusterNew := &apiv1.Cluster{ - Spec: apiv1.ClusterSpec{ - ImageName: "postgres:10.4", - PostgresConfiguration: apiv1.PostgresConfiguration{ - Parameters: map[string]string{ - "shared_buffers": "4G", - }, - }, - }, - } - Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty()) - }) - - It("complains when changing postgres major version and settings", func() { - clusterOld := &apiv1.Cluster{ - Spec: apiv1.ClusterSpec{ - ImageName: "postgres:10.4", - }, - } - clusterNew := &apiv1.Cluster{ - Spec: apiv1.ClusterSpec{ - ImageName: "postgres:10.5", - PostgresConfiguration: apiv1.PostgresConfiguration{ - Parameters: map[string]string{ - "shared_buffers": "4G", - }, - }, - }, - } - 
Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(HaveLen(1)) - }) - It("produces no error when WAL size settings are correct", func() { clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ From efa3c2a462abb7ebe64bf7129a8e8a07cd5dd883 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 25 Jul 2025 12:19:29 +0200 Subject: [PATCH 734/836] Version tag to 1.26.1 (#8135) Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- docs/src/installation_upgrade.md | 4 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.26.1.yaml | 18034 +++++++++++++++++++++++++++++ 4 files changed, 18054 insertions(+), 20 deletions(-) create mode 100644 releases/cnpg-1.26.1.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 7191be7e68..ea81383716 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -8,12 +8,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.0.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.1.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.0.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.1.yaml ``` You can verify that with: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index dfb1147c41..3d32e546ac 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -31,11 +31,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.26.0 release of the plugin, for an Intel based +For example, let's install the 1.26.1 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0/kubectl-cnpg_1.26.0_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.1/kubectl-cnpg_1.26.1_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -46,17 +46,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.26.0) ... -Setting up cnpg (1.26.0) ... +Unpacking cnpg (1.26.1) ... +Setting up cnpg (1.26.1) ... ``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.26.0 release for an +As in the example for `.rpm` packages, let's install the 1.26.1 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.0/kubectl-cnpg_1.26.0_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.1/kubectl-cnpg_1.26.1_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -70,7 +70,7 @@ Dependencies resolved. 
Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.26.0 @commandline 20 M + cnpg x86_64 1.26.1 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.26.0 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.0 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.0 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.26.1 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.1 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.1 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.26.0 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.0 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.0 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.26.1 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.1 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.1 k8s-eu-worker ``` With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0","build":{"Version":"1.26.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.1","build":{"Version":"1.26.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.0","build":{"Version":"1.26.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.1","build":{"Version":"1.26.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 60d372f7e6..1cb27a4cc4 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.26.0" + Version = "1.26.1" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = 
"ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.0" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.1" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.26.0" + buildVersion = "1.26.1" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.26.1.yaml b/releases/cnpg-1.26.1.yaml new file mode 100644 index 0000000000..e99338fe1a --- /dev/null +++ b/releases/cnpg-1.26.1.yaml @@ -0,0 +1,18034 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. 
+ enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
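The `online`, `onlineConfiguration`, and `target` spec fields described above can be overridden on a per-backup basis. A hedged sketch of a cold (offline) volume-snapshot backup forced onto the primary, with illustrative names:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: sandbox-cold-snapshot   # illustrative name
spec:
  cluster:
    name: sandbox               # illustrative cluster name
  method: volumeSnapshot
  online: false                 # offline/cold, overriding '.spec.backup.volumeSnapshot.online'
  target: primary               # instead of the cluster-level target policy
```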
+ type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. 
+ type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is the role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog.
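As a concrete reading of this schema (note the `minimum: 10` and uniqueness constraints that follow), a minimal sketch of a `ClusterImageCatalog` with two majors; the image tags are placeholders:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ClusterImageCatalog
metadata:
  name: postgresql              # cluster-scoped, so no namespace
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16.9   # placeholder tag
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17.5   # same image as the operator default above
```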
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. 
+ This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
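Pulling the scheduling fields above together, a hedged sketch of the `affinity` stanza of a `Cluster` spec; the label and taint keys are illustrative:

```yaml
spec:
  affinity:
    enablePodAntiAffinity: true         # the operator default
    podAntiAffinityType: required       # pods stay pending rather than co-schedule
    topologyKey: kubernetes.io/hostname
    nodeSelector:
      workload: postgres                # illustrative node label
    tolerations:
      - key: dedicated                  # illustrative taint key
        operator: Equal
        value: postgres
        effect: NoSchedule
```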
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. 
+ enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                    type: object
+                  serverName:
+                    description: |-
+                      The server name on S3; the cluster name is used if this
+                      parameter is omitted
+                    type: string
+                  tags:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Tags is a list of key value pairs that will be passed to the
+                      Barman --tags option.
+                    type: object
+                  wal:
+                    description: |-
+                      The configuration for the backup of the WAL stream.
+                      When not defined, WAL files will be stored uncompressed and may be
+                      unencrypted in the object store, according to the bucket default policy.
+                    properties:
+                      archiveAdditionalCommandArgs:
+                        description: |-
+                          Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+                          command-line invocation. These arguments provide flexibility to customize
+                          the WAL archive process further, according to specific requirements or configurations.
+
+                          Example:
+                          In a scenario where specialized backup options are required, such as setting
+                          a specific timeout or defining custom behavior, users can use this field
+                          to specify additional command arguments.
+
+                          Note:
+                          It's essential to ensure that the provided arguments are valid and supported
+                          by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+                          behavior during execution.
+                        items:
+                          type: string
+                        type: array
+                      compression:
+                        description: |-
+                          Compress a WAL file before sending it to the object store. Available
+                          options are empty string (no compression, default), `gzip`, `bzip2`,
+                          `lz4`, `snappy`, `xz`, and `zstd`.
+                        enum:
+                        - bzip2
+                        - gzip
+                        - lz4
+                        - snappy
+                        - xz
+                        - zstd
+                        type: string
+                      encryption:
+                        description: |-
+                          Whether to force the encryption of files (if the bucket is
+                          not already configured for that).
+                          Allowed options are empty string (use the bucket policy, default),
+                          `AES256` and `aws:kms`
+                        enum:
+                        - AES256
+                        - aws:kms
+                        type: string
+                      maxParallel:
+                        description: |-
+                          Number of WAL files to be either archived in parallel (when the
+                          PostgreSQL instance is archiving to a backup object store) or
+                          restored in parallel (when a PostgreSQL standby is fetching WAL
+                          files from a recovery object store). If not specified, WAL files
+                          will be processed one at a time. It accepts a positive integer as a
+                          value - with 1 being the minimum accepted value.
+                        minimum: 1
+                        type: integer
+                      restoreAdditionalCommandArgs:
+                        description: |-
+                          Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                          command-line invocation. These arguments provide flexibility to customize
+                          the WAL restore process further, according to specific requirements or configurations.
+
+                          Example:
+                          In a scenario where specialized backup options are required, such as setting
+                          a specific timeout or defining custom behavior, users can use this field
+                          to specify additional command arguments.
+
+                          Note:
+                          It's essential to ensure that the provided arguments are valid and supported
+                          by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                          behavior during execution.
+                        items:
+                          type: string
+                        type: array
+                    type: object
+                required:
+                - destinationPath
+                type: object
+              retentionPolicy:
+                description: |-
+                  RetentionPolicy is the retention policy to be used for backups
+                  and WALs (e.g. '60d'). The retention policy is expressed in the form
+                  of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+                  days, weeks, months.
+                  It's currently only applicable when using the BarmanObjectStore method.
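+            # An illustrative sketch of how the backup stanza above can be
+            # used; the secret name "aws-creds" and the bucket path are
+            # hypothetical placeholders.
+            #
+            #   spec:
+            #     backup:
+            #       retentionPolicy: "30d"
+            #       barmanObjectStore:
+            #         destinationPath: s3://my-bucket/cluster-example
+            #         s3Credentials:
+            #           accessKeyId:
+            #             name: aws-creds
+            #             key: ACCESS_KEY_ID
+            #           secretAccessKey:
+            #             name: aws-creds
+            #             key: ACCESS_SECRET_KEY
+            #         wal:
+            #           compression: gzip
+            #           maxParallel: 4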
+ pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. + type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. 
+                      This option requires `localeProvider` to be set to `builtin`.
+                      Available from PostgreSQL 17.
+                    type: string
+                  dataChecksums:
+                    description: |-
+                      Whether the `-k` option should be passed to initdb,
+                      enabling checksums on data pages (default: `false`)
+                    type: boolean
+                  database:
+                    description: 'Name of the database used by the application.
+                      Default: `app`.'
+                    type: string
+                  encoding:
+                    description: The value to be passed as option `--encoding`
+                      for initdb (default:`UTF8`)
+                    type: string
+                  icuLocale:
+                    description: |-
+                      Specifies the ICU locale when the ICU provider is used.
+                      This option requires `localeProvider` to be set to `icu`.
+                      Available from PostgreSQL 15.
+                    type: string
+                  icuRules:
+                    description: |-
+                      Specifies additional collation rules to customize the behavior of the default collation.
+                      This option requires `localeProvider` to be set to `icu`.
+                      Available from PostgreSQL 16.
+                    type: string
+                  import:
+                    description: |-
+                      Bootstraps the new cluster by importing data from an existing PostgreSQL
+                      instance using logical backup (`pg_dump` and `pg_restore`)
+                    properties:
+                      databases:
+                        description: The databases to import
+                        items:
+                          type: string
+                        type: array
+                      pgDumpExtraOptions:
+                        description: |-
+                          List of custom options to pass to the `pg_dump` command. IMPORTANT:
+                          Use these options with caution and at your own risk, as the operator
+                          does not validate their content. Be aware that certain options may
+                          conflict with the operator's intended functionality or design.
+                        items:
+                          type: string
+                        type: array
+                      pgRestoreExtraOptions:
+                        description: |-
+                          List of custom options to pass to the `pg_restore` command. IMPORTANT:
+                          Use these options with caution and at your own risk, as the operator
+                          does not validate their content. Be aware that certain options may
+                          conflict with the operator's intended functionality or design.
+                        items:
+                          type: string
+                        type: array
+                      postImportApplicationSQL:
+                        description: |-
+                          List of SQL queries to be executed as a superuser in the application
+                          database right after it is imported - to be used with extreme care
+                          (by default empty). Only available in microservice type.
+                        items:
+                          type: string
+                        type: array
+                      roles:
+                        description: The roles to import
+                        items:
+                          type: string
+                        type: array
+                      schemaOnly:
+                        description: |-
+                          When set to true, only the `pre-data` and `post-data` sections of
+                          `pg_restore` are invoked, avoiding data import. Default: `false`.
+                        type: boolean
+                      source:
+                        description: The source of the import
+                        properties:
+                          externalCluster:
+                            description: The name of the externalCluster used
+                              for import
+                            type: string
+                        required:
+                        - externalCluster
+                        type: object
+                      type:
+                        description: The import type. Can be `microservice` or
+                          `monolith`.
+                        enum:
+                        - microservice
+                        - monolith
+                        type: string
+                    required:
+                    - databases
+                    - source
+                    - type
+                    type: object
+                  locale:
+                    description: Sets the default collation order and character
+                      classification in the new database.
+                    type: string
+                  localeCType:
+                    description: The value to be passed as option `--lc-ctype`
+                      for initdb (default:`C`)
+                    type: string
+                  localeCollate:
+                    description: The value to be passed as option `--lc-collate`
+                      for initdb (default:`C`)
+                    type: string
+                  localeProvider:
+                    description: |-
+                      This option sets the locale provider for databases created in the new cluster.
+                      Available from PostgreSQL 16.
+                    type: string
+                  options:
+                    description: |-
+                      The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
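+                  # An illustrative initdb bootstrap wiring the post-init SQL
+                  # hooks described above; the secret name "app-init-sql" is a
+                  # hypothetical placeholder.
+                  #
+                  #   spec:
+                  #     bootstrap:
+                  #       initdb:
+                  #         database: app
+                  #         owner: app
+                  #         dataChecksums: true
+                  #         postInitApplicationSQLRefs:
+                  #           secretRefs:
+                  #           - name: app-init-sql
+                  #             key: init.sql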
+ type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. 
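+                # A sketch of the pg_basebackup bootstrap method; "origin" is
+                # a hypothetical entry of spec.externalClusters.
+                #
+                #   spec:
+                #     bootstrap:
+                #       pg_basebackup:
+                #         source: origin
+                #         database: app
+                #         owner: app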
+ type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. 
Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
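+            # An illustrative client TLS setup for the fields described above;
+            # both secret names are hypothetical placeholders.
+            #
+            #   spec:
+            #     certificates:
+            #       clientCASecret: my-client-ca
+            #       replicationTLSSecret: my-replication-tls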
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name of each environment + variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. 
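+          # An illustrative ephemeral volume configuration per the template
+          # schema below; the storage class name is a placeholder.
+          #
+          #   spec:
+          #     ephemeralVolumeSource:
+          #       volumeClaimTemplate:
+          #         spec:
+          #           accessModes: ["ReadWriteOnce"]
+          #           storageClassName: standard
+          #           resources:
+          #             requests:
+          #               storage: 1Gi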
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. 
+ This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
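+          # A hypothetical external cluster entry pointing at a Barman object
+          # store; the name, path, and credentials mode are illustrative only.
+          #
+          #   spec:
+          #     externalClusters:
+          #     - name: origin
+          #       barmanObjectStore:
+          #         destinationPath: s3://my-bucket/origin
+          #         serverName: origin
+          #         s3Credentials:
+          #           inheritFromIAMRole: true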
+                          items:
+                            type: string
+                          type: array
+                        compression:
+                          description: |-
+                            Compress a backup file (a tar file per tablespace) while streaming it
+                            to the object store. Available options are empty string (no
+                            compression, default), `gzip`, `bzip2`, and `snappy`.
+                          enum:
+                          - bzip2
+                          - gzip
+                          - snappy
+                          type: string
+                        encryption:
+                          description: |-
+                            Whether to force the encryption of files (if the bucket is
+                            not already configured for that).
+                            Allowed options are empty string (use the bucket policy, default),
+                            `AES256` and `aws:kms`
+                          enum:
+                          - AES256
+                          - aws:kms
+                          type: string
+                        immediateCheckpoint:
+                          description: |-
+                            Control whether the I/O workload for the backup initial checkpoint will
+                            be limited, according to the `checkpoint_completion_target` setting on
+                            the PostgreSQL server. If set to true, an immediate checkpoint will be
+                            used, meaning PostgreSQL will complete the checkpoint as soon as
+                            possible. `false` by default.
+                          type: boolean
+                        jobs:
+                          description: |-
+                            The number of parallel jobs to be used to upload the backup, defaults
+                            to 2
+                          format: int32
+                          minimum: 1
+                          type: integer
+                      type: object
+                    destinationPath:
+                      description: |-
+                        The path where to store the backup (e.g. s3://bucket/path/to/folder);
+                        this path, with different destination folders, will be used for WALs
+                        and for data
+                      minLength: 1
+                      type: string
+                    endpointCA:
+                      description: |-
+                        EndpointCA stores the CA bundle of the barman endpoint.
+                        Useful when using self-signed certificates to avoid
+                        errors with certificate issuer and barman-cloud-wal-archive
+                      properties:
+                        key:
+                          description: The key to select
+                          type: string
+                        name:
+                          description: Name of the referent.
+                          type: string
+                      required:
+                      - key
+                      - name
+                      type: object
+                    endpointURL:
+                      description: |-
+                        Endpoint to be used to upload data to the cloud,
+                        overriding the automatic endpoint discovery
+                      type: string
+                    googleCredentials:
+                      description: The credentials to use to upload data to Google
+                        Cloud Storage
+                      properties:
+                        applicationCredentials:
+                          description: The secret containing the Google Cloud
+                            Storage JSON file with the credentials
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                          - key
+                          - name
+                          type: object
+                        gkeEnvironment:
+                          description: |-
+                            If set to true, will presume that it's running inside a GKE environment,
+                            defaulting to false.
+                          type: boolean
+                      type: object
+                    historyTags:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        HistoryTags is a list of key value pairs that will be passed to the
+                        Barman --history-tags option.
+                      type: object
+                    s3Credentials:
+                      description: The credentials to use to upload data to S3
+                      properties:
+                        accessKeyId:
+                          description: The reference to the access key id
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                          - key
+                          - name
+                          type: object
+                        inheritFromIAMRole:
+                          description: Use the role based authentication without
+                            explicitly providing the keys.
+                          type: boolean
+                        region:
+                          description: The reference to the secret containing
+                            the region name
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                          - key
+                          - name
+                          type: object
+                        secretAccessKey:
+                          description: The reference to the secret access key
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                          - key
+                          - name
+                          type: object
+                        sessionToken:
+                          description: The reference to the session key
+                          properties:
+                            key:
+                              description: The key to select
+                              type: string
+                            name:
+                              description: Name of the referent.
+                              type: string
+                          required:
+                          - key
+                          - name
+                          type: object
+                      type: object
+                    serverName:
+                      description: |-
+                        The server name on S3; the cluster name is used if this
+                        parameter is omitted
+                      type: string
+                    tags:
+                      additionalProperties:
+                        type: string
+                      description: |-
+                        Tags is a list of key value pairs that will be passed to the
+                        Barman --tags option.
+                      type: object
+                    wal:
+                      description: |-
+                        The configuration for the backup of the WAL stream.
+                        When not defined, WAL files will be stored uncompressed and may be
+                        unencrypted in the object store, according to the bucket default policy.
+                      properties:
+                        archiveAdditionalCommandArgs:
+                          description: |-
+                            Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+                            command-line invocation. These arguments provide flexibility to customize
+                            the WAL archive process further, according to specific requirements or configurations.
+
+                            Example:
+                            In a scenario where specialized backup options are required, such as setting
+                            a specific timeout or defining custom behavior, users can use this field
+                            to specify additional command arguments.
+
+                            Note:
+                            It's essential to ensure that the provided arguments are valid and supported
+                            by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+                            behavior during execution.
+                          items:
+                            type: string
+                          type: array
+                        compression:
+                          description: |-
+                            Compress a WAL file before sending it to the object store. Available
+                            options are empty string (no compression, default), `gzip`, `bzip2`,
+                            `lz4`, `snappy`, `xz`, and `zstd`.
+                          enum:
+                          - bzip2
+                          - gzip
+                          - lz4
+                          - snappy
+                          - xz
+                          - zstd
+                          type: string
+                        encryption:
+                          description: |-
+                            Whether to force the encryption of files (if the bucket is
+                            not already configured for that).
+                            Allowed options are empty string (use the bucket policy, default),
+                            `AES256` and `aws:kms`
+                          enum:
+                          - AES256
+                          - aws:kms
+                          type: string
+                        maxParallel:
+                          description: |-
+                            Number of WAL files to be either archived in parallel (when the
+                            PostgreSQL instance is archiving to a backup object store) or
+                            restored in parallel (when a PostgreSQL standby is fetching WAL
+                            files from a recovery object store). If not specified, WAL files
+                            will be processed one at a time. It accepts a positive integer as a
+                            value - with 1 being the minimum accepted value.
+                          minimum: 1
+                          type: integer
+                        restoreAdditionalCommandArgs:
+                          description: |-
+                            Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                            command-line invocation. These arguments provide flexibility to customize
+                            the WAL restore process further, according to specific requirements or configurations.
+
+                            Example:
+                            In a scenario where specialized backup options are required, such as setting
+                            a specific timeout or defining custom behavior, users can use this field
+                            to specify additional command arguments.
+
+                            Note:
+                            It's essential to ensure that the provided arguments are valid and supported
+                            by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                            behavior during execution.
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
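+          # An illustrative reference to a hypothetical ImageCatalog named
+          # "postgresql"; per the validations above, only the ImageCatalog and
+          # ClusterImageCatalog kinds are accepted.
+          #
+          #   spec:
+          #     imageCatalogRef:
+          #       apiGroup: postgresql.cnpg.io
+          #       kind: ImageCatalog
+          #       name: postgresql
+          #       major: 16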
+                    type: string
+                required:
+                - name
+                type: object
+              type: array
+            inheritedMetadata:
+              description: Metadata that will be inherited by all objects related
+                to the Cluster
+              properties:
+                annotations:
+                  additionalProperties:
+                    type: string
+                  type: object
+                labels:
+                  additionalProperties:
+                    type: string
+                  type: object
+              type: object
+            instances:
+              default: 1
+              description: Number of instances required in the cluster
+              minimum: 1
+              type: integer
+            livenessProbeTimeout:
+              description: |-
+                LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance
+                to successfully respond to the liveness probe (default 30).
+                The Liveness probe failure threshold is derived from this value using the formula:
+                ceiling(livenessProbeTimeout / 10).
+              format: int32
+              type: integer
+            logLevel:
+              default: info
+              description: 'The instances'' log level, one of the following values:
+                error, warning, info (default), debug, trace'
+              enum:
+              - error
+              - warning
+              - info
+              - debug
+              - trace
+              type: string
+            managed:
+              description: The configuration that is used by the portions of PostgreSQL
+                that are managed by the instance manager
+              properties:
+                roles:
+                  description: Database roles managed by the `Cluster`
+                  items:
+                    description: |-
+                      RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+                      with the additional field Ensure specifying whether to ensure the presence or
+                      absence of the role in the database
+
+                      The defaults of the CREATE ROLE command are applied
+                      Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+                    properties:
+                      bypassrls:
+                        description: |-
+                          Whether a role bypasses every row-level security (RLS) policy.
+                          Default is `false`.
+                        type: boolean
+                      comment:
+                        description: Description of the role
+                        type: string
+                      connectionLimit:
+                        default: -1
+                        description: |-
+                          If the role can log in, this specifies how many concurrent
+                          connections the role can make. `-1` (the default) means no limit.
+                        format: int64
+                        type: integer
+                      createdb:
+                        description: |-
+                          When set to `true`, the role being defined will be allowed to create
+                          new databases. Specifying `false` (default) will deny a role the
+                          ability to create databases.
+                        type: boolean
+                      createrole:
+                        description: |-
+                          Whether the role will be permitted to create, alter, drop, comment
+                          on, change the security label for, and grant or revoke membership in
+                          other roles. Default is `false`.
+                        type: boolean
+                      disablePassword:
+                        description: DisablePassword indicates that a role's password
+                          should be set to NULL in Postgres
+                        type: boolean
+                      ensure:
+                        default: present
+                        description: Ensure the role is `present` or `absent` -
+                          defaults to "present"
+                        enum:
+                        - present
+                        - absent
+                        type: string
+                      inRoles:
+                        description: |-
+                          List of one or more existing roles to which this role will be
+                          immediately added as a new member. Default empty.
+                        items:
+                          type: string
+                        type: array
+                      inherit:
+                        default: true
+                        description: |-
+                          Whether a role "inherits" the privileges of roles it is a member of.
+                          Default is `true`.
+                        type: boolean
+                      login:
+                        description: |-
+                          Whether the role is allowed to log in. A role having the `login`
+                          attribute can be thought of as a user. Roles without this attribute
+                          are useful for managing database privileges, but are not users in
+                          the usual sense of the word. Default is `false`.
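+            # A minimal sketch of the `managed.roles` stanza described above; the
+            # role and secret names are hypothetical, and `pg_read_all_data` is a
+            # PostgreSQL predefined role used here purely as an example:
+            #
+            #   managed:
+            #     roles:
+            #     - name: app_reader
+            #       ensure: present
+            #       login: true
+            #       connectionLimit: 10
+            #       inRoles:
+            #       - pg_read_all_data
+            #       passwordSecret:
+            #         name: app-reader-password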
+                        type: boolean
+                      name:
+                        description: Name of the role
+                        type: string
+                      passwordSecret:
+                        description: |-
+                          Secret containing the password of the role (if present)
+                          If null, the password will be ignored unless DisablePassword is set
+                        properties:
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - name
+                        type: object
+                      replication:
+                        description: |-
+                          Whether a role is a replication role. A role must have this
+                          attribute (or be a superuser) in order to be able to connect to the
+                          server in replication mode (physical or logical replication) and in
+                          order to be able to create or drop replication slots. A role having
+                          the `replication` attribute is a very highly privileged role, and
+                          should only be used on roles actually used for replication. Default
+                          is `false`.
+                        type: boolean
+                      superuser:
+                        description: |-
+                          Whether the role is a `superuser` who can override all access
+                          restrictions within the database - superuser status is dangerous and
+                          should be used only when really needed. You must yourself be a
+                          superuser to create a new superuser. Default is `false`.
+                        type: boolean
+                      validUntil:
+                        description: |-
+                          Date and time after which the role's password is no longer valid.
+                          When omitted, the password will never expire (default).
+                        format: date-time
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  type: array
+                services:
+                  description: Services managed by the `Cluster`
+                  properties:
+                    additional:
+                      description: Additional is a list of additional managed services
+                        specified by the user.
+                      items:
+                        description: |-
+                          ManagedService represents a specific service managed by the cluster.
+                          It includes the type of service and its associated template specification.
+                        properties:
+                          selectorType:
+                            description: |-
+                              SelectorType specifies the type of selectors that the service will have.
+                              Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services.
+                            enum:
+                            - rw
+                            - r
+                            - ro
+                            type: string
+                          serviceTemplate:
+                            description: ServiceTemplate is the template specification
+                              for the service.
+                            properties:
+                              metadata:
+                                description: |-
+                                  Standard object's metadata.
+                                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+                                properties:
+                                  annotations:
+                                    additionalProperties:
+                                      type: string
+                                    description: |-
+                                      Annotations is an unstructured key value map stored with a resource that may be
+                                      set by external tools to store and retrieve arbitrary metadata. They are not
+                                      queryable and should be preserved when modifying objects.
+                                      More info: http://kubernetes.io/docs/user-guide/annotations
+                                    type: object
+                                  labels:
+                                    additionalProperties:
+                                      type: string
+                                    description: |-
+                                      Map of string keys and values that can be used to organize and categorize
+                                      (scope and select) objects. May match selectors of replication controllers
+                                      and services.
+                                      More info: http://kubernetes.io/docs/user-guide/labels
+                                    type: object
+                                  name:
+                                    description: The name of the resource. Only
+                                      supported for certain types
+                                    type: string
+                                type: object
+                              spec:
+                                description: |-
+                                  Specification of the desired behavior of the service.
+                                  More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                                properties:
+                                  allocateLoadBalancerNodePorts:
+                                    description: |-
+                                      allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                                      allocated for services with type LoadBalancer. Default is "true". It
+                                      may be set to "false" if the cluster load-balancer does not rely on
+                                      NodePorts. If the caller requests specific NodePorts (by specifying a
+                                      value), those requests will be respected, regardless of this field.
+                                      This field may only be set for services with type LoadBalancer and will
+                                      be cleared if the type is changed to any other type.
+                                    type: boolean
+                                  clusterIP:
+                                    description: |-
+                                      clusterIP is the IP address of the service and is usually assigned
+                                      randomly. If an address is specified manually, is in-range (as per
+                                      system configuration), and is not in use, it will be allocated to the
+                                      service; otherwise creation of the service will fail. This field may not
+                                      be changed through updates unless the type field is also being changed
+                                      to ExternalName (which requires this field to be blank) or the type
+                                      field is being changed from ExternalName (in which case this field may
+                                      optionally be specified, as described above). Valid values are "None",
+                                      empty string (""), or a valid IP address. Setting this to "None" makes a
+                                      "headless service" (no virtual IP), which is useful when direct endpoint
+                                      connections are preferred and proxying is not required. Only applies to
+                                      types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                                      when creating a Service of type ExternalName, creation will fail. This
+                                      field will be wiped when updating a Service to type ExternalName.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    type: string
+                                  clusterIPs:
+                                    description: |-
+                                      ClusterIPs is a list of IP addresses assigned to this service, and are
+                                      usually assigned randomly. If an address is specified manually, is
+                                      in-range (as per system configuration), and is not in use, it will be
+                                      allocated to the service; otherwise creation of the service will fail.
+                                      This field may not be changed through updates unless the type field is
+                                      also being changed to ExternalName (which requires this field to be
+                                      empty) or the type field is being changed from ExternalName (in which
+                                      case this field may optionally be specified, as described above). Valid
+                                      values are "None", empty string (""), or a valid IP address. Setting
+                                      this to "None" makes a "headless service" (no virtual IP), which is
+                                      useful when direct endpoint connections are preferred and proxying is
+                                      not required. Only applies to types ClusterIP, NodePort, and
+                                      LoadBalancer. If this field is specified when creating a Service of type
+                                      ExternalName, creation will fail. This field will be wiped when updating
+                                      a Service to type ExternalName. If this field is not specified, it will
+                                      be initialized from the clusterIP field. If this field is specified,
+                                      clients must ensure that clusterIPs[0] and clusterIP have the same
+                                      value.
+
+                                      This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                                      These IPs must correspond to the values of the ipFamilies field. Both
+                                      clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  externalIPs:
+                                    description: |-
+                                      externalIPs is a list of IP addresses for which nodes in the cluster
+                                      will also accept traffic for this service. These IPs are not managed by
+                                      Kubernetes. The user is responsible for ensuring that traffic arrives
+                                      at a node with this IP.
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+                                      This field will be wiped when updating a Service to type ExternalName.
+
+                                      This field may hold a maximum of two entries (dual-stack families, in
+                                      either order). These families must correspond to the values of the
+                                      clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                      governed by the ipFamilyPolicy field.
+                                    items:
+                                      description: |-
+                                        IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                        to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  ipFamilyPolicy:
+                                    description: |-
+                                      IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                      this Service. If there is no value provided, then this field will be set
+                                      to SingleStack. Services can be "SingleStack" (a single IP family),
+                                      "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                      a single IP family on single-stack clusters), or "RequireDualStack"
+                                      (two IP families on dual-stack configured clusters, otherwise fail). The
+                                      ipFamilies and clusterIPs fields depend on the value of this field. This
+                                      field will be wiped when updating a service to type ExternalName.
+                                    type: string
+                                  loadBalancerClass:
+                                    description: |-
+                                      loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                      If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                      e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                      This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                      balancer implementation is used, today this is typically done through the cloud provider integration,
+                                      but should apply for any default implementation. If set, it is assumed that a load balancer
+                                      implementation is watching for Services with a matching class. Any default load balancer
+                                      implementation (e.g. cloud providers) should ignore Services that set this field.
+                                      This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                      Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                    type: string
+                                  loadBalancerIP:
+                                    description: |-
+                                      Only applies to Service Type: LoadBalancer.
+                                      This feature depends on whether the underlying cloud-provider supports specifying
+                                      the loadBalancerIP when a load balancer is created.
+                                      This field will be ignored if the cloud-provider does not support the feature.
+                                      Deprecated: This field was under-specified and its meaning varies across implementations.
+                                      Using it is non-portable and it may not support dual-stack.
+                                      Users are encouraged to use implementation-specific annotations when available.
+                                    type: string
+                                  loadBalancerSourceRanges:
+                                    description: |-
+                                      If specified and supported by the platform, traffic through the cloud-provider
+                                      load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                                      cloud-provider does not support the feature.
+                                      More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  ports:
+                                    description: |-
+                                      The list of ports that are exposed by this service.
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                    items:
+                                      description: ServicePort contains information
+                                        on service's port.
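+            # A sketch, with hypothetical names, of an additional managed service
+            # built from the `selectorType` and `serviceTemplate` fields above,
+            # exposing the read-write endpoint through a LoadBalancer:
+            #
+            #   managed:
+            #     services:
+            #       additional:
+            #       - selectorType: rw
+            #         serviceTemplate:
+            #           metadata:
+            #             name: cluster-example-rw-lb
+            #           spec:
+            #             type: LoadBalancer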
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+                                      More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                                    type: string
+                                type: object
+                            type: object
+                          updateStrategy:
+                            default: patch
+                            description: UpdateStrategy describes how the service
+                              differences should be reconciled
+                            enum:
+                            - patch
+                            - replace
+                            type: string
+                        required:
+                        - selectorType
+                        - serviceTemplate
+                        type: object
+                      type: array
+                    disabledDefaultServices:
+                      description: |-
+                        DisabledDefaultServices is a list of service types that are disabled by default.
+                        Valid values are "r" and "ro", representing read and read-only services.
+                      items:
+                        description: |-
+                          ServiceSelectorType describes a valid value for generating the service selectors.
+                          It indicates which type of service the selector applies to, such as read-write, read, or read-only
+                        enum:
+                        - rw
+                        - r
+                        - ro
+                        type: string
+                      type: array
+                  type: object
+              type: object
+            maxSyncReplicas:
+              default: 0
+              description: |-
+                The target value for the synchronous replication quorum, which can be
+                decreased if the number of ready standbys is lower than this.
+                Undefined or 0 disables synchronous replication.
+              minimum: 0
+              type: integer
+            minSyncReplicas:
+              default: 0
+              description: |-
+                Minimum number of instances required in synchronous replication with the
+                primary. Undefined or 0 allows writes to complete when no standby is
+                available.
+              minimum: 0
+              type: integer
+            monitoring:
+              description: The configuration of the monitoring infrastructure of
+                this cluster
+              properties:
+                customQueriesConfigMap:
+                  description: The list of config maps containing the custom queries
+                  items:
+                    description: |-
+                      ConfigMapKeySelector contains enough information to let you locate
+                      the key of a ConfigMap
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                  type: array
+                customQueriesSecret:
+                  description: The list of secrets containing the custom queries
+                  items:
+                    description: |-
+                      SecretKeySelector contains enough information to let you locate
+                      the key of a Secret
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                  type: array
+                disableDefaultQueries:
+                  default: false
+                  description: |-
+                    Whether the default queries should be injected.
+                    Set it to `true` if you don't want to inject default queries into the cluster.
+                    Default: false.
+                  type: boolean
+                enablePodMonitor:
+                  default: false
+                  description: Enable or disable the `PodMonitor`
+                  type: boolean
+                podMonitorMetricRelabelings:
+                  description: The list of metric relabelings for the `PodMonitor`.
+                    Applied to samples before ingestion.
+                  items:
+                    description: |-
+                      RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+                      scraped samples and remote write samples.
+
+                      More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+                    properties:
+                      action:
+                        default: replace
+                        description: |-
+                          Action to perform based on the regex matching.
+
+                          `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+                          `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
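+            # How the monitoring fields above can fit together; the relabeling
+            # rule below (the label name is illustrative) drops a label from every
+            # sample before ingestion:
+            #
+            #   monitoring:
+            #     enablePodMonitor: true
+            #     podMonitorMetricRelabelings:
+            #     - action: labeldrop
+            #       regex: environment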
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are
+                              almost certainly wrong.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            type: string
+                          optional:
+                            description: Specify whether the Secret or its key
+                              must be defined
+                            type: boolean
+                        required:
+                        - key
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      searchAttribute:
+                        description: Attribute to match against the username
+                        type: string
+                      searchFilter:
+                        description: Search filter to use when doing the search+bind
+                          authentication
+                        type: string
+                    type: object
+                  port:
+                    description: LDAP server port
+                    type: integer
+                  scheme:
+                    description: LDAP scheme to be used, possible options are
+                      `ldap` and `ldaps`
+                    enum:
+                    - ldap
+                    - ldaps
+                    type: string
+                  server:
+                    description: LDAP hostname or IP address
+                    type: string
+                  tls:
+                    description: Set to 'true' to enable LDAP over TLS. 'false'
+                      is default
+                    type: boolean
+                type: object
+              parameters:
+                additionalProperties:
+                  type: string
+                description: PostgreSQL configuration options (postgresql.conf)
+                type: object
+              pg_hba:
+                description: |-
+                  PostgreSQL Host Based Authentication rules (lines to be appended
+                  to the pg_hba.conf file)
+                items:
+                  type: string
+                type: array
+              pg_ident:
+                description: |-
+                  PostgreSQL User Name Maps rules (lines to be appended
+                  to the pg_ident.conf file)
+                items:
+                  type: string
+                type: array
+              promotionTimeout:
+                description: |-
+                  Specifies the maximum number of seconds to wait when promoting an instance to primary.
+                  Default value is 40000000, greater than one year in seconds,
+                  big enough to simulate an infinite timeout
+                format: int32
+                type: integer
+              shared_preload_libraries:
+                description: List of shared preload libraries to add to the default
+                  ones
+                items:
+                  type: string
+                type: array
+              syncReplicaElectionConstraint:
+                description: |-
+                  Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+                  set up.
+                properties:
+                  enabled:
+                    description: This flag enables the constraints for sync replicas
+                    type: boolean
+                  nodeLabelsAntiAffinity:
+                    description: A list of node label values to extract and compare
+                      to evaluate if the pods reside in the same topology or not
+                    items:
+                      type: string
+                    type: array
+                required:
+                - enabled
+                type: object
+              synchronous:
+                description: Configuration of the PostgreSQL synchronous replication
+                  feature
+                properties:
+                  dataDurability:
+                    description: |-
+                      If set to "required", data durability is strictly enforced. Write operations
+                      with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+                      block if there are insufficient healthy replicas, ensuring data persistence.
+                      If set to "preferred", data durability is maintained when healthy replicas
+                      are available, but the required number of instances will adjust dynamically
+                      if replicas become unavailable. This setting relaxes strict durability enforcement
+                      to allow for operational continuity. This setting is only applicable if both
+                      `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+                    enum:
+                    - required
+                    - preferred
+                    type: string
+                  maxStandbyNamesFromCluster:
+                    description: |-
+                      Specifies the maximum number of local cluster pods that can be
+                      automatically included in the `synchronous_standby_names` option in
+                      PostgreSQL.
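+            # A minimal sketch of the `postgresql` section above; all parameter
+            # values and the pg_hba rule are illustrative only:
+            #
+            #   postgresql:
+            #     parameters:
+            #       max_connections: "200"
+            #       shared_buffers: 256MB
+            #     pg_hba:
+            #     - host all app 10.0.0.0/8 scram-sha-256
+            #     shared_preload_libraries:
+            #     - pg_stat_statements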
+ type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
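+            # For illustration, quorum-based synchronous replication via the
+            # `postgresql.synchronous` stanza above (values illustrative); this
+            # stanza is the newer alternative to the `minSyncReplicas` and
+            # `maxSyncReplicas` fields that appear earlier in this schema:
+            #
+            #   postgresql:
+            #     synchronous:
+            #       method: any
+            #       number: 1
+            #       dataDurability: required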
+ format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
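+            # A sketch of the `probes` stanza above: a readiness probe using the
+            # `streaming` strategy with a lag limit (values illustrative):
+            #
+            #   probes:
+            #     readiness:
+            #       type: streaming
+            #       maximumLag: 16Mi
+            #       periodSeconds: 10
+            #       failureThreshold: 3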
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
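+            # How `projectedVolumeTemplate` above might project a Secret key under
+            # the `/projected` base folder (names are hypothetical):
+            #
+            #   projectedVolumeTemplate:
+            #     sources:
+            #     - secret:
+            #         name: app-extra-config
+            #         items:
+            #         - key: settings.json
+            #           path: app/settings.json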
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. 
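+ # A hedged example of the replica and replicationSlots stanzas defined
+ # above; the cluster names are hypothetical, and the referenced source
+ # must match an externalClusters entry defined elsewhere in this schema:
+ #
+ #   spec:
+ #     replica:
+ #       self: cluster-eu
+ #       primary: cluster-us
+ #       source: cluster-us
+ #     replicationSlots:
+ #       highAvailability:
+ #         enabled: true
+ #         slotPrefix: _cnpg_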
+ pattern: ^[0-9a-z_]*$ + type: string + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. + Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. 
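+ # Sketch of the user-defined slot synchronization and resource settings
+ # described above; the exclude pattern and resource sizes are hypothetical:
+ #
+ #   spec:
+ #     replicationSlots:
+ #       updateInterval: 30
+ #       synchronizeReplicas:
+ #         enabled: true
+ #         excludePatterns:
+ #           - "^legacy_"
+ #     resources:
+ #       requests:
+ #         cpu: "1"
+ #         memory: 2Gi
+ #       limits:
+ #         memory: 2Gi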
+ The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
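+ # Sketch combining the Pod-level and lifecycle settings defined above;
+ # the annotation key is hypothetical. Per the field descriptions, the
+ # window reserved for the operator's fast-shutdown request is
+ # stopDelay - smartShutdownTimeout:
+ #
+ #   spec:
+ #     seccompProfile:
+ #       type: RuntimeDefault
+ #     serviceAccountTemplate:
+ #       metadata:
+ #         annotations:
+ #           example.com/role: postgres    # hypothetical annotation
+ #     startDelay: 3600
+ #     stopDelay: 1800
+ #     smartShutdownTimeout: 180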
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
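+ # Schema-level sketch of a pvcTemplate pre-populated from a VolumeSnapshot
+ # via dataSource; snapshot and storage-class names are hypothetical, and
+ # size may be omitted because the template already requests storage:
+ #
+ #   spec:
+ #     storage:
+ #       pvcTemplate:
+ #         accessModes:
+ #           - ReadWriteOnce
+ #         storageClassName: fast-ssd
+ #         resources:
+ #           requests:
+ #             storage: 100Gi
+ #         dataSource:
+ #           apiGroup: snapshot.storage.k8s.io
+ #           kind: VolumeSnapshot
+ #           name: pgdata-snapshot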
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. 
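+ # Common storage-related settings from this spec in one hedged sketch;
+ # walStorage (defined further below) mirrors the storage shape, and all
+ # names and sizes are hypothetical:
+ #
+ #   spec:
+ #     storage:
+ #       size: 100Gi
+ #       storageClass: fast-ssd
+ #       resizeInUseVolumes: true
+ #     walStorage:
+ #       size: 20Gi
+ #       storageClass: fast-ssd
+ #     superuserSecret:
+ #       name: cluster-example-superuser
+ #     switchoverDelay: 600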
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
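+ # Sketch of the tablespaces list introduced above; names, owner, and
+ # sizes are hypothetical, and the temporary flag (defined at the end of
+ # this tablespace schema) marks a temp_tablespaces entry:
+ #
+ #   spec:
+ #     tablespaces:
+ #       - name: analytics
+ #         owner:
+ #           name: app
+ #         storage:
+ #           size: 50Gi
+ #       - name: scratch
+ #         temporary: true
+ #         storage:
+ #           size: 10Gi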
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
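+ # Hedged example of a zone-spreading constraint as defined above; the
+ # selector label is an assumption about how the instance Pods are labeled:
+ #
+ #   spec:
+ #     topologySpreadConstraints:
+ #       - maxSkew: 1
+ #         topologyKey: topology.kubernetes.io/zone
+ #         whenUnsatisfiable: DoNotSchedule
+ #         labelSelector:
+ #           matchLabels:
+ #             cnpg.io/cluster: cluster-example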
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. 
If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
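+ # Hypothetical excerpt of this status stanza as reported on a cluster
+ # with operator-generated certificates; all names are made up, and the
+ # expiration timestamp format is an assumption:
+ #
+ #   status:
+ #     certificates:
+ #       clientCASecret: cluster-example-ca
+ #       serverCASecret: cluster-example-ca
+ #       serverTLSSecret: cluster-example-server
+ #       replicationTLSSecret: cluster-example-replication
+ #       expirations:
+ #         cluster-example-ca: "2025-09-25 08:52:53 +0000 UTC"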
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
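+ # A hypothetical entry for the conditions list defined above; the type
+ # and reason values are assumptions about what the operator reports:
+ #
+ #   status:
+ #     conditions:
+ #       - type: Ready
+ #         status: "True"
+ #         reason: ClusterIsReady
+ #         message: Cluster is Ready
+ #         lastTransitionTime: "2024-09-25T08:52:53Z"
+ #         observedGeneration: 1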
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod. + + Deprecated: the field is not set for backup plugins. + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: |- + The first recoverability point, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. + type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: |- + Last failed backup, stored as a date in RFC3339 format. + + Deprecated: the field is not set for backup plugins. + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format. + This field is calculated from the content of LastSuccessfulBackupByMethod. 
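+ # Hypothetical excerpt of these status fields for a healthy three-node
+ # cluster; the instance names and the instancesStatus map keys are
+ # assumptions:
+ #
+ #   status:
+ #     currentPrimary: cluster-example-1
+ #     instanceNames:
+ #       - cluster-example-1
+ #       - cluster-example-2
+ #       - cluster-example-3
+ #     instancesStatus:
+ #       healthy:
+ #         - cluster-example-1
+ #         - cluster-example-2
+ #         - cluster-example-3
+ #     firstRecoverabilityPointByMethod:
+ #       barmanObjectStore: "2024-09-01T00:00:00Z"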
+ + Deprecated: the field is not set for backup plugins. + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: |- + Last successful backup, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. + properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + 
poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have the ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ switchReplicaClusterStatus:
+ description: SwitchReplicaClusterStatus is the status of the switch
+ to replica cluster
+ properties:
+ inProgress:
+ description: InProgress indicates if there is an ongoing procedure
+ of switching a cluster to a replica cluster.
+ type: boolean
+ type: object
+ systemID:
+ description: SystemID is the latest detected PostgreSQL SystemID
+ type: string
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance, this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+ SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+ description: Current write pod
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.18.0
+ name: databases.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Database
+ listKind: DatabaseList
+ plural: databases
+ singular: database
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.name
+ name: PG Name
+ type: string
+ - jsonPath: .status.applied
+ name: Applied
+ type: boolean
+ - description: Latest reconciliation message
+ jsonPath: .status.message
+ name: Message
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Database is the Schema for the databases API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. 
+ enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. 
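+ # A minimal Database manifest satisfying the required fields described
+ # above (cluster, name, owner) might look like the following sketch;
+ # all object and role names here are hypothetical:
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: Database
+ #   metadata:
+ #     name: db-app
+ #   spec:
+ #     cluster:
+ #       name: cluster-example
+ #     name: app
+ #     owner: app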
+ type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ applied:
+ description: Applied is true if the database was reconciled correctly
+ type: boolean
+ extensions:
+ description: Extensions is the status of the managed extensions
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ message:
+ description: Message is the reconciliation output message
+ type: string
+ observedGeneration:
+ description: |-
+ A sequence number representing the latest
+ desired state that was synchronized
+ format: int64
+ type: integer
+ schemas:
+ description: Schemas is the status of the managed schemas
+ items:
+ description: DatabaseObjectStatus is the status of the managed database
+ objects
+ properties:
+ applied:
+ description: |-
+ True if the object has been installed successfully in
+ the database
+ type: boolean
+ message:
+ description: Message is the object reconciliation message
+ type: string
+ name:
+ description: The name of the object
+ type: string
+ required:
+ - applied
+ - name
+ type: object
+ type: array
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.18.0
+ name: imagecatalogs.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ImageCatalog
+ listKind: ImageCatalogList
+ plural: imagecatalogs
+ singular: imagecatalog
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ImageCatalog is the Schema for the imagecatalogs API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ImageCatalog.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ images:
+ description: List of CatalogImages available in the catalog
+ items:
+ description: CatalogImage defines the image and major version
+ properties:
+ image:
+ description: The image reference
+ type: string
+ major:
+ description: The PostgreSQL major version of the image. Must
+ be unique within the catalog.
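+ # For illustration only (the image references are hypothetical): a
+ # catalog honoring the unique-major constraint could look like:
+ #
+ #   apiVersion: postgresql.cnpg.io/v1
+ #   kind: ImageCatalog
+ #   metadata:
+ #     name: postgresql-catalog
+ #   spec:
+ #     images:
+ #       - major: 16
+ #         image: ghcr.io/cloudnative-pg/postgresql:16
+ #       - major: 17
+ #         image: ghcr.io/cloudnative-pg/postgresql:17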
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. 
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. 
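+ # Sketch of a single relabeling entry as a user might supply it in a
+ # Pooler's monitoring configuration (the label names are illustrative):
+ #
+ #   podMonitorRelabelings:
+ #     - sourceLabels: ["__meta_kubernetes_pod_name"]
+ #       targetLabel: pod
+ #       action: replace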
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. 
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
+ serviceTemplate:
+ description: Template for the Service to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ name:
+ description: The name of the resource. Only supported for
+ certain types
+ type: string
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the service.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ allocateLoadBalancerNodePorts:
+ description: |-
+ allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ allocated for services with type LoadBalancer. Default is "true". It
+ may be set to "false" if the cluster load-balancer does not rely on
+ NodePorts. If the caller requests specific NodePorts (by specifying a
+ value), those requests will be respected, regardless of this field.
+ This field may only be set for services with type LoadBalancer and will
+ be cleared if the type is changed to any other type.
+ type: boolean
+ clusterIP:
+ description: |-
+ clusterIP is the IP address of the service and is usually assigned
+ randomly. If an address is specified manually, is in-range (as per
+ system configuration), and is not in use, it will be allocated to the
+ service; otherwise creation of the service will fail. This field may not
+ be changed through updates unless the type field is also being changed
+ to ExternalName (which requires this field to be blank) or the type
+ field is being changed from ExternalName (in which case this field may
+ optionally be specified, as described above). Valid values are "None",
+ empty string (""), or a valid IP address. Setting this to "None" makes a
+ "headless service" (no virtual IP), which is useful when direct endpoint
+ connections are preferred and proxying is not required. Only applies to
+ types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ when creating a Service of type ExternalName, creation will fail. This
+ field will be wiped when updating a Service to type ExternalName.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ type: string
+ clusterIPs:
+ description: |-
+ ClusterIPs is a list of IP addresses assigned to this service, and are
+ usually assigned randomly.
If an address is specified manually, is
+ in-range (as per system configuration), and is not in use, it will be
+ allocated to the service; otherwise creation of the service will fail.
+ This field may not be changed through updates unless the type field is
+ also being changed to ExternalName (which requires this field to be
+ empty) or the type field is being changed from ExternalName (in which
+ case this field may optionally be specified, as described above). Valid
+ values are "None", empty string (""), or a valid IP address. Setting
+ this to "None" makes a "headless service" (no virtual IP), which is
+ useful when direct endpoint connections are preferred and proxying is
+ not required. Only applies to types ClusterIP, NodePort, and
+ LoadBalancer. If this field is specified when creating a Service of type
+ ExternalName, creation will fail. This field will be wiped when updating
+ a Service to type ExternalName. If this field is not specified, it will
+ be initialized from the clusterIP field. If this field is specified,
+ clients must ensure that clusterIPs[0] and clusterIP have the same
+ value.
+
+ This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ These IPs must correspond to the values of the ipFamilies field. Both
+ clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalIPs:
+ description: |-
+ externalIPs is a list of IP addresses for which nodes in the cluster
+ will also accept traffic for this service. These IPs are not managed by
+ Kubernetes. The user is responsible for ensuring that traffic arrives
+ at a node with this IP. A common example is external load-balancers
+ that are not part of the Kubernetes system.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ externalName:
+ description: |-
+ externalName is the external reference that discovery mechanisms will
+ return as an alias for this service (e.g. a DNS CNAME record). No
+ proxying will be involved. Must be a lowercase RFC-1123 hostname
+ (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ type: string
+ externalTrafficPolicy:
+ description: |-
+ externalTrafficPolicy describes how nodes distribute service traffic they
+ receive on one of the Service's "externally-facing" addresses (NodePorts,
+ ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ the service in a way that assumes that external load balancers will take care
+ of balancing the service traffic between nodes, and so each node will deliver
+ traffic only to the node-local endpoints of the service, without masquerading
+ the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ be dropped.) The default value, "Cluster", uses the standard behavior of
+ routing to all endpoints evenly (possibly modified by topology and other
+ features). Note that traffic sent to an External IP or LoadBalancer IP from
+ within the cluster will always get "Cluster" semantics, but clients sending to
+ a NodePort from within the cluster may need to take traffic policy into account
+ when picking a node.
+ type: string
+ healthCheckNodePort:
+ description: |-
+ healthCheckNodePort specifies the healthcheck nodePort for the service.
+ This only applies when type is set to LoadBalancer and
+ externalTrafficPolicy is set to Local.
If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer
+ implementation is watching for Services with a matching class. Any default load balancer
+ implementation (e.g. cloud providers) should ignore Services that set this field.
+ This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ type: string
+ loadBalancerIP:
+ description: |-
+ Only applies to Service Type: LoadBalancer.
+ This feature depends on whether the underlying cloud-provider supports specifying
+ the loadBalancerIP when a load balancer is created.
+ This field will be ignored if the cloud-provider does not support the feature.
+ Deprecated: This field was under-specified and its meaning varies across implementations.
+ Using it is non-portable and it may not support dual-stack.
+ Users are encouraged to use implementation-specific annotations when available.
+ type: string
+ loadBalancerSourceRanges:
+ description: |-
+ If specified and supported by the platform, traffic through the cloud-provider
+ load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ cloud-provider does not support the feature.
+ More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ ports:
+ description: |-
+ The list of ports that are exposed by this service.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ items:
+ description: ServicePort contains information on service's
+ port.
+ properties:
+ appProtocol:
+ description: |-
+ The application protocol for this port.
+ This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ This field follows standard Kubernetes label syntax.
+ Valid values are either:
+
+ * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ RFC-6335 and https://www.iana.org/assignments/service-names).
+
+ * Kubernetes-defined prefixed names:
+ * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+ * Other protocols should use implementation-defined prefixed names such as
+ mycompany.com/my-custom-protocol.
+ type: string
+ name:
+ description: |-
+ The name of this port within the service. This must be a DNS_LABEL.
+ All ports within a ServiceSpec must have unique names. When considering
+ the endpoints for a Service, this must match the 'name' field in the
+ EndpointPort.
+ Optional if only one ServicePort is defined on this service.
+ type: string
+ nodePort:
+ description: |-
+ The port on each node on which this service is exposed when type is
+ NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ specified, in-range, and not in use it will be used, otherwise the
+ operation will fail. If not specified, a port will be allocated if this
+ Service requires one. If this field is specified when creating a
+ Service which does not need it, creation will fail. This field will be
+ wiped when updating a Service to no longer need it (e.g.
changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. 
If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required.
A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
                        description: The weights of all of the matched WeightedPodAffinityTerm
                          fields are added per-node to find the most preferred
                          node(s)
                        properties:
                          podAffinityTerm:
                            description: Required. A pod affinity term,
                              associated with the corresponding weight.
                            properties:
                              labelSelector:
                                description: |-
                                  A label query over a set of resources, in this case pods.
                                  If it's null, this PodAffinityTerm matches with no Pods.
                                properties:
                                  matchExpressions:
                                    description: matchExpressions is a list
                                      of label selector requirements. The
                                      requirements are ANDed.
                                    items:
                                      description: |-
                                        A label selector requirement is a selector that contains values, a key, and an operator that
                                        relates the key and values.
                                      properties:
                                        key:
                                          description: key is the label
                                            key that the selector applies
                                            to.
                                          type: string
                                        operator:
                                          description: |-
                                            operator represents a key's relationship to a set of values.
                                            Valid operators are In, NotIn, Exists and DoesNotExist.
                                          type: string
                                        values:
                                          description: |-
                                            values is an array of string values. If the operator is In or NotIn,
                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                            the values array must be empty. This array is replaced during a strategic
                                            merge patch.
                                          items:
                                            type: string
                                          type: array
                                          x-kubernetes-list-type: atomic
                                      required:
                                      - key
                                      - operator
                                      type: object
                                    type: array
                                    x-kubernetes-list-type: atomic
                                  matchLabels:
                                    additionalProperties:
                                      type: string
                                    description: |-
                                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                                      map is equivalent to an element of matchExpressions, whose key field is "key", the
                                      operator is "In", and the values array contains only "value". The requirements are ANDed.
                                    type: object
                                type: object
                                x-kubernetes-map-type: atomic
                              matchLabelKeys:
                                description: |-
                                  MatchLabelKeys is a set of pod label keys to select which pods will
                                  be taken into consideration. The keys are used to look up values from the
                                  incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
                                  to select the group of existing pods which will be taken into consideration
                                  for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
                                  pod labels will be ignored. The default value is empty.
                                  The same key is forbidden to exist in both matchLabelKeys and labelSelector.
                                  Also, matchLabelKeys cannot be set when labelSelector isn't set.
                                items:
                                  type: string
                                type: array
                                x-kubernetes-list-type: atomic
                              mismatchLabelKeys:
                                description: |-
                                  MismatchLabelKeys is a set of pod label keys to select which pods will
                                  be taken into consideration. The keys are used to look up values from the
                                  incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
                                  to select the group of existing pods which will be taken into consideration
                                  for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
                                  pod labels will be ignored. The default value is empty.
                                  The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
                                  Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
                                items:
                                  type: string
                                type: array
                                x-kubernetes-list-type: atomic
                              namespaceSelector:
                                description: |-
                                  A label query over the set of namespaces that the term applies to.
                                  The term is applied to the union of the namespaces selected by this field
                                  and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
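+ # Hedged sketch of a hard co-location rule, assuming the same
                      # hypothetical "app: web" label: pods only schedule into zones
                      # that already run at least one matching pod.
                      #
                      #   requiredDuringSchedulingIgnoredDuringExecution:
                      #   - labelSelector:
                      #       matchLabels:
                      #         app: web
                      #     topologyKey: topology.kubernetes.io/zone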
+ items:
                        description: |-
                          Defines a set of pods (namely those matching the labelSelector
                          relative to the given namespace(s)) that this pod should be
                          co-located (affinity) or not co-located (anti-affinity) with,
                          where co-located is defined as running on a node whose value of
                          the label with key <topologyKey> matches that of any node on which
                          a pod of the set of pods is running
                        properties:
                          labelSelector:
                            description: |-
                              A label query over a set of resources, in this case pods.
                              If it's null, this PodAffinityTerm matches with no Pods.
                            properties:
                              matchExpressions:
                                description: matchExpressions is a list
                                  of label selector requirements. The requirements
                                  are ANDed.
                                items:
                                  description: |-
                                    A label selector requirement is a selector that contains values, a key, and an operator that
                                    relates the key and values.
                                  properties:
                                    key:
                                      description: key is the label key
                                        that the selector applies to.
                                      type: string
                                    operator:
                                      description: |-
                                        operator represents a key's relationship to a set of values.
                                        Valid operators are In, NotIn, Exists and DoesNotExist.
                                      type: string
                                    values:
                                      description: |-
                                        values is an array of string values. If the operator is In or NotIn,
                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                        the values array must be empty. This array is replaced during a strategic
                                        merge patch.
                                      items:
                                        type: string
                                      type: array
                                      x-kubernetes-list-type: atomic
                                  required:
                                  - key
                                  - operator
                                  type: object
                                type: array
                                x-kubernetes-list-type: atomic
                              matchLabels:
                                additionalProperties:
                                  type: string
                                description: |-
                                  matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                                  map is equivalent to an element of matchExpressions, whose key field is "key", the
                                  operator is "In", and the values array contains only "value". The requirements are ANDed.
                                type: object
                            type: object
                            x-kubernetes-map-type: atomic
                          matchLabelKeys:
                            description: |-
                              MatchLabelKeys is a set of pod label keys to select which pods will
                              be taken into consideration. The keys are used to look up values from the
                              incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
                              to select the group of existing pods which will be taken into consideration
                              for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
                              pod labels will be ignored. The default value is empty.
                              The same key is forbidden to exist in both matchLabelKeys and labelSelector.
                              Also, matchLabelKeys cannot be set when labelSelector isn't set.
                            items:
                              type: string
                            type: array
                            x-kubernetes-list-type: atomic
                          mismatchLabelKeys:
                            description: |-
                              MismatchLabelKeys is a set of pod label keys to select which pods will
                              be taken into consideration. The keys are used to look up values from the
                              incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
                              to select the group of existing pods which will be taken into consideration
                              for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
                              pod labels will be ignored. The default value is empty.
                              The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
                              Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
                            items:
                              type: string
                            type: array
                            x-kubernetes-list-type: atomic
                          namespaceSelector:
                            description: |-
                              A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
                              and the ones listed in the namespaces field.
                              null selector and null or empty namespaces list means "this pod's namespace".
                              An empty selector ({}) matches all namespaces.
                            properties:
                              matchExpressions:
                                description: matchExpressions is a list
                                  of label selector requirements. The requirements
                                  are ANDed.
                                items:
                                  description: |-
                                    A label selector requirement is a selector that contains values, a key, and an operator that
                                    relates the key and values.
                                  properties:
                                    key:
                                      description: key is the label key
                                        that the selector applies to.
                                      type: string
                                    operator:
                                      description: |-
                                        operator represents a key's relationship to a set of values.
                                        Valid operators are In, NotIn, Exists and DoesNotExist.
                                      type: string
                                    values:
                                      description: |-
                                        values is an array of string values. If the operator is In or NotIn,
                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                        the values array must be empty. This array is replaced during a strategic
                                        merge patch.
                                      items:
                                        type: string
                                      type: array
                                      x-kubernetes-list-type: atomic
                                  required:
                                  - key
                                  - operator
                                  type: object
                                type: array
                                x-kubernetes-list-type: atomic
                              matchLabels:
                                additionalProperties:
                                  type: string
                                description: |-
                                  matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                                  map is equivalent to an element of matchExpressions, whose key field is "key", the
                                  operator is "In", and the values array contains only "value". The requirements are ANDed.
                                type: object
                            type: object
                            x-kubernetes-map-type: atomic
                          namespaces:
                            description: |-
                              namespaces specifies a static list of namespace names that the term applies to.
                              The term is applied to the union of the namespaces listed in this field
                              and the ones selected by namespaceSelector.
                              null or empty namespaces list and null namespaceSelector means "this pod's namespace".
                            items:
                              type: string
                            type: array
                            x-kubernetes-list-type: atomic
                          topologyKey:
                            description: |-
                              This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
                              the labelSelector in the specified namespaces, where co-located is defined as running on a node
                              whose value of the label with key topologyKey matches that of any node on which any of the
                              selected pods is running.
                              Empty topologyKey is not allowed.
                            type: string
                        required:
                        - topologyKey
                        type: object
                      type: array
                      x-kubernetes-list-type: atomic
                    type: object
                podAntiAffinity:
                  description: Describes pod anti-affinity scheduling rules
                    (e.g. avoid putting this pod in the same node, zone,
                    etc. as some other pod(s)).
                  properties:
                    preferredDuringSchedulingIgnoredDuringExecution:
                      description: |-
                        The scheduler will prefer to schedule pods to nodes that satisfy
                        the anti-affinity expressions specified by this field, but it may choose
                        a node that violates one or more of the expressions. The node that is
                        most preferred is the one with the greatest sum of weights, i.e.
                        for each node that meets all of the scheduling requirements (resource
                        request, requiredDuringScheduling anti-affinity expressions, etc.),
                        compute a sum by iterating through the elements of this field and adding
                        "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
                        node(s) with the highest sum are the most preferred.
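+ # Illustrative sketch of the common "spread" pattern this field
                      # enables, again with a hypothetical "app: web" label: prefer
                      # not to place two matching pods on the same node.
                      #
                      #   preferredDuringSchedulingIgnoredDuringExecution:
                      #   - weight: 100
                      #     podAffinityTerm:
                      #       labelSelector:
                      #         matchLabels:
                      #           app: web
                      #       topologyKey: kubernetes.io/hostname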
+ items:
                        description: The weights of all of the matched WeightedPodAffinityTerm
                          fields are added per-node to find the most preferred
                          node(s)
                        properties:
                          podAffinityTerm:
                            description: Required. A pod affinity term,
                              associated with the corresponding weight.
                            properties:
                              labelSelector:
                                description: |-
                                  A label query over a set of resources, in this case pods.
                                  If it's null, this PodAffinityTerm matches with no Pods.
                                properties:
                                  matchExpressions:
                                    description: matchExpressions is a list
                                      of label selector requirements. The
                                      requirements are ANDed.
                                    items:
                                      description: |-
                                        A label selector requirement is a selector that contains values, a key, and an operator that
                                        relates the key and values.
                                      properties:
                                        key:
                                          description: key is the label
                                            key that the selector applies
                                            to.
                                          type: string
                                        operator:
                                          description: |-
                                            operator represents a key's relationship to a set of values.
                                            Valid operators are In, NotIn, Exists and DoesNotExist.
                                          type: string
                                        values:
                                          description: |-
                                            values is an array of string values. If the operator is In or NotIn,
                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                            the values array must be empty. This array is replaced during a strategic
                                            merge patch.
                                          items:
                                            type: string
                                          type: array
                                          x-kubernetes-list-type: atomic
                                      required:
                                      - key
                                      - operator
                                      type: object
                                    type: array
                                    x-kubernetes-list-type: atomic
                                  matchLabels:
                                    additionalProperties:
                                      type: string
                                    description: |-
                                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                                      map is equivalent to an element of matchExpressions, whose key field is "key", the
                                      operator is "In", and the values array contains only "value". The requirements are ANDed.
                                    type: object
                                type: object
                                x-kubernetes-map-type: atomic
                              matchLabelKeys:
                                description: |-
                                  MatchLabelKeys is a set of pod label keys to select which pods will
                                  be taken into consideration. The keys are used to look up values from the
                                  incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
                                  to select the group of existing pods which will be taken into consideration
                                  for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
                                  pod labels will be ignored. The default value is empty.
                                  The same key is forbidden to exist in both matchLabelKeys and labelSelector.
                                  Also, matchLabelKeys cannot be set when labelSelector isn't set.
                                items:
                                  type: string
                                type: array
                                x-kubernetes-list-type: atomic
                              mismatchLabelKeys:
                                description: |-
                                  MismatchLabelKeys is a set of pod label keys to select which pods will
                                  be taken into consideration. The keys are used to look up values from the
                                  incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
                                  to select the group of existing pods which will be taken into consideration
                                  for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
                                  pod labels will be ignored. The default value is empty.
                                  The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
                                  Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
                                items:
                                  type: string
                                type: array
                                x-kubernetes-list-type: atomic
                              namespaceSelector:
                                description: |-
                                  A label query over the set of namespaces that the term applies to.
                                  The term is applied to the union of the namespaces selected by this field
                                  and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
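+ # Sketch of the strict variant, assuming the hypothetical
                      # "app: web" label: scheduling fails rather than co-locating
                      # two matching pods in one zone.
                      #
                      #   requiredDuringSchedulingIgnoredDuringExecution:
                      #   - labelSelector:
                      #       matchLabels:
                      #         app: web
                      #     topologyKey: topology.kubernetes.io/zone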
+ items:
                        description: |-
                          Defines a set of pods (namely those matching the labelSelector
                          relative to the given namespace(s)) that this pod should be
                          co-located (affinity) or not co-located (anti-affinity) with,
                          where co-located is defined as running on a node whose value of
                          the label with key <topologyKey> matches that of any node on which
                          a pod of the set of pods is running
                        properties:
                          labelSelector:
                            description: |-
                              A label query over a set of resources, in this case pods.
                              If it's null, this PodAffinityTerm matches with no Pods.
                            properties:
                              matchExpressions:
                                description: matchExpressions is a list
                                  of label selector requirements. The requirements
                                  are ANDed.
                                items:
                                  description: |-
                                    A label selector requirement is a selector that contains values, a key, and an operator that
                                    relates the key and values.
                                  properties:
                                    key:
                                      description: key is the label key
                                        that the selector applies to.
                                      type: string
                                    operator:
                                      description: |-
                                        operator represents a key's relationship to a set of values.
                                        Valid operators are In, NotIn, Exists and DoesNotExist.
                                      type: string
                                    values:
                                      description: |-
                                        values is an array of string values. If the operator is In or NotIn,
                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                        the values array must be empty. This array is replaced during a strategic
                                        merge patch.
                                      items:
                                        type: string
                                      type: array
                                      x-kubernetes-list-type: atomic
                                  required:
                                  - key
                                  - operator
                                  type: object
                                type: array
                                x-kubernetes-list-type: atomic
                              matchLabels:
                                additionalProperties:
                                  type: string
                                description: |-
                                  matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                                  map is equivalent to an element of matchExpressions, whose key field is "key", the
                                  operator is "In", and the values array contains only "value". The requirements are ANDed.
                                type: object
                            type: object
                            x-kubernetes-map-type: atomic
                          matchLabelKeys:
                            description: |-
                              MatchLabelKeys is a set of pod label keys to select which pods will
                              be taken into consideration. The keys are used to look up values from the
                              incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
                              to select the group of existing pods which will be taken into consideration
                              for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
                              pod labels will be ignored. The default value is empty.
                              The same key is forbidden to exist in both matchLabelKeys and labelSelector.
                              Also, matchLabelKeys cannot be set when labelSelector isn't set.
                            items:
                              type: string
                            type: array
                            x-kubernetes-list-type: atomic
                          mismatchLabelKeys:
                            description: |-
                              MismatchLabelKeys is a set of pod label keys to select which pods will
                              be taken into consideration. The keys are used to look up values from the
                              incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
                              to select the group of existing pods which will be taken into consideration
                              for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
                              pod labels will be ignored. The default value is empty.
                              The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
                              Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
                            items:
                              type: string
                            type: array
                            x-kubernetes-list-type: atomic
                          namespaceSelector:
                            description: |-
                              A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless
                          of whether the variable exists or not. Cannot be updated.
                          More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
                        items:
                          type: string
                        type: array
                        x-kubernetes-list-type: atomic
                      command:
                        description: |-
                          Entrypoint array. Not executed within a shell.
                          The container image's ENTRYPOINT is used if this is not provided.
                          Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
                          cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
                          to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
                          produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
                          of whether the variable exists or not. Cannot be updated.
                          More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
                        items:
                          type: string
                        type: array
                        x-kubernetes-list-type: atomic
                      env:
                        description: |-
                          List of environment variables to set in the container.
                          Cannot be updated.
                        items:
                          description: EnvVar represents an environment variable
                            present in a Container.
                          properties:
                            name:
                              description: Name of the environment variable.
                                Must be a C_IDENTIFIER.
                              type: string
                            value:
                              description: |-
                                Variable references $(VAR_NAME) are expanded
                                using the previously defined environment variables in the container and
                                any service environment variables. If a variable cannot be resolved,
                                the reference in the input string will be unchanged. Double $$ are reduced
                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
                                "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
                                Escaped references will never be expanded, regardless of whether the variable
                                exists or not.
                                Defaults to "".
                              type: string
                            valueFrom:
                              description: Source for the environment variable's
                                value. Cannot be used if value is not empty.
                              properties:
                                configMapKeyRef:
                                  description: Selects a key of a ConfigMap.
                                  properties:
                                    key:
                                      description: The key to select.
                                      type: string
                                    name:
                                      default: ""
                                      description: |-
                                        Name of the referent.
                                        This field is effectively required, but due to backwards compatibility is
                                        allowed to be empty. Instances of this type with an empty value here are
                                        almost certainly wrong.
                                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                                      type: string
                                    optional:
                                      description: Specify whether the ConfigMap
                                        or its key must be defined
                                      type: boolean
                                  required:
                                  - key
                                  type: object
                                  x-kubernetes-map-type: atomic
                                fieldRef:
                                  description: |-
                                    Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
                                    spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
                                  properties:
                                    apiVersion:
                                      description: Version of the schema the
                                        FieldPath is written in terms of, defaults
                                        to "v1".
                                      type: string
                                    fieldPath:
                                      description: Path of the field to select
                                        in the specified API version.
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. 
+ format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. 
It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. 
+ Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. 
+ FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. 
+ The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". 
+ type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. 
Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. 
+ type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced
+                              to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                              produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                              of whether the variable exists or not. Cannot be updated.
+                              More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                            items:
+                              type: string
+                            type: array
+                            x-kubernetes-list-type: atomic
+                          command:
+                            description: |-
+                              Entrypoint array. Not executed within a shell.
+                              The container image's ENTRYPOINT is used if this is not provided.
+                              Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                              cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                              to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                              produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                              of whether the variable exists or not. Cannot be updated.
+                              More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                            items:
+                              type: string
+                            type: array
+                            x-kubernetes-list-type: atomic
+                          env:
+                            description: |-
+                              List of environment variables to set in the container.
+                              Cannot be updated.
+                            items:
+                              description: EnvVar represents an environment variable
+                                present in a Container.
+                              properties:
+                                name:
+                                  description: Name of the environment variable.
+                                    Must be a C_IDENTIFIER.
+                                  type: string
+                                value:
+                                  description: |-
+                                    Variable references $(VAR_NAME) are expanded
+                                    using the previously defined environment variables in the container and
+                                    any service environment variables. If a variable cannot be resolved,
+                                    the reference in the input string will be unchanged. Double $$ are reduced
+                                    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                    "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                    Escaped references will never be expanded, regardless of whether the variable
+                                    exists or not.
+                                    Defaults to "".
+                                  type: string
+                                valueFrom:
+                                  description: Source for the environment variable's
+                                    value. Cannot be used if value is not empty.
+                                  properties:
+                                    configMapKeyRef:
+                                      description: Selects a key of a ConfigMap.
+                                      properties:
+                                        key:
+                                          description: The key to select.
+                                          type: string
+                                        name:
+                                          default: ""
+                                          description: |-
+                                            Name of the referent.
+                                            This field is effectively required, but due to backwards compatibility is
+                                            allowed to be empty. Instances of this type with an empty value here are
+                                            almost certainly wrong.
+                                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                          type: string
+                                        optional:
+                                          description: Specify whether the ConfigMap
+                                            or its key must be defined
+                                          type: boolean
+                                      required:
+                                      - key
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    fieldRef:
+                                      description: |-
+                                        Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                        spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                      properties:
+                                        apiVersion:
+                                          description: Version of the schema the
+                                            FieldPath is written in terms of, defaults
+                                            to "v1".
+                                          type: string
+                                        fieldPath:
+                                          description: Path of the field to select
+                                            in the specified API version.
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. 
+ format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. 
+ format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. 
It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. 
+ Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. 
+ FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. + + If the OS field is set to linux, the following fields must be unset: + -securityContext.windowsOptions + + If the OS field is set to windows, following fields must be unset: + - spec.hostPID + - spec.hostIPC + - spec.hostUsers + - spec.securityContext.appArmorProfile + - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls + - spec.shareProcessNamespace + - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups + - spec.securityContext.supplementalGroupsPolicy + - spec.containers[*].securityContext.appArmorProfile + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities + - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged + - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount + - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup + properties: + name: + description: |- + Name is the name of the operating system. The currently supported values are linux and windows. + Additional value may be defined in future and can be one of: + https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and treat unrecognized values in this field as os: null + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If + the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create requests which have the overhead already + set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + type: object + preemptionPolicy: + description: |- + PreemptionPolicy is the Policy for preempting pods with lower priority. + One of Never, PreemptLowerPriority. + Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: |- + The priority value. Various system components use this field to find the + priority of the pod. When Priority Admission Controller is enabled, it + prevents users from setting this field. The admission controller populates + this field from PriorityClassName. + The higher the value, the higher the priority. + format: int32 + type: integer + priorityClassName: + description: |- + If specified, indicates the pod's priority. "system-node-critical" and + "system-cluster-critical" are two special keywords which indicate the + highest priorities with the former being the highest priority. Any other + name must be defined by creating a PriorityClass object with that name. + If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. 
+ + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. 
+ Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
+ properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + If a pod does not have FQDN, this has no effect. + Default to false. + type: boolean + shareProcessNamespace: + description: |- + Share a single process namespace between all of the containers in a pod. + When this is set containers will be able to view and signal processes from other containers + in the same pod, and the first process in each container will not be assigned PID 1. + HostPID and ShareProcessNamespace cannot both be set. + Optional: Default to false. + type: boolean + subdomain: + description: |- + If specified, the fully qualified Pod hostname will be "...svc.". + If not specified, the pod will not have a domainname at all. + type: string + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + If this value is nil, the default grace period will be used instead. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + Defaults to 30 seconds. + format: int64 + type: integer + tolerations: + description: If specified, the pod's tolerations. 
+ items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of pods ought to spread across topology + domains. Scheduler will schedule pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. 
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. 
+ properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers. + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). 
+ + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. 
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespace.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over
+ volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ type: string
+ reference:
+ description: |-
+ Required: Image or artifact reference to be used.
+ Behaves in the same way as pod.spec.containers[*].image.
+ Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether to support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether to support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ default: default
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. 
+ The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to service account user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. 
+ Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - 
additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. 
+ properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. + type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the ScheduledBackup object as owner of the backup
+ - cluster: sets the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to back up
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: Whether the first backup has to start immediately after
+ creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ - plugin
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`).
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: Whether this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. 
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule was checked + format: date-time + type: string + lastScheduleTime: + description: The last time that a backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: The next time a backup will run + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the subscription will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters included in the `WITH` clause of the PostgreSQL + `CREATE SUBSCRIPTION` command.
Most parameters cannot be changed + after the subscription is created and will be ignored if modified + later, except for a limited set documented at: + https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() 
= pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , 
maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files 
are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + 
usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - --max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + 
command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.26.1 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + 
- CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From e68a5e652f01fcbf46b106d6e9c43063622bbb3e Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 25 Jul 2025 16:10:27 +0200 Subject: [PATCH 735/836] chore: update issue template (#8137) Signed-off-by: Marco Nenciarini --- .github/ISSUE_TEMPLATE/bug.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 5cdee79b94..cc3f5a3fc4 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -51,6 +51,7 @@ body: - "1.26 (latest patch)" - "1.25 (latest patch)" - "trunk (main)" + - "older in 1.26.x" - "older in 1.25.x" - "older minor (unsupported)" validations: From d755ff7b7240b042e23c11a0778428072e761be4 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Fri, 25 Jul 2025 16:16:00 +0200 Subject: [PATCH 736/836] chore: improve log message when HTTPS status is missing (#8134) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Improve logging when waiting for Pods to report HTTP status. If any Pod hasn’t yet reported, the controller now lists the Pods that are reporting status and those that aren’t, giving more context for debugging. 
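For illustration, the new entry would look roughly like the following sketch. This is only an assumed rendering: the envelope fields (`level`, `msg`) depend on the configured logger, and the pod names, address, and error text are invented; the `podsReportingStatus` and `podsNotReportingStatus` keys are the ones introduced by this change.

```yaml
# Hypothetical structured log record while one Pod is still unreachable
level: info
msg: "Waiting for Pods to report HTTP status"
podsReportingStatus:        # Pods whose status endpoint answered, sorted
  - cluster-example-1
  - cluster-example-2
podsNotReportingStatus:     # Pod name -> error returned while fetching status
  cluster-example-3: 'Get "https://10.0.0.3:8000/pg/status": connection refused'
```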
Signed-off-by: Leonardo Cecchi --- internal/controller/cluster_controller.go | 27 +++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 08556196b4..9f2fba3db0 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -28,6 +28,7 @@ import ( "time" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -984,12 +985,34 @@ func (r *ClusterReconciler) reconcilePods( // cluster.Status.Instances == cluster.Spec.Instances and // we don't need to modify the cluster topology if cluster.Status.ReadyInstances != cluster.Status.Instances || - cluster.Status.ReadyInstances != len(instancesStatus.Items) || - !instancesStatus.IsComplete() { + cluster.Status.ReadyInstances != len(instancesStatus.Items) { contextLogger.Debug("Waiting for Pods to be ready") return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop } + // If there is a Pod that doesn't report its HTTP status, + // we wait until the Pod gets marked as non ready or until we're + // able to connect to it. + if !instancesStatus.IsComplete() { + podsReportingStatus := stringset.New() + podsNotReportingStatus := make(map[string]string) + for i := range instancesStatus.Items { + podName := instancesStatus.Items[i].Pod.Name + if instancesStatus.Items[i].Error != nil { + podsNotReportingStatus[podName] = instancesStatus.Items[i].Error.Error() + } else { + podsReportingStatus.Put(podName) + } + } + + contextLogger.Info( + "Waiting for Pods to report HTTP status", + "podsReportingStatus", podsReportingStatus.ToSortedList(), + "podsNotReportingStatus", podsNotReportingStatus, + ) + return ctrl.Result{RequeueAfter: 1 * time.Second}, ErrNextLoop + } + report := instancesStatus.GetConfigurationReport() // If any pod is not reporting its configuration (i.e., uniform == nil), From e877f81f252f23cd639d765619334fc249c4aec0 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Sat, 26 Jul 2025 01:09:57 +0200 Subject: [PATCH 737/836] chore: fix some common typos in the code (#8141) Signed-off-by: Marco Nenciarini --- internal/controller/rollout/rollout.go | 2 +- tests/e2e/failover_test.go | 2 +- tests/e2e/volume_snapshot_test.go | 2 +- tests/utils/minio/minio.go | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/controller/rollout/rollout.go b/internal/controller/rollout/rollout.go index 34ac26fad0..ff97ab7d9c 100644 --- a/internal/controller/rollout/rollout.go +++ b/internal/controller/rollout/rollout.go @@ -46,7 +46,7 @@ type Manager struct { // used by the unit tests to inject a fake time timeProvider timeFunc - // The following data is relative to the the last + // The following data is relative to the last // rollout lastInstance string lastCluster client.ObjectKey diff --git a/tests/e2e/failover_test.go b/tests/e2e/failover_test.go index e88e0a291e..a6a7e4055d 100644 --- a/tests/e2e/failover_test.go +++ b/tests/e2e/failover_test.go @@ -222,7 +222,7 @@ var _ = Describe("Failover", Label(tests.LabelSelfHealing), func() { }, timeout).Should(Not(Equal(""))) } - By("making sure that the the targetPrimary has switched away from current primary") + By("making sure that the targetPrimary has switched away from current primary") // The operator should eventually set the cluster target primary to // the instance we expect to 
take that role (-3). + Eventually(func() (string, error) { diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index 9c2488e1ba..bfac11594b 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -841,7 +841,7 @@ var _ = Describe("Verify Volume Snapshot", Expect(err).ToNot(HaveOccurred()) }) - By("checking the the cluster is working", func() { + By("checking the cluster is working", func() { // Setting up a cluster with three pods is slow, usually 200-600s AssertClusterIsReady(namespace, clusterToSnapshotName, testTimeouts[timeouts.ClusterIsReady], env) }) diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go index 8de344ab4b..3d250821fc 100644 --- a/tests/utils/minio/minio.go +++ b/tests/utils/minio/minio.go @@ -519,7 +519,7 @@ func (m *Env) getCaSecret(env *environment.TestingEnvironment, namespace string) }, nil } -// CreateCaSecret creates the certificates required to authenticate against the the MinIO service +// CreateCaSecret creates the certificates required to authenticate against the MinIO service func (m *Env) CreateCaSecret(env *environment.TestingEnvironment, namespace string) error { caSecret, err := m.getCaSecret(env, namespace) if err != nil { @@ -529,7 +529,7 @@ func (m *Env) CreateCaSecret(env *environment.TestingEnvironment, namespace stri return err } -// CountFiles uses the minioClient in the given `namespace` to count the +// CountFiles uses the minioClient in the given `namespace` to count the // amount of files matching the given `path` func CountFiles(minioEnv *Env, path string) (value int, err error) { var stdout string From b025413ba0d2095c03860a92d28e8247f414fcb3 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Sat, 26 Jul 2025 15:43:18 +0200 Subject: [PATCH 738/836] test(e2e): fix expected entry count in pg_ident_file_mappings (#8142) Fix e2e test failure introduced with #7725 Signed-off-by: Marco Nenciarini --- tests/e2e/configuration_update_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/e2e/configuration_update_test.go b/tests/e2e/configuration_update_test.go index ddf4c1001f..61622098f4 100644 --- a/tests/e2e/configuration_update_test.go +++ b/tests/e2e/configuration_update_test.go @@ -434,7 +434,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada Expect(err).ToNot(HaveOccurred()) query := "select count(1) from pg_catalog.pg_ident_file_mappings;" - By("check that there is only one entry in pg_ident_file_mappings", func() { + By("check that there is the expected number of entries in pg_ident_file_mappings", func() { Eventually(func() (string, error) { stdout, _, err := exec.QueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, @@ -445,14 +445,14 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada postgres.PostgresDBName, query) return strings.Trim(stdout, "\n"), err - }, timeout).Should(BeEquivalentTo("1")) + }, timeout).Should(BeEquivalentTo("3")) }) By("apply configuration update", func() { updateClusterPostgresPgIdent(namespace) }) - By("verify that there are now two entries in pg_ident_file_mappings", func() { + By("verify that there is one more entry in pg_ident_file_mappings", func() { Eventually(func() (string, error) { stdout, _, err := exec.QueryInInstancePod( env.Ctx, env.Client, env.Interface, env.RestClientConfig, @@ -463,7 +463,7 @@ var _ = Describe("Configuration update", Ordered, Label(tests.LabelClusterMetada postgres.PostgresDBName,
query) return strings.Trim(stdout, "\n"), err - }, timeout).Should(BeEquivalentTo("2")) + }, timeout).Should(BeEquivalentTo("4")) }) } }) From 6c4088242d732e64d5459d9aa702360b26275711 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 27 Jul 2025 11:07:30 +0200 Subject: [PATCH 739/836] chore(deps): update github/codeql-action digest to 4e828ff (main) (#8109) --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 6687311009..033b61256d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 + uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 + uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 43152113f3..f6ea6d3462 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 + uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index ae153a1caf..ad01f3b2c4 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 + uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 73be8334fa..f55f5611bc 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 + uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3 + uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 with: sarif_file: snyk-test.sarif From 67529637ef71a06b2cc9a0d86d8b9355208e2441 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 27 Jul 2025 17:58:55 +0200 Subject: [PATCH 740/836] chore(deps): update backup test tools (main) (#8118) This PR contains the following updates: minio/mc | patch | `RELEASE.2025-07-16T15-35-03Z` -> `RELEASE.2025-07-21T05-28-08Z` minio/minio | patch | `RELEASE.2025-07-18T21-56-31Z` -> `RELEASE.2025-07-23T15-54-02Z` --- tests/utils/minio/minio.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/utils/minio/minio.go b/tests/utils/minio/minio.go index 3d250821fc..ba249e7673 100644 --- a/tests/utils/minio/minio.go +++ b/tests/utils/minio/minio.go @@ -50,9 +50,9 @@ import ( const ( // minioImage is the image used to run a MinIO server - minioImage = "minio/minio:RELEASE.2025-07-18T21-56-31Z" + minioImage = "minio/minio:RELEASE.2025-07-23T15-54-02Z" // minioClientImage is the image used to run a MinIO client - minioClientImage = "minio/mc:RELEASE.2025-07-16T15-35-03Z" + minioClientImage = "minio/mc:RELEASE.2025-07-21T05-28-08Z" ) // Env contains all the information related or required by MinIO deployment and From c82843f9cbc35c42339eef356c340cc0f0298f6a Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Mon, 28 Jul 2025 14:09:52 +0200 Subject: [PATCH 741/836] feat(webhook): add warnings for storage configuration (#8127) This commit introduces new admission warnings for the Cluster resource to inform users about potentially confusing storage configurations. A warning is now issued when: - Both `spec.storage.size` and `spec.storage.pvcTemplate.resources.requests.storage` are set, clarifying that `size` will be used. - Both `spec.storage.storageClass` and `spec.storage.pvcTemplate.storageClassName` are set, clarifying that `storageClass` will be used. These checks are applied to both `spec.storage` and `spec.walStorage`.
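As an illustration, a minimal manifest that would now surface both warnings on admission, since the plain fields take precedence over their PVC-template counterparts (cluster name and storage class names are invented for illustration; the field paths are the ones checked by this change):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example        # name invented for illustration
spec:
  instances: 3
  storage:
    size: 1Gi                  # used, wins over pvcTemplate.resources.requests.storage
    storageClass: fast-ssd     # used, wins over pvcTemplate.storageClassName
    pvcTemplate:
      storageClassName: standard
      resources:
        requests:
          storage: 2Gi
```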
Signed-off-by: Armando Ruocco --- internal/webhook/v1/cluster_webhook.go | 51 ++++++ internal/webhook/v1/cluster_webhook_test.go | 188 ++++++++++++++++++++ 2 files changed, 239 insertions(+) diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 6a022877f6..776ff30963 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -2421,9 +2421,60 @@ func (v *ClusterCustomValidator) getAdmissionWarnings(r *apiv1.Cluster) admissio list := getMaintenanceWindowsAdmissionWarnings(r) list = append(list, getInTreeBarmanWarnings(r)...) list = append(list, getRetentionPolicyWarnings(r)...) + list = append(list, getStorageWarnings(r)...) return append(list, getSharedBuffersWarnings(r)...) } +func getStorageWarnings(r *apiv1.Cluster) admission.Warnings { + generateWarningsFunc := func(path field.Path, configuration *apiv1.StorageConfiguration) admission.Warnings { + if configuration == nil { + return nil + } + + if configuration.PersistentVolumeClaimTemplate == nil { + return nil + } + + pvcTemplatePath := path.Child("pvcTemplate") + + var result admission.Warnings + if configuration.StorageClass != nil && configuration.PersistentVolumeClaimTemplate.StorageClassName != nil { + storageClass := path.Child("storageClass").String() + result = append( + result, + fmt.Sprintf("%s and %s are both specified, %s value will be used.", + storageClass, + pvcTemplatePath.Child("storageClassName"), + storageClass, + ), + ) + } + requestsSpecified := !configuration.PersistentVolumeClaimTemplate.Resources.Requests.Storage().IsZero() + if configuration.Size != "" && requestsSpecified { + size := path.Child("size").String() + result = append( + result, + fmt.Sprintf( + "%s and %s are both specified, %s value will be used.", + size, + pvcTemplatePath.Child("resources", "requests", "storage").String(), + size, + ), + ) + } + + return result + } + + var result admission.Warnings + + storagePath := *field.NewPath("spec", "storage") + result = append(result, generateWarningsFunc(storagePath, &r.Spec.StorageConfiguration)...) + + walStoragePath := *field.NewPath("spec", "walStorage") + return append(result, generateWarningsFunc(walStoragePath, r.Spec.WalStorage)...) 
+} + func getInTreeBarmanWarnings(r *apiv1.Cluster) admission.Warnings { var result admission.Warnings diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index 8ce24826dd..9de2d3d603 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -5559,3 +5559,191 @@ var _ = Describe("getRetentionPolicyWarnings", func() { Expect(warnings).To(HaveLen(1)) }) }) + +var _ = Describe("getStorageWarnings", func() { + It("returns no warnings when storage is properly configured", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + }, + } + Expect(getStorageWarnings(cluster)).To(BeEmpty()) + }) + + It("returns no warnings when PVC template has storage configured", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + } + Expect(getStorageWarnings(cluster)).To(BeEmpty()) + }) + + It("returns a warning when both storageClass and storageClassName are specified", func() { + storageClass := "fast-ssd" + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + StorageClass: &storageClass, + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClass, + }, + }, + }, + } + warnings := getStorageWarnings(cluster) + Expect(warnings).To(HaveLen(1)) + Expect(warnings[0]).To(ContainSubstring("spec.storage.storageClass")) + Expect(warnings[0]).To(ContainSubstring("spec.storage.pvcTemplate.storageClassName")) + Expect(warnings[0]).To(ContainSubstring("spec.storage.storageClass value will be used")) + }) + + It("returns a warning when both size and storage requests are specified", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, + } + warnings := getStorageWarnings(cluster) + Expect(warnings).To(HaveLen(1)) + Expect(warnings[0]).To(ContainSubstring("spec.storage.size")) + Expect(warnings[0]).To(ContainSubstring("spec.storage.pvcTemplate.resources.requests.storage")) + Expect(warnings[0]).To(ContainSubstring("spec.storage.size value will be used")) + }) + + It("returns multiple warnings when both storage conflicts exist", func() { + storageClass := "fast-ssd" + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClass, + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClass, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, + } + warnings := getStorageWarnings(cluster) + Expect(warnings).To(HaveLen(2)) + Expect(warnings[0]).To(ContainSubstring("storageClass")) + Expect(warnings[1]).To(ContainSubstring("size")) + }) + + It("returns warnings for WAL storage configuration conflicts", func() { + storageClass := 
"fast-ssd" + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + WalStorage: &apiv1.StorageConfiguration{ + Size: "500Mi", + StorageClass: &storageClass, + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClass, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + } + warnings := getStorageWarnings(cluster) + Expect(warnings).To(HaveLen(2)) + Expect(warnings[0]).To(ContainSubstring("spec.walStorage.storageClass")) + Expect(warnings[1]).To(ContainSubstring("spec.walStorage.size")) + }) + + It("returns warnings for both storage and WAL storage conflicts", func() { + storageClass := "fast-ssd" + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + StorageClass: &storageClass, + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClass, + }, + }, + WalStorage: &apiv1.StorageConfiguration{ + Size: "500Mi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + } + warnings := getStorageWarnings(cluster) + Expect(warnings).To(HaveLen(2)) + Expect(warnings[0]).To(ContainSubstring("spec.storage")) + Expect(warnings[1]).To(ContainSubstring("spec.walStorage")) + }) + + It("returns no warnings when WAL storage is nil", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + }, + WalStorage: nil, + }, + } + Expect(getStorageWarnings(cluster)).To(BeEmpty()) + }) + + It("returns no warnings when PVC template is nil", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: nil, + }, + }, + } + Expect(getStorageWarnings(cluster)).To(BeEmpty()) + }) + + It("returns no warnings when storage requests are zero", func() { + cluster := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{ + Size: "1Gi", + PersistentVolumeClaimTemplate: &corev1.PersistentVolumeClaimSpec{ + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{}, + }, + }, + }, + }, + } + Expect(getStorageWarnings(cluster)).To(BeEmpty()) + }) +}) From ebb7e8ebe99c05fba33fa095bc5fad5630e19be9 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 28 Jul 2025 17:44:25 +0200 Subject: [PATCH 742/836] revert: support changing image and PostgreSQL settings simultaneously (#8160) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit fc7d1e30b5df3797df1fe81717d7ec7ea042d98b. 
Closes #8162 --------- Signed-off-by: Leonardo Cecchi Signed-off-by: Francesco Canovai Signed-off-by: Niccolò Fei Co-authored-by: Francesco Canovai Co-authored-by: Niccolò Fei --- docs/src/imagevolume_extensions.md | 7 +++ docs/src/rolling_update.md | 39 +++++++-------- .../controller/instance_controller.go | 40 ---------------- internal/management/controller/manager.go | 6 +-- internal/webhook/v1/cluster_webhook.go | 26 ++++++++++ internal/webhook/v1/cluster_webhook_test.go | 48 +++++++++++++++++++ .../cluster-with-extensions.yaml.template | 4 +- tests/e2e/imagevolume_extensions_test.go | 2 +- 8 files changed, 102 insertions(+), 70 deletions(-) diff --git a/docs/src/imagevolume_extensions.md b/docs/src/imagevolume_extensions.md index bc89d3b6eb..4b9fba6383 100644 --- a/docs/src/imagevolume_extensions.md +++ b/docs/src/imagevolume_extensions.md @@ -93,6 +93,13 @@ Adding an extension to a database in CloudNativePG involves a few steps: 3. Declare the extension in the `Database` resource where you want it installed, if the extension supports `CREATE EXTENSION`. +!!! Warning + Avoid making changes to extension images and PostgreSQL configuration + settings (such as `shared_preload_libraries`) simultaneously. + First, allow the pod to roll out with the new extension image, then update + the PostgreSQL configuration. + This limitation will be addressed in a future release of CloudNativePG. + For illustration purposes, this guide uses a simple, fictitious extension named `foo` that supports `CREATE EXTENSION`. diff --git a/docs/src/rolling_update.md b/docs/src/rolling_update.md index 6dda17e990..1d04a1e2ee 100644 --- a/docs/src/rolling_update.md +++ b/docs/src/rolling_update.md @@ -1,39 +1,32 @@ # Rolling Updates -The operator allows you to change the PostgreSQL version used in a cluster -while applications continue running against it. +The operator allows changing the PostgreSQL version used in a cluster while +applications are running against it. -Rolling upgrades are triggered when: +!!! Important + Only upgrades for PostgreSQL minor releases are supported. -- you change the `imageName` attribute in the cluster specification; +Rolling upgrades are started when: -- you change the list of extension images in the `.spec.postgresql.extensions` - stanza of the cluster specification; +- the user changes the `imageName` attribute of the cluster specification; -- the [image catalog](image_catalog.md) is updated with a new image for the - major version used by the cluster; +- the [image catalog](image_catalog.md) is updated with a new image for the major version used by the cluster; -- a change in the PostgreSQL configuration requires a restart to apply; +- a change in the PostgreSQL configuration requires a restart to be + applied; -- you change the `Cluster` `.spec.resources` values; +- a change in the `Cluster` `.spec.resources` values; -- you resize the persistent volume claim on AKS; +- a change in size of the persistent volume claim on AKS; -- the operator is updated, ensuring Pods run the latest instance manager - (unless [in-place updates are enabled](installation_upgrade.md#in-place-updates-of-the-instance-manager)). +- after the operator is updated, to ensure the Pods run the latest instance + manager (unless [in-place updates are enabled](installation_upgrade.md#in-place-updates-of-the-instance-manager)). -!!! Warning - Any change to container images (including extensions) takes precedence over - all other changes and will trigger a rollout first.
For example, if you update - both the PostgreSQL configuration and the PostgreSQL version at the same time, - the container image change will take priority, and the configuration change - will be applied in a subsequent rollout. +The operator starts upgrading all the replicas, one Pod at a time, and begins +from the one with the highest serial. -During a rolling upgrade, the operator upgrades all replicas one Pod at a time, -starting from the one with the highest serial. - -The primary is always the last node to be upgraded. +The primary is the last node to be upgraded. Rolling updates are configurable and can be either entirely automated (`unsupervised`) or requiring human intervention (`supervised`). diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go index ee04eac8cb..454b9247ba 100644 --- a/internal/management/controller/instance_controller.go +++ b/internal/management/controller/instance_controller.go @@ -34,7 +34,6 @@ import ( "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" pgTime "github.com/cloudnative-pg/machinery/pkg/postgres/time" - "github.com/cloudnative-pg/machinery/pkg/stringset" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -382,11 +381,6 @@ func (r *InstanceReconciler) refreshConfigurationFiles( } reloadNeeded = reloadNeeded || reloadIdent - // We give priority to images changes before applying the configuration ones. - if r.requiresImagesRollout(ctx, cluster) { - return reloadNeeded, nil - } - // Reconcile PostgreSQL configuration // This doesn't need the PG connection, but it needs to reload it in case of changes reloadConfig, err := r.instance.RefreshConfigurationFilesFromCluster( @@ -408,40 +402,6 @@ func (r *InstanceReconciler) refreshConfigurationFiles( return reloadNeeded, nil } -func (r *InstanceReconciler) requiresImagesRollout(ctx context.Context, cluster *apiv1.Cluster) bool { - contextLogger := log.FromContext(ctx) - - latestImages := stringset.New() - latestImages.Put(cluster.Spec.ImageName) - for _, extension := range cluster.Spec.PostgresConfiguration.Extensions { - latestImages.Put(extension.ImageVolumeSource.Reference) - } - - if r.runningImages == nil { - r.runningImages = latestImages - contextLogger.Info("Detected running images", "runningImages", r.runningImages.ToSortedList()) - - return false - } - - contextLogger.Trace( - "Calculated image requirements", - "latestImages", latestImages.ToSortedList(), - "runningImages", r.runningImages.ToSortedList()) - - if latestImages.Eq(r.runningImages) { - return false - } - - contextLogger.Info( - "Detected drift between the bootstrap images and the configuration. 
Skipping configuration reload", - "runningImages", r.runningImages.ToSortedList(), - "latestImages", latestImages.ToSortedList(), - ) - - return true -} - func (r *InstanceReconciler) reconcileFencing(ctx context.Context, cluster *apiv1.Cluster) *reconcile.Result { contextLogger := log.FromContext(ctx) diff --git a/internal/management/controller/manager.go b/internal/management/controller/manager.go index e756afa7d6..76929fef62 100644 --- a/internal/management/controller/manager.go +++ b/internal/management/controller/manager.go @@ -25,7 +25,6 @@ import ( "context" "fmt" - "github.com/cloudnative-pg/machinery/pkg/stringset" "go.uber.org/atomic" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -43,9 +42,8 @@ import ( // the one of this PostgreSQL instance. Also, the configuration in the // ConfigMap is applied when needed type InstanceReconciler struct { - client ctrl.Client - instance *postgres.Instance - runningImages *stringset.Data + client ctrl.Client + instance *postgres.Instance secretVersions map[string]string extensionStatus map[string]bool diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 776ff30963..29ab7a5aeb 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -21,6 +21,7 @@ package v1 import ( "context" + "encoding/json" "fmt" "slices" "strconv" @@ -240,6 +241,7 @@ func (v *ClusterCustomValidator) validateClusterChanges(r, old *apiv1.Cluster) ( type validationFunc func(*apiv1.Cluster, *apiv1.Cluster) field.ErrorList validations := []validationFunc{ v.validateImageChange, + v.validateConfigurationChange, v.validateStorageChange, v.validateWalStorageChange, v.validateTablespacesChange, @@ -1259,6 +1261,30 @@ func parsePostgresQuantityValue(value string) (resource.Quantity, error) { return resource.ParseQuantity(value) } +// validateConfigurationChange determines whether a PostgreSQL configuration +// change can be applied +func (v *ClusterCustomValidator) validateConfigurationChange(r, old *apiv1.Cluster) field.ErrorList { + var result field.ErrorList + + if old.Spec.ImageName != r.Spec.ImageName { + diff := utils.CollectDifferencesFromMaps(old.Spec.PostgresConfiguration.Parameters, + r.Spec.PostgresConfiguration.Parameters) + if len(diff) > 0 { + jsonDiff, _ := json.Marshal(diff) + result = append( + result, + field.Invalid( + field.NewPath("spec", "imageName"), + r.Spec.ImageName, + fmt.Sprintf("Can't change image name and configuration at the same time. 
"+ + "There are differences in PostgreSQL configuration parameters: %s", jsonDiff))) + return result + } + } + + return result +} + func validateSyncReplicaElectionConstraint(constraints apiv1.SyncReplicaElectionConstraints) *field.Error { if !constraints.Enabled { return nil diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go index 9de2d3d603..f06a207569 100644 --- a/internal/webhook/v1/cluster_webhook_test.go +++ b/internal/webhook/v1/cluster_webhook_test.go @@ -514,6 +514,54 @@ var _ = Describe("configuration change validation", func() { v = &ClusterCustomValidator{} }) + It("doesn't complain when the configuration is exactly the same", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + }, + } + clusterNew := clusterOld.DeepCopy() + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + + It("doesn't complain when we change a setting which is not fixed", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "shared_buffers": "4G", + }, + }, + }, + } + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(BeEmpty()) + }) + + It("complains when changing postgres major version and settings", func() { + clusterOld := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.4", + }, + } + clusterNew := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + ImageName: "postgres:10.5", + PostgresConfiguration: apiv1.PostgresConfiguration{ + Parameters: map[string]string{ + "shared_buffers": "4G", + }, + }, + }, + } + Expect(v.validateConfigurationChange(clusterNew, clusterOld)).To(HaveLen(1)) + }) + It("produces no error when WAL size settings are correct", func() { clusterNew := &apiv1.Cluster{ Spec: apiv1.ClusterSpec{ diff --git a/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template b/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template index 332cc3ef3c..5db523de6e 100644 --- a/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template +++ b/tests/e2e/fixtures/imagevolume_extensions/cluster-with-extensions.yaml.template @@ -18,9 +18,9 @@ spec: extensions: - name: postgis ld_library_path: - - system + - syslib image: - reference: ghcr.io/niccolofei/postgis:18beta1-master-bullseye # wokeignore:rule=master + reference: ghcr.io/niccolofei/postgis:18beta2-master-bullseye # wokeignore:rule=master bootstrap: initdb: diff --git a/tests/e2e/imagevolume_extensions_test.go b/tests/e2e/imagevolume_extensions_test.go index 51c4c32f4e..c3f9430299 100644 --- a/tests/e2e/imagevolume_extensions_test.go +++ b/tests/e2e/imagevolume_extensions_test.go @@ -198,7 +198,7 @@ var _ = Describe("ImageVolume Extensions", Label(tests.LabelPostgresConfiguratio apiv1.ExtensionConfiguration{ Name: "pgvector", ImageVolumeSource: corev1.ImageVolumeSource{ - Reference: "ghcr.io/niccolofei/pgvector:18beta1-master-bullseye", // wokeignore:rule=master + Reference: "ghcr.io/niccolofei/pgvector:18beta2-master-bullseye", // wokeignore:rule=master }, }) g.Expect(env.Client.Update(env.Ctx, cluster)).To(Succeed()) From 0cd889f0554d2aa2fc8e1e819bab6c8b7412ee73 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Mon, 28 Jul 2025 21:08:04 +0200 Subject: [PATCH 743/836] docs: add CN Rejekts NA 
2024 talk (microservice database) (#8159) Updated the talks section by adding the talk given to CloudNative Rejekts NA 2024 in Salt Lake City and removing two entries that are not strictly related to CloudNativePG. Signed-off-by: Gabriele Bartolini --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index f234194746..1a8c4bec33 100644 --- a/README.md +++ b/README.md @@ -115,8 +115,7 @@ organization to this list! ### CloudNativePG at KubeCon - April 4 2025, KubeCon Europe in London: ["Consistent Volume Group Snapshots, Unraveling the Magic"](https://sched.co/1tx8g) - Leonardo Cecchi (EDB) and Xing Yang (VMware) -- April 2 2025, KubeCon Europe in London: ["The Future of Data on Kubernetes - From Database Management To AI Foundation"](https://kccnceu2025.sched.com/event/1txEy/the-future-of-data-on-kubernetes-from-database-management-to-ai-foundation-melissa-logan-constantia-nimisha-mehta-confluent-gabriele-bartolini-edb-brian-kaufman-google) - Gabriele Bartolini (EDB), Melissa Logan (Constantia), Nimisha Mehta (Confluent), Brian Kaufman (Google) -- April 1 2025, Data on Kubernetes Day: ["The Next Wave Of Data On Kubernetes: Winning Over The Enterprise"](https://colocatedeventseu2025.sched.com/event/1ub0S/sponsored-keynote-the-next-wave-of-data-on-kubernetes-winning-over-the-enterprise-simon-metson-enterprisedb) - Simon Metson, EDB +- November 11 2024, Cloud Native Rejekts NA 2024: ["Maximising Microservice Databases with Kubernetes, Postgres, and CloudNativePG"](https://www.youtube.com/watch?v=uBzl_stoxoc&ab_channel=CloudNativeRejekts) - Gabriele Bartolini (EDB) and Leonardo Cecchi (EDB) - March 21 2024, KubeCon Europe 2024 in Paris: ["Scaling Heights: Mastering Postgres Database Vertical Scalability with Kubernetes Storage Magic"](https://kccnceu2024.sched.com/event/1YeM4/scaling-heights-mastering-postgres-database-vertical-scalability-with-kubernetes-storage-magic-gabriele-bartolini-edb-gari-singh-google) - Gari Singh, Google & Gabriele Bartolini, EDB - March 19 2024, Data on Kubernetes Day at KubeCon Europe 2024 in Paris: ["From Zero to Hero: Scaling Postgres in Kubernetes Using the Power of CloudNativePG"](https://colocatedeventseu2024.sched.com/event/1YFha/from-zero-to-hero-scaling-postgres-in-kubernetes-using-the-power-of-cloudnativepg-gabriele-bartolini-edb) - Gabriele Bartolini, EDB - 7 November 2023, KubeCon North America 2023 in Chicago: ["Disaster Recovery with Very Large Postgres Databases (in Kubernetes)"](https://kccncna2023.sched.com/event/1R2ml/disaster-recovery-with-very-large-postgres-databases-gabriele-bartolini-edb-michelle-au-google) - Michelle Au, Google & Gabriele Bartolini, EDB From b01bbc9ba3721cb47c00dad5cbacbe1cff7047bf Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Tue, 29 Jul 2025 12:14:23 +0200 Subject: [PATCH 744/836] feat: introduce experimental support for failover quorum (#7572) This commit adds opt-in, experimental support for "failover quorum", also known as "quorum-based failover", in CloudNativePG. Failover quorum is a mechanism designed to improve data durability and safety during failover events by ensuring that the promoted replica contains all synchronously committed data. With synchronous replication, a transaction is acknowledged only after all required synchronous standbys have received the WAL data. However, this alone doesn't guarantee that the operator can always promote the most advanced replica during a failure. 
The failover quorum mechanism addresses this risk by requiring the operator to verify that a quorum of replicas agrees on the promotion. This ensures that the selected replica has all required committed data. If this quorum is not met, failover will not proceed, reducing the risk of data loss. To enable the feature, set the annotation: `cnpg.io/failoverQuorum="true"` in the `Cluster` resource. Once stabilised, a dedicated configuration field will replace this annotation. For further information, refer to the included documentation and the related issue. Closes #7481 Signed-off-by: Leonardo Cecchi Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Francesco Canovai Signed-off-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Marco Nenciarini Co-authored-by: Francesco Canovai Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 12 + PROJECT | 9 + api/v1/cluster_funcs.go | 17 ++ api/v1/cluster_funcs_test.go | 30 ++ api/v1/failoverquorum_types.go | 83 ++++++ api/v1/zz_generated.deepcopy.go | 78 ++++++ .../postgresql.cnpg.io_failoverquorums.yaml | 77 ++++++ config/crd/kustomization.yaml | 1 + config/rbac/role.yaml | 11 + docs/src/cloudnative-pg.v1.md | 84 ++++++ docs/src/failover.md | 259 ++++++++++++++++++ docs/src/replication.md | 5 + .../cluster-example-syncreplicas-quorum.yaml | 17 ++ internal/cmd/manager/instance/run/cmd.go | 3 + internal/controller/cluster_controller.go | 2 + internal/controller/cluster_create.go | 5 + internal/controller/replicas.go | 18 ++ internal/controller/replicas_quorum.go | 185 +++++++++++++ internal/controller/replicas_quorum_test.go | 141 ++++++++++ .../controller/instance_controller.go | 21 +- .../management/controller/instance_sync.go | 123 +++++++++ internal/webhook/v1/cluster_webhook.go | 61 ++++- internal/webhook/v1/cluster_webhook_test.go | 77 ++++++ pkg/management/postgres/configuration.go | 7 +- pkg/management/postgres/configuration_test.go | 30 +- pkg/management/postgres/instance.go | 32 +++ pkg/postgres/configuration.go | 64 ++++- pkg/postgres/replication/explicit.go | 49 ++-- pkg/postgres/replication/explicit_test.go | 74 ++++- pkg/postgres/replication/legacy.go | 44 +-- pkg/postgres/replication/legacy_test.go | 31 ++- pkg/postgres/replication/replication.go | 11 +- pkg/specs/roles.go | 33 +++ pkg/specs/roles_test.go | 2 +- pkg/utils/labels_annotations.go | 7 + 35 files changed, 1589 insertions(+), 114 deletions(-) create mode 100644 api/v1/failoverquorum_types.go create mode 100644 config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml create mode 100644 docs/src/samples/cluster-example-syncreplicas-quorum.yaml create mode 100644 internal/controller/replicas_quorum.go create mode 100644 internal/controller/replicas_quorum_test.go create mode 100644 internal/management/controller/instance_sync.go diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index be82ff8c26..64c796faa1 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -166,6 +166,9 @@ ExtensionStatus ExternalCluster FQDN FQDNs +FailoverQuorum +FailoverQuorumSpec +FailoverQuorumStatus Fei Filesystem Fluentd @@ -371,6 +374,7 @@ ProbeTerminationGracePeriod ProbeWithStrategy ProbesConfiguration ProjectedVolumeSource +Promotable PublicationReclaimPolicy PublicationSpec PublicationStatus @@ -383,6 +387,7 @@ PushSecret QoS Quaresima QuickStart +QuorumFailoverProtection RBAC README RHSA @@ -470,6 +475,8 @@ SnapshotType Snapshotting Snyk Stackgres +StandbyNames +StandbyNumber StartupProbe StartupStrategyType 
StatefulSets @@ -487,6 +494,7 @@ SynchronizeReplicas SynchronizeReplicasConfiguration SynchronousReplicaConfiguration SynchronousReplicaConfigurationMethod +SynchronousStandbyNamesList Synopsys SystemID TCP @@ -1167,6 +1175,7 @@ programmatically proj projectedVolumeTemplate prometheus +promotable promotionTimeout promotionToken provisioner @@ -1182,6 +1191,7 @@ pvcTemplate quantile queryable quickstart +quorumFailoverProtection rbac rc readService @@ -1308,8 +1318,10 @@ sslkey sslmode sslrootcert sso +standbyNames standbyNamesPost standbyNamesPre +standbyNumber startDelay startedAt stateful diff --git a/PROJECT b/PROJECT index 27b49f0be3..9f0b527b4d 100644 --- a/PROJECT +++ b/PROJECT @@ -84,3 +84,12 @@ resources: kind: Subscription path: github.com/cloudnative-pg/cloudnative-pg/api/v1 version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cnpg.io + group: postgresql + kind: FailoverQuorum + path: github.com/cloudnative-pg/cloudnative-pg/api/v1 + version: v1 diff --git a/api/v1/cluster_funcs.go b/api/v1/cluster_funcs.go index 8c1f7aa393..3ffecd5846 100644 --- a/api/v1/cluster_funcs.go +++ b/api/v1/cluster_funcs.go @@ -24,6 +24,7 @@ import ( "fmt" "regexp" "slices" + "strconv" "strings" "time" @@ -1560,3 +1561,19 @@ func (cluster *Cluster) GetEnabledWALArchivePluginName() string { return "" } + +// IsFailoverQuorumActive check if we should enable the +// quorum failover protection alpha-feature. +func (cluster *Cluster) IsFailoverQuorumActive() (bool, error) { + failoverQuorumAnnotation, ok := cluster.GetAnnotations()[utils.FailoverQuorumAnnotationName] + if !ok || failoverQuorumAnnotation == "" { + return false, nil + } + + v, err := strconv.ParseBool(failoverQuorumAnnotation) + if err != nil { + return false, fmt.Errorf("failed to parse failover quorum annotation '%v': %v", failoverQuorumAnnotation, err) + } + + return v, nil +} diff --git a/api/v1/cluster_funcs_test.go b/api/v1/cluster_funcs_test.go index 01799bd6e1..5e92d4ea40 100644 --- a/api/v1/cluster_funcs_test.go +++ b/api/v1/cluster_funcs_test.go @@ -1749,3 +1749,33 @@ var _ = Describe("Probes configuration", func() { "configured probe should not be modified with zero values") }) }) + +var _ = Describe("Failover quorum annotation", func() { + clusterWithAnnotation := func(v string) *Cluster { + return &Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.FailoverQuorumAnnotationName: v, + }, + }, + } + } + + DescribeTable( + "annotation parsing", + func(cluster *Cluster, valueIsCorrect, expected bool) { + actual, err := cluster.IsFailoverQuorumActive() + if valueIsCorrect { + Expect(err).ToNot(HaveOccurred()) + } else { + Expect(err).To(HaveOccurred()) + } + Expect(actual).To(Equal(expected)) + }, + Entry("with no annotation", &Cluster{}, true, false), + Entry("with empty annotation", clusterWithAnnotation(""), true, false), + Entry("with true annotation", clusterWithAnnotation("t"), true, true), + Entry("with false annotation", clusterWithAnnotation("f"), true, false), + Entry("with invalid annotation", clusterWithAnnotation("xxx"), false, false), + ) +}) diff --git a/api/v1/failoverquorum_types.go b/api/v1/failoverquorum_types.go new file mode 100644 index 0000000000..afa55cd001 --- /dev/null +++ b/api/v1/failoverquorum_types.go @@ -0,0 +1,83 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true + +// FailoverQuorumList contains a list of FailoverQuorum +type FailoverQuorumList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of failoverquorums + Items []FailoverQuorum `json:"items"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// FailoverQuorum contains the information about the current failover +// quorum status of a PG cluster. It is updated by the instance manager +// of the primary node and reset to zero by the operator to trigger +// an update. +type FailoverQuorum struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // Most recently observed status of the failover quorum. + // +optional + Status FailoverQuorumStatus `json:"status"` +} + +// FailoverQuorumStatus is the latest observed status of the failover +// quorum of the PG cluster. +type FailoverQuorumStatus struct { + // Contains the latest reported Method value. + // +optional + Method string `json:"method,omitempty"` + + // StandbyNames is the list of potentially synchronous + // instance names. + // +optional + StandbyNames []string `json:"standbyNames,omitempty"` + + // StandbyNumber is the number of synchronous standbys that transactions + // need to wait for replies from. + // +optional + StandbyNumber int `json:"standbyNumber,omitempty"` + + // Primary is the name of the primary instance that updated + // this object the latest time. + // +optional + Primary string `json:"primary,omitempty"` +} + +func init() { + SchemeBuilder.Register(&FailoverQuorum{}, &FailoverQuorumList{}) +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index c6805684d7..2d601693e1 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1361,6 +1361,84 @@ func (in *ExternalCluster) DeepCopy() *ExternalCluster { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverQuorum) DeepCopyInto(out *FailoverQuorum) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverQuorum. +func (in *FailoverQuorum) DeepCopy() *FailoverQuorum { + if in == nil { + return nil + } + out := new(FailoverQuorum) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *FailoverQuorum) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverQuorumList) DeepCopyInto(out *FailoverQuorumList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FailoverQuorum, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverQuorumList. +func (in *FailoverQuorumList) DeepCopy() *FailoverQuorumList { + if in == nil { + return nil + } + out := new(FailoverQuorumList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FailoverQuorumList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailoverQuorumStatus) DeepCopyInto(out *FailoverQuorumStatus) { + *out = *in + if in.StandbyNames != nil { + in, out := &in.StandbyNames, &out.StandbyNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailoverQuorumStatus. +func (in *FailoverQuorumStatus) DeepCopy() *FailoverQuorumStatus { + if in == nil { + return nil + } + out := new(FailoverQuorumStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageCatalog) DeepCopyInto(out *ImageCatalog) { *out = *in diff --git a/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml b/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml new file mode 100644 index 0000000000..91da8c5817 --- /dev/null +++ b/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml @@ -0,0 +1,77 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: failoverquorums.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: FailoverQuorum + listKind: FailoverQuorumList + plural: failoverquorums + singular: failoverquorum + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + FailoverQuorum contains the information about the current failover + quorum status of a PG cluster. It is updated by the instance manager + of the primary node and reset to zero by the operator to trigger + an update. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + description: Most recently observed status of the failover quorum. + properties: + method: + description: Contains the latest reported Method value. + type: string + primary: + description: |- + Primary is the name of the primary instance that updated + this object the latest time. + type: string + standbyNames: + description: |- + StandbyNames is the list of potentially synchronous + instance names. + items: + type: string + type: array + standbyNumber: + description: |- + StandbyNumber is the number of synchronous standbys that transactions + need to wait for replies from. + type: integer + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 6100960f12..ec827bc3a4 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -13,6 +13,7 @@ resources: - bases/postgresql.cnpg.io_databases.yaml - bases/postgresql.cnpg.io_publications.yaml - bases/postgresql.cnpg.io_subscriptions.yaml +- bases/postgresql.cnpg.io_failoverquorums.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index f47a568f0d..7f6a70c742 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -184,12 +184,23 @@ rules: - postgresql.cnpg.io resources: - clusters/status + - failoverquorums/status - poolers/status verbs: - get - patch - update - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - failoverquorums + verbs: + - create + - delete + - get + - list + - watch - apiGroups: - rbac.authorization.k8s.io resources: diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 92ca69e434..6ceff12c66 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -11,6 +11,7 @@ - [Cluster](#postgresql-cnpg-io-v1-Cluster) - [ClusterImageCatalog](#postgresql-cnpg-io-v1-ClusterImageCatalog) - [Database](#postgresql-cnpg-io-v1-Database) +- [FailoverQuorum](#postgresql-cnpg-io-v1-FailoverQuorum) - [ImageCatalog](#postgresql-cnpg-io-v1-ImageCatalog) - [Pooler](#postgresql-cnpg-io-v1-Pooler) - [Publication](#postgresql-cnpg-io-v1-Publication) @@ -160,6 +161,40 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api- +## FailoverQuorum {#postgresql-cnpg-io-v1-FailoverQuorum} + + +**Appears in:** + + + +

+<p>FailoverQuorum contains the information about the current failover
+quorum status of a PG cluster. It is updated by the instance manager
+of the primary node and reset to zero by the operator to trigger
+an update.</p>
+
+<table class="table">
+<thead><tr><th width="30%">Field</th><th>Description</th></tr></thead>
+<tbody>
+<tr><td><code>apiVersion</code> [Required]<br/>string</td>
+<td><code>postgresql.cnpg.io/v1</code></td></tr>
+<tr><td><code>kind</code> [Required]<br/>string</td>
+<td><code>FailoverQuorum</code></td></tr>
+<tr><td><code>metadata</code> [Required]<br/>
+<code>meta/v1.ObjectMeta</code></td>
+<td>No description provided. Refer to the Kubernetes API documentation for the fields of the <code>metadata</code> field.</td></tr>
+<tr><td><code>status</code><br/>
+<a href="#postgresql-cnpg-io-v1-FailoverQuorumStatus"><code>FailoverQuorumStatus</code></a></td>
+<td><p>Most recently observed status of the failover quorum.</p></td></tr>
+</tbody>
+</table>
 
 ## ImageCatalog {#postgresql-cnpg-io-v1-ImageCatalog}
 
@@ -3008,6 +3043,55 @@ of WAL archiving and backups for this external cluster
 
+## FailoverQuorumStatus {#postgresql-cnpg-io-v1-FailoverQuorumStatus}
+
+
+**Appears in:**
+
+- [FailoverQuorum](#postgresql-cnpg-io-v1-FailoverQuorum)
+
+
+<p>FailoverQuorumStatus is the latest observed status of the failover
+quorum of the PG cluster.</p>
+
+<table class="table">
+<thead><tr><th width="30%">Field</th><th>Description</th></tr></thead>
+<tbody>
+<tr><td><code>method</code><br/>string</td>
+<td><p>Contains the latest reported Method value.</p></td></tr>
+<tr><td><code>standbyNames</code><br/>[]string</td>
+<td><p>StandbyNames is the list of potentially synchronous
+instance names.</p></td></tr>
+<tr><td><code>standbyNumber</code><br/>int</td>
+<td><p>StandbyNumber is the number of synchronous standbys that transactions
+need to wait for replies from.</p></td></tr>
+<tr><td><code>primary</code><br/>string</td>
+<td><p>Primary is the name of the primary instance that updated
+this object the latest time.</p></td></tr>
+</tbody>
+</table>
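+
+As an illustrative sketch (an editorial addition, not generated reference
+output), the status of a `FailoverQuorum` object for a hypothetical
+three-instance cluster named `cluster-example`, using synchronous
+replication with `method: any` and `number: 1`, could be populated by the
+primary roughly as follows; all concrete values here are assumptions:
+
+```go
+status := FailoverQuorumStatus{
+	// Latest reported synchronous replication method
+	Method: "any",
+	// Potentially synchronous instances; the primary is never listed here
+	StandbyNames: []string{"cluster-example-2", "cluster-example-3"},
+	// Standbys a COMMIT waits for (the W in the quorum check)
+	StandbyNumber: 1,
+	// Instance that last updated this object
+	Primary: "cluster-example-1",
+}
+```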
+ ## ImageCatalogRef {#postgresql-cnpg-io-v1-ImageCatalogRef} diff --git a/docs/src/failover.md b/docs/src/failover.md index e28da3d933..1c843ed600 100644 --- a/docs/src/failover.md +++ b/docs/src/failover.md @@ -92,3 +92,262 @@ expected outage. Enabling a new configuration option to delay failover provides a mechanism to prevent premature failover for short-lived network or node instability. + +## Failover Quorum (Quorum-based Failover) + +!!! Warning + *Failover quorum* is an experimental feature introduced in version 1.27.0. + Use with caution in production environments. + +Failover quorum is a mechanism that enhances data durability and safety during +failover events in CloudNativePG-managed PostgreSQL clusters. + +Quorum-based failover allows the controller to determine whether to promote a replica +to primary based on the state of a quorum of replicas. +This is useful when stronger data durability is required than the one offered +by [synchronous replication](replication.md#synchronous-replication) and +default automated failover procedures. + +When synchronous replication is not enabled, some data loss is expected and +accepted during failover, as a replica may lag behind the primary when +promoted. + +With synchronous replication enabled, the guarantee is that the application +will not receive explicit acknowledgment of the successful commit of a +transaction until the WAL data is known to be safely received by all required +synchronous standbys. +This is not enough to guarantee that the operator is able to promote the most +advanced replica. + +For example, in a three-node cluster with synchronous replication set to `ANY 1 +(...)`, data is written to the primary and one standby before a commit is +acknowledged. If both the primary and the aligned standby become unavailable +(such as during a network partition), the remaining replica may not have the +latest data. Promoting it could lose some data that the application considered +committed. + +Quorum-based failover addresses this risk by ensuring that failover only occurs +if the operator can confirm the presence of all synchronously committed data in +the instance to promote, and it does not occur otherwise. + +This feature allows users to choose their preferred trade-off between data +durability and data availability. + +Failover quorum can be enabled by setting the annotation +`cnpg.io/failoverQuorum="true"` in the `Cluster` resource. + +!!! info + When this feature is out of the experimental phase, the annotation + `cnpg.io/failoverQuorum` will be replaced by a configuration option in the + `Cluster` resource. + +### How it works + +Before promoting a replica to primary, the operator performs a quorum check, +following the principles of the Dynamo `R + W > N` consistency model[^1]. + +In the quorum failover, these values assume the following meaning: + +- `R` is the number of *promotable replicas* (read quorum); +- `W` is the number of replicas that must acknowledge the write before the + `COMMIT` is returned to the client (write quorum); +- `N` is the total number of potentially synchronous replicas; + +*Promotable replicas* are replicas that have these properties: + + - are part of the cluster; + - are able to report their state to the operator; + - are potentially synchronous; + +If `R + W > N`, then we can be sure that among the promotable replicas there is +at least one that has confirmed all the synchronous commits, and we can safely +promote it to primary. 
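+
+As a minimal sketch (illustration only, with a helper name invented for this
+example), the decision reduces to evaluating `R + W > N` over the set of
+promotable replicas, mirroring the check the operator performs:
+
+```go
+// quorumAllowsFailover reports whether at least one promotable replica
+// is guaranteed to contain every synchronously committed transaction.
+// nodeSet:    names in synchronous_standby_names (N = len(nodeSet))
+// promotable: replicas that are reachable, ready, and part of the cluster
+// w:          number of standbys a COMMIT must wait for
+func quorumAllowsFailover(nodeSet, promotable map[string]bool, w int) bool {
+	r := 0 // R = promotable replicas within the synchronous set
+	for name := range nodeSet {
+		if promotable[name] {
+			r++
+		}
+	}
+	return r+w > len(nodeSet)
+}
+```
+
+For example, with `N = 2` and `W = 1`, failover is allowed when both standbys
+are promotable (`2 + 1 > 2`) and denied when only one is (`1 + 1 = 2`).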
If this is not the case, the controller will not promote +any replica to primary, and will wait for the situation to change. + +Users can force a promotion of a replica to primary through the +`kubectl cnpg promote` command even if the quorum check is failing. + +!!! Warning + Manual promotion should only be used as a last resort. Before proceeding, + make sure you fully understand the risk of data loss and carefully consider the + consequences of prioritizing the resumption of write workloads for your + applications. + +An additional CRD is used to track the quorum state of the cluster. A `Cluster` +with the quorum failover enabled will have a `FailoverQuorum` resource with the same +name as the `Cluster` resource. The `FailoverQuorum` CR is created by the +controller when the quorum failover is enabled, and it is updated by the primary +instance during its reconciliation loop, and read by the operator during quorum +checks. It is used to track the latest known configuration of the synchronous +replication. + +!!! Important + Users should not modify the `FailoverQuorum` resource directly. During + PostgreSQL configuration changes, when it is not possible to determine the + configuration, the `FailoverQuorum` resource will be reset, preventing any + failover until the new configuration is applied. + +The `FailoverQuorum` resource works in conjunction with PostgreSQL synchronous +replication. + +!!! Warning + There is no guarantee that `COMMIT` operations returned to the + client but that have not been performed synchronously, such as those made + explicitly disabling synchronous replication with + `SET synchronous_commit TO local`, will be present on a promoted replica. + +### Quorum Failover Example Scenarios + +In the following scenarios, `R` is the number of promotable replicas, `W` is +the number of replicas that must acknowledge a write before commit, and `N` is +the total number of potentially synchronous replicas. The "Failover" column +indicates whether failover is allowed under quorum failover rules. + +#### Scenario 1: Three-node cluster, failing pod(s) + +A cluster with `instances: 3`, `synchronous.number=1`, and +`dataDurability=required`. + +- If only the primary fails, two promotable replicas remain (R=2). + Since `R + W > N` (2 + 1 > 2), failover is allowed and safe. +- If both the primary and one replica fail, only one promotable replica + remains (R=1). Since `R + W = N` (1 + 1 = 2), failover is not allowed to + prevent possible data loss. + +| R | W | N | Failover | +|:-:|:-:|:-:|:--------:| +| 2 | 1 | 2 | ✅ | +| 1 | 1 | 2 | ❌ | + +#### Scenario 2: Three-node cluster, network partition + +A cluster with `instances: 3`, `synchronous.number: 1`, and +`dataDurability: required` experiences a network partition. + +- If the operator can communicate with the primary, no failover occurs. The + cluster can be impacted if the primary cannot reach any standby, since it + won't commit transactions due to synchronous replication requirements. +- If the operator cannot reach the primary but can reach both replicas (R=2), + failover is allowed. If the operator can reach only one replica (R=1), + failover is not allowed, as the synchronous one may be the other one. + +| R | W | N | Failover | +|:-:|:-:|:-:|:--------:| +| 2 | 1 | 2 | ✅ | +| 1 | 1 | 2 | ❌ | + +#### Scenario 3: Five-node cluster, network partition + +A cluster with `instances: 5`, `synchronous.number=2`, and +`dataDurability=required` experiences a network partition. 
- If the operator can communicate with the primary, no failover occurs. The
+  cluster can be impacted if the primary cannot reach at least two standbys,
+  since it won't commit transactions due to synchronous replication
+  requirements.
+- If the operator cannot reach the primary but can reach at least three
+  replicas (R=3), failover is allowed. If the operator can reach only two
+  replicas (R=2), failover is not allowed, as the synchronous standbys may be
+  among the unreachable replicas.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 3 | 2 | 4 | ✅ |
+| 2 | 2 | 4 | ❌ |
+
+#### Scenario 4: Three-node cluster with remote synchronous replicas
+
+A cluster with `instances: 3` and remote synchronous replicas defined in
+`standbyNamesPre` or `standbyNamesPost`. We assume that the primary is failing.
+
+This scenario requires an important consideration. Replicas listed in
+`standbyNamesPre` or `standbyNamesPost` are not counted in
+`R` (they cannot be promoted), but are included in `N` (they may have received
+synchronous writes). So, if
+`synchronous.number <= len(standbyNamesPre) + len(standbyNamesPost)`, failover
+is not possible, as no local replica can be guaranteed to have the required
+data. The operator prevents such configurations during validation, but some
+invalid configurations are shown below for clarity.
+
+**Example configurations:**
+
+Configuration #1 (valid):
+```yaml
+instances: 3
+postgresql:
+  synchronous:
+    method: any
+    number: 2
+    standbyNamesPre:
+      - angus
+```
+In this configuration, when the primary fails, `R = 2` (the local replicas),
+`W = 2`, and `N = 3` (2 local replicas + 1 remote), allowing failover.
+In case of an additional replica failing (`R = 1`), failover is not allowed.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 2 | 2 | 3 | ✅ |
+| 1 | 2 | 3 | ❌ |
+
+Configuration #2 (invalid):
+```yaml
+instances: 3
+postgresql:
+  synchronous:
+    method: any
+    number: 1
+    maxStandbyNamesFromCluster: 1
+    standbyNamesPre:
+      - angus
+```
+In this configuration, `R = 1` (the only local replica admitted to the set),
+`W = 1`, and `N = 2` (1 local replica + 1 remote).
+Failover is not possible in this setup, so quorum failover cannot be
+enabled with this configuration.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 1 | 1 | 2 | ❌ |
+
+Configuration #3 (invalid):
+```yaml
+instances: 3
+postgresql:
+  synchronous:
+    method: any
+    number: 1
+    maxStandbyNamesFromCluster: 0
+    standbyNamesPre:
+      - angus
+      - malcolm
+```
+In this configuration, `R = 0` (no local replica is part of the set),
+`W = 1`, and `N = 2` (0 local replicas + 2 remote).
+Failover is not possible in this setup, so quorum failover cannot be
+enabled with this configuration.
+
+| R | W | N | Failover |
+|:-:|:-:|:-:|:--------:|
+| 0 | 1 | 2 | ❌ |
+
+#### Scenario 5: Three-node cluster, preferred data durability, network partition
+
+Consider a cluster with `instances: 3`, `synchronous.number=1`, and
+`dataDurability=preferred` that experiences a network partition.
+
+- If the operator can communicate with both the primary and the API server,
+  the primary continues to operate, removing unreachable standbys from the
+  `synchronous_standby_names` set.
+- If the primary cannot reach the operator or API server, a quorum check is
+  performed. The `FailoverQuorum` status cannot have changed, as the primary cannot
+  have received a new configuration. If the operator can reach both replicas,
+  failover is allowed (`R=2`). If only one replica is reachable (`R=1`),
+  failover is not allowed.
+ +| R | W | N | Failover | +|:-:|:-:|:-:|:--------:| +| 2 | 1 | 2 | ✅ | +| 1 | 1 | 2 | ❌ | + +[^1]: [Dynamo: Amazon’s highly available key-value store](https://www.amazon.science/publications/dynamo-amazons-highly-available-key-value-store) diff --git a/docs/src/replication.md b/docs/src/replication.md index 09503efb38..c4eb01e48a 100644 --- a/docs/src/replication.md +++ b/docs/src/replication.md @@ -121,6 +121,11 @@ CloudNativePG supports both details on managing this behavior, refer to the [Data Durability and Synchronous Replication](#data-durability-and-synchronous-replication) section. +!!! Important + The [*failover quorum* feature](failover.md#failover-quorum-quorum-based-failover) (experimental) + can be used alongside synchronous replication to improve data durability + and safety during failover events. + Direct configuration of the `synchronous_standby_names` option is not permitted. However, CloudNativePG automatically populates this option with the names of local pods, while also allowing customization to extend synchronous diff --git a/docs/src/samples/cluster-example-syncreplicas-quorum.yaml b/docs/src/samples/cluster-example-syncreplicas-quorum.yaml new file mode 100644 index 0000000000..1836d2edeb --- /dev/null +++ b/docs/src/samples/cluster-example-syncreplicas-quorum.yaml @@ -0,0 +1,17 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-example + annotations: + cnpg.io/failoverQuorum: "t" +spec: + instances: 3 + + postgresql: + synchronous: + method: any + number: 1 + + storage: + size: 1G + diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index a34f9410f4..758c53480d 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -194,6 +194,9 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { // we don't have the permissions to cache backups, as the ServiceAccount // doesn't have watch permission on the backup status &apiv1.Backup{}, + // we don't have the permissions to cache FailoverQuorum objects, we can + // only access the object having the same name as the cluster + &apiv1.FailoverQuorum{}, }, }, }, diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 9f2fba3db0..57cd107c7b 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -145,6 +145,8 @@ var ErrNextLoop = utils.ErrNextLoop // +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshots,verbs=get;create;watch;list;patch // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=imagecatalogs,verbs=get;watch;list // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusterimagecatalogs,verbs=get;watch;list +// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=failoverquorums,verbs=create;get;watch;delete;list +// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=failoverquorums/status,verbs=get;patch;update;watch // Reconcile is the operator reconcile loop func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index 76f459c2fc..c62bab33f6 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -101,6 +101,11 @@ func (r *ClusterReconciler) createPostgresClusterObjects(ctx context.Context, cl return err } + err = r.reconcileFailoverQuorumObject(ctx, cluster) + if err 
!= nil {
+		return err
+	}
+
 	return nil
 }
 
diff --git a/internal/controller/replicas.go b/internal/controller/replicas.go
index 74117dc5c2..bad7f515de 100644
--- a/internal/controller/replicas.go
+++ b/internal/controller/replicas.go
@@ -110,6 +110,24 @@ func (r *ClusterReconciler) reconcileTargetPrimaryForNonReplicaCluster(
 		return "", err
 	}
 
+	// If the quorum check is active, ensure we don't fail over in unsafe scenarios.
+	isFailoverQuorumActive, err := cluster.IsFailoverQuorumActive()
+	if err != nil {
+		contextLogger.Error(err, "Failed to determine if failover quorum is active")
+		isFailoverQuorumActive = false
+	}
+
+	if cluster.Status.TargetPrimary == cluster.Status.CurrentPrimary &&
+		cluster.Spec.PostgresConfiguration.Synchronous != nil &&
+		isFailoverQuorumActive {
+		if status, err := r.evaluateQuorumCheck(ctx, cluster, status); err != nil {
+			return "", err
+		} else if !status {
+			// Prevent a failover from happening
+			return "", nil
+		}
+	}
+
 	// The current primary is not correctly working, and we need to elect a new one
 	// but before doing that we need to wait for all the WAL receivers to be
 	// terminated. To make sure they eventually terminate we signal the old primary
diff --git a/internal/controller/replicas_quorum.go b/internal/controller/replicas_quorum.go
new file mode 100644
index 0000000000..0794365578
--- /dev/null
+++ b/internal/controller/replicas_quorum.go
@@ -0,0 +1,185 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package controller
+
+import (
+	"context"
+
+	"github.com/cloudnative-pg/machinery/pkg/log"
+	"github.com/cloudnative-pg/machinery/pkg/stringset"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+)
+
+// evaluateQuorumCheck evaluates the quorum check algorithm to detect whether a failover
+// is possible without losing any transaction.
+// "true" is returned when there is surely a replica containing all the transactions,
+// "false" is returned otherwise.
+// When an error is raised, the caller should not start a failover.
+func (r *ClusterReconciler) evaluateQuorumCheck(
+	ctx context.Context,
+	cluster *apiv1.Cluster,
+	statusList postgres.PostgresqlStatusList,
+) (bool, error) {
+	contextLogger := log.FromContext(ctx).WithValues("tag", "quorumCheck")
+
+	var failoverQuorum apiv1.FailoverQuorum
+	if err := r.Get(ctx, client.ObjectKeyFromObject(cluster), &failoverQuorum); err != nil {
+		if apierrs.IsNotFound(err) {
+			contextLogger.Warning(
+				"Quorum check failed because no synchronous metadata is available.
Denying the failover request")
+			return false, nil
+		}
+
+		contextLogger.Error(err,
+			"Quorum check failed because the synchronous replica metadata couldn't be read")
+		return false, err
+	}
+
+	return r.evaluateQuorumCheckWithStatus(ctx, &failoverQuorum, statusList)
+}
+
+// evaluateQuorumCheckWithStatus is used internally by evaluateQuorumCheck,
+// primarily for the benefit of the unit tests
+func (r *ClusterReconciler) evaluateQuorumCheckWithStatus(
+	ctx context.Context,
+	failoverQuorum *apiv1.FailoverQuorum,
+	statusList postgres.PostgresqlStatusList,
+) (bool, error) {
+	contextLogger := log.FromContext(ctx).WithValues("tag", "quorumCheck")
+
+	syncStatus := failoverQuorum.Status
+	contextLogger.Trace("Dumping latest synchronous replication status", "syncStatus", syncStatus)
+
+	// Step 1: coherence check of the synchronous replication information
+	if syncStatus.StandbyNumber <= 0 {
+		contextLogger.Warning(
+			"Quorum check failed: unsupported number of synchronous nodes")
+		return false, nil
+	}
+
+	if len(syncStatus.StandbyNames) == 0 {
+		contextLogger.Warning(
+			"Quorum check failed because the list of synchronous replicas is empty")
+		return false, nil
+	}
+
+	// Step 2: detect promotable replicas
+	candidateReplicas := stringset.New()
+	for _, record := range statusList.Items {
+		if record.Error == nil && record.IsPodReady {
+			candidateReplicas.Put(record.Pod.Name)
+		}
+	}
+
+	// Step 3: evaluate quorum check algorithm
+	//
+	// Important: R + W > N <==> strong consistency
+	// With:
+	//   N = the cardinality of the synchronous_standby_names set
+	//   W = the sync number or 0 if we're changing a replica configuration.
+	//   R = the cardinality of the set of promotable replicas within the
+	//       synchronous_standby_names set
+	//
+	// When this criterion is satisfied, we surely have a node containing
+	// the latest transaction.
+	//
+	// The case having W == 0 has already been sorted out in the coherence check.
+
+	nodeSet := stringset.From(syncStatus.StandbyNames)
+	writeSetCardinality := syncStatus.StandbyNumber
+	readSet := nodeSet.Intersect(candidateReplicas)
+
+	nodeSetCardinality := nodeSet.Len()
+	readSetCardinality := readSet.Len()
+
+	isStronglyConsistent := (readSetCardinality + writeSetCardinality) > nodeSetCardinality
+
+	contextLogger.Info(
+		"Quorum check algorithm results",
+		"isStronglyConsistent", isStronglyConsistent,
+		"readSetCardinality", readSetCardinality,
+		"readSet", readSet.ToSortedList(),
+		"writeSetCardinality", writeSetCardinality,
+		"nodeSet", nodeSet.ToSortedList(),
+		"nodeSetCardinality", nodeSetCardinality,
+	)
+
+	if !isStronglyConsistent {
+		contextLogger.Info("Strong consistency check failed. Preventing failover.")
+	}
+
+	return isStronglyConsistent, nil
+}
+
+func (r *ClusterReconciler) reconcileFailoverQuorumObject(ctx context.Context, cluster *apiv1.Cluster) error {
+	contextLogger := log.FromContext(ctx).WithValues("tag", "quorumCheck")
+
+	syncConfig := cluster.Spec.PostgresConfiguration.Synchronous
+	failoverQuorumActive, err := cluster.IsFailoverQuorumActive()
+	if err != nil {
+		contextLogger.Error(err, "Failed to determine if failover quorum is active")
+	}
+	if syncConfig != nil && failoverQuorumActive {
+		return r.ensureFailoverQuorumObjectExists(ctx, cluster)
+	}
+
+	return r.ensureFailoverQuorumObjectDoesNotExist(ctx, cluster)
+}
+
+func (r *ClusterReconciler) ensureFailoverQuorumObjectExists(ctx context.Context, cluster *apiv1.Cluster) error {
+	failoverQuorum := apiv1.FailoverQuorum{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "FailoverQuorum",
+			APIVersion: apiv1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: cluster.Namespace,
+			Name:      cluster.Name,
+		},
+	}
+	cluster.SetInheritedDataAndOwnership(&failoverQuorum.ObjectMeta)
+
+	err := r.Create(ctx, &failoverQuorum)
+	if err != nil && !apierrs.IsAlreadyExists(err) {
+		log.FromContext(ctx).Error(err, "Unable to create the FailoverQuorum", "object", failoverQuorum)
+		return err
+	}
+
+	return nil
+}
+
+func (r *ClusterReconciler) ensureFailoverQuorumObjectDoesNotExist(ctx context.Context, cluster *apiv1.Cluster) error {
+	var failoverQuorum apiv1.FailoverQuorum
+
+	if err := r.Get(ctx, client.ObjectKeyFromObject(cluster), &failoverQuorum); err != nil {
+		if apierrs.IsNotFound(err) {
+			return nil
+		}
+
+		return err
+	}
+
+	return r.Delete(ctx, &failoverQuorum)
+}
diff --git a/internal/controller/replicas_quorum_test.go b/internal/controller/replicas_quorum_test.go
new file mode 100644
index 0000000000..b2bda01ff0
--- /dev/null
+++ b/internal/controller/replicas_quorum_test.go
@@ -0,0 +1,141 @@
+/*
+Copyright © contributors to CloudNativePG, established as
+CloudNativePG a Series of LF Projects, LLC.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+SPDX-License-Identifier: Apache-2.0
+*/
+
+package controller
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Describe("quorum promotion control", func() {
+	r := &ClusterReconciler{}
+
+	When("the information is not consistent because the number of synchronous standbys is zero", func() {
+		sync := &apiv1.FailoverQuorum{
+			Status: apiv1.FailoverQuorumStatus{
+				StandbyNumber: 0,
+			},
+		}
+
+		statusList := postgres.PostgresqlStatusList{}
+
+		It("denies a failover", func(ctx SpecContext) {
+			status, err := r.evaluateQuorumCheckWithStatus(ctx, sync, statusList)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(status).To(BeFalse())
+		})
+	})
+
+	When("the information is not consistent because the standby list is empty", func() {
+		sync := &apiv1.FailoverQuorum{
+			Status: apiv1.FailoverQuorumStatus{
+				StandbyNumber: 3,
+				StandbyNames:  nil,
+			},
+		}
+
+		statusList := postgres.PostgresqlStatusList{}
+
+		It("denies a failover", func(ctx SpecContext) {
+			status, err := r.evaluateQuorumCheckWithStatus(ctx, sync, statusList)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(status).To(BeFalse())
+		})
+	})
+
+	When("there is no quorum", func() {
+		sync := &apiv1.FailoverQuorum{
+			Status: apiv1.FailoverQuorumStatus{
+				StandbyNumber: 1,
+				StandbyNames: []string{
+					"postgres-2",
+					"postgres-3",
+				},
+			},
+		}
+
+		statusList := postgres.PostgresqlStatusList{
+			Items: []postgres.PostgresqlStatus{
+				{
+					Pod: &corev1.Pod{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: "postgres-3",
+						},
+					},
+					Error:      nil,
+					IsPodReady: true,
+				},
+			},
+		}
+
+		It("denies a failover", func(ctx SpecContext) {
+			status, err := r.evaluateQuorumCheckWithStatus(ctx, sync, statusList)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(status).To(BeFalse())
+		})
+	})
+
+	When("there is quorum", func() {
+		sync := &apiv1.FailoverQuorum{
+			Status: apiv1.FailoverQuorumStatus{
+				StandbyNumber: 1,
+				StandbyNames: []string{
+					"postgres-2",
+					"postgres-3",
+				},
+			},
+		}
+
+		statusList := postgres.PostgresqlStatusList{
+			Items: []postgres.PostgresqlStatus{
+				{
+					Pod: &corev1.Pod{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: "postgres-2",
+						},
+					},
+					Error:      nil,
+					IsPodReady: true,
+				},
+				{
+					Pod: &corev1.Pod{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: "postgres-3",
+						},
+					},
+					Error:      nil,
+					IsPodReady: true,
+				},
+			},
+		}
+
+		It("allows a failover", func(ctx SpecContext) {
+			status, err := r.evaluateQuorumCheckWithStatus(ctx, sync, statusList)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(status).To(BeTrue())
+		})
+	})
+})
diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go
index 454b9247ba..e77399fa70 100644
--- a/internal/management/controller/instance_controller.go
+++ b/internal/management/controller/instance_controller.go
@@ -151,7 +151,7 @@ func (r *InstanceReconciler) Reconcile(
 	requeueOnMissingPermissions := r.updateCacheFromCluster(ctx, cluster)
 
 	// Reconcile monitoring section
-	r.reconcileMetrics(cluster)
+	r.reconcileMetrics(ctx, cluster)
 	r.reconcileMonitoringQueries(ctx, cluster)
 
 	// Verify that the promotion token is usable before changing the archive mode and triggering restarts
@@ -252,6 +252,18 @@ func (r *InstanceReconciler) Reconcile(
 
 	if reloadNeeded && !restarted {
 		contextLogger.Info("reloading the instance")
+
+		// IMPORTANT
+		//
+		// We are unsure of the state of the PostgreSQL configuration
+		// while a new configuration is being applied.
+		//
+		// For this reason, before applying a new configuration we
+		// reset the FailoverQuorum object - de facto preventing any failover -
+		// and we update it after.
+ if err = r.resetFailoverQuorumObject(ctx, cluster); err != nil { + return reconcile.Result{}, err + } if err = r.instance.Reload(ctx); err != nil { return reconcile.Result{}, fmt.Errorf("while reloading the instance: %w", err) } @@ -260,6 +272,10 @@ func (r *InstanceReconciler) Reconcile( } } + if err = r.updateFailoverQuorumObject(ctx, cluster); err != nil { + return reconcile.Result{}, err + } + // IMPORTANT // From now on, the database can be assumed as running. Every operation // needing the database to be up should be put below this line. @@ -807,6 +823,7 @@ func (r *InstanceReconciler) reconcileClusterRoleWithoutDB( // reconcileMetrics updates any required metrics func (r *InstanceReconciler) reconcileMetrics( + ctx context.Context, cluster *apiv1.Cluster, ) { exporter := r.metricsServerExporter @@ -823,7 +840,7 @@ func (r *InstanceReconciler) reconcileMetrics( exporter.Metrics.SyncReplicas.WithLabelValues("min").Set(float64(cluster.Spec.MinSyncReplicas)) exporter.Metrics.SyncReplicas.WithLabelValues("max").Set(float64(cluster.Spec.MaxSyncReplicas)) - syncReplicas := replication.GetExpectedSyncReplicasNumber(cluster) + syncReplicas := replication.GetExpectedSyncReplicasNumber(ctx, cluster) exporter.Metrics.SyncReplicas.WithLabelValues("expected").Set(float64(syncReplicas)) if cluster.IsReplica() { diff --git a/internal/management/controller/instance_sync.go b/internal/management/controller/instance_sync.go new file mode 100644 index 0000000000..6d9809fc5a --- /dev/null +++ b/internal/management/controller/instance_sync.go @@ -0,0 +1,123 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package controller + +import ( + "context" + + "github.com/cloudnative-pg/machinery/pkg/log" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" +) + +// resetFailoverQuorumObject resets the content of the sync quorum object +// to prevent unsafe failovers when we are changing the configuration +func (r *InstanceReconciler) resetFailoverQuorumObject(ctx context.Context, cluster *apiv1.Cluster) error { + if !r.shouldManageFailoverQuorumObject(ctx, cluster) { + return nil + } + + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var livingQuorumStatus apiv1.FailoverQuorum + + err := r.client.Get(ctx, client.ObjectKeyFromObject(cluster), &livingQuorumStatus) + if err != nil { + return err + } + + livingQuorumStatus.Status = apiv1.FailoverQuorumStatus{} + return r.client.Status().Update(ctx, &livingQuorumStatus) + }) +} + +// updateFailoverQuorumObject updates the sync quorum object reading the +// current synchronous replica metadata from the PG instance +func (r *InstanceReconciler) updateFailoverQuorumObject(ctx context.Context, cluster *apiv1.Cluster) error { + if !r.shouldManageFailoverQuorumObject(ctx, cluster) { + return nil + } + + metadata, err := r.Instance().GetSynchronousReplicationMetadata(ctx) + if err != nil { + return err + } + + newStatus := apiv1.FailoverQuorumStatus{} + if metadata != nil { + newStatus.Method = metadata.Method + newStatus.Primary = r.instance.GetPodName() + newStatus.StandbyNumber = metadata.NumSync + + // We ensure the primary is not included in the standby names + newStatus.StandbyNames = make([]string, 0, len(metadata.StandbyNames)) + for _, name := range metadata.StandbyNames { + if name == newStatus.Primary { + continue + } + newStatus.StandbyNames = append(newStatus.StandbyNames, name) + } + } + + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + var livingQuorumStatus apiv1.FailoverQuorum + + err := r.client.Get(ctx, client.ObjectKeyFromObject(cluster), &livingQuorumStatus) + if err != nil { + return err + } + + if equality.Semantic.DeepEqual(livingQuorumStatus.Status, newStatus) { + return nil + } + + updatedQuorumStatus := livingQuorumStatus.DeepCopy() + updatedQuorumStatus.Status = newStatus + return r.client.Status().Update(ctx, updatedQuorumStatus) + }) +} + +func (r *InstanceReconciler) shouldManageFailoverQuorumObject(ctx context.Context, cluster *apiv1.Cluster) bool { + contextLogger := log.FromContext(ctx) + + if cluster.Status.TargetPrimary != r.instance.GetPodName() { + return false + } + if cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary { + return false + } + if cluster.Spec.PostgresConfiguration.Synchronous == nil { + return false + } + + failoverQuorumActive, err := cluster.IsFailoverQuorumActive() + if err != nil { + contextLogger.Error(err, "Failed to determine if sync quorum is active") + failoverQuorumActive = false + } + + if !failoverQuorumActive { + return false + } + + return true +} diff --git a/internal/webhook/v1/cluster_webhook.go b/internal/webhook/v1/cluster_webhook.go index 29ab7a5aeb..e82890f793 100644 --- a/internal/webhook/v1/cluster_webhook.go +++ b/internal/webhook/v1/cluster_webhook.go @@ -207,6 +207,7 @@ func (v *ClusterCustomValidator) validate(r *apiv1.Cluster) (allErrs field.Error v.validateRetentionPolicy, v.validateConfiguration, v.validateSynchronousReplicaConfiguration, + 
v.validateFailoverQuorum,
 		v.validateLDAP,
 		v.validateReplicationSlots,
 		v.validateSynchronizeLogicalDecoding,
@@ -976,12 +977,13 @@ func (v *ClusterCustomValidator) validateSynchronousReplicaConfiguration(r *apiv
 
 	var result field.ErrorList
 
-	if r.Spec.PostgresConfiguration.Synchronous.Number >= (r.Spec.Instances +
-		len(r.Spec.PostgresConfiguration.Synchronous.StandbyNamesPost) +
-		len(r.Spec.PostgresConfiguration.Synchronous.StandbyNamesPre)) {
+	cfg := r.Spec.PostgresConfiguration.Synchronous
+	if cfg.Number >= (r.Spec.Instances +
+		len(cfg.StandbyNamesPost) +
+		len(cfg.StandbyNamesPre)) {
 		err := field.Invalid(
 			field.NewPath("spec", "postgresql", "synchronous"),
-			r.Spec.PostgresConfiguration.Synchronous,
+			cfg,
 			"Invalid synchronous configuration: the number of synchronous replicas must be less than the "+
 				"total number of instances and the provided standby names.",
 		)
@@ -991,6 +993,57 @@ func (v *ClusterCustomValidator) validateSynchronousReplicaConfiguration(r *apiv
 	return result
 }
 
+func (v *ClusterCustomValidator) validateFailoverQuorum(r *apiv1.Cluster) field.ErrorList {
+	var result field.ErrorList
+
+	failoverQuorumActive, err := r.IsFailoverQuorumActive()
+	if err != nil {
+		err := field.Invalid(
+			field.NewPath("metadata", "annotations", utils.FailoverQuorumAnnotationName),
+			r.Annotations[utils.FailoverQuorumAnnotationName],
+			"Invalid failoverQuorum annotation value, expected boolean.",
+		)
+		result = append(result, err)
+		return result
+	}
+	if !failoverQuorumActive {
+		return nil
+	}
+
+	cfg := r.Spec.PostgresConfiguration.Synchronous
+	if cfg == nil {
+		err := field.Required(
+			field.NewPath("spec", "postgresql", "synchronous"),
+			"Invalid failoverQuorum configuration: synchronous replication configuration "+
+				"is required.",
+		)
+		result = append(result, err)
+		return result
+	}
+
+	if cfg.Number <= len(cfg.StandbyNamesPost)+len(cfg.StandbyNamesPre) {
+		err := field.Invalid(
+			field.NewPath("spec", "postgresql", "synchronous"),
+			cfg,
+			"Invalid failoverQuorum configuration: spec.postgresql.synchronous.number must be greater than "+
+				"the total number of names in spec.postgresql.synchronous.standbyNamesPre and "+
+				"spec.postgresql.synchronous.standbyNamesPost to allow automatic failover.",
+		)
+		result = append(result, err)
+	}
+
+	if r.Spec.Instances <= 2 {
+		err := field.Invalid(
+			field.NewPath("spec", "instances"),
+			r.Spec.Instances,
+			"failoverQuorum requires more than 2 instances.",
+		)
+		result = append(result, err)
+	}
+
+	return result
+}
+
 // validateConfiguration determines whether a PostgreSQL configuration is valid
 func (v *ClusterCustomValidator) validateConfiguration(r *apiv1.Cluster) field.ErrorList {
 	var result field.ErrorList
diff --git a/internal/webhook/v1/cluster_webhook_test.go b/internal/webhook/v1/cluster_webhook_test.go
index f06a207569..a59bab25b8 100644
--- a/internal/webhook/v1/cluster_webhook_test.go
+++ b/internal/webhook/v1/cluster_webhook_test.go
@@ -5795,3 +5795,80 @@ var _ = Describe("getStorageWarnings", func() {
 		Expect(getStorageWarnings(cluster)).To(BeEmpty())
 	})
 })
+
+var _ = Describe("failoverQuorum validation", func() {
+	var v *ClusterCustomValidator
+	BeforeEach(func() {
+		v = &ClusterCustomValidator{}
+	})
+
+	It("fails if it is active but no synchronous replication is configured", func() {
+		cluster := &apiv1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					utils.FailoverQuorumAnnotationName: "t",
+				},
+			},
+			Spec: apiv1.ClusterSpec{
+				Instances: 3,
+			},
+		}
+
+		errList :=
v.validateFailoverQuorum(cluster) + Expect(errList).To(HaveLen(1)) + }) + + It("requires at least three instances", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.FailoverQuorumAnnotationName: "t", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ + Number: 1, + }, + }, + }, + } + + errList := v.validateFailoverQuorum(cluster) + Expect(errList).To(BeEmpty()) + + cluster.Spec.Instances = 2 + errList = v.validateFailoverQuorum(cluster) + Expect(errList).To(HaveLen(1)) + }) + + It("check if the number of external synchronous replicas is coherent", func() { + cluster := &apiv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + utils.FailoverQuorumAnnotationName: "t", + }, + }, + Spec: apiv1.ClusterSpec{ + Instances: 3, + PostgresConfiguration: apiv1.PostgresConfiguration{ + Synchronous: &apiv1.SynchronousReplicaConfiguration{ + Number: 1, + StandbyNamesPre: []string{ + "one", + "two", + }, + StandbyNamesPost: []string{ + "three", + "four", + }, + }, + }, + }, + } + + errList := v.validateFailoverQuorum(cluster) + Expect(errList).To(HaveLen(1)) + }) +}) diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go index 3d5775884a..6ee4ec1bbc 100644 --- a/pkg/management/postgres/configuration.go +++ b/pkg/management/postgres/configuration.go @@ -78,10 +78,7 @@ func (instance *Instance) RefreshConfigurationFilesFromCluster( } postgresConfiguration, sha256, err := createPostgresqlConfiguration( - ctx, - cluster, - preserveUserSettings, - pgMajor, + ctx, cluster, preserveUserSettings, pgMajor, operationType, ) if err != nil { @@ -406,7 +403,7 @@ func createPostgresqlConfiguration( IsReplicaCluster: cluster.IsReplica(), IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&cluster.ObjectMeta), IsAlterSystemEnabled: cluster.Spec.PostgresConfiguration.EnableAlterSystem, - SynchronousStandbyNames: replication.GetSynchronousStandbyNames(cluster), + SynchronousStandbyNames: replication.GetSynchronousStandbyNames(ctx, cluster), } if preserveUserSettings { diff --git a/pkg/management/postgres/configuration_test.go b/pkg/management/postgres/configuration_test.go index 38ffa9010a..48cd59847b 100644 --- a/pkg/management/postgres/configuration_test.go +++ b/pkg/management/postgres/configuration_test.go @@ -178,10 +178,7 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() { It("doesn't set temp_tablespaces if there are no declared tablespaces", func(ctx SpecContext) { config, _, err := createPostgresqlConfiguration( - ctx, - &clusterWithoutTablespaces, - true, - defaultMajor, + ctx, &clusterWithoutTablespaces, true, defaultMajor, postgres.OperationType_TYPE_UNSPECIFIED, ) Expect(err).ToNot(HaveOccurred()) @@ -190,10 +187,7 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() { It("doesn't set temp_tablespaces if there are no temporary tablespaces", func(ctx SpecContext) { config, _, err := createPostgresqlConfiguration( - ctx, - &clusterWithoutTemporaryTablespaces, - true, - defaultMajor, + ctx, &clusterWithoutTemporaryTablespaces, true, defaultMajor, postgres.OperationType_TYPE_UNSPECIFIED, ) Expect(err).ToNot(HaveOccurred()) @@ -202,10 +196,7 @@ var _ = Describe("Test building of the list of temporary tablespaces", func() { It("sets temp_tablespaces when there are temporary tablespaces", func(ctx SpecContext) { 
config, _, err := createPostgresqlConfiguration( - ctx, - &clusterWithTemporaryTablespaces, - true, - defaultMajor, + ctx, &clusterWithTemporaryTablespaces, true, defaultMajor, postgres.OperationType_TYPE_UNSPECIFIED, ) Expect(err).ToNot(HaveOccurred()) @@ -267,10 +258,7 @@ var _ = Describe("recovery_min_apply_delay", func() { Expect(primaryCluster.IsReplica()).To(BeFalse()) config, _, err := createPostgresqlConfiguration( - ctx, - &primaryCluster, - true, - defaultMajor, + ctx, &primaryCluster, true, defaultMajor, postgres.OperationType_TYPE_UNSPECIFIED, ) Expect(err).ToNot(HaveOccurred()) @@ -281,10 +269,7 @@ var _ = Describe("recovery_min_apply_delay", func() { Expect(replicaCluster.IsReplica()).To(BeTrue()) config, _, err := createPostgresqlConfiguration( - ctx, - &replicaCluster, - true, - defaultMajor, + ctx, &replicaCluster, true, defaultMajor, postgres.OperationType_TYPE_UNSPECIFIED, ) Expect(err).ToNot(HaveOccurred()) @@ -295,10 +280,7 @@ var _ = Describe("recovery_min_apply_delay", func() { Expect(replicaClusterWithNoDelay.IsReplica()).To(BeTrue()) config, _, err := createPostgresqlConfiguration( - ctx, - &replicaClusterWithNoDelay, - true, - defaultMajor, + ctx, &replicaClusterWithNoDelay, true, defaultMajor, postgres.OperationType_TYPE_UNSPECIFIED, ) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/management/postgres/instance.go b/pkg/management/postgres/instance.go index 0011a67672..74f0caee2c 100644 --- a/pkg/management/postgres/instance.go +++ b/pkg/management/postgres/instance.go @@ -23,6 +23,7 @@ import ( "context" "crypto/tls" "database/sql" + "encoding/json" "errors" "fmt" "io/fs" @@ -1010,6 +1011,37 @@ func (instance *Instance) WaitForConfigReload(ctx context.Context) (*postgres.Po return status, nil } +// GetSynchronousReplicationMetadata reads the current PostgreSQL configuration +// and extracts the parameters that were used to compute the synchronous_standby_names +// GUC. 
+func (instance *Instance) GetSynchronousReplicationMetadata( + ctx context.Context, +) (*postgres.SynchronousStandbyNamesConfig, error) { + db, err := instance.GetSuperUserDB() + if err != nil { + return nil, err + } + + var metadata string + row := db.QueryRowContext( + ctx, fmt.Sprintf("SHOW %s", postgres.CNPGSynchronousStandbyNamesMetadata)) + err = row.Scan(&metadata) + if err != nil { + return nil, err + } + + if len(metadata) == 0 { + return nil, nil + } + + var result postgres.SynchronousStandbyNamesConfig + if err := json.Unmarshal([]byte(metadata), &result); err != nil { + return nil, fmt.Errorf("while decoding synchronous_standby_names metadata: %w", err) + } + + return &result, nil +} + // waitForStreamingConnectionAvailable waits until we can connect to the passed // sql.DB connection using streaming protocol func waitForStreamingConnectionAvailable(ctx context.Context, db *sql.DB) error { diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go index 7e96024041..84ad8c4676 100644 --- a/pkg/postgres/configuration.go +++ b/pkg/postgres/configuration.go @@ -22,6 +22,7 @@ package postgres import ( "bytes" "crypto/sha256" + "encoding/json" "fmt" "iter" "math" @@ -31,6 +32,8 @@ import ( "strings" "text/template" "time" + + "github.com/cloudnative-pg/machinery/pkg/log" ) // WalLevelValue a value that is assigned to the 'wal_level' configuration field @@ -249,6 +252,12 @@ cnpg_pooler_pgbouncer cnpg_pooler_pgbouncer cnpg_pooler_pgbouncer // config in the custom.conf file CNPGConfigSha256 = "cnpg.config_sha256" + // CNPGSynchronousStandbyNamesMetadata is used to inject inside PG the parameters + // that were used to calculate synchronous_standby_names. With this data we're + // able to know the actual settings without parsing back the + // synchronous_standby_names GUC + CNPGSynchronousStandbyNamesMetadata = "cnpg.synchronous_standby_names_metadata" + // SharedPreloadLibraries shared preload libraries key in the config SharedPreloadLibraries = "shared_preload_libraries" @@ -303,6 +312,21 @@ type ConfigurationSettings struct { PgAuditSettings SettingsCollection } +// SynchronousStandbyNamesConfig is the parameters that are needed +// to create the synchronous_standby_names GUC +type SynchronousStandbyNamesConfig struct { + // Method accepts 'any' (quorum-based synchronous replication) + // or 'first' (priority-based synchronous replication) as values. 
+	Method string `json:"method"`
+
+	// NumSync is the number of synchronous standbys that transactions
+	// need to wait for replies from
+	NumSync int `json:"number"`
+
+	// StandbyNames is the list of standby servers
+	StandbyNames []string `json:"standbyNames"`
+}
+
 // ConfigurationInfo contains the required information to create a PostgreSQL
 // configuration
 type ConfigurationInfo struct {
@@ -319,7 +343,7 @@ type ConfigurationInfo struct {
 	UserSettings map[string]string
 
 	// The synchronous_standby_names configuration to be applied
-	SynchronousStandbyNames string
+	SynchronousStandbyNames SynchronousStandbyNamesConfig
 
 	// The synchronized_standby_slots configuration to be applied
 	SynchronizedStandbySlots []string
@@ -709,9 +733,17 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration {
 	}
 
 	// Apply the synchronous replication settings
-	syncStandbyNames := info.SynchronousStandbyNames
+	syncStandbyNames := info.SynchronousStandbyNames.String()
 	if len(syncStandbyNames) > 0 {
 		configuration.OverwriteConfig(SynchronousStandbyNames, syncStandbyNames)
+
+		if metadata, err := json.Marshal(info.SynchronousStandbyNames); err != nil {
+			log.Error(err,
+				"Error while serializing streaming configuration parameters",
+				"synchronousStandbyNames", info.SynchronousStandbyNames)
+		} else {
+			configuration.OverwriteConfig(CNPGSynchronousStandbyNamesMetadata, string(metadata))
+		}
 	}
 
 	if len(info.SynchronizedStandbySlots) > 0 {
@@ -944,3 +976,31 @@ func (p *PgConfiguration) setDynamicLibraryPath(info ConfigurationInfo) {
 
 	p.OverwriteConfig(DynamicLibraryPath, strings.Join(dynamicLibraryPath, ":"))
 }
+
+// String creates the synchronous_standby_names PostgreSQL GUC
+// with the passed members
+func (s *SynchronousStandbyNamesConfig) String() string {
+	if s.IsZero() {
+		return ""
+	}
+
+	escapePostgresConfLiteral := func(value string) string {
+		return fmt.Sprintf("\"%v\"", strings.ReplaceAll(value, "\"", "\"\""))
+	}
+
+	escapedReplicas := make([]string, len(s.StandbyNames))
+	for idx, name := range s.StandbyNames {
+		escapedReplicas[idx] = escapePostgresConfLiteral(name)
+	}
+
+	return fmt.Sprintf(
+		"%s %v (%v)",
+		s.Method,
+		s.NumSync,
+		strings.Join(escapedReplicas, ","))
+}
+
+// IsZero is true when synchronous replication is disabled
+func (s SynchronousStandbyNamesConfig) IsZero() bool {
+	return len(s.StandbyNames) == 0
+}
diff --git a/pkg/postgres/replication/explicit.go b/pkg/postgres/replication/explicit.go
index 443d633edb..8772f880e3 100644
--- a/pkg/postgres/replication/explicit.go
+++ b/pkg/postgres/replication/explicit.go
@@ -20,12 +20,11 @@ SPDX-License-Identifier: Apache-2.0
 package replication
 
 import (
-	"fmt"
 	"slices"
 	"sort"
-	"strings"
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
 )
 
 // placeholderInstanceNameSuffix is the name of the suffix to be added to the
+// instance name to create a placeholder name to be used inside
+// `synchronous_standby_names` when the replica list would be empty.
const placeholderInstanceNameSuffix = "-placeholder"
 
-func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) string {
+func explicitSynchronousStandbyNames(cluster *apiv1.Cluster) postgres.SynchronousStandbyNamesConfig {
 	switch cluster.Spec.PostgresConfiguration.Synchronous.DataDurability {
 	case apiv1.DataDurabilityLevelPreferred:
 		return explicitSynchronousStandbyNamesDataDurabilityPreferred(cluster)
@@ -43,7 +42,9 @@
 	}
 }
 
-func explicitSynchronousStandbyNamesDataDurabilityRequired(cluster *apiv1.Cluster) string {
+func explicitSynchronousStandbyNamesDataDurabilityRequired(
+	cluster *apiv1.Cluster,
+) postgres.SynchronousStandbyNamesConfig {
 	config := cluster.Spec.PostgresConfiguration.Synchronous
 
 	// Create the list of pod names
@@ -71,20 +72,16 @@
 		}
 	}
 
-	// Escape the pod list
-	escapedReplicas := make([]string, len(instancesList))
-	for idx, name := range instancesList {
-		escapedReplicas[idx] = escapePostgresConfLiteral(name)
+	return postgres.SynchronousStandbyNamesConfig{
+		Method:       config.Method.ToPostgreSQLConfigurationKeyword(),
+		NumSync:      config.Number,
+		StandbyNames: instancesList,
 	}
-
-	return fmt.Sprintf(
-		"%s %v (%v)",
-		config.Method.ToPostgreSQLConfigurationKeyword(),
-		config.Number,
-		strings.Join(escapedReplicas, ","))
 }
 
-func explicitSynchronousStandbyNamesDataDurabilityPreferred(cluster *apiv1.Cluster) string {
+func explicitSynchronousStandbyNamesDataDurabilityPreferred(
+	cluster *apiv1.Cluster,
+) postgres.SynchronousStandbyNamesConfig {
 	config := cluster.Spec.PostgresConfiguration.Synchronous
 
 	// Create the list of healthy replicas
@@ -95,12 +92,6 @@
 		instancesList = instancesList[:*config.MaxStandbyNamesFromCluster]
 	}
 
-	// Escape the pod list
-	escapedReplicas := make([]string, len(instancesList))
-	for idx, name := range instancesList {
-		escapedReplicas[idx] = escapePostgresConfLiteral(name)
-	}
-
 	// If data durability is not enforced, we cap the number of synchronous
 	// replicas to be required to the number of available replicas.
 	syncReplicaNumber := config.Number
@@ -110,14 +101,18 @@
 
 	// An empty instances list is not allowed in synchronous_standby_names
 	if len(instancesList) == 0 {
-		return ""
+		return postgres.SynchronousStandbyNamesConfig{
+			Method:       "",
+			NumSync:      0,
+			StandbyNames: []string{},
+		}
 	}
 
-	return fmt.Sprintf(
-		"%s %v (%v)",
-		config.Method.ToPostgreSQLConfigurationKeyword(),
-		syncReplicaNumber,
-		strings.Join(escapedReplicas, ","))
+	return postgres.SynchronousStandbyNamesConfig{
+		Method:       config.Method.ToPostgreSQLConfigurationKeyword(),
+		NumSync:      syncReplicaNumber,
+		StandbyNames: instancesList,
+	}
 }
 
 // getSortedInstanceNames gets a list of all the known PostgreSQL instances in a
diff --git a/pkg/postgres/replication/explicit_test.go b/pkg/postgres/replication/explicit_test.go
index c2a12a9a69..8dc7d9ce45 100644
--- a/pkg/postgres/replication/explicit_test.go
+++ b/pkg/postgres/replication/explicit_test.go
@@ -23,6 +23,7 @@ import (
 	"k8s.io/utils/ptr"
 
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
 
 	. "github.com/onsi/ginkgo/v2"
 	.
"github.com/onsi/gomega" @@ -46,7 +47,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { }, } - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("ANY 2 (\"three\",\"two\",\"one\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "ANY", + NumSync: 2, + StandbyNames: []string{"three", "two", "one"}, + })) }) It("creates configuration with the FIRST clause", func() { @@ -65,7 +70,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { }, } - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\",\"one\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three", "two", "one"}, + })) }) It("considers the maximum number of standby names", func() { @@ -84,7 +93,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { }, } - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three"}, + })) }) It("prepends the prefix and append the suffix", func() { @@ -103,8 +116,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { }, } - Expect(explicitSynchronousStandbyNames(cluster)).To( - Equal("FIRST 2 (\"prefix\",\"here\",\"three\",\"suffix\",\"there\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"prefix", "here", "three", "suffix", "there"}, + })) }) It("enforce synchronous replication even if there are no healthy replicas", func() { @@ -116,8 +132,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { } cluster.Status = apiv1.ClusterStatus{} - Expect(explicitSynchronousStandbyNames(cluster)).To( - Equal("FIRST 2 (\"example-placeholder\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"example-placeholder"}, + })) }) It("includes pods that do not report the status", func() { @@ -136,7 +155,12 @@ var _ = Describe("synchronous replica configuration with the new API", func() { }, InstanceNames: []string{"one", "two", "three"}, } - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\",\"one\")")) + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three", "two", "one"}, + })) }) }) @@ -159,7 +183,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { } // Important: the name of the primary is not included in the list - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("ANY 2 (\"three\",\"two\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "ANY", + NumSync: 2, + StandbyNames: []string{"three", "two"}, + })) }) It("creates configuration with the FIRST clause", func() { @@ -180,7 +208,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { } // Important: the name of the primary is not included in the list - 
Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 2 (\"three\",\"two\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three", "two"}, + })) }) It("considers the maximum number of standby names", func() { @@ -200,7 +232,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { }, } - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 1 (\"three\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 1, + StandbyNames: []string{"three"}, + })) }) It("ignores the prefix and the suffix", func() { @@ -219,8 +255,11 @@ var _ = Describe("synchronous replica configuration with the new API", func() { }, } - Expect(explicitSynchronousStandbyNames(cluster)).To( - Equal("FIRST 2 (\"three\",\"two\")")) + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 2, + StandbyNames: []string{"three", "two"}, + })) }) It("disables synchronous replication when no instance is available", func() { @@ -233,7 +272,7 @@ var _ = Describe("synchronous replica configuration with the new API", func() { } cluster.Status = apiv1.ClusterStatus{} - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("")) + Expect(explicitSynchronousStandbyNames(cluster).IsZero()).To(BeTrue()) }) It("does not include pods that do not report the status", func() { @@ -253,7 +292,12 @@ var _ = Describe("synchronous replica configuration with the new API", func() { }, InstanceNames: []string{"one", "two", "three"}, } - Expect(explicitSynchronousStandbyNames(cluster)).To(Equal("FIRST 1 (\"three\")")) + + Expect(explicitSynchronousStandbyNames(cluster)).To(Equal(postgres.SynchronousStandbyNamesConfig{ + Method: "FIRST", + NumSync: 1, + StandbyNames: []string{"three"}, + })) }) }) }) diff --git a/pkg/postgres/replication/legacy.go b/pkg/postgres/replication/legacy.go index cd56d8c6fc..b3e552193c 100644 --- a/pkg/postgres/replication/legacy.go +++ b/pkg/postgres/replication/legacy.go @@ -20,38 +20,44 @@ SPDX-License-Identifier: Apache-2.0 package replication import ( - "fmt" + "context" "sort" - "strings" "github.com/cloudnative-pg/machinery/pkg/log" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) // legacySynchronousStandbyNames sets the standby node list with the // legacy API -func legacySynchronousStandbyNames(cluster *apiv1.Cluster) string { - syncReplicas, syncReplicasElectable := getSyncReplicasData(cluster) +func legacySynchronousStandbyNames(ctx context.Context, cluster *apiv1.Cluster) postgres.SynchronousStandbyNamesConfig { + syncReplicas, syncReplicasElectable := getSyncReplicasData(ctx, cluster) if syncReplicasElectable != nil && syncReplicas > 0 { escapedReplicas := make([]string, len(syncReplicasElectable)) for idx, name := range syncReplicasElectable { escapedReplicas[idx] = escapePostgresConfLiteral(name) } - return fmt.Sprintf( - "ANY %v (%v)", - syncReplicas, - strings.Join(escapedReplicas, ",")) + return postgres.SynchronousStandbyNamesConfig{ + Method: "ANY", + NumSync: syncReplicas, + StandbyNames: syncReplicasElectable, + } } - return "" + return postgres.SynchronousStandbyNamesConfig{} } // getSyncReplicasData computes the actual number of required synchronous replicas and the names of // the electable sync replicas given the 
requested min, max, the number of ready replicas in the cluster and the sync // replicas constraints (if any) -func getSyncReplicasData(cluster *apiv1.Cluster) (syncReplicas int, electableSyncReplicas []string) { +func getSyncReplicasData( + ctx context.Context, + cluster *apiv1.Cluster, +) (syncReplicas int, electableSyncReplicas []string) { + contextLogger := log.FromContext(ctx) + // We start with the number of healthy replicas (healthy pods minus one) // and verify it is greater than 0 and between minSyncReplicas and maxSyncReplicas. // Formula: 1 <= minSyncReplicas <= SyncReplicas <= maxSyncReplicas < readyReplicas @@ -76,16 +82,16 @@ func getSyncReplicasData(cluster *apiv1.Cluster) (syncReplicas int, electableSyn // temporarily unresponsive system) if readyReplicas < cluster.Spec.MinSyncReplicas { syncReplicas = readyReplicas - log.Warning("Ignore minSyncReplicas to enforce self-healing", + contextLogger.Warning("Ignore minSyncReplicas to enforce self-healing", "syncReplicas", readyReplicas, "minSyncReplicas", cluster.Spec.MinSyncReplicas, "maxSyncReplicas", cluster.Spec.MaxSyncReplicas) } - electableSyncReplicas = getElectableSyncReplicas(cluster) + electableSyncReplicas = getElectableSyncReplicas(ctx, cluster) numberOfElectableSyncReplicas := len(electableSyncReplicas) if numberOfElectableSyncReplicas < syncReplicas { - log.Warning("lowering sync replicas due to not enough electable instances for sync replication "+ + contextLogger.Warning("lowering sync replicas due to not enough electable instances for sync replication "+ "given the constraints", "electableSyncReplicasWithoutConstraints", syncReplicas, "electableSyncReplicasWithConstraints", numberOfElectableSyncReplicas, @@ -97,7 +103,9 @@ func getSyncReplicasData(cluster *apiv1.Cluster) (syncReplicas int, electableSyn } // getElectableSyncReplicas computes the names of the instances that can be elected to sync replicas -func getElectableSyncReplicas(cluster *apiv1.Cluster) []string { +func getElectableSyncReplicas(ctx context.Context, cluster *apiv1.Cluster) []string { + contextLogger := log.FromContext(ctx) + nonPrimaryInstances := getSortedNonPrimaryHealthyInstanceNames(cluster) topology := cluster.Status.Topology @@ -111,20 +119,20 @@ func getElectableSyncReplicas(cluster *apiv1.Cluster) []string { // The same happens if we have failed to extract topology, we want to preserve the current status by adding all the // electable instances. 
if !topology.SuccessfullyExtracted {
-		log.Warning("topology data not extracted, falling back to all electable sync replicas")
+		contextLogger.Warning("topology data not extracted, falling back to all electable sync replicas")
 		return nonPrimaryInstances
 	}
 
 	currentPrimary := apiv1.PodName(cluster.Status.CurrentPrimary)
 	// given that the constraints are based off the primary instance if we still don't have one we cannot continue
 	if currentPrimary == "" {
-		log.Warning("no primary elected, cannot compute electable sync replicas")
+		contextLogger.Warning("no primary elected, cannot compute electable sync replicas")
 		return nil
 	}
 
 	currentPrimaryTopology, ok := topology.Instances[currentPrimary]
 	if !ok {
-		log.Warning("current primary topology not yet extracted, cannot computed electable sync replicas",
+		contextLogger.Warning("current primary topology not yet extracted, cannot compute electable sync replicas",
 			"instanceName", currentPrimary)
 		return nil
 	}
@@ -136,7 +144,7 @@
 		instanceTopology, ok := topology.Instances[name]
 		// if we still don't have the topology data for the node we skip it from inserting it in the electable pool
 		if !ok {
-			log.Warning("current instance topology not found", "instanceName", name)
+			contextLogger.Warning("current instance topology not found", "instanceName", name)
 			continue
 		}
diff --git a/pkg/postgres/replication/legacy_test.go b/pkg/postgres/replication/legacy_test.go
index 3f87f07558..c29a099ac0 100644
--- a/pkg/postgres/replication/legacy_test.go
+++ b/pkg/postgres/replication/legacy_test.go
@@ -21,20 +21,21 @@ package replication
 
 import (
 	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
 
 	. "github.com/onsi/ginkgo/v2"
 	.
"github.com/onsi/gomega" ) var _ = Describe("ensuring the correctness of synchronous replica data calculation", func() { - It("should return all the non primary pods as electable", func() { + It("should return all the non primary pods as electable", func(ctx SpecContext) { cluster := createFakeCluster("example") - number, names := getSyncReplicasData(cluster) + number, names := getSyncReplicasData(ctx, cluster) Expect(number).To(Equal(2)) Expect(names).To(Equal([]string{"example-2", "example-3"})) }) - It("should return only the pod in the different AZ", func() { + It("should return only the pod in the different AZ", func(ctx SpecContext) { const ( primaryPod = "exampleAntiAffinity-1" sameZonePod = "exampleAntiAffinity-2" @@ -61,13 +62,13 @@ var _ = Describe("ensuring the correctness of synchronous replica data calculati }, } - number, names := getSyncReplicasData(cluster) + number, names := getSyncReplicasData(ctx, cluster) Expect(number).To(Equal(1)) Expect(names).To(Equal([]string{differentAZPod})) }) - It("should lower the synchronous replica number to enforce self-healing", func() { + It("should lower the synchronous replica number to enforce self-healing", func(ctx SpecContext) { cluster := createFakeCluster("exampleOnePod") cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "exampleOnePod-1", @@ -76,14 +77,14 @@ var _ = Describe("ensuring the correctness of synchronous replica data calculati apiv1.PodFailed: {"exampleOnePod-2", "exampleOnePod-3"}, }, } - number, names := getSyncReplicasData(cluster) + number, names := getSyncReplicasData(ctx, cluster) Expect(number).To(BeZero()) Expect(names).To(BeEmpty()) Expect(cluster.Spec.MinSyncReplicas).To(Equal(1)) }) - It("should behave correctly if there is no ready host", func() { + It("should behave correctly if there is no ready host", func(ctx SpecContext) { cluster := createFakeCluster("exampleNoPods") cluster.Status = apiv1.ClusterStatus{ CurrentPrimary: "example-1", @@ -91,7 +92,7 @@ var _ = Describe("ensuring the correctness of synchronous replica data calculati apiv1.PodFailed: {"exampleNoPods-1", "exampleNoPods-2", "exampleNoPods-3"}, }, } - number, names := getSyncReplicasData(cluster) + number, names := getSyncReplicasData(ctx, cluster) Expect(number).To(BeZero()) Expect(names).To(BeEmpty()) @@ -99,7 +100,7 @@ var _ = Describe("ensuring the correctness of synchronous replica data calculati }) var _ = Describe("legacy synchronous_standby_names configuration", func() { - It("generate the correct value for the synchronous_standby_names parameter", func() { + It("generate the correct value for the synchronous_standby_names parameter", func(ctx SpecContext) { cluster := createFakeCluster("exampleNoPods") cluster.Spec.MinSyncReplicas = 2 cluster.Spec.MaxSyncReplicas = 2 @@ -109,8 +110,14 @@ var _ = Describe("legacy synchronous_standby_names configuration", func() { apiv1.PodHealthy: {"one", "two", "three"}, }, } - synchronousStandbyNames := legacySynchronousStandbyNames(cluster) - Expect(synchronousStandbyNames). 
- To(Equal("ANY 2 (\"one\",\"three\",\"two\")")) + synchronousStandbyNames := legacySynchronousStandbyNames(ctx, cluster) + + Expect(synchronousStandbyNames).To(Equal( + postgres.SynchronousStandbyNamesConfig{ + Method: "ANY", + NumSync: 2, + StandbyNames: []string{"one", "three", "two"}, + }, + )) }) }) diff --git a/pkg/postgres/replication/replication.go b/pkg/postgres/replication/replication.go index 1864f81df2..e791731dc4 100644 --- a/pkg/postgres/replication/replication.go +++ b/pkg/postgres/replication/replication.go @@ -20,25 +20,28 @@ SPDX-License-Identifier: Apache-2.0 package replication import ( + "context" + apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/postgres" ) // GetExpectedSyncReplicasNumber computes the actual number of required synchronous replicas -func GetExpectedSyncReplicasNumber(cluster *apiv1.Cluster) int { +func GetExpectedSyncReplicasNumber(ctx context.Context, cluster *apiv1.Cluster) int { if cluster.Spec.PostgresConfiguration.Synchronous != nil { return cluster.Spec.PostgresConfiguration.Synchronous.Number } - syncReplicas, _ := getSyncReplicasData(cluster) + syncReplicas, _ := getSyncReplicasData(ctx, cluster) return syncReplicas } // GetSynchronousStandbyNames gets the value to be applied // to synchronous_standby_names -func GetSynchronousStandbyNames(cluster *apiv1.Cluster) string { +func GetSynchronousStandbyNames(ctx context.Context, cluster *apiv1.Cluster) postgres.SynchronousStandbyNamesConfig { if cluster.Spec.PostgresConfiguration.Synchronous != nil { return explicitSynchronousStandbyNames(cluster) } - return legacySynchronousStandbyNames(cluster) + return legacySynchronousStandbyNames(ctx, cluster) } diff --git a/pkg/specs/roles.go b/pkg/specs/roles.go index 096b2d1079..4567307d5c 100644 --- a/pkg/specs/roles.go +++ b/pkg/specs/roles.go @@ -213,6 +213,39 @@ func CreateRole(cluster apiv1.Cluster, backupOrigin *apiv1.Backup) rbacv1.Role { "update", }, }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "failoverquorums", + }, + Verbs: []string{ + "get", + "list", + "watch", + }, + ResourceNames: []string{ + cluster.Name, + }, + }, + { + APIGroups: []string{ + "postgresql.cnpg.io", + }, + Resources: []string{ + "failoverquorums/status", + }, + Verbs: []string{ + "get", + "patch", + "update", + "watch", + }, + ResourceNames: []string{ + cluster.Name, + }, + }, } return rbacv1.Role{ diff --git a/pkg/specs/roles_test.go b/pkg/specs/roles_test.go index 98ce4213fa..8808163d46 100644 --- a/pkg/specs/roles_test.go +++ b/pkg/specs/roles_test.go @@ -168,7 +168,7 @@ var _ = Describe("Roles", func() { serviceAccount := CreateRole(cluster, nil) Expect(serviceAccount.Name).To(Equal(cluster.Name)) Expect(serviceAccount.Namespace).To(Equal(cluster.Namespace)) - Expect(serviceAccount.Rules).To(HaveLen(13)) + Expect(serviceAccount.Rules).To(HaveLen(15)) }) It("should contain every secret of the origin backup and backup configuration of every external cluster", func() { diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index 1ce7eddfe0..f6cbb71363 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -257,6 +257,13 @@ const ( // WebhookValidationAnnotationName is the name of the annotation describing if // the validation webhook should be enabled or disabled WebhookValidationAnnotationName = MetadataNamespace + "/validation" + + // FailoverQuorumAnnotationName is the name of the annotation that allows the + // user to 
enable synchronous quorum failover protection.
+	//
+	// This feature enables a quorum-based check before failover, ensuring
+	// no data loss at the expense of availability.
+	FailoverQuorumAnnotationName = MetadataNamespace + "/failoverQuorum"
 )
 
 type annotationStatus string

From f91a448084f19f23faacd81c5606a0b527118826 Mon Sep 17 00:00:00 2001
From: Armando Ruocco
Date: Tue, 29 Jul 2025 13:20:55 +0200
Subject: [PATCH 745/836] fix(slots): handle context cancellation in
 `Replicator` `Start` (#8138)

Wrap the initial config channel read in a select statement to properly
handle context cancellation. This prevents the goroutine from blocking
indefinitely when the context is cancelled before the config is
available.

Signed-off-by: Armando Ruocco
---
 internal/management/controller/slots/runner/runner.go | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/internal/management/controller/slots/runner/runner.go b/internal/management/controller/slots/runner/runner.go
index 0ded0031de..e3bd9e12d0 100644
--- a/internal/management/controller/slots/runner/runner.go
+++ b/internal/management/controller/slots/runner/runner.go
@@ -49,7 +49,13 @@ func NewReplicator(instance *postgres.Instance) *Replicator {
 func (sr *Replicator) Start(ctx context.Context) error {
 	contextLog := log.FromContext(ctx).WithName("Replicator")
 	go func() {
-		config := <-sr.instance.SlotReplicatorChan()
+		var config *apiv1.ReplicationSlotsConfiguration
+		select {
+		case config = <-sr.instance.SlotReplicatorChan():
+		case <-ctx.Done():
+			return
+		}
+
 		updateInterval := config.GetUpdateInterval()
 		ticker := time.NewTicker(updateInterval)
 

From e7495163fcc3c5bc7b0bfe6f736c0621e33173f5 Mon Sep 17 00:00:00 2001
From: Francesco Canovai
Date: Tue, 29 Jul 2025 14:03:26 +0200
Subject: [PATCH 746/836] refactor: renamed failoverQuorum annotation (#8169)

Use `alpha.cnpg.io/failoverQuorum` instead of `cnpg.io/failoverQuorum`.

Closes #8168

Signed-off-by: Francesco Canovai
---
 docs/src/failover.md                                      | 6 +++---
 docs/src/samples/cluster-example-syncreplicas-quorum.yaml | 3 +--
 pkg/utils/labels_annotations.go                           | 2 +-
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/docs/src/failover.md b/docs/src/failover.md
index 1c843ed600..ab78aa70ab 100644
--- a/docs/src/failover.md
+++ b/docs/src/failover.md
@@ -134,12 +134,12 @@ This feature allows users to choose their preferred trade-off between data
 durability and data availability.
 
 Failover quorum can be enabled by setting the annotation
-`cnpg.io/failoverQuorum="true"` in the `Cluster` resource.
+`alpha.cnpg.io/failoverQuorum="true"` in the `Cluster` resource.
 
 !!! info
     When this feature is out of the experimental phase, the annotation
-    `cnpg.io/failoverQuorum` will be replaced by a configuration option in the
-    `Cluster` resource.
+    `alpha.cnpg.io/failoverQuorum` will be replaced by a configuration option in
+    the `Cluster` resource.
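For orientation, a minimal sketch of a quorum-enabled `Cluster` may help at this point. It mirrors the sample manifest updated by this patch and the webhook rules added earlier in the series (more than two instances, and a synchronous stanza whose `number` exceeds the external standby names); the cluster name and storage size are illustrative only:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: quorum-example            # illustrative name
  annotations:
    alpha.cnpg.io/failoverQuorum: "true"
spec:
  instances: 3                    # the webhook requires more than 2 instances
  postgresql:
    synchronous:
      method: any                 # quorum-based synchronous replication
      number: 1                   # must exceed standbyNamesPre + standbyNamesPost (both empty here)
  storage:
    size: 1G
```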
### How it works diff --git a/docs/src/samples/cluster-example-syncreplicas-quorum.yaml b/docs/src/samples/cluster-example-syncreplicas-quorum.yaml index 1836d2edeb..ba3b6e9226 100644 --- a/docs/src/samples/cluster-example-syncreplicas-quorum.yaml +++ b/docs/src/samples/cluster-example-syncreplicas-quorum.yaml @@ -3,7 +3,7 @@ kind: Cluster metadata: name: cluster-example annotations: - cnpg.io/failoverQuorum: "t" + alpha.cnpg.io/failoverQuorum: "true" spec: instances: 3 @@ -14,4 +14,3 @@ spec: storage: size: 1G - diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go index f6cbb71363..e89947cfdf 100644 --- a/pkg/utils/labels_annotations.go +++ b/pkg/utils/labels_annotations.go @@ -263,7 +263,7 @@ const ( // // This feature enables quorum-based check before failover, ensuring // no data loss at the expense of availability. - FailoverQuorumAnnotationName = MetadataNamespace + "/failoverQuorum" + FailoverQuorumAnnotationName = AlphaMetadataNamespace + "/failoverQuorum" ) type annotationStatus string From 54e967f80f7093c969eb0bfca9a1b7339fd13ba8 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 29 Jul 2025 14:07:58 +0200 Subject: [PATCH 747/836] docs: release notes for 1.27.0-rc1 (#8106) Also include a shell script to detect in which release branches a PR is present. Closes #8067 Signed-off-by: Gabriele Bartolini Signed-off-by: Francesco Canovai Co-authored-by: Francesco Canovai --- .wordlist-en-custom.txt | 2 + docs/src/installation_upgrade.md | 9 ++- docs/src/preview_version.md | 5 +- docs/src/release_notes.md | 3 +- docs/src/release_notes/v1.27.md | 84 ++++++++++++++++++++++++++++ docs/src/supported_releases.md | 5 +- hack/check-pr-in-release-branches.sh | 32 +++++++++++ 7 files changed, 134 insertions(+), 6 deletions(-) create mode 100644 docs/src/release_notes/v1.27.md create mode 100755 hack/check-pr-in-release-branches.sh diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 64c796faa1..3106d67a3d 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -51,6 +51,7 @@ BootstrapPgBaseBackup BootstrapRecovery Burstable ByStatus +CAs CIS CKA CN @@ -1122,6 +1123,7 @@ pgpass pgstatstatements phaseReason pid +pinger pitr plpgsql pluggable diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index ea81383716..628cba4a43 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -255,12 +255,19 @@ only the operator itself. + The current preview version is **1.27.0-rc1**. For more information on the current preview version and how to test, please view the links below: - [Announcement](https://cloudnative-pg.io/releases/cloudnative-pg-1-27.0-rc1-released/) - [Documentation](https://cloudnative-pg.io/documentation/preview/) ---> diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md index ea434985d4..2feb0dd7fd 100644 --- a/docs/src/release_notes.md +++ b/docs/src/release_notes.md @@ -3,7 +3,8 @@ History of user-visible changes for CloudNativePG, classified for each minor release. - + +- [CloudNativePG 1.27 - Release Candidate](release_notes/v1.27.md) - [CloudNativePG 1.26](release_notes/v1.26.md) - [CloudNativePG 1.25](release_notes/v1.25.md) diff --git a/docs/src/release_notes/v1.27.md b/docs/src/release_notes/v1.27.md new file mode 100644 index 0000000000..09bd659bf6 --- /dev/null +++ b/docs/src/release_notes/v1.27.md @@ -0,0 +1,84 @@ +# Release notes for CloudNativePG 1.27 + +History of user-visible changes in the 1.27 minor release of CloudNativePG. 
+ +For a complete list of changes, please refer to the +[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.27) +on the release branch in GitHub. + +## Version 1.27.0-rc1 + +**Release date:** Jul 29, 2025 + +### Important changes: + +- A change in the default behavior of the [liveness probe](instance_manager.md#liveness-probe), + now enforcing the [shutdown of an isolated primary](instance_manager.md#primary-isolation) + within the `livenessProbeTimeout` (30 seconds), will require a restart of your pods. + +### Features: + +- **Logical decoding slot synchronization in HA clusters**: Added the + `synchronizeLogicalDecoding` field under + `spec.replicationSlots.highAvailability` to enable automatic synchronization of + logical decoding slots across high-availability clusters, ensuring logical + replication subscribers continue seamlessly after a publisher failover + ([#7931](https://github.com/cloudnative-pg/cloudnative-pg/pull/7931)). + +- **Primary Isolation Check**: Promoted to stable the liveness pinger + experimental feature introduced in 1.26, adding the + `.spec.probes.liveness.isolationCheck` section to enable primary isolation + checks in the liveness probe by default. This improves the detection and + handling of primary connectivity issues in Kubernetes environments + ([#7845](https://github.com/cloudnative-pg/cloudnative-pg/pull/7845)). + +### Enhancements: + + +- Introduced an opt-in experimental feature that enables quorum-based failover + to improve safety and data durability during failover events. This feature, + also called failover quorum, can be activated via the + `alpha.cnpg.io/failoverQuorum` annotation. + ([#7572](https://github.com/cloudnative-pg/cloudnative-pg/pull/7572)). + +- Added support for user maps for predefined users such as `streaming_replica`, + allowing the use of self-managed client certificates with different Common + Names in environments with strict policies or shared CAs, while still enabling + replicas to join clusters using the `streaming_replica` role + ([#7725](https://github.com/cloudnative-pg/cloudnative-pg/pull/7725)). + +- Added a new `PhaseFailurePlugin` phase in the `Cluster` status to improve + observability of plugin-related failures + ([#7988](https://github.com/cloudnative-pg/cloudnative-pg/pull/7988)). + +- Made the `Backup.spec` field immutable after creation, ensuring consistency + and predictability in backup operations + ([#7904](https://github.com/cloudnative-pg/cloudnative-pg/pull/7904)). + +- Added `fqdn-uri` and `fqdn-jdbc-uri` fields in the user secret to simplify + the retrieval of fully qualified domain name-based connection strings + ([#7852](https://github.com/cloudnative-pg/cloudnative-pg/pull/7852)). + +- CNPG-I: + + - Added `Postgres` interface support to the CNPG-I operator, continuing the + transition toward a plugin-based architecture + ([#7179](https://github.com/cloudnative-pg/cloudnative-pg/pull/7179)). + + - Added `metrics` capabilities to the CNPG-I instance webserver, enabling + metrics exposure directly from the instance for better observability + ([#8033](https://github.com/cloudnative-pg/cloudnative-pg/pull/8033)). 
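As a rough illustration of the two headline features, the snippet below shows where the new knobs live in a `Cluster` manifest. Only the paths `spec.replicationSlots.highAvailability.synchronizeLogicalDecoding` and `spec.probes.liveness.isolationCheck` come from these notes; the `enabled` flags and the remaining scaffolding are assumptions made for the sketch:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example           # illustrative name
spec:
  instances: 3
  replicationSlots:
    highAvailability:
      enabled: true
      # New in 1.27: keep logical decoding slots aligned across the HA cluster
      synchronizeLogicalDecoding: true
  probes:
    liveness:
      isolationCheck:
        enabled: true             # assumed sub-field; check the 1.27 API reference
  storage:
    size: 1Gi
```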
+
+### Supported versions
+
+- Kubernetes 1.33, 1.32, and 1.31
+- PostgreSQL 17, 16, 15, 14, and 13
+  - PostgreSQL 17.5 is the default image
+  - PostgreSQL 13 support ends on November 12, 2025
diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md
index 51ff33a0c9..625826e72d 100644
--- a/docs/src/supported_releases.md
+++ b/docs/src/supported_releases.md
@@ -83,6 +83,7 @@ Git tags for versions are prefixed with `v`.
 
 | Version         | Currently supported  | Release date | End of life     | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
 |-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------|
+| 1.27.x          | No (preview)         | ~ Aug 2025   | ~ Feb 2026      | 1.31, 1.32, 1.33              | 1.30, 1.29                | 13 - 17                     |
 | 1.26.x          | Yes                  | May 23, 2025 | ~ Nov 2025      | 1.30, 1.31, 1.32, 1.33        | 1.29                      | 13 - 17                     |
 | 1.25.x          | Yes                  | Dec 23, 2024 | 22 Aug 2025     | 1.29, 1.30, 1.31, 1.32        | 1.33                      | 13 - 17                     |
 | main            | No, development only |              |                 |                               |                           | 13 - 17                     |
@@ -122,9 +123,9 @@ version of PostgreSQL, we might not be able to help you.
 
 | Version | Release date | End of life |
 |---------|--------------|-------------|
-| 1.27.0  | ~ Aug, 2025  | ~ Feb, 2026 |
 | 1.28.0  | ~ Nov, 2025  | ~ May, 2026 |
-| 1.29.0  | ~ Feb, 2025  | ~ Aug, 2026 |
+| 1.29.0  | ~ Feb, 2026  | ~ Aug, 2026 |
+| 1.30.0  | ~ May, 2026  | ~ Nov, 2026 |
 
 !!! Note
     Feature freeze occurs 1-2 weeks before the release, at which point a
diff --git a/hack/check-pr-in-release-branches.sh b/hack/check-pr-in-release-branches.sh
new file mode 100755
index 0000000000..25d9e57add
--- /dev/null
+++ b/hack/check-pr-in-release-branches.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+#
+# check-pr-in-release-branches.sh
+#
+# Example: ./hack/check-pr-in-release-branches.sh '#7988'
+
+set -euo pipefail
+
+if [ $# -lt 1 ]; then
+  echo "Usage: $0 <search-string>"
+  exit 1
+fi
+
+search_string="$1"
+shift
+
+branches="main $(git for-each-ref --format '%(refname)' 'refs/heads/release*' | sed -e 's@refs/heads/@@' | sort -rV)"
+found=0
+for branch in $branches; do
+  echo "Checking branch: $branch"
+  if git log "origin/$branch" --grep="$search_string" -i --oneline | grep -q .; then
+    echo "✅ Found \"$search_string\" in commits on branch: $branch"
+    found=1
+  else
+    echo "❌ \"$search_string\" not found in commits on branch: $branch"
+  fi
+done
+
+if [ $found -eq 0 ]; then
+  echo "String \"$search_string\" not found in any specified branches."
+  exit 1
+fi

From f3b36d3a4b2f3b377b4eb4c533d56a30f945d1a6 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Tue, 29 Jul 2025 15:35:06 +0200
Subject: [PATCH 748/836] chore: add missing CRD owned definition to CSV (#8173)

Signed-off-by: Jonathan Gonzalez V.
--- .wordlist-en-custom.txt | 1 + .../bases/cloudnative-pg.clusterserviceversion.yaml | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 3106d67a3d..633f983b42 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -833,6 +833,7 @@ externalclusters facto failover failoverDelay +failoverquorums failovers failureThreshold faq diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml index 83704fdaaa..00c0a11529 100644 --- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml +++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml @@ -1008,3 +1008,12 @@ spec: - path: message displayName: Message description: Message is the reconciliation output message + - kind: FailoverQuorum + name: failoverquorums.postgresql.cnpg.io + displayName: Failover Quorum + description: FailoverQuorum contains the information about the current failover quorum status of a PG cluster + version: v1 + resources: + - kind: Cluster + name: '' + version: v1 From 82df564c229aa10070bcf118d514c88972c57d10 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 29 Jul 2025 15:50:36 +0200 Subject: [PATCH 749/836] docs: add dynamic loading of extensions to release notes (#8175) Signed-off-by: Gabriele Bartolini --- docs/src/release_notes/v1.27.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/src/release_notes/v1.27.md b/docs/src/release_notes/v1.27.md index 09bd659bf6..7bb702f421 100644 --- a/docs/src/release_notes/v1.27.md +++ b/docs/src/release_notes/v1.27.md @@ -18,6 +18,13 @@ on the release branch in GitHub. ### Features: +- **Dynamic loading of PostgreSQL extensions**: Introduced the + `.spec.postgresql.extensions` stanza for mounting PostgreSQL extensions, + packaged as OCI-compliant container images, as read-only and immutable volumes + inside instance pods. This allows dynamic extension management without + rebuilding base images. + ([#7991](https://github.com/cloudnative-pg/cloudnative-pg/pull/7991)). + - **Logical decoding slot synchronization in HA clusters**: Added the `synchronizeLogicalDecoding` field under `spec.replicationSlots.highAvailability` to enable automatic synchronization of From cb57c38a266351771813ed6f843749213db7ad11 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 29 Jul 2025 16:43:10 +0200 Subject: [PATCH 750/836] Version tag to 1.27.0-rc1 (#8177) Automated PR. Will trigger the 1.27.0-rc1 release when approved. Signed-off-by: Francesco Canovai Co-authored-by: Francesco Canovai --- docs/src/installation_upgrade.md | 6 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.27.0-rc1.yaml | 18219 +++++++++++++++++++++++++++++ 4 files changed, 18240 insertions(+), 21 deletions(-) create mode 100644 releases/cnpg-1.27.0-rc1.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 628cba4a43..ac4d8e4668 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -8,12 +8,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. 
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.1.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.27.0-rc1.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.26/releases/cnpg-1.26.1.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.27.0-rc1.yaml ``` You can verify that with: @@ -74,7 +74,7 @@ specific minor release, you can just run: ```sh curl -sSfL \ - https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.26/manifests/operator-manifest.yaml | \ + https://raw.githubusercontent.com/cloudnative-pg/artifacts/release-1.27/manifests/operator-manifest.yaml | \ kubectl apply --server-side -f - ``` diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 3d32e546ac..d0526147d3 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -31,11 +31,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.26.1 release of the plugin, for an Intel based +For example, let's install the 1.27.0-rc1 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.1/kubectl-cnpg_1.26.1_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.27.0-rc1/kubectl-cnpg_1.27.0-rc1_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -46,17 +46,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.26.1) ... -Setting up cnpg (1.26.1) ... +Unpacking cnpg (1.27.0-rc1) ... +Setting up cnpg (1.27.0-rc1) ... ``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.26.1 release for an +As in the example for `.rpm` packages, let's install the 1.27.0-rc1 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.26.1/kubectl-cnpg_1.26.1_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.27.0-rc1/kubectl-cnpg_1.27.0-rc1_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -70,7 +70,7 @@ Dependencies resolved. 
Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.26.1 @commandline 20 M + cnpg x86_64 1.27.0-rc1 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.26.1 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.26.1 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.26.1 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.27.0-rc1 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.27.0-rc1 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.27.0-rc1 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.26.1 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.26.1 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.26.1 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.27.0-rc1 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.27.0-rc1 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.27.0-rc1 k8s-eu-worker ``` With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.1","build":{"Version":"1.26.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.27.0-rc1","build":{"Version":"1.27.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.26.1","build":{"Version":"1.26.1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.27.0-rc1","build":{"Version":"1.27.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 1cb27a4cc4..435b41d8fd 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.26.1" + Version = "1.27.0-rc1" // DefaultImageName is the default image used by the operator to 
create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.26.1" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0-rc1" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.26.1" + buildVersion = "1.27.0-rc1" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.27.0-rc1.yaml b/releases/cnpg-1.27.0-rc1.yaml new file mode 100644 index 0000000000..73cb75ff2e --- /dev/null +++ b/releases/cnpg-1.27.0-rc1.yaml @@ -0,0 +1,18219 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. 
+ enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + x-kubernetes-validations: + - message: BackupSpec is immutable once set + rule: oldSelf == self + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. 
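Putting the Backup spec schema above into practice, a minimal on-demand backup request could be sketched as follows, using only fields defined in that schema; the resource names are hypothetical.

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: backup-example             # hypothetical name
spec:
  cluster:
    name: cluster-example          # the Cluster to back up (hypothetical)
  method: volumeSnapshot           # one of: barmanObjectStore (default), volumeSnapshot, plugin
  online: true                     # hot backup; overrides the cluster-level default
  onlineConfiguration:
    immediateCheckpoint: true      # complete the initial checkpoint as soon as possible
    waitForArchive: true           # wait for the required WAL to be archived
  target: prefer-standby           # run on the most updated standby, if available
```

Note the `x-kubernetes-validations` rule above: the spec is immutable once set, so requesting another backup means creating a new Backup resource.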
+ type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. 
+ type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set + when type is PG_TABLESPACE + type: string + type: + description: Type is tho role of the snapshot in the cluster, + such as PG_DATA, PG_WAL and PG_TABLESPACE + type: string + required: + - name + - type + type: object + type: array + type: object + startedAt: + description: When the backup was started + format: date-time + type: string + stoppedAt: + description: When the backup was terminated + format: date-time + type: string + tablespaceMapFile: + description: Tablespace map file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusterimagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ClusterImageCatalog + listKind: ClusterImageCatalogList + plural: clusterimagecatalogs + singular: clusterimagecatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterImageCatalog is the Schema for the clusterimagecatalogs + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ClusterImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. 
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
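The ClusterImageCatalog schema above constrains `images` to between 1 and 8 entries, each carrying a PostgreSQL major version of at least 10 that is unique within the catalog. A conforming catalog could be sketched as follows; the catalog name and the 16.x tag are illustrative.

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ClusterImageCatalog
metadata:
  name: postgresql                 # hypothetical catalog name
spec:
  images:
    - major: 16
      image: ghcr.io/cloudnative-pg/postgresql:16.9   # illustrative tag
    - major: 17
      image: ghcr.io/cloudnative-pg/postgresql:17.5   # matches the operator default image above
```

Repeating a `major` value would be rejected by the CEL rule `self.all(e, self.filter(f, f.major==e.major).size() == 1)`.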
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. 
+ This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is map of key-value pairs used to define the nodes on which + the pods can run. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + podAntiAffinityType: + description: |- + PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be + considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or + "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are + added if all the existing nodes don't match the required pod anti-affinity rule. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: string + tolerations: + description: |- + Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run + on tainted nodes. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. 
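To ground the `affinity` stanza documented above, here is a sketch of a Cluster fragment exercising its main knobs: operator-generated anti-affinity made mandatory, plus node selection and tolerations. All label and taint names are hypothetical.

```yaml
spec:
  instances: 3
  affinity:
    enablePodAntiAffinity: true              # let the operator generate anti-affinity terms
    podAntiAffinityType: required            # pods stay pending rather than co-schedule
    topologyKey: kubernetes.io/hostname      # spread instances across nodes
    nodeSelector:
      workload: postgres                     # hypothetical node label
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: node-role.example.com/postgres   # hypothetical label key
                operator: Exists
    tolerations:
      - key: dedicated                       # hypothetical taint key
        operator: Equal
        value: postgres
        effect: NoSchedule
```

As the `podAntiAffinityType` description warns, `required` can leave instances pending until nodes satisfying the anti-affinity rule are added.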
+ enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage + JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the + region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + type: object + required: + - destinationPath + type: object + retentionPolicy: + description: |- + RetentionPolicy is the retention policy to be used for backups + and WALs (i.e. '60d'). The retention policy is expressed in the form + of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` - + days, weeks, months. + It's currently only applicable when using the BarmanObjectStore method. 
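As a sketch of how the `barmanObjectStore` and `retentionPolicy` fields described above fit together, a cluster backing up to S3 might look like this. The bucket path and Secret names are hypothetical, not taken from this patch.

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  backup:
    retentionPolicy: 30d                        # matches ^[1-9][0-9]*[dwm]$
    barmanObjectStore:
      destinationPath: s3://my-bucket/backups   # hypothetical bucket
      s3Credentials:
        accessKeyId:
          name: aws-creds                       # hypothetical Secret
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: aws-creds
          key: SECRET_ACCESS_KEY
      wal:
        compression: gzip                       # gzip, bzip2, lz4, snappy, xz, or zstd
        maxParallel: 4                          # archive up to 4 WAL files at a time
      data:
        compression: gzip                       # gzip, bzip2, or snappy for base backups
        jobs: 2                                 # parallel upload jobs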
+ pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. + type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. 
+ This option requires `localeProvider` to be set to `builtin`. + Available from PostgreSQL 17. + type: string + dataChecksums: + description: |- + Whether the `-k` option should be passed to initdb, + enabling checksums on data pages (default: `false`) + type: boolean + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + encoding: + description: The value to be passed as option `--encoding` + for initdb (default:`UTF8`) + type: string + icuLocale: + description: |- + Specifies the ICU locale when the ICU provider is used. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 15. + type: string + icuRules: + description: |- + Specifies additional collation rules to customize the behavior of the default collation. + This option requires `localeProvider` to be set to `icu`. + Available from PostgreSQL 16. + type: string + import: + description: |- + Bootstraps the new cluster by importing data from an existing PostgreSQL + instance using logical backup (`pg_dump` and `pg_restore`) + properties: + databases: + description: The databases to import + items: + type: string + type: array + pgDumpExtraOptions: + description: |- + List of custom options to pass to the `pg_dump` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + pgRestoreExtraOptions: + description: |- + List of custom options to pass to the `pg_restore` command. IMPORTANT: + Use these options with caution and at your own risk, as the operator + does not validate their content. Be aware that certain options may + conflict with the operator's intended functionality or design. + items: + type: string + type: array + postImportApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after is imported - to be used with extreme care + (by default empty). Only available in microservice type. + items: + type: string + type: array + roles: + description: The roles to import + items: + type: string + type: array + schemaOnly: + description: |- + When set to true, only the `pre-data` and `post-data` sections of + `pg_restore` are invoked, avoiding data import. Default: `false`. + type: boolean + source: + description: The source of the import + properties: + externalCluster: + description: The name of the externalCluster used + for import + type: string + required: + - externalCluster + type: object + type: + description: The import type. Can be `microservice` or + `monolith`. + enum: + - microservice + - monolith + type: string + required: + - databases + - source + - type + type: object + locale: + description: Sets the default collation order and character + classification in the new database. + type: string + localeCType: + description: The value to be passed as option `--lc-ctype` + for initdb (default:`C`) + type: string + localeCollate: + description: The value to be passed as option `--lc-collate` + for initdb (default:`C`) + type: string + localeProvider: + description: |- + This option sets the locale provider for databases created in the new cluster. + Available from PostgreSQL 16. + type: string + options: + description: |- + The list of options that must be passed to initdb when creating the cluster. 
+ Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. 
Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
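To illustrate the `certificates` section described above, a user-provided set of secrets might be wired in as below. All secret names and the DNS name are hypothetical; each secret must carry the keys listed in the field descriptions.

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 3
  storage:
    size: 1Gi
  certificates:
    serverCASecret: server-ca              # must contain ca.crt (and ca.key if serverTLSSecret is omitted)
    serverTLSSecret: server-tls            # kubernetes.io/tls secret used as ssl_cert_file/ssl_key_file
    clientCASecret: client-ca              # used as ssl_ca_file to validate client certificates
    replicationTLSSecret: replication-tls  # client certificate for the streaming_replica user
    serverAltDNSNames:
      - db.example.com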
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name of each environment + variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. 
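The `env` and `envFrom` fields follow the standard Kubernetes container conventions. A minimal sketch, with hypothetical ConfigMap and Secret names:

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 1
  storage:
    size: 1Gi
  env:
    - name: TZ
      value: Europe/Rome
    - name: HTTPS_PROXY
      valueFrom:
        secretKeyRef:
          name: proxy-config     # hypothetical Secret
          key: httpsProxy
  envFrom:
    - configMapRef:
        name: common-env         # hypothetical ConfigMap with shared variables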
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
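For the `ephemeralVolumesSizeLimit` stanza described above, a sketch capping the two ephemeral volumes; the quantities are arbitrary examples:

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-example
spec:
  instances: 1
  storage:
    size: 1Gi
  ephemeralVolumesSizeLimit:
    shm: 256Mi            # limit for the shared memory volume
    temporaryData: 1Gi    # limit for the temporary data volume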
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
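An `externalClusters` entry carrying a `barmanObjectStore` section is typically paired with `bootstrap.recovery` to clone a cluster from an object store. A hypothetical sketch follows; the bucket, server name, and cluster names are invented for illustration.

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: cluster-restore
spec:
  instances: 3
  storage:
    size: 1Gi
  bootstrap:
    recovery:
      source: origin                              # must match the externalClusters entry name
  externalClusters:
    - name: origin
      barmanObjectStore:
        destinationPath: s3://my-bucket/backups   # hypothetical bucket
        serverName: cluster-origin                # folder name of the source cluster in the store
        s3Credentials:
          inheritFromIAMRole: true                # role-based auth, no explicit keys
        wal:
          maxParallel: 8                          # restore up to 8 WAL files in parallel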
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
+ This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information + on service's port. 
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + updateStrategy: + default: patch + description: UpdateStrategy describes how the service + differences should be reconciled + enum: + - patch + - replace + type: string + required: + - selectorType + - serviceTemplate + type: object + type: array + disabledDefaultServices: + description: |- + DisabledDefaultServices is a list of service types that are disabled by default. + Valid values are "r", and "ro", representing read, and read-only services. + items: + description: |- + ServiceSelectorType describes a valid value for generating the service selectors. + It indicates which type of service the selector applies to, such as read-write, read, or read-only + enum: + - rw + - r + - ro + type: string + type: array + type: object + type: object + maxSyncReplicas: + default: 0 + description: |- + The target value for the synchronous replication quorum, that can be + decreased if the number of ready standbys is lower than this. + Undefined or 0 disable synchronous replication. + minimum: 0 + type: integer + minSyncReplicas: + default: 0 + description: |- + Minimum number of instances required in synchronous replication with the + primary. Undefined or 0 allow writes to complete when no standby is + available. + minimum: 0 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this cluster + properties: + customQueriesConfigMap: + description: The list of config maps containing the custom queries + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + customQueriesSecret: + description: The list of secrets containing the custom queries + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + disableDefaultQueries: + default: false + description: |- + Whether the default queries should be injected. + Set it to `true` if you don't want to inject default queries into the cluster. + Default: false. + type: boolean + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + tls: + description: |- + Configure TLS communication for the metrics endpoint. + Changing tls.enabled option will force a rollout of all instances. + properties: + enabled: + default: false + description: |- + Enable TLS for the monitoring endpoint. + Changing this option will force a rollout of all instances. + type: boolean + type: object + type: object + nodeMaintenanceWindow: + description: Define a maintenance window for the Kubernetes nodes + properties: + inProgress: + default: false + description: Is there a node maintenance activity in progress? + type: boolean + reusePVC: + default: true + description: |- + Reuse the existing PVC (wait for the node to come + up again) or not (recreate it elsewhere - when `instances` >1) + type: boolean + type: object + plugins: + description: |- + The plugins configuration, containing + any plugin to be loaded with the corresponding configuration + items: + description: |- + PluginConfiguration specifies a plugin that need to be loaded for this + cluster to be reconciled + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + type: array + postgresGID: + default: 26 + description: The GID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresUID: + default: 26 + description: The UID of the `postgres` user inside the image, defaults + to `26` + format: int64 + type: integer + postgresql: + description: Configuration of the PostgreSQL server + properties: + enableAlterSystem: + description: |- + If this parameter is true, the user will be able to invoke `ALTER SYSTEM` + on this CloudNativePG Cluster. + This should only be used for debugging and troubleshooting. + Defaults to false. + type: boolean + extensions: + description: The configuration of the extensions to be added + items: + description: |- + ExtensionConfiguration is the configuration used to add + PostgreSQL extensions to the Cluster. + properties: + dynamic_library_path: + description: |- + The list of directories inside the image which should be added to dynamic_library_path. + If not defined, defaults to "/lib". + items: + type: string + type: array + extension_control_path: + description: |- + The list of directories inside the image which should be added to extension_control_path. + If not defined, defaults to "/share". + items: + type: string + type: array + image: + description: The image containing the extension, required + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. 
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + x-kubernetes-validations: + - message: An image reference is required + rule: has(self.reference) + ld_library_path: + description: The list of directories inside the image which + should be added to ld_library_path. + items: + type: string + type: array + name: + description: The name of the extension, required + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - image + - name + type: object + type: array + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 
'false' + is default + type: boolean + type: object + parameters: + additionalProperties: + type: string + description: PostgreSQL configuration options (postgresql.conf) + type: object + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + pg_ident: + description: |- + PostgreSQL User Name Maps rules (lines to be appended + to the pg_ident.conf file) + items: + type: string + type: array + promotionTimeout: + description: |- + Specifies the maximum number of seconds to wait when promoting an instance to primary. + Default value is 40000000, greater than one year in seconds, + big enough to simulate an infinite timeout + format: int32 + type: integer + shared_preload_libraries: + description: Lists of shared preload libraries to add to the default + ones + items: + type: string + type: array + syncReplicaElectionConstraint: + description: |- + Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be + set up. + properties: + enabled: + description: This flag enables the constraints for sync replicas + type: boolean + nodeLabelsAntiAffinity: + description: A list of node labels values to extract and compare + to evaluate if the pods reside in the same topology or not + items: + type: string + type: array + required: + - enabled + type: object + synchronous: + description: Configuration of the PostgreSQL synchronous replication + feature + properties: + dataDurability: + description: |- + If set to "required", data durability is strictly enforced. Write operations + with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will + block if there are insufficient healthy replicas, ensuring data persistence. + If set to "preferred", data durability is maintained when healthy replicas + are available, but the required number of instances will adjust dynamically + if replicas become unavailable. This setting relaxes strict durability enforcement + to allow for operational continuity. This setting is only applicable if both + `standbyNamesPre` and `standbyNamesPost` are unset (empty). + enum: + - required + - preferred + type: string + maxStandbyNamesFromCluster: + description: |- + Specifies the maximum number of local cluster pods that can be + automatically included in the `synchronous_standby_names` option in + PostgreSQL. + type: integer + method: + description: |- + Method to select synchronous replication standbys from the listed + servers, accepting 'any' (quorum-based synchronous replication) or + 'first' (priority-based synchronous replication) as values. + enum: + - any + - first + type: string + number: + description: |- + Specifies the number of synchronous standby servers that + transactions must wait for responses from. + type: integer + x-kubernetes-validations: + - message: The number of synchronous replicas should be greater + than zero + rule: self > 0 + standbyNamesPost: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` after local cluster pods (the order is + only useful for priority-based synchronous replication). + items: + type: string + type: array + standbyNamesPre: + description: |- + A user-defined list of application names to be added to + `synchronous_standby_names` before local cluster pods (the order is + only useful for priority-based synchronous replication). 
+ items: + type: string + type: array + required: + - method + - number + type: object + x-kubernetes-validations: + - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre' + and empty 'standbyNamesPost' + rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre) + || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost) + || self.standbyNamesPost.size()==0)) + type: object + primaryUpdateMethod: + default: restart + description: |- + Method to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be with a switchover (`switchover`) or in-place (`restart` - default) + enum: + - switchover + - restart + type: string + primaryUpdateStrategy: + default: unsupervised + description: |- + Deployment strategy to follow to upgrade the primary server during a rolling + update procedure, after all replicas have been successfully updated: + it can be automated (`unsupervised` - default) or manual (`supervised`) + enum: + - unsupervised + - supervised + type: string + priorityClassName: + description: |- + Name of the priority class which will be used in every generated Pod, if the PriorityClass + specified does not exist, the pod will not be able to schedule. Please refer to + https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass + for more information + type: string + probes: + description: |- + The configuration of the probes to be injected + in the PostgreSQL Pods. + properties: + liveness: + description: The liveness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + isolationCheck: + description: |- + Configure the feature that extends the liveness probe for a primary + instance. In addition to the basic checks, this verifies whether the + primary is isolated from the Kubernetes API server and from its + replicas, ensuring that it can be safely shut down if network + partition or API unavailability is detected. Enabled by default. + properties: + connectionTimeout: + default: 1000 + description: Timeout in milliseconds for connections during + the primary isolation check + type: integer + enabled: + default: true + description: Whether primary isolation checking is enabled + for the liveness probe + type: boolean + requestTimeout: + default: 1000 + description: Timeout in milliseconds for requests during + the primary isolation check + type: integer + type: object + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
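# --- Illustrative sketch (not part of the generated schema): combining
# configMap and secret sources in `.spec.projectedVolumeTemplate`; both
# object names are hypothetical. Files land under the `/projected` mount.
spec:
  projectedVolumeTemplate:
    sources:
      - configMap:
          name: app-settings        # hypothetical ConfigMap
          items:
            - key: settings.ini
              path: settings.ini
      - secret:
          name: app-credentials     # hypothetical Secret
          optional: true
# ---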
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + replica: + description: Replica cluster configuration + properties: + enabled: + description: |- + If replica mode is enabled, this cluster will be a replica of an + existing cluster. Replica cluster can be created from a recovery + object store or via streaming through pg_basebackup. + Refer to the Replica clusters page of the documentation for more information. + type: boolean + minApplyDelay: + description: |- + When replica mode is enabled, this parameter allows you to replay + transactions only when the system time is at least the configured + time past the commit time. This provides an opportunity to correct + data loss errors. Note that when this parameter is set, a promotion + token cannot be used. + type: string + primary: + description: |- + Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the + topology specified in externalClusters + type: string + promotionToken: + description: |- + A demotion token generated by an external cluster used to + check if the promotion requirements are met. + type: string + self: + description: |- + Self defines the name of this cluster. It is used to determine if this is a primary + or a replica cluster, comparing it with `primary` + type: string + source: + description: The name of the external cluster which is the replication + origin + minLength: 1 + type: string + required: + - source + type: object + replicationSlots: + default: + highAvailability: + enabled: true + description: Replication slots management configuration + properties: + highAvailability: + default: + enabled: true + description: Replication slots for high availability configuration + properties: + enabled: + default: true + description: |- + If enabled (default), the operator will automatically manage replication slots + on the primary instance and use them in streaming replication + connections with all the standby instances that are part of the HA + cluster. If disabled, the operator will not take advantage + of replication slots in streaming connections with the replicas. + This feature also controls replication slots in replica cluster, + from the designated primary to its cascading replicas. + type: boolean + slotPrefix: + default: _cnpg_ + description: |- + Prefix for replication slots managed by the operator for HA. + It may only contain lower case letters, numbers, and the underscore character. + This can only be set at creation time. By default set to `_cnpg_`. + pattern: ^[0-9a-z_]*$ + type: string + synchronizeLogicalDecoding: + description: |- + When enabled, the operator automatically manages synchronization of logical + decoding (replication) slots across high-availability clusters. 
+ + Requires one of the following conditions: + - PostgreSQL version 17 or later + - PostgreSQL version < 17 with pg_failover_slots extension enabled + type: boolean + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. 
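# --- Illustrative sketch (not part of the generated schema): the
# replication slot management described above, with HA slots enabled,
# user-defined slots synchronized, and a hypothetical exclusion pattern.
spec:
  replicationSlots:
    highAvailability:
      enabled: true
      slotPrefix: _cnpg_
    synchronizeReplicas:
      enabled: true
      excludePatterns:
        - "^manual_"        # hypothetical slot-name pattern to skip
    updateInterval: 30      # standbys refresh slot status every 30s
# ---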
+ Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
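# --- Illustrative sketch (not part of the generated schema): the timing
# fields described above. With these values the operator leaves
# stopDelay - smartShutdownTimeout = 1620s for the fast shutdown, and the
# startup probe failure threshold derives from ceiling(startDelay / 10).
spec:
  startDelay: 3600            # seconds allowed for instance startup
  stopDelay: 1800             # seconds allowed for graceful shutdown
  smartShutdownTimeout: 180   # window reserved for the smart shutdown
# ---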
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
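# --- Illustrative sketch (not part of the generated schema): a minimal
# `.spec.storage` stanza as described above; the storage class name is
# hypothetical. `size` may only be increased after creation.
spec:
  storage:
    size: 10Gi
    storageClass: fast-ssd     # hypothetical StorageClass
    resizeInUseVolumes: true   # the default
# ---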
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
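# --- Illustrative sketch (not part of the generated schema): a
# tablespace entry matching the schema above; the tablespace and owner
# names are hypothetical.
spec:
  tablespaces:
    - name: analytics
      owner:
        name: app
      storage:
        size: 5Gi
      temporary: false   # set true to add it to temp_tablespaces
# ---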
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. 
+ In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
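# --- Illustrative sketch (not part of the generated schema): spreading
# instances across zones with the constraint fields described above,
# selecting this cluster's pods by the `cnpg.io/cluster` label; the
# cluster name is hypothetical.
spec:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          cnpg.io/cluster: cluster-example
# ---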
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
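# --- Illustrative sketch (not part of the generated schema): a separate
# WAL volume via `.spec.walStorage`, which mirrors the shape of the
# `storage` stanza; the storage class name is hypothetical.
spec:
  walStorage:
    size: 5Gi
    storageClass: fast-ssd   # hypothetical StorageClass
# ---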
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. 
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates; if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate server TLS certificates; if ServerTLSSecret is provided, + this can be omitted.
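# --- Hedged sketch (not part of the generated schema): the certificate
# secrets reported in this status block mirror what can be configured
# under `.spec.certificates`. Secret names below are hypothetical; each
# CA secret must contain `ca.crt`, plus `ca.key` unless the
# corresponding TLS secret is provided.
spec:
  certificates:
    serverCASecret: my-server-ca
    serverTLSSecret: my-server-tls             # kubernetes.io/tls secret
    clientCASecret: my-client-ca
    replicationTLSSecret: my-replication-tls   # kubernetes.io/tls secret
# ---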
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
+                      Map keys are the config map names, map values are the versions
+                    type: object
+                type: object
+              currentPrimary:
+                description: Current primary instance
+                type: string
+              currentPrimaryFailingSinceTimestamp:
+                description: |-
+                  The timestamp when the primary was detected to be unhealthy.
+                  This field is reported when `.spec.failoverDelay` is populated or during online upgrades
+                type: string
+              currentPrimaryTimestamp:
+                description: The timestamp when the last actual promotion to primary
+                  has occurred
+                type: string
+              danglingPVC:
+                description: |-
+                  List of all the PVCs created by this cluster and still available
+                  which are not attached to a Pod
+                items:
+                  type: string
+                type: array
+              demotionToken:
+                description: |-
+                  DemotionToken is a JSON token containing the information
+                  from pg_controldata such as Database system identifier, Latest checkpoint's
+                  TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO
+                  WAL file, and Time of latest checkpoint
+                type: string
+              firstRecoverabilityPoint:
+                description: |-
+                  The first recoverability point, stored as a date in RFC3339 format.
+                  This field is calculated from the content of FirstRecoverabilityPointByMethod.
+
+                  Deprecated: the field is not set for backup plugins.
+                type: string
+              firstRecoverabilityPointByMethod:
+                additionalProperties:
+                  format: date-time
+                  type: string
+                description: |-
+                  The first recoverability point, stored as a date in RFC3339 format, per backup method type.
+
+                  Deprecated: the field is not set for backup plugins.
+                type: object
+              healthyPVC:
+                description: List of all the PVCs that are neither dangling nor initializing
+                items:
+                  type: string
+                type: array
+              image:
+                description: Image contains the image name used by the pods
+                type: string
+              initializingPVC:
+                description: List of all the PVCs that are being initialized by this
+                  cluster
+                items:
+                  type: string
+                type: array
+              instanceNames:
+                description: List of instance names in the cluster
+                items:
+                  type: string
+                type: array
+              instances:
+                description: The total number of PVC Groups detected in the cluster.
+                  It may differ from the number of existing instance pods.
+                type: integer
+              instancesReportedState:
+                additionalProperties:
+                  description: InstanceReportedState describes the last reported state
+                    of an instance during a reconciliation loop
+                  properties:
+                    ip:
+                      description: IP address of the instance
+                      type: string
+                    isPrimary:
+                      description: indicates if an instance is the primary one
+                      type: boolean
+                    timeLineID:
+                      description: indicates the TimelineID the instance is running on
+                      type: integer
+                  required:
+                  - isPrimary
+                  type: object
+                description: The reported state of the instances during the last reconciliation
+                  loop
+                type: object
+              instancesStatus:
+                additionalProperties:
+                  items:
+                    type: string
+                  type: array
+                description: InstancesStatus indicates in which status the instances
+                  are
+                type: object
+              jobCount:
+                description: How many Jobs have been created by this cluster
+                format: int32
+                type: integer
+              lastFailedBackup:
+                description: |-
+                  Last failed backup, stored as a date in RFC3339 format.
+
+                  Deprecated: the field is not set for backup plugins.
+                type: string
+              lastPromotionToken:
+                description: |-
+                  LastPromotionToken is the last verified promotion token that
+                  was used to promote a replica cluster
+                type: string
+              lastSuccessfulBackup:
+                description: |-
+                  Last successful backup, stored as a date in RFC3339 format.
+                  This field is calculated from the content of LastSuccessfulBackupByMethod.
+ + Deprecated: the field is not set for backup plugins. + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: |- + Last successful backup, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. + properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + 
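+              # Illustrative only, not emitted by controller-gen: a reconciled
+              # entry in `.status.pluginStatus` could read as follows (the
+              # plugin name and status payload are hypothetical):
+              #
+              #   pluginStatus:
+              #   - name: example-plugin.cnpg.io
+              #     version: 0.1.0
+              #     status: '{"ready":true}'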
poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. 
+                    type: boolean
+                type: object
+              systemID:
+                description: SystemID is the latest detected PostgreSQL SystemID
+                type: string
+              tablespacesStatus:
+                description: TablespacesStatus reports the state of the declarative
+                  tablespaces in the cluster
+                items:
+                  description: TablespaceState represents the state of a tablespace
+                    in a cluster
+                  properties:
+                    error:
+                      description: Error is the reconciliation error, if any
+                      type: string
+                    name:
+                      description: Name is the name of the tablespace
+                      type: string
+                    owner:
+                      description: Owner is the PostgreSQL user owning the tablespace
+                      type: string
+                    state:
+                      description: State is the latest reconciliation state
+                      type: string
+                  required:
+                  - name
+                  - state
+                  type: object
+                type: array
+              targetPrimary:
+                description: |-
+                  Target primary instance; this is different from the previous one
+                  during a switchover or a failover
+                type: string
+              targetPrimaryTimestamp:
+                description: The timestamp when the last request for a new primary
+                  has occurred
+                type: string
+              timelineID:
+                description: The timeline of the Postgres cluster
+                type: integer
+              topology:
+                description: Instances topology.
+                properties:
+                  instances:
+                    additionalProperties:
+                      additionalProperties:
+                        type: string
+                      description: PodTopologyLabels represent the topology of a Pod.
+                        map[labelName]labelValue
+                      type: object
+                    description: Instances contains the pod topology of the instances
+                    type: object
+                  nodesUsed:
+                    description: |-
+                      NodesUsed represents the count of distinct nodes accommodating the instances.
+                      A value of '1' suggests that all instances are hosted on a single node,
+                      implying the absence of High Availability (HA). Ideally, this value should
+                      be the same as the number of instances in the Postgres HA cluster, implying
+                      shared nothing architecture on the compute side.
+                    format: int32
+                    type: integer
+                  successfullyExtracted:
+                    description: |-
+                      SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+                      in synchronous replica election in case of failures
+                    type: boolean
+                type: object
+              unusablePVC:
+                description: List of all the PVCs that are unusable because another
+                  PVC is missing
+                items:
+                  type: string
+                type: array
+              writeService:
+                description: Current write pod
+                type: string
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      scale:
+        specReplicasPath: .spec.instances
+        statusReplicasPath: .status.instances
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+  name: databases.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: Database
+    listKind: DatabaseList
+    plural: databases
+    singular: database
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    - jsonPath: .spec.cluster.name
+      name: Cluster
+      type: string
+    - jsonPath: .spec.name
+      name: PG Name
+      type: string
+    - jsonPath: .status.applied
+      name: Applied
+      type: boolean
+    - description: Latest reconciliation message
+      jsonPath: .status.message
+      name: Message
+      type: string
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: Database is the Schema for the databases API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. 
+ enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. 
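+              # A minimal, hypothetical Database manifest against the spec
+              # described above (all names are placeholders):
+              #
+              #   apiVersion: postgresql.cnpg.io/v1
+              #   kind: Database
+              #   metadata:
+              #     name: app-db
+              #   spec:
+              #     cluster:
+              #       name: cluster-example
+              #     name: app
+              #     owner: app
+              #     extensions:
+              #     - name: pg_stat_statements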
+ type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              applied:
+                description: Applied is true if the database was reconciled correctly
+                type: boolean
+              extensions:
+                description: Extensions is the status of the managed extensions
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+              message:
+                description: Message is the reconciliation output message
+                type: string
+              observedGeneration:
+                description: |-
+                  A sequence number representing the latest
+                  desired state that was synchronized
+                format: int64
+                type: integer
+              schemas:
+                description: Schemas is the status of the managed schemas
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+  name: failoverquorums.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: FailoverQuorum
+    listKind: FailoverQuorumList
+    plural: failoverquorums
+    singular: failoverquorum
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: |-
+          FailoverQuorum contains the information about the current failover
+          quorum status of a PG cluster. It is updated by the instance manager
+          of the primary node and reset to zero by the operator to trigger
+          an update.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          status:
+            description: Most recently observed status of the failover quorum.
+            properties:
+              method:
+                description: Contains the latest reported Method value.
+                type: string
+              primary:
+                description: |-
+                  Primary is the name of the primary instance that most recently
+                  updated this object.
+                type: string
+              standbyNames:
+                description: |-
+                  StandbyNames is the list of potentially synchronous
+                  instance names.
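+              # Purely illustrative, with assumed values: a populated
+              # FailoverQuorum status might look like the following
+              # (instance names and the method value are hypothetical):
+              #
+              #   status:
+              #     method: ANY
+              #     primary: cluster-example-1
+              #     standbyNames:
+              #     - cluster-example-2
+              #     - cluster-example-3
+              #     standbyNumber: 1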
+ items: + type: string + type: array + standbyNumber: + description: |- + StandbyNumber is the number of synchronous standbys that transactions + need to wait for replies from. + type: integer + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. + minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. 
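+                # A hedged example, assuming the Prometheus Operator relabeling
+                # semantics documented in the RelabelConfig items below: keep
+                # only metrics whose name matches a (hypothetical) prefix, then
+                # drop a (hypothetical) label.
+                #
+                #   podMonitorMetricRelabelings:
+                #   - sourceLabels: [__name__]
+                #     regex: cnpg_.*
+                #     action: keep
+                #   - action: labeldrop
+                #     regex: temporary_label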
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. 
+ type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. + type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. 
Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. 
If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. 
This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. 
+ "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. 
+
                                      type: string
                                    operator:
                                      description: |-
                                        Represents a key's relationship to a set of values.
                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
                                      type: string
                                    values:
                                      description: |-
                                        An array of string values. If the operator is In or NotIn,
                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                        the values array must be empty. If the operator is Gt or Lt, the values
                                        array must have a single element, which will be interpreted as an integer.
                                        This array is replaced during a strategic merge patch.
                                      items:
                                        type: string
                                      type: array
                                      x-kubernetes-list-type: atomic
                                  required:
                                  - key
                                  - operator
                                  type: object
                                type: array
                                x-kubernetes-list-type: atomic
                              matchFields:
                                description: A list of node selector requirements
                                  by node's fields.
                                items:
                                  description: |-
                                    A node selector requirement is a selector that contains values, a key, and an operator
                                    that relates the key and values.
                                  properties:
                                    key:
                                      description: The label key that the
                                        selector applies to.
                                      type: string
                                    operator:
                                      description: |-
                                        Represents a key's relationship to a set of values.
                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
                                      type: string
                                    values:
                                      description: |-
                                        An array of string values. If the operator is In or NotIn,
                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                        the values array must be empty. If the operator is Gt or Lt, the values
                                        array must have a single element, which will be interpreted as an integer.
                                        This array is replaced during a strategic merge patch.
                                      items:
                                        type: string
                                      type: array
                                      x-kubernetes-list-type: atomic
                                  required:
                                  - key
                                  - operator
                                  type: object
                                type: array
                                x-kubernetes-list-type: atomic
                            type: object
                            x-kubernetes-map-type: atomic
                          weight:
                            description: Weight associated with matching
                              the corresponding nodeSelectorTerm, in the
                              range 1-100.
                            format: int32
                            type: integer
                        required:
                        - preference
                        - weight
                        type: object
                      type: array
                      x-kubernetes-list-type: atomic
                    requiredDuringSchedulingIgnoredDuringExecution:
                      description: |-
                        If the affinity requirements specified by this field are not met at
                        scheduling time, the pod will not be scheduled onto the node.
                        If the affinity requirements specified by this field cease to be met
                        at some point during pod execution (e.g. due to an update), the system
                        may or may not try to eventually evict the pod from its node.
                      properties:
                        nodeSelectorTerms:
                          description: Required. A list of node selector
                            terms. The terms are ORed.
                          items:
                            description: |-
                              A null or empty node selector term matches no objects. The requirements of
                              them are ANDed.
                              The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
                            properties:
                              matchExpressions:
                                description: A list of node selector requirements
                                  by node's labels.
                                items:
                                  description: |-
                                    A node selector requirement is a selector that contains values, a key, and an operator
                                    that relates the key and values.
                                  properties:
                                    key:
                                      description: The label key that the
                                        selector applies to.
                                      type: string
                                    operator:
                                      description: |-
                                        Represents a key's relationship to a set of values.
                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
                                      type: string
                                    values:
                                      description: |-
                                        An array of string values. If the operator is In or NotIn,
                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                        the values array must be empty.
If the operator is Gt or Lt, the values
                                        array must have a single element, which will be interpreted as an integer.
                                        This array is replaced during a strategic merge patch.
                                      items:
                                        type: string
                                      type: array
                                      x-kubernetes-list-type: atomic
                                  required:
                                  - key
                                  - operator
                                  type: object
                                type: array
                                x-kubernetes-list-type: atomic
                              matchFields:
                                description: A list of node selector requirements
                                  by node's fields.
                                items:
                                  description: |-
                                    A node selector requirement is a selector that contains values, a key, and an operator
                                    that relates the key and values.
                                  properties:
                                    key:
                                      description: The label key that the
                                        selector applies to.
                                      type: string
                                    operator:
                                      description: |-
                                        Represents a key's relationship to a set of values.
                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
                                      type: string
                                    values:
                                      description: |-
                                        An array of string values. If the operator is In or NotIn,
                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
                                        the values array must be empty. If the operator is Gt or Lt, the values
                                        array must have a single element, which will be interpreted as an integer.
                                        This array is replaced during a strategic merge patch.
                                      items:
                                        type: string
                                      type: array
                                      x-kubernetes-list-type: atomic
                                  required:
                                  - key
                                  - operator
                                  type: object
                                type: array
                                x-kubernetes-list-type: atomic
                            type: object
                            x-kubernetes-map-type: atomic
                          type: array
                          x-kubernetes-list-type: atomic
                      required:
                      - nodeSelectorTerms
                      type: object
                      x-kubernetes-map-type: atomic
                  type: object
                podAffinity:
                  description: Describes pod affinity scheduling rules (e.g.
                    co-locate this pod in the same node, zone, etc. as some
                    other pod(s)).
                  properties:
                    preferredDuringSchedulingIgnoredDuringExecution:
                      description: |-
                        The scheduler will prefer to schedule pods to nodes that satisfy
                        the affinity expressions specified by this field, but it may choose
                        a node that violates one or more of the expressions. The node that is
                        most preferred is the one with the greatest sum of weights, i.e.
                        for each node that meets all of the scheduling requirements (resource
                        request, requiredDuringScheduling affinity expressions, etc.),
                        compute a sum by iterating through the elements of this field and adding
                        "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
                        node(s) with the highest sum are the most preferred.
                      items:
                        description: The weights of all of the matched WeightedPodAffinityTerm
                          fields are added per-node to find the most preferred
                          node(s)
                        properties:
                          podAffinityTerm:
                            description: Required. A pod affinity term,
                              associated with the corresponding weight.
                            properties:
                              labelSelector:
                                description: |-
                                  A label query over a set of resources, in this case pods.
                                  If it's null, this PodAffinityTerm matches with no Pods.
                                properties:
                                  matchExpressions:
                                    description: matchExpressions is a list
                                      of label selector requirements. The
                                      requirements are ANDed.
                                    items:
                                      description: |-
                                        A label selector requirement is a selector that contains values, a key, and an operator that
                                        relates the key and values.
                                      properties:
                                        key:
                                          description: key is the label
                                            key that the selector applies
                                            to.
                                          type: string
                                        operator:
                                          description: |-
                                            operator represents a key's relationship to a set of values.
                                            Valid operators are In, NotIn, Exists and DoesNotExist.
                                          type: string
                                        values:
                                          description: |-
                                            values is an array of string values.
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
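                                            For instance, a complete requirement
                                            could read as follows (the label
                                            value is an example, not a default):
                                              matchExpressions:
                                              - key: kubernetes.io/metadata.name
                                                operator: In
                                                values:
                                                - default
                                            which would restrict the term to the
                                            "default" namespace.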
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
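                                        An illustrative anti-affinity term (the
                                        label key and value are example names):
                                          labelSelector:
                                            matchExpressions:
                                            - key: app
                                              operator: In
                                              values:
                                              - my-app
                                          topologyKey: kubernetes.io/hostname
                                        would keep two pods labeled app=my-app
                                        from being scheduled onto the same node.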
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
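                                  For illustration, a hedged sketch of an
                                  environment variable drawing its value from
                                  a Secret (the referent name and key below
                                  are examples only):
                                    env:
                                    - name: PGPASSWORD
                                      valueFrom:
                                        secretKeyRef:
                                          name: app-credentials
                                          key: password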
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
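                            For example, a readiness check could run a small
                            command such as (an illustrative sketch only, not
                            a default):
                              exec:
                                command:
                                - pg_isready
                                - -U
                                - postgres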
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. 
+ Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. 
+ Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. 
+ properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. 
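+                          # For illustration only: runAsUser is commonly paired with
+                          # runAsNonRoot for hardening; the UID below is a placeholder:
+                          #   securityContext:
+                          #     runAsNonRoot: true
+                          #     runAsUser: 10001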
+ format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
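+                          # A hypothetical exec probe, for illustration only (the
+                          # command shown is a placeholder):
+                          #   exec:
+                          #     command: ["test", "-f", "/tmp/started"]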
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
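+                          # For illustration only: in a PostgreSQL pod a tcpSocket
+                          # probe might target the conventional server port:
+                          #   tcpSocket:
+                          #     port: 5432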
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. 
+ Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
+ The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. 
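+                          # A hypothetical httpGet probe, for illustration only (path
+                          # and port are placeholders):
+                          #   httpGet:
+                          #     path: /healthz
+                          #     port: 8080
+                          #     scheme: HTTP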
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
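+                          # As the description above suggests, a virtual host is
+                          # usually set through httpHeaders rather than this field; a
+                          # hypothetical illustration (all values are placeholders):
+                          #   httpGet:
+                          #     path: /readyz
+                          #     port: 8000
+                          #     httpHeaders:
+                          #     - name: Host
+                          #       value: primary.example.com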
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
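+                          # A hypothetical entry that restarts the container on resize
+                          # (this assumes RestartContainer is accepted alongside the
+                          # NotRequired default described below; values are placeholders):
+                          #   - resourceName: memory
+                          #     restartPolicy: RestartContainer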
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. 
Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
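                      # A minimal sketch of an exec-based startupProbe built from the
                      # fields described above; the command and thresholds are hypothetical:
                      #
                      #   startupProbe:
                      #     exec:
                      #       command: ["pg_isready", "-q"]
                      #     failureThreshold: 30
                      #     periodSeconds: 10
                      #
                      # With failureThreshold 30 and periodSeconds 10, the container has up
                      # to 300 seconds to initialize before it is restarted.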
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
                          first client attaches to stdin, and then remains open and accepts data until the client disconnects,
                          at which time stdin is closed and remains closed until the container is restarted. If this
                          flag is false, a container process that reads from stdin will never receive an EOF.
                          Default is false.
                        type: boolean
                      terminationMessagePath:
                        description: |-
                          Optional: Path at which the file to which the container's termination message
                          will be written is mounted into the container's filesystem.
                          Message written is intended to be brief final status, such as an assertion failure message.
                          Will be truncated by the node if greater than 4096 bytes. The total message length across
                          all containers will be limited to 12kb.
                          Defaults to /dev/termination-log.
                          Cannot be updated.
                        type: string
                      terminationMessagePolicy:
                        description: |-
                          Indicate how the termination message should be populated. File will use the contents of
                          terminationMessagePath to populate the container status message on both success and failure.
                          FallbackToLogsOnError will use the last chunk of container log output if the termination
                          message file is empty and the container exited with an error.
                          The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
                          Defaults to File.
                          Cannot be updated.
                        type: string
                      tty:
                        description: |-
                          Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
                          Default is false.
                        type: boolean
                      volumeDevices:
                        description: volumeDevices is the list of block devices
                          to be used by the container.
                        items:
                          description: volumeDevice describes a mapping of a
                            raw block device within a container.
                          properties:
                            devicePath:
                              description: devicePath is the path inside of
                                the container that the device will be mapped
                                to.
                              type: string
                            name:
                              description: name must match the name of a persistentVolumeClaim
                                in the pod
                              type: string
                          required:
                          - devicePath
                          - name
                          type: object
                        type: array
                        x-kubernetes-list-map-keys:
                        - devicePath
                        x-kubernetes-list-type: map
                      volumeMounts:
                        description: |-
                          Pod volumes to mount into the container's filesystem.
                          Cannot be updated.
                        items:
                          description: VolumeMount describes a mounting of a
                            Volume within a container.
                          properties:
                            mountPath:
                              description: |-
                                Path within the container at which the volume should be mounted. Must
                                not contain ':'.
                              type: string
                            mountPropagation:
                              description: |-
                                mountPropagation determines how mounts are propagated from the host
                                to container and the other way around.
                                When not set, MountPropagationNone is used.
                                This field is beta in 1.10.
                                When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
                                (which defaults to None).
                              type: string
                            name:
                              description: This must match the Name of a Volume.
                              type: string
                            readOnly:
                              description: |-
                                Mounted read-only if true, read-write otherwise (false or unspecified).
                                Defaults to false.
                              type: boolean
                            recursiveReadOnly:
                              description: |-
                                RecursiveReadOnly specifies whether read-only mounts should be handled
                                recursively.

                                If ReadOnly is false, this field has no meaning and must be unspecified.

                                If ReadOnly is true, and this field is set to Disabled, the mount is not made
                                recursively read-only. If this field is set to IfPossible, the mount is made
                                recursively read-only, if it is supported by the container runtime.
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. 

                    If the OS field is set to linux, the following fields must be unset:
                    - securityContext.windowsOptions

                    If the OS field is set to windows, the following fields must be unset:
                    - spec.hostPID
                    - spec.hostIPC
                    - spec.hostUsers
                    - spec.securityContext.appArmorProfile
                    - spec.securityContext.seLinuxOptions
                    - spec.securityContext.seccompProfile
                    - spec.securityContext.fsGroup
                    - spec.securityContext.fsGroupChangePolicy
                    - spec.securityContext.sysctls
                    - spec.shareProcessNamespace
                    - spec.securityContext.runAsUser
                    - spec.securityContext.runAsGroup
                    - spec.securityContext.supplementalGroups
                    - spec.securityContext.supplementalGroupsPolicy
                    - spec.containers[*].securityContext.appArmorProfile
                    - spec.containers[*].securityContext.seLinuxOptions
                    - spec.containers[*].securityContext.seccompProfile
                    - spec.containers[*].securityContext.capabilities
                    - spec.containers[*].securityContext.readOnlyRootFilesystem
                    - spec.containers[*].securityContext.privileged
                    - spec.containers[*].securityContext.allowPrivilegeEscalation
                    - spec.containers[*].securityContext.procMount
                    - spec.containers[*].securityContext.runAsUser
                    - spec.containers[*].securityContext.runAsGroup
                  properties:
                    name:
                      description: |-
                        Name is the name of the operating system. The currently supported values are linux and windows.
                        Additional value may be defined in future and can be one of:
                        https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
                        Clients should expect to handle additional values and treat unrecognized values in this field as os: null
                      type: string
                  required:
                  - name
                  type: object
                overhead:
                  additionalProperties:
                    anyOf:
                    - type: integer
                    - type: string
                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                    x-kubernetes-int-or-string: true
                  description: |-
                    Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
                    This field will be autopopulated at admission time by the RuntimeClass admission controller. If
                    the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
                    The RuntimeClass admission controller will reject Pod create requests which have the overhead already
                    set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
                    defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
                    More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
                  type: object
                preemptionPolicy:
                  description: |-
                    PreemptionPolicy is the Policy for preempting pods with lower priority.
                    One of Never, PreemptLowerPriority.
                    Defaults to PreemptLowerPriority if unset.
                  type: string
                priority:
                  description: |-
                    The priority value. Various system components use this field to find the
                    priority of the pod. When Priority Admission Controller is enabled, it
                    prevents users from setting this field. The admission controller populates
                    this field from PriorityClassName.
                    The higher the value, the higher the priority.
                  format: int32
                  type: integer
                priorityClassName:
                  description: |-
                    If specified, indicates the pod's priority. "system-node-critical" and
                    "system-cluster-critical" are two special keywords which indicate the
                    highest priorities with the former being the highest priority. Any other
                    name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
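                # Illustrative only (hypothetical names; requires the alpha
                # DynamicResourceAllocation feature gate): a pod-level resource claim
                # generated from a ResourceClaimTemplate, using the fields described above.
                #
                #   resourceClaims:
                #     - name: gpu
                #       resourceClaimTemplateName: gpu-claim-template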
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. 
See type description for default values of each field. + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". 
+ + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. 
+ If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). 
In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
                    If a pod does not have FQDN, this has no effect.
                    Default to false.
                  type: boolean
                shareProcessNamespace:
                  description: |-
                    Share a single process namespace between all of the containers in a pod.
                    When this is set containers will be able to view and signal processes from other containers
                    in the same pod, and the first process in each container will not be assigned PID 1.
                    HostPID and ShareProcessNamespace cannot both be set.
                    Optional: Default to false.
                  type: boolean
                subdomain:
                  description: |-
                    If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
                    If not specified, the pod will not have a domainname at all.
                  type: string
                terminationGracePeriodSeconds:
                  description: |-
                    Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
                    Value must be non-negative integer. The value zero indicates stop immediately via
                    the kill signal (no opportunity to shut down).
                    If this value is nil, the default grace period will be used instead.
                    The grace period is the duration in seconds after the processes running in the pod are sent
                    a termination signal and the time when the processes are forcibly halted with a kill signal.
                    Set this value longer than the expected cleanup time for your process.
                    Defaults to 30 seconds.
                  format: int64
                  type: integer
                tolerations:
                  description: If specified, the pod's tolerations.
                  items:
                    description: |-
                      The pod this Toleration is attached to tolerates any taint that matches
                      the triple <key,value,effect> using the matching operator <operator>.
                    properties:
                      effect:
                        description: |-
                          Effect indicates the taint effect to match. Empty means match all taint effects.
                          When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
                        type: string
                      key:
                        description: |-
                          Key is the taint key that the toleration applies to. Empty means match all taint keys.
                          If the key is empty, operator must be Exists; this combination means to match all values and all keys.
                        type: string
                      operator:
                        description: |-
                          Operator represents a key's relationship to the value.
                          Valid operators are Exists and Equal. Defaults to Equal.
                          Exists is equivalent to wildcard for value, so that a pod can
                          tolerate all taints of a particular category.
                        type: string
                      tolerationSeconds:
                        description: |-
                          TolerationSeconds represents the period of time the toleration (which must be
                          of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
                          it is not set, which means tolerate the taint forever (do not evict). Zero and
                          negative values will be treated as 0 (evict immediately) by the system.
                        format: int64
                        type: integer
                      value:
                        description: |-
                          Value is the taint value the toleration matches to.
                          If the operator is Exists, the value should be empty, otherwise just a regular string.
                        type: string
                    type: object
                  type: array
                  x-kubernetes-list-type: atomic
                topologySpreadConstraints:
                  description: |-
                    TopologySpreadConstraints describes how a group of pods ought to spread across topology
                    domains. Scheduler will schedule pods in a way which abides by the constraints.
                    All topologySpreadConstraints are ANDed.
                  items:
                    description: TopologySpreadConstraint specifies how to spread
                      matching pods among the given topology.
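                # Illustrative only (hypothetical taint key and value): a toleration
                # built from the fields described above, letting the pod schedule onto
                # nodes tainted dedicated=postgres:NoSchedule.
                #
                #   tolerations:
                #     - key: dedicated
                #       operator: Equal
                #       value: postgres
                #       effect: NoSchedule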
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
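                  # The zone example above, written out as a constraint. The app label
                  # is hypothetical; topologyKey and whenUnsatisfiable are described
                  # just below.
                  #
                  #   topologySpreadConstraints:
                  #     - maxSkew: 1
                  #       topologyKey: topology.kubernetes.io/zone
                  #       whenUnsatisfiable: DoNotSchedule
                  #       labelSelector:
                  #         matchLabels:
                  #           app: my-app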
format: int32
                        type: integer
                      minDomains:
                        description: |-
                          MinDomains indicates a minimum number of eligible domains.
                          When the number of eligible domains with matching topology keys is less than minDomains,
                          Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
                          And when the number of eligible domains with matching topology keys equals or greater than minDomains,
                          this value has no effect on scheduling.
                          As a result, when the number of eligible domains is less than minDomains,
                          scheduler won't schedule more than maxSkew Pods to those domains.
                          If value is nil, the constraint behaves as if MinDomains is equal to 1.
                          Valid values are integers greater than 0.
                          When value is not nil, WhenUnsatisfiable must be DoNotSchedule.

                          For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
                          labelSelector spread as 2/2/2:
                          | zone1 | zone2 | zone3 |
                          | P P | P P | P P |
                          The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
                          In this situation, new pod with the same labelSelector cannot be scheduled,
                          because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
                          it will violate MaxSkew.
                        format: int32
                        type: integer
                      nodeAffinityPolicy:
                        description: |-
                          NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
                          when calculating pod topology spread skew. Options are:
                          - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
                          - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.

                          If this value is nil, the behavior is equivalent to the Honor policy.
                        type: string
                      nodeTaintsPolicy:
                        description: |-
                          NodeTaintsPolicy indicates how we will treat node taints when calculating
                          pod topology spread skew. Options are:
                          - Honor: nodes without taints, along with tainted nodes for which the incoming pod
                          has a toleration, are included.
                          - Ignore: node taints are ignored. All nodes are included.

                          If this value is nil, the behavior is equivalent to the Ignore policy.
                        type: string
                      topologyKey:
                        description: |-
                          TopologyKey is the key of node labels. Nodes that have a label with this key
                          and identical values are considered to be in the same topology.
                          We consider each <key, value> as a "bucket", and try to put balanced number
                          of pods into each bucket.
                          We define a domain as a particular instance of a topology.
                          Also, we define an eligible domain as a domain whose nodes meet the requirements of
                          nodeAffinityPolicy and nodeTaintsPolicy.
                          e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
                          And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
                          It's a required field.
                        type: string
                      whenUnsatisfiable:
                        description: |-
                          WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
                          the spread constraint.
                          - DoNotSchedule (default) tells the scheduler not to schedule it.
                          - ScheduleAnyway tells the scheduler to schedule the pod in any location,
                          but giving higher precedence to topologies that would help reduce the
                          skew.
                          A constraint is considered "Unsatisfiable" for an incoming pod
                          if and only if every possible node assignment for that pod would violate
                          "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
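                  # Illustrative only (hypothetical volume ID): the deprecated in-tree
                  # awsElasticBlockStore source described above; as noted, operations on
                  # it are redirected to the ebs.csi.aws.com CSI driver.
                  #
                  #   volumes:
                  #     - name: data
                  #       awsElasticBlockStore:
                  #         volumeID: vol-0123456789abcdef0
                  #         fsType: ext4
                  #         readOnly: true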
+ type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. 
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                        type: string
                      optional:
                        description: optional specifies whether the ConfigMap
                          or its keys must be defined
                        type: boolean
                    type: object
                    x-kubernetes-map-type: atomic
                  csi:
                    description: csi (Container Storage Interface) represents
                      ephemeral storage that is handled by certain external
                      CSI drivers.
                    properties:
                      driver:
                        description: |-
                          driver is the name of the CSI driver that handles this volume.
                          Consult with your admin for the correct name as registered in the cluster.
                        type: string
                      fsType:
                        description: |-
                          fsType to mount. Ex. "ext4", "xfs", "ntfs".
                          If not provided, the empty value is passed to the associated CSI driver
                          which will determine the default filesystem to apply.
                        type: string
                      nodePublishSecretRef:
                        description: |-
                          nodePublishSecretRef is a reference to the secret object containing
                          sensitive information to pass to the CSI driver to complete the CSI
                          NodePublishVolume and NodeUnpublishVolume calls.
                          This field is optional, and may be empty if no secret is required. If the
                          secret object contains more than one secret, all secret references are passed.
                        properties:
                          name:
                            default: ""
                            description: |-
                              Name of the referent.
                              This field is effectively required, but due to backwards compatibility is
                              allowed to be empty. Instances of this type with an empty value here are
                              almost certainly wrong.
                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                            type: string
                        type: object
                        x-kubernetes-map-type: atomic
                      readOnly:
                        description: |-
                          readOnly specifies a read-only configuration for the volume.
                          Defaults to false (read/write).
                        type: boolean
                      volumeAttributes:
                        additionalProperties:
                          type: string
                        description: |-
                          volumeAttributes stores driver-specific properties that are passed to the CSI
                          driver. Consult your driver's documentation for supported values.
                        type: object
                    required:
                    - driver
                    type: object
                  downwardAPI:
                    description: downwardAPI represents downward API about
                      the pod that should populate this volume
                    properties:
                      defaultMode:
                        description: |-
                          Optional: mode bits used to set permissions on created files by default.
                          Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
                          YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
                          Defaults to 0644.
                          Directories within the path are not affected by this setting.
                          This might be in conflict with other options that affect the file
                          mode, like fsGroup, and the result can be other mode bits set.
                        format: int32
                        type: integer
                      items:
                        description: Items is a list of downward API volume
                          files
                        items:
                          description: DownwardAPIVolumeFile represents
                            information to create the file containing the
                            pod field
                          properties:
                            fieldRef:
                              description: 'Required: Selects a field of
                                the pod: only annotations, labels, name,
                                namespace and uid are supported.'
                              properties:
                                apiVersion:
                                  description: Version of the schema the
                                    FieldPath is written in terms of, defaults
                                    to "v1".
                                  type: string
                                fieldPath:
                                  description: Path of the field to select
                                    in the specified API version.
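                  # A sketch of a configMap volume using the keys documented above;
                  # the ConfigMap name, key, and path are hypothetical:
                  #
                  #   volumes:
                  #     - name: app-config
                  #       configMap:
                  #         name: my-config
                  #         defaultMode: 420   # decimal for octal 0644
                  #         items:
                  #           - key: settings.ini
                  #             path: conf/settings.ini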
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. 
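+
+ A minimal sketch (illustrative names only): a volumes entry such as
+ - name: scratch
+ ephemeral:
+ volumeClaimTemplate:
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 1Gi
+ provisions a PVC named "<pod name>-scratch" whose lifetime matches
+ the pod, as described below.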
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: a reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: |-
+ flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+ properties:
+ datasetName:
+ description: |-
+ datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker;
+ it should be considered deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset.
+ This is the unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is the unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ image:
+ description: |-
+ image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ The volume is resolved at pod startup depending on which PullPolicy value is provided:
+
+ - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+
+ The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ The volume will be mounted read-only (ro) and with non-executable files (noexec).
+ Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
+ The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ properties:
+ pullPolicy:
+ description: |-
+ Policy for pulling OCI objects. Possible values are:
+ Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ Never: the kubelet never pulls the reference and only uses a local image or artifact.
Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
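+ For example, 0400 (octal) or 256 (decimal) makes the projected
+ file readable by its owner only.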
+ format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. 
A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ quobyte:
+ description: |-
+ quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ default: /etc/ceph/keyring
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ pool:
+ default: rbd
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
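+                            For example, `tablesInSchema: public` replicates every table in the
+                            `public` schema, like `FOR TABLES IN SCHEMA public` in PostgreSQL.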
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+                  - self: sets the ScheduledBackup object as the owner of the backup
+                  - cluster: sets the cluster as the owner of the backup
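+                  For example, with `self`, the ScheduledBackup owns the Backup objects
+                  it creates, so deleting it lets the Kubernetes garbage collector
+                  remove those backups as well.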
+                enum:
+                - none
+                - self
+                - cluster
+                type: string
+              cluster:
+                description: The cluster to back up
+                properties:
+                  name:
+                    description: Name of the referent.
+                    type: string
+                required:
+                - name
+                type: object
+              immediate:
+                description: Whether the first backup has to start immediately
+                  after creation or not
+                type: boolean
+              method:
+                default: barmanObjectStore
+                description: |-
+                  The backup method to be used, possible options are `barmanObjectStore`,
+                  `volumeSnapshot` or `plugin`. Defaults to `barmanObjectStore`.
+                enum:
+                - barmanObjectStore
+                - volumeSnapshot
+                - plugin
+                type: string
+              online:
+                description: |-
+                  Whether the default type of backup with volume snapshots is
+                  online/hot (`true`, default) or offline/cold (`false`).
+                  Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+                type: boolean
+              onlineConfiguration:
+                description: |-
+                  Configuration parameters to control the online/hot backup with volume snapshots
+                  Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+                properties:
+                  immediateCheckpoint:
+                    description: |-
+                      Control whether the I/O workload for the backup initial checkpoint will
+                      be limited, according to the `checkpoint_completion_target` setting on
+                      the PostgreSQL server. If set to true, an immediate checkpoint will be
+                      used, meaning PostgreSQL will complete the checkpoint as soon as
+                      possible. `false` by default.
+                    type: boolean
+                  waitForArchive:
+                    default: true
+                    description: |-
+                      If false, the function will return immediately after the backup is completed,
+                      without waiting for WAL to be archived.
+                      This behavior is only useful with backup software that independently monitors WAL archiving.
+                      Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+                      By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+                      enabled.
+                      On a standby, this means that it will wait only when archive_mode = always.
+                      If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+                      an immediate segment switch.
+                    type: boolean
+                type: object
+              pluginConfiguration:
+                description: Configuration parameters passed to the plugin managing
+                  this backup
+                properties:
+                  name:
+                    description: Name is the name of the plugin managing this backup
+                    type: string
+                  parameters:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Parameters are the configuration parameters passed to the backup
+                      plugin for this backup
+                    type: object
+                required:
+                - name
+                type: object
+              schedule:
+                description: |-
+                  The schedule does not follow the same format used in Kubernetes CronJobs
+                  as it includes an additional seconds specifier,
+                  see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+                  (for example, `"0 0 0 * * *"` schedules a backup every day at midnight).
+                type: string
+              suspend:
+                description: Whether this backup schedule is suspended or not
+                type: boolean
+              target:
+                description: |-
+                  The policy to decide which instance should perform this backup. If empty,
+                  it defaults to `cluster.spec.backup.target`.
+                  Available options are empty string, `primary` and `prefer-standby`.
+                  `primary` to have backups run always on primary instances,
+                  `prefer-standby` to have backups run preferably on the most updated
+                  standby, if available.
+                enum:
+                - primary
+                - prefer-standby
+                type: string
+            required:
+            - cluster
+            - schedule
+            type: object
+          status:
+            description: |-
+              Most recently observed status of the ScheduledBackup. This data may not be up
+              to date. Populated by the system. Read-only.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              lastCheckTime:
+                description: The latest time the schedule was checked
+                format: date-time
+                type: string
+              lastScheduleTime:
+                description: Information about the last time a backup was successfully
+                  scheduled.
+                format: date-time
+                type: string
+              nextScheduleTime:
+                description: The next time a backup will run
+                format: date-time
+                type: string
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+  name: subscriptions.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: Subscription
+    listKind: SubscriptionList
+    plural: subscriptions
+    singular: subscription
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    - jsonPath: .spec.cluster.name
+      name: Cluster
+      type: string
+    - jsonPath: .spec.name
+      name: PG Name
+      type: string
+    - jsonPath: .status.applied
+      name: Applied
+      type: boolean
+    - description: Latest reconciliation message
+      jsonPath: .status.message
+      name: Message
+      type: string
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: Subscription is the Schema for the subscriptions API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: SubscriptionSpec defines the desired state of Subscription
+            properties:
+              cluster:
+                description: The name of the PostgreSQL cluster that identifies the
+                  "subscriber"
+                properties:
+                  name:
+                    default: ""
+                    description: |-
+                      Name of the referent.
+                      This field is effectively required, but due to backwards compatibility is
+                      allowed to be empty. Instances of this type with an empty value here are
+                      almost certainly wrong.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+              dbname:
+                description: |-
+                  The name of the database where the subscription will be installed in
+                  the "subscriber" cluster
+                type: string
+                x-kubernetes-validations:
+                - message: dbname is immutable
+                  rule: self == oldSelf
+              externalClusterName:
+                description: The name of the external cluster with the publication
+                  ("publisher")
+                type: string
+              name:
+                description: The name of the subscription inside PostgreSQL
+                type: string
+                x-kubernetes-validations:
+                - message: name is immutable
+                  rule: self == oldSelf
+              parameters:
+                additionalProperties:
+                  type: string
+                description: |-
+                  Subscription parameters included in the `WITH` clause of the PostgreSQL
+                  `CREATE SUBSCRIPTION` command.
Most parameters cannot be changed + after the subscription is created and will be ignored if modified + later, except for a limited set documented at: + https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - failoverquorums/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - failoverquorums + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- 
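+# The ClusterRoleBinding below grants the `cnpg-manager` ClusterRole above to
+# the operator's ServiceAccount. As a quick sanity check (a hypothetical
+# invocation, assuming the default `cnpg-system` installation shown in this
+# manifest), the granted permissions can be verified with:
+#   kubectl auth can-i list clusters.postgresql.cnpg.io \
+#     --as=system:serviceaccount:cnpg-system:cnpg-manager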
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on 
epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT 
checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: 
"COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , 
COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - 
--max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0-rc1 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0-rc1 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: 
/validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From 5c3765c59c3a80edf2f879ff28f78d5773b507bb Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Tue, 29 Jul 2025 19:05:24 +0200 Subject: [PATCH 751/836] fix: pointer slice loop (#8140) Fix the loop when converting item slices to pointer slices. The previous version unnecessarily allocates memory, and with an older compiler, it does not work as expected. Signed-off-by: Marco Nenciarini Signed-off-by: Armando Ruocco Co-authored-by: Armando Ruocco --- api/v1/generic_funcs.go | 4 +- api/v1/generic_funcs_test.go | 38 +++++++++++++++++++ internal/controller/finalizers_delete.go | 4 +- internal/controller/finalizers_delete_test.go | 13 +++++++ 4 files changed, 55 insertions(+), 4 deletions(-) create mode 100644 api/v1/generic_funcs_test.go diff --git a/api/v1/generic_funcs.go b/api/v1/generic_funcs.go index da622c8e7b..7397a6f4e2 100644 --- a/api/v1/generic_funcs.go +++ b/api/v1/generic_funcs.go @@ -60,8 +60,8 @@ func ensureManagedResourceExclusivity[T managedResourceComparer](t1 T, list []T) // toSliceWithPointers converts a slice of items to a slice of pointers to the items func toSliceWithPointers[T any](items []T) []*T { result := make([]*T, len(items)) - for i, item := range items { - result[i] = &item + for i := range items { + result[i] = &items[i] } return result } diff --git a/api/v1/generic_funcs_test.go b/api/v1/generic_funcs_test.go new file mode 100644 index 0000000000..51429fd030 --- /dev/null +++ b/api/v1/generic_funcs_test.go @@ -0,0 +1,38 @@ +/* +Copyright © contributors to CloudNativePG, established as +CloudNativePG a Series of LF Projects, LLC. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package v1 + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type testStruct struct{ Val int } + +var _ = Describe("toSliceWithPointers", func() { + It("should return pointers to the original slice elements", func() { + items := []testStruct{{1}, {2}, {3}} + pointers := toSliceWithPointers(items) + Expect(pointers).To(HaveLen(len(items))) + for i := range items { + Expect(pointers[i]).To(BeIdenticalTo(&items[i])) + } + }) +}) diff --git a/internal/controller/finalizers_delete.go b/internal/controller/finalizers_delete.go index a791bd58b2..9d0e1467bb 100644 --- a/internal/controller/finalizers_delete.go +++ b/internal/controller/finalizers_delete.go @@ -94,8 +94,8 @@ type clusterOwnedResourceWithStatus interface { func toSliceWithPointers[T any](items []T) []*T { result := make([]*T, len(items)) - for i, item := range items { - result[i] = &item + for i := range items { + result[i] = &items[i] } return result } diff --git a/internal/controller/finalizers_delete_test.go b/internal/controller/finalizers_delete_test.go index 5f0e1a55b4..40b9793b5f 100644 --- a/internal/controller/finalizers_delete_test.go +++ b/internal/controller/finalizers_delete_test.go @@ -326,3 +326,16 @@ var _ = Describe("Test cleanup of owned objects on cluster deletion", func() { Expect(subscription.Status.Message).ToNot(ContainSubstring("not reconciled")) }) }) + +type testStruct struct{ Val int } + +var _ = Describe("toSliceWithPointers", func() { + It("should return pointers to the original slice elements", func() { + items := []testStruct{{1}, {2}, {3}} + pointers := toSliceWithPointers(items) + Expect(pointers).To(HaveLen(len(items))) + for i := range items { + Expect(pointers[i]).To(BeIdenticalTo(&items[i])) + } + }) +}) From 72df18301ce2a6577e80a49a7cec21ece488c29e Mon Sep 17 00:00:00 2001 From: Peggie Date: Wed, 30 Jul 2025 07:54:51 +0200 Subject: [PATCH 752/836] test: Updated Postgres versions used in E2E tests (#8156) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 77dee9750e..42e376dd61 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "18": [ "18beta2", - "18beta2-1" + "18beta2-2" ], "17": [ "17.5", From b7e9f07cf6fa2181bc5c9b8e82d4b37b27ee92ba Mon Sep 17 00:00:00 2001 From: Infinoid Date: Wed, 30 Jul 2025 03:15:55 -0400 Subject: [PATCH 753/836] fix(plugin): cosmetic error in `cnpg status` output (#8146) The table columns don't line up in the "cnpg status" output when a pod status lookup failed. 
Closes: #8145 Signed-off-by: Mark Glines --- internal/cmd/plugin/status/status.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 463f34d299..43a097c0e0 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -759,7 +759,6 @@ func (fullStatus *PostgresqlStatus) printInstancesStatus() { instance.Pod.Name, "-", "-", - "-", apierrs.ReasonForError(instance.Error), instance.Pod.Status.QOSClass, "-", From 70b78a8ce9949eae6733e6c100f970c4708b1d2a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 31 Jul 2025 10:25:46 +0200 Subject: [PATCH 754/836] fix(deps): update module sigs.k8s.io/yaml to v1.6.0 (main) (#8149) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9b0eefa646..ef3c3fdaf0 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( k8s.io/client-go v0.33.3 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.21.0 - sigs.k8s.io/yaml v1.5.0 + sigs.k8s.io/yaml v1.6.0 ) require ( diff --git a/go.sum b/go.sum index 1ea508d7a8..1bd76bb7ad 100644 --- a/go.sum +++ b/go.sum @@ -316,5 +316,5 @@ sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxO sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ= -sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= From 4e0b294eeda2ec890357d20f8f837a3fb241efd6 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 31 Jul 2025 14:21:20 +0200 Subject: [PATCH 755/836] docs(development_environment): add helm requirement (#8171) Signed-off-by: Armando Ruocco --- contribute/development_environment/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contribute/development_environment/README.md b/contribute/development_environment/README.md index a5d613cf03..48efe55395 100644 --- a/contribute/development_environment/README.md +++ b/contribute/development_environment/README.md @@ -47,6 +47,7 @@ environment variable: - [golangci-lint](https://github.com/golangci/golangci-lint) - [goreleaser](https://goreleaser.com/) - [Operator SDK CLI](https://sdk.operatorframework.io/) +- [Helm](https://helm.sh/) In addition, check that the following packages are installed in your system: @@ -83,7 +84,8 @@ components in your Mac OS X system: brew install go \ kind \ golangci/tap/golangci-lint \ - goreleaser + goreleaser \ + helm ``` Please note that bash v5.0+ is required, this can be installed with: From 6d5330b90fb4372016c91f050f226f3a21b5a2af Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 31 Jul 2025 14:51:20 +0200 Subject: [PATCH 756/836] chore(deps): update dependency vmware-tanzu/velero to v1.16.2 (main) (#8187) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 
77885bed09..f3a2425d3e 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1325,7 +1325,7 @@ jobs: uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3 env: # renovate: datasource=github-releases depName=vmware-tanzu/velero - VELERO_VERSION: "v1.16.1" + VELERO_VERSION: "v1.16.2" # renovate: datasource=docker depName=velero/velero-plugin-for-aws VELERO_AWS_PLUGIN_VERSION: "v1.12.1" with: From d53d5cad9d0e85a33bb72e48194030f2ea43d149 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 31 Jul 2025 15:55:03 +0200 Subject: [PATCH 757/836] chore(deps): update github/codeql-action digest to 51f7732 (main) (#8186) --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 033b61256d..ea3f36a212 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index f6ea6d3462..8a52bc9617 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index ad01f3b2c4..e2e62844a4 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index f55f5611bc..ee436dc919 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: sarif_file: snyk-test.sarif From 391af9c2995ac456f11b1c01adc57766fd9b4e78 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 31 Jul 2025 22:36:20 +0200 Subject: [PATCH 758/836] chore(deps): update velero/velero-plugin-for-aws docker tag to v1.12.2 (main) (#8188) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f3a2425d3e..4c5658e89b 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1327,7 +1327,7 @@ jobs: # renovate: datasource=github-releases depName=vmware-tanzu/velero VELERO_VERSION: "v1.16.2" # renovate: datasource=docker depName=velero/velero-plugin-for-aws - VELERO_AWS_PLUGIN_VERSION: "v1.12.1" + VELERO_AWS_PLUGIN_VERSION: "v1.12.2" with: timeout_minutes: 10 max_attempts: 3 From 11f9be2fc5e021dd7306a82f44c2b55015917371 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 1 Aug 2025 10:18:45 +0200 Subject: [PATCH 759/836] chore(deps): update dependency rook/rook to v1.17.7 (main) (#8201) --- .github/workflows/continuous-delivery.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 4c5658e89b..2676bd5f9e 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -42,7 +42,7 @@ env: # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" # renovate: datasource=github-releases depName=rook/rook versioning=loose - ROOK_VERSION: "v1.17.6" + ROOK_VERSION: "v1.17.7" EXTERNAL_SNAPSHOTTER_VERSION: "v8.3.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" BUILD_PUSH_PROVENANCE: "" From beea2d04696436cdfbb64fc4726622fb4bdc7572 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Fri, 1 Aug 2025 12:03:02 +0200 Subject: [PATCH 760/836] docs: list 2025-3 term LFX Mentorships (#8212) Closes #8211 Signed-off-by: Gabriele Bartolini Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. 
--- contribute/lfx-mentorship-program.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contribute/lfx-mentorship-program.md b/contribute/lfx-mentorship-program.md index 30c9d06edb..736bd054d3 100644 --- a/contribute/lfx-mentorship-program.md +++ b/contribute/lfx-mentorship-program.md @@ -25,6 +25,9 @@ the areas outlined below. | Year | Term | Project | Mentee | | ---- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------- | +| 2025 | 3 (Sep–Nov) | [Chaos Testing](https://mentorship.lfx.linuxfoundation.org/project/0858ce07-0c90-47fa-a1a0-95c6762f00ff) | - | +| 2025 | 3 (Sep–Nov) | [Rebuild documentation for multi-version support with Docusaurus](https://mentorship.lfx.linuxfoundation.org/project/86a647c1-88c7-474f-b093-6abb58197083) | - | +| 2025 | 3 (Sep–Nov) | [Refresh cnpg-i-hello-world to align with current CNPG-I](https://mentorship.lfx.linuxfoundation.org/project/cabc7391-4956-42b2-b91c-d261816b7289) | - | | 2025 | 2 (Jun–Aug) | [Declarative Management of PostgreSQL FDWs](https://mentorship.lfx.linuxfoundation.org/project/53fa853e-b5fa-4d68-be71-f005c75aea89) | [Ying Zhu](https://github.com/EdwinaZhu) | --- From dabd1108b9a3769d473302e74aba24953d60217c Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Fri, 1 Aug 2025 12:07:56 +0200 Subject: [PATCH 761/836] chore: update new path of the EKS versioning (#8208) Per commit https://github.com/awsdocs/amazon-eks-user-guide/commit/671de34ea65488fcda8d36d2131fdc37501f3893 the documentation we were using to get the EKS versions was moved to a new directory. Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/k8s-versions-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index d45406ad5a..fe5c09c7f8 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -39,7 +39,7 @@ jobs: # There is no command to get EKS k8s versions, we have to parse the documentation name: Get updated EKS versions run: | - DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/mainline/latest/ug/clusters/kubernetes-versions-standard.adoc" + DOC_URL="https://raw.githubusercontent.com/awsdocs/amazon-eks-user-guide/mainline/latest/ug/versioning/kubernetes-versions-standard.adoc" curl --silent "${DOC_URL}" | sed -e 's/.*Kubernetes \([0-9].[0-9][0-9]\).*/\1/;/^[0-9]\./!d' | uniq | \ awk -vv=$MINIMAL_K8S '$0>=v {print $0}' | \ jq -Rn '[inputs]' | tee .github/eks_versions.json From e0f1a3e5257f8ec394b36fd1f9d5f2466113d3ed Mon Sep 17 00:00:00 2001 From: Peggie Date: Fri, 1 Aug 2025 12:20:08 +0200 Subject: [PATCH 762/836] feat: Public Cloud K8S versions update (#7702) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: public-cloud-k8s-versions-check --- .github/aks_versions.json | 6 +++--- .github/eks_versions.json | 1 + .github/gke_versions.json | 1 + .github/kind_versions.json | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/aks_versions.json b/.github/aks_versions.json index bea3c37cce..52919c25a3 100644 --- a/.github/aks_versions.json +++ b/.github/aks_versions.json @@ -1,6 +1,6 @@ [ - "1.33.0", - "1.32.4", - "1.31.8", + "1.33.2", + "1.32.6", + "1.31.9", "1.30.9" ] diff --git 
a/.github/eks_versions.json b/.github/eks_versions.json index 5c66ae5056..1d12057c20 100644 --- a/.github/eks_versions.json +++ b/.github/eks_versions.json @@ -1,4 +1,5 @@ [ + "1.33", "1.32", "1.31", "1.30" diff --git a/.github/gke_versions.json b/.github/gke_versions.json index 5c66ae5056..1d12057c20 100644 --- a/.github/gke_versions.json +++ b/.github/gke_versions.json @@ -1,4 +1,5 @@ [ + "1.33", "1.32", "1.31", "1.30" diff --git a/.github/kind_versions.json b/.github/kind_versions.json index 0f0983321d..2a05db31e3 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,5 +1,5 @@ [ - "v1.33.1", + "v1.33.2", "v1.32.5", "v1.31.9", "v1.30.13", From e45c8e7141b76962524333bb382c6fe0783869c5 Mon Sep 17 00:00:00 2001 From: Marco Nenciarini Date: Fri, 1 Aug 2025 13:50:37 +0200 Subject: [PATCH 763/836] ci: docker push with Docker Desktop must specify the platform (#7929) Closed #7930 Signed-off-by: Marco Nenciarini --- hack/setup-cluster.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 52e22b1144..fbec426386 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -64,9 +64,9 @@ case $ARCH in aarch64) ARCH="arm64" ;; esac -# If arm64 and user did not set it explicitly -if [ "${ARCH}" = "arm64" ] && [ "${DOCKER_DEFAULT_PLATFORM}" = "" ]; then - DOCKER_DEFAULT_PLATFORM=linux/arm64 +# If user did not set it explicitly +if [ "${DOCKER_DEFAULT_PLATFORM}" = "" ]; then + DOCKER_DEFAULT_PLATFORM="linux/${ARCH}" fi export DOCKER_DEFAULT_PLATFORM @@ -406,7 +406,7 @@ load_image_registry() { local image_local_name=${image/${registry_name}/127.0.0.1} docker tag "${image}" "${image_local_name}" - docker push -q "${image_local_name}" + docker push --platform "${DOCKER_DEFAULT_PLATFORM}" -q "${image_local_name}" } load_image() { From 939dae8ba2f2256b9c4eaecd36d5eb097fff3320 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 2 Aug 2025 10:39:44 +0200 Subject: [PATCH 764/836] fix(deps): update all non-major go dependencies (main) (#8148) This PR contains the following updates: https://github.com/goreleaser/goreleaser `v2.11.0` -> `v2.11.2` https://github.com/prometheus/client_golang `v1.22.0` -> `v1.23.0` --- Makefile | 2 +- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index 91a8ff4485..27956bc249 100644 --- a/Makefile +++ b/Makefile @@ -57,7 +57,7 @@ KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.18.0 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca # renovate: datasource=go depName=github.com/goreleaser/goreleaser -GORELEASER_VERSION ?= v2.11.0 +GORELEASER_VERSION ?= v2.11.2 # renovate: datasource=docker depName=jonasbn/github-action-spellcheck versioning=docker SPELLCHECK_VERSION ?= 0.51.0 # renovate: datasource=docker depName=getwoke/woke versioning=docker diff --git a/go.mod b/go.mod index ef3c3fdaf0..3b393be1e3 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.38.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 github.com/spf13/cobra v1.9.1 @@ -89,9 +89,9 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv 
v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect diff --git a/go.sum b/go.sum index 1bd76bb7ad..0de5c8850a 100644 --- a/go.sum +++ b/go.sum @@ -157,14 +157,14 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0 h1:V/HLst0rSw4BZp8nIqhaTnnW4/EGxEoYbgjcDqzPJ5U= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0/go.mod h1:MruMqbSS9aYrKhBImrO9X9g52hwz3I0B+tcoeAwkmuM= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= From 37578c78cd35b309ee4008783ebdbd06cd37b029 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 3 Aug 2025 11:43:11 +0200 Subject: [PATCH 765/836] chore(deps): update dependency golangci/golangci-lint to v2.3.1 (main) (#8220) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 8a52bc9617..97d98f9023 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -21,7 +21,7 @@ env: # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.5" # renovate: 
datasource=github-releases depName=golangci/golangci-lint versioning=loose - GOLANGCI_LINT_VERSION: "v2.3.0" + GOLANGCI_LINT_VERSION: "v2.3.1" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" From 40eb97c769c957b61ab39accf940bab4c91a579f Mon Sep 17 00:00:00 2001 From: smiyc <36233521+smiyc@users.noreply.github.com> Date: Sun, 3 Aug 2025 18:40:14 +0200 Subject: [PATCH 766/836] docs: add a new k9s shortcut to view the logs in "pretty" format (#8217) This PR adds a new shortcut to the k9s plugins.yml file. Shift-L will print the cluster logs formatted using `kubectl cnpg logs pretty`. Closes #8068 Signed-off-by: Daniel Chambre Signed-off-by: Daniel Chambre --- docs/src/samples/k9s/plugins.yml | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/docs/src/samples/k9s/plugins.yml b/docs/src/samples/k9s/plugins.yml index 04b6850e0c..b51ad07f01 100644 --- a/docs/src/samples/k9s/plugins.yml +++ b/docs/src/samples/k9s/plugins.yml @@ -6,6 +6,7 @@ # h View hibernate status # Shift-H Hibernate cluster (this retains the data, but deletes everything else - including the cluster) # l View cluster logs +# Shift-L View cluster logs pretty # p Connect to the cluster via psql # r Reload the cluster # Shift-R Restart the cluster @@ -68,7 +69,17 @@ plugins: background: false args: - -c - - "kubectl cnpg logs cluster $NAME -f -n $NAMESPACE --context $CONTEXT" + - "kubectl cnpg logs cluster $NAME -f -n $NAMESPACE --context $CONTEXT" cnpg-logs-pretty: shortCut: Shift-L description: Logs pretty scopes: - cluster command: bash background: false args: - -c - "kubectl cnpg logs cluster $NAME -f -n $NAMESPACE --context $CONTEXT | kubectl cnpg logs pretty" cnpg-psql: shortCut: p description: PSQL shell @@ -120,4 +131,4 @@ plugins: background: false args: - -c - - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" --verbose 2>&1 | less -R" + - "kubectl cnpg status $NAME -n $NAMESPACE --context \"$CONTEXT\" --verbose 2>&1 | less -R" \ No newline at end of file From d8345d04c03bc7625ce6eb323b699f84f757375f Mon Sep 17 00:00:00 2001 From: Peggie Date: Sun, 3 Aug 2025 20:40:18 +0200 Subject: [PATCH 767/836] chore: refresh licenses directory (#7985) Refresh the licenses directory Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V.
Co-authored-by: license-updater --- .../{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE | 0 .../{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/NOTICE | 0 .../{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/LICENSE | 0 .../{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/NOTICE | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename licenses/go-licenses/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE (100%) rename licenses/go-licenses/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/NOTICE (100%) rename licenses/go-licenses/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/LICENSE (100%) rename licenses/go-licenses/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/NOTICE (100%) diff --git a/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/licenses/go-licenses/go.yaml.in/yaml/v2/LICENSE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/LICENSE rename to licenses/go-licenses/go.yaml.in/yaml/v2/LICENSE diff --git a/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/licenses/go-licenses/go.yaml.in/yaml/v2/NOTICE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v2/NOTICE rename to licenses/go-licenses/go.yaml.in/yaml/v2/NOTICE diff --git a/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v3/LICENSE b/licenses/go-licenses/go.yaml.in/yaml/v3/LICENSE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v3/LICENSE rename to licenses/go-licenses/go.yaml.in/yaml/v3/LICENSE diff --git a/licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v3/NOTICE b/licenses/go-licenses/go.yaml.in/yaml/v3/NOTICE similarity index 100% rename from licenses/go-licenses/sigs.k8s.io/yaml/goyaml.v3/NOTICE rename to licenses/go-licenses/go.yaml.in/yaml/v3/NOTICE From 478c6dd5b85cf79f75a70fb579a18582f2543279 Mon Sep 17 00:00:00 2001 From: Om Santosh Suneri <142336291+omsuneri@users.noreply.github.com> Date: Mon, 4 Aug 2025 14:51:02 +0530 Subject: [PATCH 768/836] chore: `startedAt` vs `started at` in log messages key (#8210) Signed-off-by: Om Santosh Suneri <142336291+omsuneri@users.noreply.github.com> --- internal/controller/backup_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 00a9fcf366..5dec2c29e7 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -381,7 +381,7 @@ func (r *BackupReconciler) isValidBackupRunning( contextLogger.Info("Backup is already running on", "cluster", cluster.Name, "pod", pod.Name, - "started at", backup.Status.StartedAt) + "startedAt", backup.Status.StartedAt) // Nothing to do here return true, nil From 6caee7c4ae607e7908bd68bd77cff33701cde361 Mon Sep 17 00:00:00 2001 From: Fabian Kammel Date: Mon, 4 Aug 2025 12:05:19 +0200 Subject: [PATCH 769/836] chore: pin GitHub Actions and OCI image references (#8023) - pin the `free-disk-space` action to `v1.3.1` (latest commit on main) - pin `snyk/actions` to the latest commit on `master` - add digests to OCI image references based on their latest tagged versions Closes: #7632 Signed-off-by: Fabian Kammel --- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 6 +++--- Dockerfile | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 2676bd5f9e..ba3c0c308a 100644 --- a/.github/workflows/continuous-delivery.yml 
+++ b/.github/workflows/continuous-delivery.yml @@ -633,7 +633,7 @@ jobs: steps: - name: Cleanup Disk - uses: jlumbroso/free-disk-space@main + uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 with: android: true dotnet: true diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 97d98f9023..cd4f6242a6 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -605,9 +605,9 @@ jobs: accept-keywords: key - name: Run Snyk to check Docker image for vulnerabilities - uses: snyk/actions/docker@master + uses: snyk/actions/docker@28606799782bc8e809f4076e9f8293bc4212d05e # master if: | - !github.event.repository.fork && + !github.event.repository.fork && !github.event.pull_request.head.repo.fork continue-on-error: true env: @@ -619,7 +619,7 @@ jobs: - name: Upload result to GitHub Code Scanning uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 if: | - !github.event.repository.fork && + !github.event.repository.fork && !github.event.pull_request.head.repo.fork continue-on-error: true with: diff --git a/Dockerfile b/Dockerfile index e85f152f8b..fe64c140f7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,8 @@ -ARG BASE=gcr.io/distroless/static-debian12:nonroot +ARG BASE=gcr.io/distroless/static-debian12:nonroot@sha256:627d6c5a23ad24e6bdff827f16c7b60e0289029b0c79e9f7ccd54ae3279fb45f # This builder stage it's only because we need a command # to create a symlink and we do not have it in a distroless image -FROM gcr.io/distroless/static-debian12:debug-nonroot AS builder +FROM gcr.io/distroless/static-debian12:debug-nonroot@sha256:edbeb7a4e79938116dc9cb672b231792e0b5ac86c56fb49781a79e54f3842c67 AS builder ARG TARGETARCH SHELL ["/busybox/sh", "-c"] RUN ln -sf operator/manager_${TARGETARCH} manager From 9ade7ed075fa2794f2a65853d447f49d193a36b9 Mon Sep 17 00:00:00 2001 From: Peggie Date: Mon, 4 Aug 2025 13:40:18 +0200 Subject: [PATCH 770/836] test: Updated Postgres versions used in E2E tests (#8230) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. 
Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 42e376dd61..061fd87efa 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "18": [ "18beta2", - "18beta2-2" + "18beta2-3" ], "17": [ "17.5", From 2bf2a5ad5b3fc6d5794f111c7dc7faab9ddbf222 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 4 Aug 2025 23:00:25 +0200 Subject: [PATCH 771/836] chore(deps): update all non-major github action (main) (#8238) This PR contains the following updates: https://github.com/docker/login-action `74a5d14` -> `184bdaa` snyk/actions `2860679` -> `77490d9` --- .github/workflows/continuous-delivery.yml | 12 ++++++------ .github/workflows/continuous-integration.yml | 10 +++++----- .github/workflows/release-publish.yml | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index ba3c0c308a..a3ca9a0be2 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -369,7 +369,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -665,7 +665,7 @@ jobs: - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -909,7 +909,7 @@ jobs: - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -1239,7 +1239,7 @@ jobs: - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -1626,7 +1626,7 @@ jobs: - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -1948,7 +1948,7 @@ jobs: - ## In case hack/setup-cluster.sh need pull operand image from registry name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index cd4f6242a6..8485af2ac5 100644 --- a/.github/workflows/continuous-integration.yml +++ 
b/.github/workflows/continuous-integration.yml @@ -552,7 +552,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -605,7 +605,7 @@ jobs: accept-keywords: key - name: Run Snyk to check Docker image for vulnerabilities - uses: snyk/actions/docker@28606799782bc8e809f4076e9f8293bc4212d05e # master + uses: snyk/actions/docker@77490d94e966421e076e95ad8fa87aa55e5ca409 # master if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork @@ -680,7 +680,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -728,7 +728,7 @@ jobs: make operator-sdk preflight - name: Loging to container registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} @@ -790,7 +790,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login into docker registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ env.REGISTRY_USER }} diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 7c3012ff9e..9d887122bb 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -191,7 +191,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login to ghcr.io - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} @@ -271,7 +271,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3 - name: Login to ghcr.io - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} From 95899c5231e1502175e6565ee08ebe7683fb9c11 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 5 Aug 2025 12:06:56 +0200 Subject: [PATCH 772/836] chore(renovate): create a group for distroless digests images (#8254) Create a group for the distroless digest in Renovate Signed-off-by: Jonathan Gonzalez V. 
--- .github/renovate.json5 | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 3364ab9d1d..6a104723dd 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -300,5 +300,13 @@ separateMajorMinor: false, pinDigests: false, }, + { + groupName: 'container distroless digests', + matchPackageNames: [ + 'gcr.io/distroless{/,}**', + ], + pinDigests: true, + separateMajorMinor: false, + }, ], } From 99550adf171efb72d806cea2210fee5698ef5386 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Wed, 6 Aug 2025 07:07:37 +0200 Subject: [PATCH 773/836] fix: unblock rollout of former primary Pod when WAL archiving plugin is missing (#8236) This patch addresses a rollout issue when migrating from the in-tree Barman Cloud support to the `barman-cloud` plugin, particularly when using the `switchover` primary update strategy. During such a migration, a switchover is triggered to roll out the former primary instance. After the switchover, the former primary Pod restarts and attempts to archive any remaining WAL files in the queue before starting PostgreSQL. These files may still be present due to archiving delays or misconfiguration. However, the previous version of the Pod does not include support for the plugin-based archiving system, causing all archiving attempts to fail during this pre-start phase. As a result, the Pod stalls indefinitely, blocking the rollout. This fix introduces a mechanism to detect this specific failure scenario. If WAL archiving cannot proceed due to a missing plugin, the Pod exits with a distinct exit code. The operator recognises this exit code and replaces the Pod with one based on the updated container image, ensuring the plugin is available and WAL archiving can continue as expected. Fixes: #8189 Signed-off-by: Leonardo Cecchi Signed-off-by: Gabriele Bartolini Co-authored-by: Gabriele Bartolini --- api/v1/cluster_types.go | 5 +++ internal/cmd/manager/instance/run/cmd.go | 32 ++++++++++++++++--- .../cmd/manager/instance/run/lifecycle/run.go | 6 ++++ internal/controller/cluster_controller.go | 28 ++++++++++++++++ internal/controller/cluster_status.go | 16 ++++++++-- .../management/controller/instance_startup.go | 7 ++++ pkg/concurrency/executed.go | 29 +++++++++++++++++ pkg/management/postgres/archiver/archiver.go | 16 +++++++++- 8 files changed, 131 insertions(+), 8 deletions(-) diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go index a342f58e41..94006080c9 100644 --- a/api/v1/cluster_types.go +++ b/api/v1/cluster_types.go @@ -112,6 +112,11 @@ const ( // MissingWALDiskSpaceExitCode is the exit code the instance manager // will use to signal that there's no more WAL disk space MissingWALDiskSpaceExitCode = 4 + + // MissingWALArchivePlugin is the exit code used by the instance manager + // to indicate that it started successfully, but the configured WAL + // archiving plugin is not available. + MissingWALArchivePlugin = 5 ) // SnapshotOwnerReference defines the reference type for the owner of the snapshot.
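// Aside: a minimal, self-contained sketch of the exit-code contract this
// patch establishes between the instance manager and the operator. It is an
// illustration only, not part of the diff: main(), run(), and the local
// constant are hypothetical stand-ins, with the constant mirroring the
// apiv1.MissingWALArchivePlugin value introduced in the hunk above.

package main

import (
	"errors"
	"fmt"
	"os"
)

// Mirrors apiv1.MissingWALArchivePlugin (= 5) from the hunk above.
const missingWALArchivePluginExitCode = 5

// Sentinel error surfaced by the pre-start checks when the cluster enables a
// WAL archiving plugin whose socket was never registered in this Pod.
var errWALArchivePluginNotAvailable = errors.New("WAL archive plugin not available")

func run() error {
	// Hypothetical stand-in for the instance manager start-up sequence:
	// instead of retrying archiving forever, it returns the sentinel error.
	return errWALArchivePluginNotAvailable
}

func main() {
	if err := run(); err != nil {
		if errors.Is(err, errWALArchivePluginNotAvailable) {
			// Exiting with the dedicated code lets the operator distinguish
			// this condition from a generic crash, delete the Pod, and
			// recreate it from the updated image that ships the plugin.
			os.Exit(missingWALArchivePluginExitCode)
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}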
diff --git a/internal/cmd/manager/instance/run/cmd.go b/internal/cmd/manager/instance/run/cmd.go index 758c53480d..2d8713beed 100644 --- a/internal/cmd/manager/instance/run/cmd.go +++ b/internal/cmd/manager/instance/run/cmd.go @@ -26,6 +26,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "github.com/cloudnative-pg/machinery/pkg/log" "github.com/spf13/cobra" @@ -66,9 +67,13 @@ import ( var ( scheme = runtime.NewScheme() - // errNoFreeWALSpace is raised when there's not enough disk space - // to store two WAL files + // errNoFreeWALSpace is returned when there isn't enough disk space + // available to store at least two WAL files. errNoFreeWALSpace = fmt.Errorf("no free disk space for WALs") + + // errWALArchivePluginNotAvailable is returned when the configured + // WAL archiving plugin is not available or cannot be found. + errWALArchivePluginNotAvailable = fmt.Errorf("WAL archive plugin not available") ) func init() { @@ -114,6 +119,9 @@ func NewCmd() *cobra.Command { if errors.Is(err, errNoFreeWALSpace) { os.Exit(apiv1.MissingWALDiskSpaceExitCode) } + if errors.Is(err, errWALArchivePluginNotAvailable) { + os.Exit(apiv1.MissingWALArchivePlugin) + } return err }, @@ -140,7 +148,7 @@ func NewCmd() *cobra.Command { return cmd } -func runSubCommand(ctx context.Context, instance *postgres.Instance) error { +func runSubCommand(ctx context.Context, instance *postgres.Instance) error { //nolint:gocognit,gocyclo var err error contextLogger := log.FromContext(ctx) @@ -216,8 +224,11 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { postgresStartConditions := concurrency.MultipleExecuted{} exitedConditions := concurrency.MultipleExecuted{} + var loadedPluginNames []string pluginRepository := repository.New() - if _, err := pluginRepository.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + if loadedPluginNames, err = pluginRepository.RegisterUnixSocketPluginsInPath( + configuration.Current.PluginSocketDir, + ); err != nil { contextLogger.Error(err, "Unable to load sidecar CNPG-i plugins, skipping") } defer pluginRepository.Close() @@ -372,7 +383,18 @@ func runSubCommand(ctx context.Context, instance *postgres.Instance) error { contextLogger.Error(err, "Error while checking if there is enough disk space for WALs, skipping") } else if !hasDiskSpaceForWals { contextLogger.Info("Detected low-disk space condition") - return errNoFreeWALSpace + return makeUnretryableError(errNoFreeWALSpace) + } + + if instance.Cluster != nil { + enabledArchiverPluginName := instance.Cluster.GetEnabledWALArchivePluginName() + if enabledArchiverPluginName != "" && !slices.Contains(loadedPluginNames, enabledArchiverPluginName) { + contextLogger.Info( + "Detected missing WAL archiver plugin, waiting for the operator to rollout a new instance Pod", + "enabledArchiverPluginName", enabledArchiverPluginName, + "loadedPluginNames", loadedPluginNames) + return makeUnretryableError(errWALArchivePluginNotAvailable) + } } return nil diff --git a/internal/cmd/manager/instance/run/lifecycle/run.go b/internal/cmd/manager/instance/run/lifecycle/run.go index f5be4fae8f..83c08e3ecd 100644 --- a/internal/cmd/manager/instance/run/lifecycle/run.go +++ b/internal/cmd/manager/instance/run/lifecycle/run.go @@ -77,6 +77,12 @@ func (i *PostgresLifecycle) runPostgresAndWait(ctx context.Context) <-chan error // following will be a no-op. i.systemInitialization.Wait() + // If the system initialization failed, we return an error and let + // the instance manager quit. 
+ if i.systemInitialization.Err() != nil { + return err + } + // The lifecycle loop will call us even when PostgreSQL is fenced. // In that case there's no need to proceed. if i.instance.IsFenced() { diff --git a/internal/controller/cluster_controller.go b/internal/controller/cluster_controller.go index 57cd107c7b..d316580cbf 100644 --- a/internal/controller/cluster_controller.go +++ b/internal/controller/cluster_controller.go @@ -463,6 +463,10 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste return res, err } + if res, err := r.requireWALArchivingPluginOrDelete(ctx, instancesStatus); err != nil || !res.IsZero() { + return res, err + } + if res, err := replicaclusterswitch.Reconcile( ctx, r.Client, cluster, r.InstanceClient, instancesStatus); res != nil || err != nil { if res != nil { @@ -595,6 +599,30 @@ func (r *ClusterReconciler) ensureNoFailoverOnFullDisk( return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } +func (r *ClusterReconciler) requireWALArchivingPluginOrDelete( + ctx context.Context, + instances postgres.PostgresqlStatusList, +) (ctrl.Result, error) { + contextLogger := log.FromContext(ctx).WithName("require_wal_archiving_plugin_delete") + + for _, state := range instances.Items { + if !isTerminatedBecauseOfMissingWALArchivePlugin(state.Pod) { + contextLogger.Warning( + "Detected instance manager initialization procedure that failed "+ + "because the required WAL archive plugin is missing. Deleting it to trigger rollout", + "targetPod", state.Pod.Name) + if err := r.Delete(ctx, state.Pod); err != nil { + contextLogger.Error(err, "Cannot delete the pod", "pod", state.Pod.Name) + return ctrl.Result{}, err + } + + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + } + + return ctrl.Result{}, nil +} + func (r *ClusterReconciler) handleSwitchover( ctx context.Context, cluster *apiv1.Cluster, diff --git a/internal/controller/cluster_status.go b/internal/controller/cluster_status.go index 8819e7038a..74c4d9da89 100644 --- a/internal/controller/cluster_status.go +++ b/internal/controller/cluster_status.go @@ -859,7 +859,19 @@ func isWALSpaceAvailableOnPod(pod *corev1.Pod) bool { isTerminatedForMissingWALDiskSpace := func(state *corev1.ContainerState) bool { return state.Terminated != nil && state.Terminated.ExitCode == apiv1.MissingWALDiskSpaceExitCode } + return hasPostgresContainerTerminationReason(pod, isTerminatedForMissingWALDiskSpace) +} + +// isTerminatedBecauseOfMissingWALArchivePlugin check if a Pod terminated because the +// WAL archiving plugin was missing when the Pod started +func isTerminatedBecauseOfMissingWALArchivePlugin(pod *corev1.Pod) bool { + isTerminatedForMissingWALDiskSpace := func(state *corev1.ContainerState) bool { + return state.Terminated != nil && state.Terminated.ExitCode == apiv1.MissingWALArchivePlugin + } + return hasPostgresContainerTerminationReason(pod, isTerminatedForMissingWALDiskSpace) +} +func hasPostgresContainerTerminationReason(pod *corev1.Pod, reason func(state *corev1.ContainerState) bool) bool { var pgContainerStatus *corev1.ContainerStatus for i := range pod.Status.ContainerStatuses { status := pod.Status.ContainerStatuses[i] @@ -877,14 +889,14 @@ func isWALSpaceAvailableOnPod(pod *corev1.Pod) bool { // If the Pod was terminated because it didn't have enough disk // space, then we have no disk space - if isTerminatedForMissingWALDiskSpace(&pgContainerStatus.State) { + if reason(&pgContainerStatus.State) { return false } // The Pod is now running but not still ready, and last time it // 
was terminated for missing disk space. Let's wait for it // to be ready before classifying it as having enough disk space - if !pgContainerStatus.Ready && isTerminatedForMissingWALDiskSpace(&pgContainerStatus.LastTerminationState) { + if !pgContainerStatus.Ready && reason(&pgContainerStatus.LastTerminationState) { return false } diff --git a/internal/management/controller/instance_startup.go b/internal/management/controller/instance_startup.go index c22052bfbe..1b434d95c1 100644 --- a/internal/management/controller/instance_startup.go +++ b/internal/management/controller/instance_startup.go @@ -21,6 +21,7 @@ package controller import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -127,6 +128,12 @@ func (r *InstanceReconciler) verifyPgDataCoherenceForPrimary(ctx context.Context // We archive every WAL that have not been archived from the latest postmaster invocation. if err := archiver.ArchiveAllReadyWALs(ctx, cluster, r.instance.PgData); err != nil { + var missingPluginError archiver.ErrMissingWALArchiverPlugin + if errors.As(err, &missingPluginError) { + // The instance initialization resulted in a fatal error. + // We need the Pod to be rolled out to install the archiving plugin. + r.systemInitialization.BroadcastError(err) + } return fmt.Errorf("while ensuring all WAL files are archived: %w", err) } diff --git a/pkg/concurrency/executed.go b/pkg/concurrency/executed.go index bb11de807d..6a0a11291c 100644 --- a/pkg/concurrency/executed.go +++ b/pkg/concurrency/executed.go @@ -21,6 +21,8 @@ package concurrency import ( "sync" + + "go.uber.org/multierr" ) // Executed can be used to wait for something to be executed, @@ -30,6 +32,7 @@ import ( type Executed struct { cond sync.Cond done bool + err error } // MultipleExecuted can be used to wrap multiple Executed conditions that @@ -43,6 +46,16 @@ func (m MultipleExecuted) Wait() { } } +// Err returns a composition of the errors raised by the individual +// execution components or nil if there is no error. +func (m MultipleExecuted) Err() error { + var err error + for _, cond := range m { + err = multierr.Append(err, cond.Err()) + } + return err +} + // NewExecuted creates a new Executed func NewExecuted() *Executed { return &Executed{ @@ -62,10 +75,26 @@ func (i *Executed) Wait() { // Broadcast broadcasts execution to waiting goroutines func (i *Executed) Broadcast() { + i.BroadcastError(nil) +} + +// BroadcastError broadcasts execution to waiting goroutines +// recording the passed error status +func (i *Executed) BroadcastError(err error) { i.cond.L.Lock() defer i.cond.L.Unlock() if !i.done { + i.err = err i.done = true i.cond.Broadcast() } } + +// Err returns the error passed to BroadcastError if it was +// executed or nil. +func (i *Executed) Err() error { + if !i.done { + return nil + } + return i.err +} diff --git a/pkg/management/postgres/archiver/archiver.go b/pkg/management/postgres/archiver/archiver.go index 6d8519f761..d9750949de 100644 --- a/pkg/management/postgres/archiver/archiver.go +++ b/pkg/management/postgres/archiver/archiver.go @@ -48,6 +48,18 @@ import ( // and the new primary have not completed the promotion var errSwitchoverInProgress = fmt.Errorf("switchover in progress, refusing archiving") +// ErrMissingWALArchiverPlugin is raised when we try to archive a WAL +// file with a CNPG-i plugin whose socket does not exist. 
+type ErrMissingWALArchiverPlugin struct { + // PluginName is the name of the plugin that is missing + PluginName string +} + +// Error implements the error interface +func (e ErrMissingWALArchiverPlugin) Error() string { + return fmt.Sprintf("wal archive plugin is not available: %s", e.PluginName) +} + // ArchiveAllReadyWALs ensures that all WAL files that are in the "ready" // queue have been archived. // This is used to ensure that a former primary will archive the WAL files in @@ -289,7 +301,9 @@ func archiveWALViaPlugins( enabledArchiverPluginName := cluster.GetEnabledWALArchivePluginName() if enabledArchiverPluginName != "" && !client.HasPlugin(enabledArchiverPluginName) { - return fmt.Errorf("wal archive plugin is not available: %s", enabledArchiverPluginName) + return ErrMissingWALArchiverPlugin{ + PluginName: enabledArchiverPluginName, + } } return client.ArchiveWAL(ctx, cluster, walName) From a4efa40c41e1c9e3c3a48f38fcb9b4e10115e5db Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 10:45:22 +0200 Subject: [PATCH 774/836] chore(deps): update container distroless digests (main) (#8255) This PR contains the following updates: gcr.io/distroless/static-debian12 final `627d6c5` -> `cdf4daa` gcr.io/distroless/static-debian12 stage `edbeb7a` -> `0895d6f` --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index fe64c140f7..9f34614a46 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,8 @@ -ARG BASE=gcr.io/distroless/static-debian12:nonroot@sha256:627d6c5a23ad24e6bdff827f16c7b60e0289029b0c79e9f7ccd54ae3279fb45f +ARG BASE=gcr.io/distroless/static-debian12:nonroot@sha256:cdf4daaf154e3e27cfffc799c16f343a384228f38646928a1513d925f473cb46 # This builder stage it's only because we need a command # to create a symlink and we do not have it in a distroless image -FROM gcr.io/distroless/static-debian12:debug-nonroot@sha256:edbeb7a4e79938116dc9cb672b231792e0b5ac86c56fb49781a79e54f3842c67 AS builder +FROM gcr.io/distroless/static-debian12:debug-nonroot@sha256:0895d6fc256a6938a60c87d92e1148eec0d36198bff9c5d3082e6a56db7756bd AS builder ARG TARGETARCH SHELL ["/busybox/sh", "-c"] RUN ln -sf operator/manager_${TARGETARCH} manager From 1c663fbd65e8a1ea8776559f59c9f9a05f1b808f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 11:47:56 +0200 Subject: [PATCH 775/836] chore(deps): update actions/download-artifact action to v5 (main) (#8259) --- .github/workflows/continuous-delivery.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/release-publish.yml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index a3ca9a0be2..d04ccf30eb 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -515,7 +515,7 @@ jobs: rm -fr manifests/operator-manifest.yaml - name: Prepare the operator manifest - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: name: operator-manifest.yaml path: manifests @@ -2140,7 +2140,7 @@ jobs: run: mkdir test-artifacts - name: Download all artifacts to the directory - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + uses: 
actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: path: test-artifacts pattern: testartifacts-* diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 8485af2ac5..cbc2c02824 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -843,7 +843,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Download the bundle - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: name: bundle diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 9d887122bb..2928edf26b 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -322,7 +322,7 @@ jobs: persist-credentials: false - name: Download the bundle - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: name: bundle @@ -405,7 +405,7 @@ jobs: git config user.name "${{ needs.release-binaries.outputs.author_name }}" - name: Download the bundle - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5 with: name: bundle - From 00a50d59fb61a77e655995c61df54e4b663edb41 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Thu, 7 Aug 2025 09:13:29 +0200 Subject: [PATCH 776/836] fix: correctly wrap `startBackup` `pg_create_physical_replication_slot` errors (#8245) Signed-off-by: Armando Ruocco --- pkg/management/postgres/webserver/backup_connection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/management/postgres/webserver/backup_connection.go b/pkg/management/postgres/webserver/backup_connection.go index 4e2a91f9c8..53cb6beb4b 100644 --- a/pkg/management/postgres/webserver/backup_connection.go +++ b/pkg/management/postgres/webserver/backup_connection.go @@ -133,7 +133,7 @@ func (bc *backupConnection) startBackup(ctx context.Context, sync *sync.Mutex) { "slot_name => $1, immediately_reserve => true, temporary => true)", slotName, ); err != nil { - bc.err = fmt.Errorf("while creating the replication slot: %w", bc.err) + bc.err = fmt.Errorf("while creating the replication slot: %w", err) return } From 1257cc18ac081b87b532467b443792e41d71a01f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 14:24:08 +0200 Subject: [PATCH 777/836] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.84.1 (main) (#8266) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3b393be1e3..a1a601608b 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.38.0 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1 github.com/prometheus/client_golang v1.23.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 diff --git a/go.sum b/go.sum index 0de5c8850a..24c4fc5f1f 100644 --- a/go.sum +++ b/go.sum @@ -155,8 +155,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0 h1:V/HLst0rSw4BZp8nIqhaTnnW4/EGxEoYbgjcDqzPJ5U= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.0/go.mod h1:MruMqbSS9aYrKhBImrO9X9g52hwz3I0B+tcoeAwkmuM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1 h1:NEQAo0Cl1gf9sJ3oI1QzczS3BF6ySvSSH36mwOZDuhI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1/go.mod h1:MruMqbSS9aYrKhBImrO9X9g52hwz3I0B+tcoeAwkmuM= github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= From dd845c72a2eeecad321d84294b383e716babd276 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 8 Aug 2025 00:34:31 +0200 Subject: [PATCH 778/836] fix: always consider `WALArchiver` as a valid instance sidecar for metrics (#8265) Signed-off-by: Armando Ruocco --- pkg/management/postgres/metrics/collector.go | 21 +++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/pkg/management/postgres/metrics/collector.go b/pkg/management/postgres/metrics/collector.go index fe4ee6c4a1..6dc1253491 100644 --- a/pkg/management/postgres/metrics/collector.go +++ b/pkg/management/postgres/metrics/collector.go @@ -27,6 +27,7 @@ import ( "fmt" "path" "regexp" + "slices" "time" "github.com/blang/semver" @@ -574,7 +575,7 @@ func NewPluginCollector( func (p *pluginCollector) Describe(ctx context.Context, ch chan<- *prometheus.Desc, cluster *apiv1.Cluster) { contextLogger := log.FromContext(ctx).WithName("plugin_metrics_describe") - if len(cluster.GetInstanceEnabledPluginNames()) == 0 { + if len(p.getEnabledPluginNames(cluster)) == 0 { contextLogger.Trace("No plugins enabled for metrics collection") return } @@ -600,7 +601,7 @@ func (p *pluginCollector) Describe(ctx context.Context, ch chan<- *prometheus.De func (p *pluginCollector) Collect(ctx context.Context, ch chan<- prometheus.Metric, cluster *apiv1.Cluster) error { contextLogger := log.FromContext(ctx).WithName("plugin_metrics_collect") - if len(cluster.GetInstanceEnabledPluginNames()) == 0 { + if len(p.getEnabledPluginNames(cluster)) == 0 { contextLogger.Trace("No plugins enabled for metrics collection") return nil } @@ -651,6 +652,20 @@ func (p *pluginCollector) getClient(ctx context.Context, cluster *apiv1.Cluster) return pluginClient.WithPlugins( pluginLoadingContext, p.pluginRepository, - cluster.GetInstanceEnabledPluginNames()..., + p.getEnabledPluginNames(cluster)..., ) } + +func (p *pluginCollector) getEnabledPluginNames(cluster *apiv1.Cluster) []string { + enabledPluginNames := cluster.GetInstanceEnabledPluginNames() + + // for backward compatibility, we also add the WAL archive plugin that initially didn't require + // INSTANCE_SIDECAR_INJECTION + if pluginWAL := cluster.GetEnabledWALArchivePluginName(); pluginWAL != "" { + if !slices.Contains(enabledPluginNames, pluginWAL) { + enabledPluginNames = append(enabledPluginNames, pluginWAL) + } + } + + return enabledPluginNames +} From 531cf0a424d11268ff680fde8807903f79bca19a Mon Sep 17 00:00:00 2001 From: 
"renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 10:34:18 +0200 Subject: [PATCH 779/836] chore(deps): update dependency golang to v1.24.6 (main) (#8274) --- .github/workflows/backport.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/refresh-licenses.yml | 2 +- .github/workflows/release-publish.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 26209947a3..882901e630 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -12,7 +12,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.5" + GOLANG_VERSION: "1.24.6" jobs: # Label the source pull request with 'backport-requested' and all supported releases label, the goal is, by default diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ea3f36a212..08e479b4bc 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -35,7 +35,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.5" + GOLANG_VERSION: "1.24.6" jobs: duplicate_runs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index d04ccf30eb..f94ba4dba5 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.5" + GOLANG_VERSION: "1.24.6" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index cbc2c02824..51a3270423 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -19,7 +19,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.5" + GOLANG_VERSION: "1.24.6" # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose GOLANGCI_LINT_VERSION: "v2.3.1" KUBEBUILDER_VERSION: "2.3.1" diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index a762f7dd6f..03247e7088 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -10,7 +10,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.5" + GOLANG_VERSION: "1.24.6" jobs: licenses: diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 2928edf26b..6ede613c3a 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -11,7 +11,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.5" + GOLANG_VERSION: "1.24.6" REGISTRY: "ghcr.io" jobs: From 49555678c2ce07ea6a1fe5cf016b912aed5eaf41 Mon Sep 
17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 9 Aug 2025 21:26:44 +0200 Subject: [PATCH 780/836] fix(deps): update module golang.org/x/term to v0.34.0 (main) (#8283) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index a1a601608b..687a07f6db 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/term v0.33.0 + golang.org/x/term v0.34.0 google.golang.org/grpc v1.74.2 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.3 @@ -102,7 +102,7 @@ require ( golang.org/x/net v0.41.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.34.0 // indirect + golang.org/x/sys v0.35.0 // indirect golang.org/x/text v0.27.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.34.0 // indirect diff --git a/go.sum b/go.sum index 24c4fc5f1f..73e793042e 100644 --- a/go.sum +++ b/go.sum @@ -247,10 +247,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= From 802a3a49dd53a3b56aafe1d32d3adfcecbc4a7f1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 9 Aug 2025 22:39:31 +0200 Subject: [PATCH 781/836] chore(deps): update github/codeql-action digest to 76621b6 (main) (#8280) --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 08e479b4bc..b34feec9ce 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 51a3270423..3e4a4289f8 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index e2e62844a4..623a7a9084 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index ee436dc919..97202e971c 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 with: sarif_file: snyk-test.sarif From 452f4d5e04415009d68ad147688349f3fe164650 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 11 Aug 2025 13:08:49 +0200 Subject: [PATCH 782/836] chore: fix cloudnative-pg grouping test dependencies (#8304) The group created for Renovate for the cloudnative-pg dependencies was missing the `{/,}**` glob needed to cover all the sub-repos: without a glob, `github.com/cloudnative-pg/` is treated as a literal package name, so actual modules such as `github.com/cloudnative-pg/machinery` fell outside the group. Signed-off-by: Jonathan Gonzalez V.
--- .github/renovate.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 6a104723dd..310171db0e 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -295,7 +295,7 @@ { groupName: 'cnpg', matchPackageNames: [ - 'github.com/cloudnative-pg/', + 'github.com/cloudnative-pg{/,}**', ], separateMajorMinor: false, pinDigests: false, From 9e1598cbd6e858ac624dd775da6e606e7bd1dde1 Mon Sep 17 00:00:00 2001 From: Leonardo Cecchi Date: Mon, 11 Aug 2025 13:31:15 +0200 Subject: [PATCH 783/836] chore: remove `STANDBY_TCP_USER_TIMEOUT` from development manifests (#8301) Signed-off-by: Leonardo Cecchi --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 27956bc249..e4d9637e2e 100644 --- a/Makefile +++ b/Makefile @@ -233,8 +233,7 @@ generate-manifest: manifests kustomize ## Generate manifest used for deployment. $(KUSTOMIZE) edit set image controller="${CONTROLLER_IMG_WITH_DIGEST}" ;\ $(KUSTOMIZE) edit add patch --path env_override.yaml ;\ $(KUSTOMIZE) edit add configmap controller-manager-env \ - --from-literal="POSTGRES_IMAGE_NAME=${POSTGRES_IMAGE_NAME}" \ - --from-literal="STANDBY_TCP_USER_TIMEOUT=5000" ;\ + --from-literal="POSTGRES_IMAGE_NAME=${POSTGRES_IMAGE_NAME}" ;\ } ;\ mkdir -p ${DIST_PATH} ;\ $(KUSTOMIZE) build $$CONFIG_TMP_DIR/default > ${OPERATOR_MANIFEST_PATH} ;\ From cd191a537e058a44dc9ceae1ce1b932959238004 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 14:15:39 +0200 Subject: [PATCH 784/836] chore(deps): update dependency python to 3.13 (main) (#8295) --- .github/workflows/latest-postgres-version-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index c8a7a53732..d889998de2 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -24,7 +24,7 @@ jobs: - name: Set up Python 3.9 uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 with: - python-version: 3.9 + python-version: 3.13 - name: Install Python dependencies run: | From 03b06a164163f9061f9844b0c259c7a4c73f1a76 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 15:16:38 +0200 Subject: [PATCH 785/836] chore(deps): update all non-major github action (main) (#8305) --- .github/workflows/backport.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 16 ++++++------ .github/workflows/continuous-integration.yml | 26 +++++++++---------- .github/workflows/k8s-versions-check.yml | 2 +- .../latest-postgres-version-check.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/refresh-licenses.yml | 2 +- .github/workflows/release-pr.yml | 2 +- .github/workflows/release-publish.yml | 12 ++++----- .github/workflows/release-tag.yml | 2 +- .github/workflows/snyk.yml | 2 +- .github/workflows/spellcheck.yml | 4 +-- 13 files changed, 38 insertions(+), 38 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 882901e630..688ca4c3f4 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -87,7 +87,7 @@ jobs: - name: Checkout code if: contains( github.event.pull_request.labels.*.name, matrix.branch ) - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: fetch-depth: 0 ref: ${{ matrix.branch }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b34feec9ce..c4f62b560e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f94ba4dba5..b951103258 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -279,7 +279,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} # To identify the commit we need the history and all the tags. @@ -492,7 +492,7 @@ jobs: steps: - name: Checkout artifact - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: repository: cloudnative-pg/artifacts token: ${{ secrets.REPO_GHA_PAT }} @@ -575,7 +575,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -653,7 +653,7 @@ jobs: echo "-----------------------------------------------------" - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -880,7 +880,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -1227,7 +1227,7 @@ jobs: echo "CLUSTER_NAME=${{ env.E2E_SUFFIX }}-test-${{ github.run_number }}-$( echo ${{ matrix.id }} | tr -d '_.-' )" >> $GITHUB_ENV - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -1614,7 +1614,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -1927,7 +1927,7 @@ jobs: echo "CLUSTER_NAME=${{ env.E2E_SUFFIX }}-ocp-${{ github.run_number}}-$( echo ${{ matrix.k8s_version }} | tr -d '.' 
)" >> $GITHUB_ENV - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} fetch-depth: 0 diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 3e4a4289f8..78ff2ccc11 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -103,7 +103,7 @@ jobs: renovate-changed: ${{ steps.filter.outputs.renovate-changed }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Check for changes uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 @@ -154,7 +154,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -184,7 +184,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Validate Renovate JSON run: npx --yes --package renovate@40.48.6 -- renovate-config-validator @@ -222,7 +222,7 @@ jobs: SHELLCHECK_OPTS: -a -S style steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Run ShellCheck uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 @@ -245,7 +245,7 @@ jobs: latest_k8s_version: ${{ steps.get-k8s-versions.outputs.latest_k8s_version }} steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Get k8s versions for unit test id: get-k8s-versions @@ -285,7 +285,7 @@ jobs: k8s-version: ${{ fromJSON(needs.generate-unit-tests-jobs.outputs.k8sMatrix) }} steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -324,7 +324,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -359,7 +359,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -426,7 +426,7 @@ jobs: push: ${{ env.PUSH }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: # To identify the commit we need the history and all the tags. 
fetch-depth: 0 @@ -659,7 +659,7 @@ jobs: needs.buildx.outputs.push == 'true' steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: fetch-depth: 0 ref: ${{ needs.buildx.outputs.commit }} @@ -715,7 +715,7 @@ jobs: needs.olm-bundle.result == 'success' steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -772,7 +772,7 @@ jobs: github.repository_owner == 'cloudnative-pg' steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Setting up KinD cluster uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 @@ -830,7 +830,7 @@ jobs: OPP_RELEASE_INDEX_NAME: "catalog_tmp" steps: - name: Checkout community-operators - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: repository: k8s-operatorhub/community-operators persist-credentials: false diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index fe5c09c7f8..e8da20a92b 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -34,7 +34,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - # There is no command to get EKS k8s versions, we have to parse the documentation name: Get updated EKS versions diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index d889998de2..9bc5ebc6b8 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Set up Python 3.9 uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 623a7a9084..5dadff00dd 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -35,7 +35,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 with: persist-credentials: false diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index 03247e7088..f3c0e63a49 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index 82310cd171..d886e85171 100644 --- 
a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Get tag run: | diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 6ede613c3a..66c66c0d1f 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -25,7 +25,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 @@ -56,7 +56,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Get tag run: | @@ -104,7 +104,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 @@ -252,7 +252,7 @@ jobs: needs.check-version.outputs.is_stable == 'true' steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: fetch-depth: 0 @@ -315,7 +315,7 @@ jobs: VERSION: ${{ needs.release-binaries.outputs.version }} steps: - name: Checkout community-operators - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: repository: k8s-operatorhub/community-operators fetch-depth: 0 @@ -392,7 +392,7 @@ jobs: steps: - name: Checkout artifact - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 with: repository: cloudnative-pg/artifacts token: ${{ secrets.REPO_GHA_PAT }} diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml index 8483977c66..7010621e90 100644 --- a/.github/workflows/release-tag.yml +++ b/.github/workflows/release-tag.yml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Create tag if: github.event.pull_request.merged == true && startsWith(github.head_ref, 'release/v') diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 97202e971c..bcdd54f1e3 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -18,7 +18,7 @@ jobs: security-events: write steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Static Code Analysis uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # 0.4.0 diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index 80865b85ef..d8b2c9bc18 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: 
woke uses: get-woke/woke-action@b2ec032c4a2c912142b38a6a453ad62017813ed0 # v0 @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 - name: Spellcheck uses: rojopolis/spellcheck-github-actions@35a02bae020e6999c5c37fabaf447f2eb8822ca7 # 0.51.0 From dc9ee600f6468214df7dfaab26fd5d8f9496f3eb Mon Sep 17 00:00:00 2001 From: Peggie Date: Mon, 11 Aug 2025 16:25:08 +0200 Subject: [PATCH 786/836] test: Updated Postgres versions used in E2E tests (#8300) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 061fd87efa..9a98e449fe 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "18": [ "18beta2", - "18beta2-3" + "18beta2-4" ], "17": [ "17.5", From 72f71ff117bc2b7a81ab6a03951e950cccf44177 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 18:44:16 +0200 Subject: [PATCH 787/836] fix(deps): update cnpg (main) (#8306) This PR contains the following updates: https://github.com/cloudnative-pg/barman-cloud `v0.3.1` -> `v0.3.3` https://github.com/cloudnative-pg/cnpg-i `v0.2.2-0.20250723093238-963c368523c2` -> `v0.3.0` https://github.com/cloudnative-pg/machinery `v0.3.0` -> `v0.3.1` --- go.mod | 10 +++++----- go.sum | 19 ++++++++++--------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 687a07f6db..97ac395bb4 100644 --- a/go.mod +++ b/go.mod @@ -8,9 +8,9 @@ require ( github.com/avast/retry-go/v4 v4.6.1 github.com/blang/semver v3.5.1+incompatible github.com/cheynewallace/tabby v1.1.1 - github.com/cloudnative-pg/barman-cloud v0.3.1 - github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250723093238-963c368523c2 - github.com/cloudnative-pg/machinery v0.3.0 + github.com/cloudnative-pg/barman-cloud v0.3.3 + github.com/cloudnative-pg/cnpg-i v0.3.0 + github.com/cloudnative-pg/machinery v0.3.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.11 github.com/go-logr/logr v1.4.3 @@ -92,7 +92,7 @@ require ( github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.65.0 // indirect github.com/prometheus/procfs v0.16.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/pflag v1.0.7 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect @@ -108,7 +108,7 @@ require ( golang.org/x/tools v0.34.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/protobuf v1.36.7 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/go.sum b/go.sum index 73e793042e..8d8b5c2f0b 100644 --- a/go.sum +++ b/go.sum @@ -18,12 +18,12 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/cheynewallace/tabby v1.1.1 h1:JvUR8waht4Y0S3JF17G6Vhyt+FRhnqVCkk8l4YrOU54= github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyrn6/S/1OYVys= -github.com/cloudnative-pg/barman-cloud v0.3.1 h1:kzkY77k2lN/caoyh7ibXDSZjJeSJTNvnVt6Gfa8Iq5M= -github.com/cloudnative-pg/barman-cloud v0.3.1/go.mod h1:4HL3AjY9oEl2Ed0HSkyvTZEQPhwyFOaAnuCz9lfVeYQ= -github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250723093238-963c368523c2 h1:uLooqDE54OE0tBMdwHws1CwD3X4098K9oZyNt3xdQuE= -github.com/cloudnative-pg/cnpg-i v0.2.2-0.20250723093238-963c368523c2/go.mod h1:pJaTIy0d6Yd3CA554AHZD81CJM7/jiDNmk7BFTMb3Fk= -github.com/cloudnative-pg/machinery v0.3.0 h1:t1DzXGeK3RUYXS5KWIdIk30oh4EmwxZ+6sWM4wJDBac= -github.com/cloudnative-pg/machinery v0.3.0/go.mod h1:6NhajP3JlioeecYceVuOBLD2lfsJty8qSZsFpSb/vmA= +github.com/cloudnative-pg/barman-cloud v0.3.3 h1:EEcjeV+IUivDpmyF/H/XGY1pGaKJ5LS5MYeB6wgGcak= +github.com/cloudnative-pg/barman-cloud v0.3.3/go.mod h1:5CM4MncAxAjnqxjDt0I5E/oVd7gsMLL0/o/wQ+vUSgs= +github.com/cloudnative-pg/cnpg-i v0.3.0 h1:5ayNOG5x68lU70IVbHDZQrv5p+bErCJ0mqRmOpW2jjE= +github.com/cloudnative-pg/cnpg-i v0.3.0/go.mod h1:VOIWWXcJ1RyioK+elR2DGOa4cBA6K+6UQgx05aZmH+g= +github.com/cloudnative-pg/machinery v0.3.1 h1:KtPA6EwELTUNisCMLiFYkK83GU9606rkGQhDJGPB8Yw= +github.com/cloudnative-pg/machinery v0.3.1/go.mod h1:jebuqKxZAbrRKDEEpVCIDMKW+FbWtB9Kf/hb2kMUu9o= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -176,8 +176,9 @@ github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1P github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stern/stern v1.32.0 h1:xNw0CizB7/4CkWpI46cAo8tArDnS14eYKLaaDevEnrM= github.com/stern/stern v1.32.0/go.mod h1:Nv6yoHcb2E1HvklagJyd4rjoysJM4WxvcGVQtE651Xw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -273,8 +274,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From dc4c9649b529e88efd6ab684331d68afb5c85917 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Tue, 12 Aug 2025 08:57:21 +1000 Subject: [PATCH 788/836] docs: release notes for 1.27.0 (#8253) Closes #8231 Signed-off-by: Gabriele Bartolini --- docs/src/installation_upgrade.md | 7 +++--- docs/src/preview_version.md | 8 +++---- docs/src/release_notes.md | 2 +- docs/src/release_notes/v1.27.md | 40 +++++++++++++++++++++----------- docs/src/supported_releases.md | 28 +++++++++++----------- 5 files changed, 50 insertions(+), 35 deletions(-) diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index ac4d8e4668..1196e9cf23 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -262,12 +262,14 @@ only the operator itself. 1.28.0, or at least to the latest stable version of your current minor release (e.g., 1.27.x). -### Upgrading to 1.27.0 or 1.26.2 +--> + +### Upgrading to 1.27.0 or 1.26.1 !!! Important We strongly recommend that all CloudNativePG users upgrade to version 1.27.0, or at least to the latest stable version of your current minor release - (e.g., 1.26.2). + (e.g., 1.26.1). Version 1.27 introduces a change in the default behavior of the [liveness probe](instance_manager.md#liveness-probe): it now enforces the @@ -284,7 +286,6 @@ spec: isolationCheck: enabled: false ``` ---> ### Upgrading to 1.26 from a previous minor version diff --git a/docs/src/preview_version.md b/docs/src/preview_version.md index acc028bd05..537fc7651b 100644 --- a/docs/src/preview_version.md +++ b/docs/src/preview_version.md @@ -34,13 +34,13 @@ are not backwards compatible and could be removed entirely. ## Current Preview Version - -The current preview version is **1.27.0-rc1**. + diff --git a/docs/src/release_notes.md b/docs/src/release_notes.md index 2feb0dd7fd..23b707663f 100644 --- a/docs/src/release_notes.md +++ b/docs/src/release_notes.md @@ -4,7 +4,7 @@ History of user-visible changes for CloudNativePG, classified for each minor release. -- [CloudNativePG 1.27 - Release Candidate](release_notes/v1.27.md) +- [CloudNativePG 1.27](release_notes/v1.27.md) - [CloudNativePG 1.26](release_notes/v1.26.md) - [CloudNativePG 1.25](release_notes/v1.25.md) diff --git a/docs/src/release_notes/v1.27.md b/docs/src/release_notes/v1.27.md index 7bb702f421..e4db13a442 100644 --- a/docs/src/release_notes/v1.27.md +++ b/docs/src/release_notes/v1.27.md @@ -6,15 +6,30 @@ For a complete list of changes, please refer to the [commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.27) on the release branch in GitHub. -## Version 1.27.0-rc1 + + +## Version 1.27.0 + +**Release date:** Aug 12, 2025 ### Important changes: -- A change in the default behavior of the [liveness probe](instance_manager.md#liveness-probe), - now enforcing the [shutdown of an isolated primary](instance_manager.md#primary-isolation) - within the `livenessProbeTimeout` (30 seconds), will require a restart of your pods. +- The default behavior of the [liveness probe](../instance_manager.md#liveness-probe) has been updated. + An [isolated primary is now forcibly shut down](../instance_manager.md#primary-isolation) + within the configured `livenessProbeTimeout` (default: 30 seconds). ### Features: @@ -41,14 +56,6 @@ on the release branch in GitHub. 
### Enhancements: - - Introduced an opt-in experimental feature that enables quorum-based failover to improve safety and data durability during failover events. This feature, also called failover quorum, can be activated via the @@ -83,6 +90,13 @@ on the release branch in GitHub. metrics exposure directly from the instance for better observability ([#8033](https://github.com/cloudnative-pg/cloudnative-pg/pull/8033)). +### Fixes + +- Unblocked rollouts when migrating to the `barman-cloud` plugin using the + `switchover` strategy. Former primary Pods now restart correctly after WAL + archiving fails due to missing plugin support. + ([#8236](https://github.com/cloudnative-pg/cloudnative-pg/pull/8236)) + ### Supported versions - Kubernetes 1.33, 1.32, and 1.31 diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md index 625826e72d..49c0940c8b 100644 --- a/docs/src/supported_releases.md +++ b/docs/src/supported_releases.md @@ -83,8 +83,8 @@ Git tags for versions are prefixed with `v`. | Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions | |-----------------|----------------------|--------------|-----------------|-------------------------------|---------------------------|-----------------------------| -| 1.27.x | No (preview) | ~ Aug 2025 | ~ Feb 2026 | 1.31, 1.32, 1.33 | 1.30, 1.29 | 13 - 17 | -| 1.26.x | Yes | May 23, 2025 | ~ Nov 2025 | 1.30, 1.31, 1.32, 1.33 | 1.29 | 13 - 17 | +| 1.27.x | Yes | 12 Aug 2025 | ~ Feb 2026 | 1.31, 1.32, 1.33 | 1.30, 1.29 | 13 - 17 | +| 1.26.x | Yes | May 23, 2025 | 12 Nov 2025 | 1.30, 1.31, 1.32, 1.33 | 1.29 | 13 - 17 | | 1.25.x | Yes | Dec 23, 2024 | 22 Aug 2025 | 1.29, 1.30, 1.31, 1.32 | 1.33 | 13 - 17 | | main | No, development only | | | | | 13 - 17 | @@ -155,29 +155,29 @@ version of PostgreSQL, we might not be able to help you. ## What we mean by support -Our support window is roughly five months for each release branch (latest +Our support window is roughly five/six months for each release branch (latest minor release, plus 3 additional months), given that we produce a new final -release every two months. +release every two/three months. -In the following diagram, `release-1.23` is an example of a release branch. +In the following diagram, `release-1.27` is an example of a release branch. -For example, if the latest release is `v1.23.0`, you can expect a supplementary -3-month support period for the preceding release, `v1.22.x`. +For example, if the latest release is `v1.27.0`, you can expect a supplementary +3-month support period for the preceding release, `v1.26.x`. -Only the last patch release of each branch is supported. +**Only the last patch release of each branch is supported.** ```diagram ------+---------------------------------------------> main (trunk development) \ \ \ \ - \ \ v1.23.0 - \ \ Apr 24, 2024 ^ - \ \----------+---------------> release-1.23 | + \ \ v1.27.0 + \ \ Aug 12, 2025 ^ + \ \----------+---------------> release-1.27 | \ | SUPPORTED \ | RELEASES - \ v1.22.0 | = last minor - \ Dec 21, 2023 | release + - +-------------------+---------------> release-1.22 | 3 months + \ v1.26.0 | = last minor + \ May 23, 2025 | release + + +-------------------+---------------> release-1.26 | 3 months v ``` From a4f449140dcf6ee58385fdab509bb94d0797e4f6 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." 
Date: Tue, 12 Aug 2025 06:13:05 +0200 Subject: [PATCH 789/836] chore: pin `packaging` version in workflow (#8323) The `packaging` Python package must use a pinned version when installed Closes #8322 Signed-off-by: Jonathan Gonzalez V. --- .github/workflows/latest-postgres-version-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index 9bc5ebc6b8..ae689309e3 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -28,7 +28,7 @@ jobs: - name: Install Python dependencies run: | - pip install packaging + pip install packaging==25.0 - name: Generate PostgreSQL JSON files run: | From a2f73dcacc4320feb4298869ab7315d433b1d039 Mon Sep 17 00:00:00 2001 From: Vito Botta Date: Tue, 12 Aug 2025 07:22:31 +0300 Subject: [PATCH 790/836] docs: add Brella to `ADOPTERS.md` (#8317) Signed-off-by: Vito Botta --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index 9ccc898c1d..a7b572bdaf 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -66,3 +66,4 @@ This list is sorted in chronological order, based on the submission date. | [DocumentDB Operator](https://github.com/microsoft/documentdb-kubernetes-operator) | [@xgerman](https://github.com/xgerman) | 2025-05-22 | The DocumentDB Kubernetes Operator is an open-source project to run and manage DocumentDB on Kubernetes. [DocumentDB](https://github.com/microsoft/documentdb) is the engine powering vCore-based [Azure Cosmos DB for MongoDB](https://learn.microsoft.com/en-us/azure/cosmos-db/mongodb/vcore/). The operator uses CloudNativePG behind the scenes. | | [Xata](https://xata.io) | [@tsg](https://github.com/tsg) | 2025-05-29 | Xata is a PostgreSQL platform offering instant database branching, separation of storage/compute, and PII anonymization. It uses CloudNativePG for the compute part. | | [Vera Rubin Observatory](https://www.lsst.org) | [@cbarria](https://github.com/cbarria) | 2025-06-17 | At the heart of our operations, CloudNativePG supports the telescope's systems and plays a key role in making astronomical data openly accessible to the world. | +| [Brella](https://www.brella.io) | [@vitobotta](https://github.com/vitobotta/) | 2025-08-11 | Brella is an event management platform that works in new and smart ways. Postgres is at the core of how our platform is built. With CloudNativePG, we moved from using a managed Postgres service - Cloud SQL on Google Cloud - to running Postgres clusters directly in Kubernetes. This change saves us money and gives us more control. 
At the same time, we didn't lose any functionality.| From 7cd53cd85942de71db2be8bc52114bedbc56a8c0 Mon Sep 17 00:00:00 2001 From: Gabriele Fedi <91485518+GabriFedi97@users.noreply.github.com> Date: Tue, 12 Aug 2025 09:17:29 +0200 Subject: [PATCH 791/836] docs: CNPG-I (#8116) Closes #8058 Signed-off-by: Gabriele Fedi Signed-off-by: Armando Ruocco Signed-off-by: Gabriele Bartolini Co-authored-by: Armando Ruocco Co-authored-by: Gabriele Bartolini --- .wordlist-en-custom.txt | 3 + docs/mkdocs.yml | 1 + docs/src/cnpg_i.md | 206 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 210 insertions(+) create mode 100644 docs/src/cnpg_i.md diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 633f983b42..71e973ad7e 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -694,6 +694,7 @@ cmd cn cnp cnpg +codebase codeready collationVersion columnValue @@ -872,6 +873,7 @@ googleCredentials goroutines gosec govulncheck +gRPC grafana gzip hashicorp @@ -1036,6 +1038,7 @@ monitoringconfiguration mountPath msg mspan +mTLS multinamespace mutatingwebhookconfigurations mutex diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index b76cfb334b..f510a314e2 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -64,6 +64,7 @@ nav: - e2e.md - container_images.md - imagevolume_extensions.md + - cnpg_i.md - operator_capability_levels.md - controller.md - samples.md diff --git a/docs/src/cnpg_i.md b/docs/src/cnpg_i.md new file mode 100644 index 0000000000..d75522a7fc --- /dev/null +++ b/docs/src/cnpg_i.md @@ -0,0 +1,206 @@ +# CNPG-I + + +The **CloudNativePG Interface** ([CNPG-I](https://github.com/cloudnative-pg/cnpg-i)) +is a standard way to extend and customize CloudNativePG without modifying its +core codebase. + +## Why CNPG-I? + +CloudNativePG supports a wide range of use cases, but sometimes its built-in +functionality isn’t enough, or adding certain features directly to the main +project isn’t practical. + +Before CNPG-I, users had two main options: + +- Fork the project to add custom behavior, or +- Extend the upstream codebase by writing custom components on top of it. + +Both approaches created maintenance overhead, slowed upgrades, and delayed delivery of critical features. + +CNPG-I solves these problems by providing a stable, gRPC-based integration +point for extending CloudNativePG at key points in a cluster’s lifecycle —such +as backups, recovery, and sub-resource reconciliation— without disrupting the +core project. + +CNPG-I can extend: + +- The operator, and/or +- The instance manager running inside PostgreSQL pods. + +## Registering a plugin + +CNPG-I is inspired by the Kubernetes +[Container Storage Interface (CSI)](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/). +The operator communicates with registered plugins using **gRPC**, following the +[CNPG-I protocol](https://github.com/cloudnative-pg/cnpg-i/blob/main/docs/protocol.md). + +CloudNativePG discovers plugins **at startup**. You can register them in one of two ways: + +- Sidecar container – run the plugin inside the operator’s Deployment +- Standalone Deployment – run the plugin as a separate workload in the same + namespace + +In both cases, the plugin must be packaged as a container image. + +### Sidecar Container + +When running as a sidecar, the plugin must expose its gRPC server via a **Unix +domain socket**. This socket must be placed in a directory shared with the +operator container, mounted at the path set in `PLUGIN_SOCKET_DIR` (default: +`/plugin`). 
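+
+As a sketch of the plugin side of this contract, a Go plugin could serve its
+gRPC API over the shared socket as shown below. The socket file name and the
+elided service registration are illustrative, not mandated by the protocol;
+the Deployment example that follows shows how the operator and the plugin
+share the socket directory.
+
+```go
+package main
+
+import (
+	"log"
+	"net"
+	"os"
+	"path/filepath"
+
+	"google.golang.org/grpc"
+)
+
+func main() {
+	// PLUGIN_SOCKET_DIR is the directory shared with the operator container;
+	// "/plugins" matches the mountPath used in the Deployment example below.
+	dir := os.Getenv("PLUGIN_SOCKET_DIR")
+	if dir == "" {
+		dir = "/plugins"
+	}
+	socket := filepath.Join(dir, "cnpg-i-plugin-example")
+
+	// Remove any stale socket left behind by a previous run.
+	_ = os.Remove(socket)
+
+	listener, err := net.Listen("unix", socket)
+	if err != nil {
+		log.Fatalf("cannot listen on %q: %v", socket, err)
+	}
+
+	server := grpc.NewServer()
+	// A real plugin registers here the gRPC services generated from the
+	// CNPG-I protocol definitions (identity, lifecycle, and so on).
+
+	log.Printf("CNPG-I plugin listening on %q", socket)
+	if err := server.Serve(listener); err != nil {
+		log.Fatal(err)
+	}
+}
+```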
+ +Example: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - image: cloudnative-pg:latest + [...] + name: manager + volumeMounts: + - mountPath: /plugins + name: cnpg-i-plugins + + - image: cnpg-i-plugin-example:latest + name: cnpg-i-plugin-example + volumeMounts: + - mountPath: /plugins + name: cnpg-i-plugins + volumes: + - name: cnpg-i-plugins + emptyDir: {} +``` + +### Standalone Deployment (recommended) + +Running a plugin as its own Deployment decouples its lifecycle from the +operator’s and allows independent scaling. In this setup, the plugin exposes a +TCP gRPC endpoint behind a Service, with **mTLS** for secure communication. + +!!! Warning + CloudNativePG does **not** discover plugins dynamically. If you deploy a new + plugin, you must **restart the operator** to detect it. + +Example Deployment: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cnpg-i-plugin-example +spec: + template: + [...] + spec: + containers: + - name: cnpg-i-plugin-example + image: cnpg-i-plugin-example:latest + ports: + - containerPort: 9090 + protocol: TCP +``` + +The related Service for the plugin must include: + +- The label `cnpg.io/plugin: ` — required for CloudNativePG to + discover the plugin +- The annotation `cnpg.io/pluginPort: ` — specifies the port where the + plugin’s gRPC server is exposed + +Example Service: + +```yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + cnpg.io/pluginPort: "9090" + labels: + cnpg.io/pluginName: cnpg-i-plugin-example.my-org.io + name: cnpg-i-plugin-example +spec: + ports: + - port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: cnpg-i-plugin-example +``` + +### Configuring TLS Certificates + +When a plugin runs as a `Deployment`, communication with CloudNativePG happens +over the network. To secure it, **mTLS is enforced**, requiring TLS +certificates for both sides. + +Certificates must be stored as [Kubernetes TLS Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) +and referenced in the plugin’s Service annotations +(`cnpg.io/pluginClientSecret` and `cnpg.io/pluginServerSecret`): + +```yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + cnpg.io/pluginClientSecret: cnpg-i-plugin-example-client-tls + cnpg.io/pluginServerSecret: cnpg-i-plugin-example-server-tls + cnpg.io/pluginPort: "9090" + name: barman-cloud + namespace: postgresql-operator-system +spec: + [...] +``` + +!!! Note + You can provide your own certificate bundles, but the recommended method is + to use [Cert-manager](https://cert-manager.io). + +## Using a plugin + +To enable a plugin, configure the `.spec.plugins` section in your `Cluster` +resource. Refer to the CloudNativePG API Reference for the full +[PluginConfiguration](https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PluginConfiguration) +specification. + +Example: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: cluster-with-plugins +spec: + instances: 1 + storage: + size: 1Gi + plugins: + - name: cnpg-i-plugin-example.my-org.io + enabled: true + parameters: + key1: value1 + key2: value2 +``` + +Each plugin may have its own parameters—check the plugin’s documentation for +details. 
The `name` field in `spec.plugins` depends on how the plugin is +deployed: + +- Sidecar container: use the Unix socket file name +- Deployment: use the value from the Service’s `cnpg.io/pluginName` label + +## Community plugins + +The CNPG-I protocol has quickly become a proven and reliable pattern for +extending CloudNativePG while keeping the core project maintainable. +Over time, the community has built and shared plugins that address real-world +needs and serve as examples for developers. + +For a complete and up-to-date list of plugins built with CNPG-I, please refer to the +[CNPG-I GitHub page](https://github.com/cloudnative-pg/cnpg-i?tab=readme-ov-file#projects-built-with-cnpg-i). From 8b442dcc3d2390e891ec4a53fe576fe9936dfa08 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 15:56:12 +0200 Subject: [PATCH 792/836] Version tag to 1.27.0 (#8336) Automated PR. Will trigger the 1.27.0 release when approved. Signed-off-by: Jonathan Gonzalez V. Co-authored-by: Jonathan Gonzalez V. --- docs/src/installation_upgrade.md | 4 +- docs/src/kubectl-plugin.md | 30 +- pkg/versions/versions.go | 6 +- releases/cnpg-1.27.0.yaml | 18219 +++++++++++++++++++++++++++++ 4 files changed, 18239 insertions(+), 20 deletions(-) create mode 100644 releases/cnpg-1.27.0.yaml diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md index 1196e9cf23..4f6919df69 100644 --- a/docs/src/installation_upgrade.md +++ b/docs/src/installation_upgrade.md @@ -8,12 +8,12 @@ The operator can be installed like any other resource in Kubernetes, through a YAML manifest applied via `kubectl`. -You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.27.0-rc1.yaml) +You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.27/releases/cnpg-1.27.0.yaml) for this minor release as follows: ```sh kubectl apply --server-side -f \ - https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/releases/cnpg-1.27.0-rc1.yaml + https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.27/releases/cnpg-1.27.0.yaml ``` You can verify that with: diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index d0526147d3..5526f009af 100644 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -31,11 +31,11 @@ them in your systems. #### Debian packages -For example, let's install the 1.27.0-rc1 release of the plugin, for an Intel based +For example, let's install the 1.27.0 release of the plugin, for an Intel based 64 bit server. First, we download the right `.deb` file. ```sh -wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.27.0-rc1/kubectl-cnpg_1.27.0-rc1_linux_x86_64.deb \ +wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.27.0/kubectl-cnpg_1.27.0_linux_x86_64.deb \ --output-document kube-plugin.deb ``` @@ -46,17 +46,17 @@ $ sudo dpkg -i kube-plugin.deb Selecting previously unselected package cnpg. (Reading database ... 6688 files and directories currently installed.) Preparing to unpack kube-plugin.deb ... -Unpacking cnpg (1.27.0-rc1) ... -Setting up cnpg (1.27.0-rc1) ... +Unpacking cnpg (1.27.0) ... +Setting up cnpg (1.27.0) ... 
``` #### RPM packages -As in the example for `.rpm` packages, let's install the 1.27.0-rc1 release for an +As in the example for `.rpm` packages, let's install the 1.27.0 release for an Intel 64 bit machine. Note the `--output` flag to provide a file name. ```sh -curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.27.0-rc1/kubectl-cnpg_1.27.0-rc1_linux_x86_64.rpm \ +curl -L https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.27.0/kubectl-cnpg_1.27.0_linux_x86_64.rpm \ --output kube-plugin.rpm ``` @@ -70,7 +70,7 @@ Dependencies resolved. Package Architecture Version Repository Size ==================================================================================================== Installing: - cnpg x86_64 1.27.0-rc1 @commandline 20 M + cnpg x86_64 1.27.0 @commandline 20 M Transaction Summary ==================================================================================================== @@ -294,9 +294,9 @@ sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/604DE38 Primary OK BestEffort 1.27.0-rc1 k8s-eu-worker -sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.27.0-rc1 k8s-eu-worker2 -sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.27.0-rc1 k8s-eu-worker +sandbox-1 0/604DE38 Primary OK BestEffort 1.27.0 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.27.0 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.27.0 k8s-eu-worker ``` If you require more detailed status information, use the `--verbose` option (or @@ -350,9 +350,9 @@ sandbox-primary primary 1 1 1 Instances status Name Current LSN Replication role Status QoS Manager Version Node ---- ----------- ---------------- ------ --- --------------- ---- -sandbox-1 0/6053720 Primary OK BestEffort 1.27.0-rc1 k8s-eu-worker -sandbox-2 0/6053720 Standby (async) OK BestEffort 1.27.0-rc1 k8s-eu-worker2 -sandbox-3 0/6053720 Standby (async) OK BestEffort 1.27.0-rc1 k8s-eu-worker +sandbox-1 0/6053720 Primary OK BestEffort 1.27.0 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.27.0 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.27.0 k8s-eu-worker ``` With an additional `-v` (e.g. 
`kubectl cnpg status sandbox -v -v`), you can @@ -575,12 +575,12 @@ Archive: report_operator_.zip ```output ====== Beginning of Previous Log ===== -2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.27.0-rc1","build":{"Version":"1.27.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:56:41.251711811Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.27.0","build":{"Version":"1.27.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:56:41.251851909Z {"level":"info","ts":"2023-03-28T12:56:41Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ====== End of Previous Log ===== -2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.27.0-rc1","build":{"Version":"1.27.0-rc1+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} +2023-03-28T12:57:09.854306024Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting CloudNativePG Operator","version":"1.27.0","build":{"Version":"1.27.0+dev107","Commit":"cc9bab17","Date":"2023-03-28"}} 2023-03-28T12:57:09.854363943Z {"level":"info","ts":"2023-03-28T12:57:09Z","logger":"setup","msg":"Starting pprof HTTP server","addr":"0.0.0.0:6060"} ``` diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go index 435b41d8fd..8247fdcd5d 100644 --- a/pkg/versions/versions.go +++ b/pkg/versions/versions.go @@ -23,13 +23,13 @@ package versions const ( // Version is the version of the operator - Version = "1.27.0-rc1" + Version = "1.27.0" // DefaultImageName is the default image used by the operator to create pods DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:17.5" // DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL - DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0-rc1" + DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0" ) // BuildInfo is a struct containing all the info about the build @@ -39,7 +39,7 @@ type BuildInfo struct { var ( // buildVersion injected during the build - buildVersion = "1.27.0-rc1" + buildVersion = "1.27.0" // buildCommit injected during the build buildCommit = "none" diff --git a/releases/cnpg-1.27.0.yaml b/releases/cnpg-1.27.0.yaml new file mode 100644 index 0000000000..19397ae0bf --- /dev/null +++ b/releases/cnpg-1.27.0.yaml @@ -0,0 +1,18219 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: backups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.method + name: Method + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .status.error + name: Error + type: string + name: v1 + schema: + openAPIV3Schema: + description: A Backup resource is a request for a PostgreSQL backup by the + user. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the backup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + target: + description: |- + The policy to decide which instance should perform this backup. 
If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + type: object + x-kubernetes-validations: + - message: BackupSpec is immutable once set + rule: oldSelf == self + status: + description: |- + Most recently observed status of the backup. This data may not be up to + date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + azureCredentials: + description: The credentials to use to upload data to Azure Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without providing + explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + backupId: + description: The ID of the Barman backup + type: string + backupLabelFile: + description: Backup label file content as returned by Postgres in + case of online (hot) backups + format: byte + type: string + backupName: + description: The Name of the Barman backup + type: string + beginLSN: + description: The starting xlog + type: string + beginWal: + description: The starting WAL + type: string + commandError: + description: The backup command output in case of error + type: string + commandOutput: + description: Unused. Retained for compatibility with old versions. + type: string + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data. This may not be populated in case of errors. + type: string + encryption: + description: Encryption method required to S3 API + type: string + endLSN: + description: The ending xlog + type: string + endWal: + description: The ending WAL + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
+ type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + error: + description: The detected error + type: string + googleCredentials: + description: The credentials to use to upload data to Google Cloud + Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud Storage JSON + file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + instanceID: + description: Information to identify the instance where the backup + has been taken from + properties: + ContainerID: + description: The container ID + type: string + podName: + description: The pod name + type: string + type: object + method: + description: The backup method being used + type: string + online: + description: Whether the backup was online/hot (`true`) or offline/cold + (`false`) + type: boolean + phase: + description: The last backup status + type: string + pluginMetadata: + additionalProperties: + type: string + description: A map containing the plugin metadata + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without providing + explicitly the keys. + type: boolean + region: + description: The reference to the secret containing the region + name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + snapshotBackupStatus: + description: Status of the volumeSnapshot backup + properties: + elements: + description: The elements list, populated with the gathered volume + snapshots + items: + description: BackupSnapshotElementStatus is a volume snapshot + that is part of a volume snapshot method backup + properties: + name: + description: Name is the snapshot resource name + type: string + tablespaceName: + description: |- + TablespaceName is the name of the snapshotted tablespace. 
Only set
+                            when type is PG_TABLESPACE
+                          type: string
+                        type:
+                          description: Type is the role of the snapshot in the cluster,
+                            such as PG_DATA, PG_WAL and PG_TABLESPACE
+                          type: string
+                      required:
+                      - name
+                      - type
+                      type: object
+                    type: array
+                type: object
+              startedAt:
+                description: When the backup was started
+                format: date-time
+                type: string
+              stoppedAt:
+                description: When the backup was terminated
+                format: date-time
+                type: string
+              tablespaceMapFile:
+                description: Tablespace map file content as returned by Postgres in
+                  case of online (hot) backups
+                format: byte
+                type: string
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+  name: clusterimagecatalogs.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: ClusterImageCatalog
+    listKind: ClusterImageCatalogList
+    plural: clusterimagecatalogs
+    singular: clusterimagecatalog
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: ClusterImageCatalog is the Schema for the clusterimagecatalogs
+          API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: |-
+              Specification of the desired behavior of the ClusterImageCatalog.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              images:
+                description: List of CatalogImages available in the catalog
+                items:
+                  description: CatalogImage defines the image and major version
+                  properties:
+                    image:
+                      description: The image reference
+                      type: string
+                    major:
+                      description: The PostgreSQL major version of the image. Must
+                        be unique within the catalog.
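+              # Editor's note (illustrative sketch): a hypothetical ClusterImageCatalog
+              # satisfying this schema -- each entry pairs an image reference with its
+              # PostgreSQL major version, and majors must be unique (see the CEL rule
+              # below). The image tags are placeholders.
+              #
+              #   apiVersion: postgresql.cnpg.io/v1
+              #   kind: ClusterImageCatalog
+              #   metadata:
+              #     name: postgresql-catalog
+              #   spec:
+              #     images:
+              #       - major: 16
+              #         image: ghcr.io/cloudnative-pg/postgresql:16.4
+              #       - major: 17
+              #         image: ghcr.io/cloudnative-pg/postgresql:17.0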
+ minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: clusters.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Number of instances + jsonPath: .status.instances + name: Instances + type: integer + - description: Number of ready instances + jsonPath: .status.readyInstances + name: Ready + type: integer + - description: Cluster current status + jsonPath: .status.phase + name: Status + type: string + - description: Primary pod + jsonPath: .status.currentPrimary + name: Primary + type: string + name: v1 + schema: + openAPIV3Schema: + description: Cluster is the Schema for the PostgreSQL API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the cluster. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + affinity: + description: Affinity/Anti-affinity rules for Pods + properties: + additionalPodAffinity: + description: AdditionalPodAffinity allows to specify pod affinity + terms to be passed to all the cluster's pods. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. 
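+                          # Editor's note (illustrative sketch): one weighted entry under
+                          # additionalPodAffinity.preferredDuringSchedulingIgnoredDuringExecution
+                          # as it might appear in a Cluster spec; the app label is hypothetical.
+                          #
+                          #   affinity:
+                          #     additionalPodAffinity:
+                          #       preferredDuringSchedulingIgnoredDuringExecution:
+                          #         - weight: 100
+                          #           podAffinityTerm:
+                          #             topologyKey: kubernetes.io/hostname
+                          #             labelSelector:
+                          #               matchLabels:
+                          #                 app: my-app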
+ properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+                                      items:
+                                        description: |-
+                                          A label selector requirement is a selector that contains values, a key, and an operator that
+                                          relates the key and values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that
+                                              the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              operator represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists and DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              values is an array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: |-
+                                        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                        operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                namespaces:
+                                  description: |-
+                                    namespaces specifies a static list of namespace names that the term applies to.
+                                    The term is applied to the union of the namespaces listed in this field
+                                    and the ones selected by namespaceSelector.
+                                    null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                topologyKey:
+                                  description: |-
+                                    This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+                                    the labelSelector in the specified namespaces, where co-located is defined as running on a node
+                                    whose value of the label with key topologyKey matches that of any node on which any of the
+                                    selected pods is running.
+                                    Empty topologyKey is not allowed.
+                                  type: string
+                              required:
+                              - topologyKey
+                              type: object
+                            weight:
+                              description: |-
+                                weight associated with matching the corresponding podAffinityTerm,
+                                in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - podAffinityTerm
+                          - weight
+                          type: object
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: |-
+                          If the affinity requirements specified by this field are not met at
+                          scheduling time, the pod will not be scheduled onto the node.
+                          If the affinity requirements specified by this field cease to be met
+                          at some point during pod execution (e.g. due to a pod label update), the
+                          system may or may not try to eventually evict the pod from its node.
+                          When there are multiple elements, the lists of nodes corresponding to each
+                          podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                        items:
+                          description: |-
+                            Defines a set of pods (namely those matching the labelSelector
+                            relative to the given namespace(s)) that this pod should be
+                            co-located (affinity) or not co-located (anti-affinity) with,
+                            where co-located is defined as running on a node whose value of
+                            the label with key <topologyKey> matches that of any node on which
+                            a pod of the set of pods is running
+                          properties:
+                            labelSelector:
+                              description: |-
+                                A label query over a set of resources, in this case pods.
+                                If it's null, this PodAffinityTerm matches with no Pods.
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
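+                            # Editor's note (illustrative sketch): a matchExpressions-based
+                            # selector for a required term, per the schema above; key and
+                            # values are hypothetical.
+                            #
+                            #   labelSelector:
+                            #     matchExpressions:
+                            #       - key: environment
+                            #         operator: In
+                            #         values: ["production", "staging"]
+                            #   topologyKey: topology.kubernetes.io/zone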
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + additionalPodAntiAffinity: + description: |- + AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated + by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              operator represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists and DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              values is an array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: |-
+                                        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                        operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                namespaces:
+                                  description: |-
+                                    namespaces specifies a static list of namespace names that the term applies to.
+                                    The term is applied to the union of the namespaces listed in this field
+                                    and the ones selected by namespaceSelector.
+                                    null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                topologyKey:
+                                  description: |-
+                                    This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+                                    the labelSelector in the specified namespaces, where co-located is defined as running on a node
+                                    whose value of the label with key topologyKey matches that of any node on which any of the
+                                    selected pods is running.
+                                    Empty topologyKey is not allowed.
+                                  type: string
+                              required:
+                              - topologyKey
+                              type: object
+                            weight:
+                              description: |-
+                                weight associated with matching the corresponding podAffinityTerm,
+                                in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - podAffinityTerm
+                          - weight
+                          type: object
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: |-
+                          If the anti-affinity requirements specified by this field are not met at
+                          scheduling time, the pod will not be scheduled onto the node.
+                          If the anti-affinity requirements specified by this field cease to be met
+                          at some point during pod execution (e.g. due to a pod label update), the
+                          system may or may not try to eventually evict the pod from its node.
+                          When there are multiple elements, the lists of nodes corresponding to each
+                          podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                        items:
+                          description: |-
+                            Defines a set of pods (namely those matching the labelSelector
+                            relative to the given namespace(s)) that this pod should be
+                            co-located (affinity) or not co-located (anti-affinity) with,
+                            where co-located is defined as running on a node whose value of
+                            the label with key <topologyKey> matches that of any node on which
+                            a pod of the set of pods is running
+                          properties:
+                            labelSelector:
+                              description: |-
+                                A label query over a set of resources, in this case pods.
+                                If it's null, this PodAffinityTerm matches with no Pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: |-
+                                      A label selector requirement is a selector that contains values, a key, and an operator that
+                                      relates the key and values.
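+                        # Editor's note (illustrative sketch): pairing enablePodAntiAffinity
+                        # with additionalPodAntiAffinity. Per the description above, with
+                        # enablePodAntiAffinity: false a term like the one below replaces the
+                        # operator-generated rule; the workload label is hypothetical.
+                        #
+                        #   affinity:
+                        #     enablePodAntiAffinity: false
+                        #     additionalPodAntiAffinity:
+                        #       requiredDuringSchedulingIgnoredDuringExecution:
+                        #         - topologyKey: kubernetes.io/hostname
+                        #           labelSelector:
+                        #             matchLabels:
+                        #               workload: postgresql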
+ properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + enablePodAntiAffinity: + description: |- + Activates anti-affinity for the pods. The operator will define pods + anti-affinity unless this field is explicitly set to false + type: boolean + nodeAffinity: + description: |- + NodeAffinity describes node affinity scheduling rules for the pod. + More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. 
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                      type: string
+                                    values:
+                                      description: |-
+                                        An array of string values. If the operator is In or NotIn,
+                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                        the values array must be empty. If the operator is Gt or Lt, the values
+                                        array must have a single element, which will be interpreted as an integer.
+                                        This array is replaced during a strategic merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  required:
+                                  - key
+                                  - operator
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              matchFields:
+                                description: A list of node selector requirements
+                                  by node's fields.
+                                items:
+                                  description: |-
+                                    A node selector requirement is a selector that contains values, a key, and an operator
+                                    that relates the key and values.
+                                  properties:
+                                    key:
+                                      description: The label key that the selector
+                                        applies to.
+                                      type: string
+                                    operator:
+                                      description: |-
+                                        Represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                      type: string
+                                    values:
+                                      description: |-
+                                        An array of string values. If the operator is In or NotIn,
+                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                        the values array must be empty. If the operator is Gt or Lt, the values
+                                        array must have a single element, which will be interpreted as an integer.
+                                        This array is replaced during a strategic merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  required:
+                                  - key
+                                  - operator
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          weight:
+                            description: Weight associated with matching the corresponding
+                              nodeSelectorTerm, in the range 1-100.
+                            format: int32
+                            type: integer
+                        required:
+                        - preference
+                        - weight
+                        type: object
+                      type: array
+                      x-kubernetes-list-type: atomic
+                    requiredDuringSchedulingIgnoredDuringExecution:
+                      description: |-
+                        If the affinity requirements specified by this field are not met at
+                        scheduling time, the pod will not be scheduled onto the node.
+                        If the affinity requirements specified by this field cease to be met
+                        at some point during pod execution (e.g. due to an update), the system
+                        may or may not try to eventually evict the pod from its node.
+                      properties:
+                        nodeSelectorTerms:
+                          description: Required. A list of node selector terms.
+                            The terms are ORed.
+                          items:
+                            description: |-
+                              A null or empty node selector term matches no objects. The requirements of
+                              them are ANDed.
+                              The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                            properties:
+                              matchExpressions:
+                                description: A list of node selector requirements
+                                  by node's labels.
+                                items:
+                                  description: |-
+                                    A node selector requirement is a selector that contains values, a key, and an operator
+                                    that relates the key and values.
+                                  properties:
+                                    key:
+                                      description: The label key that the selector
+                                        applies to.
+                                      type: string
+                                    operator:
+                                      description: |-
+                                        Represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                      type: string
+                                    values:
+                                      description: |-
+                                        An array of string values. If the operator is In or NotIn,
+                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                        the values array must be empty. If the operator is Gt or Lt, the values
+                                        array must have a single element, which will be interpreted as an integer.
+                                        This array is replaced during a strategic merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  required:
+                                  - key
+                                  - operator
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              matchFields:
+                                description: A list of node selector requirements
+                                  by node's fields.
+                                items:
+                                  description: |-
+                                    A node selector requirement is a selector that contains values, a key, and an operator
+                                    that relates the key and values.
+                                  properties:
+                                    key:
+                                      description: The label key that the selector
+                                        applies to.
+                                      type: string
+                                    operator:
+                                      description: |-
+                                        Represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                      type: string
+                                    values:
+                                      description: |-
+                                        An array of string values. If the operator is In or NotIn,
+                                        the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                        the values array must be empty. If the operator is Gt or Lt, the values
+                                        array must have a single element, which will be interpreted as an integer.
+                                        This array is replaced during a strategic merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                      x-kubernetes-list-type: atomic
+                                  required:
+                                  - key
+                                  - operator
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          type: array
+                          x-kubernetes-list-type: atomic
+                      required:
+                      - nodeSelectorTerms
+                      type: object
+                      x-kubernetes-map-type: atomic
+                  type: object
+                  nodeSelector:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      NodeSelector is map of key-value pairs used to define the nodes on which
+                      the pods can run.
+                      More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                    type: object
+                  podAntiAffinityType:
+                    description: |-
+                      PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be
+                      considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+                      "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are
+                      added if all the existing nodes don't match the required pod anti-affinity rule.
+                      More info:
+                      https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+                    type: string
+                  tolerations:
+                    description: |-
+                      Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+                      on tainted nodes.
+                      More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+                    items:
+                      description: |-
+                        The pod this Toleration is attached to tolerates any taint that matches
+                        the triple <key,value,effect> using the matching operator <operator>.
+                      properties:
+                        effect:
+                          description: |-
+                            Effect indicates the taint effect to match. Empty means match all taint effects.
+                            When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                          type: string
+                        key:
+                          description: |-
+                            Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                            If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                          type: string
+                        operator:
+                          description: |-
+                            Operator represents a key's relationship to the value.
+                            Valid operators are Exists and Equal. Defaults to Equal.
+                            Exists is equivalent to wildcard for value, so that a pod can
+                            tolerate all taints of a particular category.
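+                    # Editor's note (illustrative sketch): the scheduling knobs from this
+                    # section combined in one Cluster spec; node label, taint key, and
+                    # values are hypothetical.
+                    #
+                    #   affinity:
+                    #     podAntiAffinityType: required
+                    #     nodeSelector:
+                    #       node-role.kubernetes.io/postgres: ""
+                    #     tolerations:
+                    #       - key: dedicated
+                    #         operator: Equal
+                    #         value: postgres
+                    #         effect: NoSchedule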
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologyKey: + description: |- + TopologyKey to use for anti-affinity configuration. See k8s documentation + for more info on that + type: string + type: object + backup: + description: The configuration to be used for backups + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. 
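+                          # Editor's note (illustrative sketch): a data section enabling
+                          # compression and parallel upload, using the fields documented here.
+                          #
+                          #   backup:
+                          #     barmanObjectStore:
+                          #       data:
+                          #         compression: gzip
+                          #         jobs: 4
+                          #         immediateCheckpoint: true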
+                        enum:
+                        - bzip2
+                        - gzip
+                        - snappy
+                        type: string
+                      encryption:
+                        description: |-
+                          Whether to force the encryption of files (if the bucket is
+                          not already configured for that).
+                          Allowed options are empty string (use the bucket policy, default),
+                          `AES256` and `aws:kms`
+                        enum:
+                        - AES256
+                        - aws:kms
+                        type: string
+                      immediateCheckpoint:
+                        description: |-
+                          Control whether the I/O workload for the backup initial checkpoint will
+                          be limited, according to the `checkpoint_completion_target` setting on
+                          the PostgreSQL server. If set to true, an immediate checkpoint will be
+                          used, meaning PostgreSQL will complete the checkpoint as soon as
+                          possible. `false` by default.
+                        type: boolean
+                      jobs:
+                        description: |-
+                          The number of parallel jobs to be used to upload the backup, defaults
+                          to 2
+                        format: int32
+                        minimum: 1
+                        type: integer
+                    type: object
+                  destinationPath:
+                    description: |-
+                      The path where to store the backup (i.e. s3://bucket/path/to/folder)
+                      this path, with different destination folders, will be used for WALs
+                      and for data
+                    minLength: 1
+                    type: string
+                  endpointCA:
+                    description: |-
+                      EndpointCA store the CA bundle of the barman endpoint.
+                      Useful when using self-signed certificates to avoid
+                      errors with certificate issuer and barman-cloud-wal-archive
+                    properties:
+                      key:
+                        description: The key to select
+                        type: string
+                      name:
+                        description: Name of the referent.
+                        type: string
+                    required:
+                    - key
+                    - name
+                    type: object
+                  endpointURL:
+                    description: |-
+                      Endpoint to be used to upload data to the cloud,
+                      overriding the automatic endpoint discovery
+                    type: string
+                  googleCredentials:
+                    description: The credentials to use to upload data to Google
+                      Cloud Storage
+                    properties:
+                      applicationCredentials:
+                        description: The secret containing the Google Cloud Storage
+                          JSON file with the credentials
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                      gkeEnvironment:
+                        description: |-
+                          If set to true, will presume that it's running inside a GKE environment,
+                          defaults to false.
+                        type: boolean
+                    type: object
+                  historyTags:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      HistoryTags is a list of key value pairs that will be passed to the
+                      Barman --history-tags option.
+                    type: object
+                  s3Credentials:
+                    description: The credentials to use to upload data to S3
+                    properties:
+                      accessKeyId:
+                        description: The reference to the access key id
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                      inheritFromIAMRole:
+                        description: Use the role based authentication without
+                          providing explicitly the keys.
+                        type: boolean
+                      region:
+                        description: The reference to the secret containing the
+                          region name
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                      secretAccessKey:
+                        description: The reference to the secret access key
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                      sessionToken:
+                        description: The reference to the session key
+                        properties:
+                          key:
+                            description: The key to select
+                            type: string
+                          name:
+                            description: Name of the referent.
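+                              # Editor's note (illustrative sketch): S3 credentials wired to a
+                              # hypothetical Secret named aws-creds, plus the required
+                              # destinationPath; bucket and key names are placeholders.
+                              #
+                              #   backup:
+                              #     barmanObjectStore:
+                              #       destinationPath: s3://my-bucket/backups
+                              #       s3Credentials:
+                              #         accessKeyId:
+                              #           name: aws-creds
+                              #           key: ACCESS_KEY_ID
+                              #         secretAccessKey:
+                              #           name: aws-creds
+                              #           key: ACCESS_SECRET_KEY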
+                            type: string
+                        required:
+                        - key
+                        - name
+                        type: object
+                    type: object
+                  serverName:
+                    description: |-
+                      The server name on S3, the cluster name is used if this
+                      parameter is omitted
+                    type: string
+                  tags:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      Tags is a list of key value pairs that will be passed to the
+                      Barman --tags option.
+                    type: object
+                  wal:
+                    description: |-
+                      The configuration for the backup of the WAL stream.
+                      When not defined, WAL files will be stored uncompressed and may be
+                      unencrypted in the object store, according to the bucket default policy.
+                    properties:
+                      archiveAdditionalCommandArgs:
+                        description: |-
+                          Additional arguments that can be appended to the 'barman-cloud-wal-archive'
+                          command-line invocation. These arguments provide flexibility to customize
+                          the WAL archive process further, according to specific requirements or configurations.
+
+                          Example:
+                          In a scenario where specialized backup options are required, such as setting
+                          a specific timeout or defining custom behavior, users can use this field
+                          to specify additional command arguments.
+
+                          Note:
+                          It's essential to ensure that the provided arguments are valid and supported
+                          by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended
+                          behavior during execution.
+                        items:
+                          type: string
+                        type: array
+                      compression:
+                        description: |-
+                          Compress a WAL file before sending it to the object store. Available
+                          options are empty string (no compression, default), `gzip`, `bzip2`,
+                          `lz4`, `snappy`, `xz`, and `zstd`.
+                        enum:
+                        - bzip2
+                        - gzip
+                        - lz4
+                        - snappy
+                        - xz
+                        - zstd
+                        type: string
+                      encryption:
+                        description: |-
+                          Whether to force the encryption of files (if the bucket is
+                          not already configured for that).
+                          Allowed options are empty string (use the bucket policy, default),
+                          `AES256` and `aws:kms`
+                        enum:
+                        - AES256
+                        - aws:kms
+                        type: string
+                      maxParallel:
+                        description: |-
+                          Number of WAL files to be either archived in parallel (when the
+                          PostgreSQL instance is archiving to a backup object store) or
+                          restored in parallel (when a PostgreSQL standby is fetching WAL
+                          files from a recovery object store). If not specified, WAL files
+                          will be processed one at a time. It accepts a positive integer as a
+                          value - with 1 being the minimum accepted value.
+                        minimum: 1
+                        type: integer
+                      restoreAdditionalCommandArgs:
+                        description: |-
+                          Additional arguments that can be appended to the 'barman-cloud-wal-restore'
+                          command-line invocation. These arguments provide flexibility to customize
+                          the WAL restore process further, according to specific requirements or configurations.
+
+                          Example:
+                          In a scenario where specialized backup options are required, such as setting
+                          a specific timeout or defining custom behavior, users can use this field
+                          to specify additional command arguments.
+
+                          Note:
+                          It's essential to ensure that the provided arguments are valid and supported
+                          by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended
+                          behavior during execution.
+                        items:
+                          type: string
+                        type: array
+                    type: object
+                required:
+                - destinationPath
+                type: object
+              retentionPolicy:
+                description: |-
+                  RetentionPolicy is the retention policy to be used for backups
+                  and WALs (i.e. '60d'). The retention policy is expressed in the form
+                  of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+                  days, weeks, months.
+                  It's currently only applicable when using the BarmanObjectStore method.
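+                # Editor's note (illustrative sketch): a 30-day retention policy matching
+                # the pattern below, plus WAL compression and parallelism from the `wal`
+                # section above.
+                #
+                #   backup:
+                #     retentionPolicy: "30d"
+                #     barmanObjectStore:
+                #       destinationPath: s3://my-bucket/backups
+                #       wal:
+                #         compression: zstd
+                #         maxParallel: 8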
+ pattern: ^[1-9][0-9]*[dwm]$ + type: string + target: + default: prefer-standby + description: |- + The policy to decide which instance should perform backups. Available + options are empty string, which will default to `prefer-standby` policy, + `primary` to have backups run always on primary instances, `prefer-standby` + to have backups run preferably on the most updated standby, if available. + enum: + - primary + - prefer-standby + type: string + volumeSnapshot: + description: VolumeSnapshot provides the configuration for the + execution of volume snapshot backups. + properties: + annotations: + additionalProperties: + type: string + description: Annotations key-value pairs that will be added + to .metadata.annotations snapshot resources. + type: object + className: + description: |- + ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim. + It is the default class for the other types if no specific class is present + type: string + labels: + additionalProperties: + type: string + description: Labels are key-value pairs that will be added + to .metadata.labels snapshot resources. + type: object + online: + default: true + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + type: boolean + onlineConfiguration: + default: + immediateCheckpoint: false + waitForArchive: true + description: Configuration parameters to control the online/hot + backup with volume snapshots + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + snapshotOwnerReference: + default: none + description: SnapshotOwnerReference indicates the type of + owner reference the snapshot should have + enum: + - none + - cluster + - backup + type: string + tablespaceClassName: + additionalProperties: + type: string + description: |- + TablespaceClassName specifies the Snapshot Class to be used for the tablespaces. + defaults to the PGDATA Snapshot Class, if set + type: object + walClassName: + description: WalClassName specifies the Snapshot Class to + be used for the PG_WAL PersistentVolumeClaim. + type: string + type: object + type: object + bootstrap: + description: Instructions to bootstrap this cluster + properties: + initdb: + description: Bootstrap the cluster via initdb + properties: + builtinLocale: + description: |- + Specifies the locale name when the builtin provider is used. 
This option requires `localeProvider` to be set to `builtin`.
+                      Available from PostgreSQL 17.
+                    type: string
+                  dataChecksums:
+                    description: |-
+                      Whether the `-k` option should be passed to initdb,
+                      enabling checksums on data pages (default: `false`)
+                    type: boolean
+                  database:
+                    description: 'Name of the database used by the application.
+                      Default: `app`.'
+                    type: string
+                  encoding:
+                    description: The value to be passed as option `--encoding`
+                      for initdb (default:`UTF8`)
+                    type: string
+                  icuLocale:
+                    description: |-
+                      Specifies the ICU locale when the ICU provider is used.
+                      This option requires `localeProvider` to be set to `icu`.
+                      Available from PostgreSQL 15.
+                    type: string
+                  icuRules:
+                    description: |-
+                      Specifies additional collation rules to customize the behavior of the default collation.
+                      This option requires `localeProvider` to be set to `icu`.
+                      Available from PostgreSQL 16.
+                    type: string
+                  import:
+                    description: |-
+                      Bootstraps the new cluster by importing data from an existing PostgreSQL
+                      instance using logical backup (`pg_dump` and `pg_restore`)
+                    properties:
+                      databases:
+                        description: The databases to import
+                        items:
+                          type: string
+                        type: array
+                      pgDumpExtraOptions:
+                        description: |-
+                          List of custom options to pass to the `pg_dump` command. IMPORTANT:
+                          Use these options with caution and at your own risk, as the operator
+                          does not validate their content. Be aware that certain options may
+                          conflict with the operator's intended functionality or design.
+                        items:
+                          type: string
+                        type: array
+                      pgRestoreExtraOptions:
+                        description: |-
+                          List of custom options to pass to the `pg_restore` command. IMPORTANT:
+                          Use these options with caution and at your own risk, as the operator
+                          does not validate their content. Be aware that certain options may
+                          conflict with the operator's intended functionality or design.
+                        items:
+                          type: string
+                        type: array
+                      postImportApplicationSQL:
+                        description: |-
+                          List of SQL queries to be executed as a superuser in the application
+                          database right after it is imported - to be used with extreme care
+                          (by default empty). Only available in microservice type.
+                        items:
+                          type: string
+                        type: array
+                      roles:
+                        description: The roles to import
+                        items:
+                          type: string
+                        type: array
+                      schemaOnly:
+                        description: |-
+                          When set to true, only the `pre-data` and `post-data` sections of
+                          `pg_restore` are invoked, avoiding data import. Default: `false`.
+                        type: boolean
+                      source:
+                        description: The source of the import
+                        properties:
+                          externalCluster:
+                            description: The name of the externalCluster used
+                              for import
+                            type: string
+                        required:
+                        - externalCluster
+                        type: object
+                      type:
+                        description: The import type. Can be `microservice` or
+                          `monolith`.
+                        enum:
+                        - microservice
+                        - monolith
+                        type: string
+                    required:
+                    - databases
+                    - source
+                    - type
+                    type: object
+                  locale:
+                    description: Sets the default collation order and character
+                      classification in the new database.
+                    type: string
+                  localeCType:
+                    description: The value to be passed as option `--lc-ctype`
+                      for initdb (default:`C`)
+                    type: string
+                  localeCollate:
+                    description: The value to be passed as option `--lc-collate`
+                      for initdb (default:`C`)
+                    type: string
+                  localeProvider:
+                    description: |-
+                      This option sets the locale provider for databases created in the new cluster.
+                      Available from PostgreSQL 16.
+                    type: string
+                  options:
+                    description: |-
+                      The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations, + please use the explicit provided parameters instead. + If defined, explicit values will be ignored. + items: + type: string + type: array + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + postInitApplicationSQL: + description: |- + List of SQL queries to be executed as a superuser in the application + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitApplicationSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the application database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + postInitSQL: + description: |- + List of SQL queries to be executed as a superuser in the `postgres` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `postgres` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
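+                          # A minimal, hypothetical example (not part of the generated
+                          # schema; resource names invented) of the post-init SQL
+                          # reference fields described above. All secretRefs are
+                          # processed before all configMapRefs, each group in array order:
+                          #
+                          #   bootstrap:
+                          #     initdb:
+                          #       postInitApplicationSQLRefs:
+                          #         secretRefs:
+                          #           - name: app-schema-secret    # processed first
+                          #             key: schema.sql
+                          #         configMapRefs:
+                          #           - name: app-seed-configmap   # processed after the secretRefs
+                          #             key: seed.sql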
+ type: string + required: + - key + - name + type: object + type: array + type: object + postInitTemplateSQL: + description: |- + List of SQL queries to be executed as a superuser in the `template1` + database right after the cluster has been created - to be used with extreme care + (by default empty) + items: + type: string + type: array + postInitTemplateSQLRefs: + description: |- + List of references to ConfigMaps or Secrets containing SQL files + to be executed as a superuser in the `template1` database right after + the cluster has been created. The references are processed in a specific order: + first, all Secrets are processed, followed by all ConfigMaps. + Within each group, the processing order follows the sequence specified + in their respective arrays. + (by default empty) + properties: + configMapRefs: + description: ConfigMapRefs holds a list of references + to ConfigMaps + items: + description: |- + ConfigMapKeySelector contains enough information to let you locate + the key of a ConfigMap + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + secretRefs: + description: SecretRefs holds a list of references to + Secrets + items: + description: |- + SecretKeySelector contains enough information to let you locate + the key of a Secret + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: array + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + walSegmentSize: + description: |- + The value in megabytes (1 to 1024) to be passed to the `--wal-segsize` + option for initdb (default: empty, resulting in PostgreSQL default: 16MB) + maximum: 1024 + minimum: 1 + type: integer + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider + is set to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is + set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set + to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + pg_basebackup: + description: |- + Bootstrap the cluster taking a physical backup of another compatible + PostgreSQL instance + properties: + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. 
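+                      # A minimal, hypothetical sketch (not part of the generated
+                      # schema; identifiers invented) of bootstrapping via physical
+                      # streaming backup; `source` must name an entry in
+                      # `spec.externalClusters`:
+                      #
+                      #   bootstrap:
+                      #     pg_basebackup:
+                      #       source: origin-cluster
+                      #       database: app
+                      #       owner: app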
+ type: string + required: + - name + type: object + source: + description: The name of the server of which we need to take + a physical backup + minLength: 1 + type: string + required: + - source + type: object + recovery: + description: Bootstrap the cluster from a backup + properties: + backup: + description: |- + The backup object containing the physical base backup from which to + initiate the recovery procedure. + Mutually exclusive with `source` and `volumeSnapshots`. + properties: + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive. + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + name: + description: Name of the referent. + type: string + required: + - name + type: object + database: + description: 'Name of the database used by the application. + Default: `app`.' + type: string + owner: + description: |- + Name of the owner of the database in the instance to be used + by applications. Defaults to the value of the `database` key. + type: string + recoveryTarget: + description: |- + By default, the recovery process applies all the available + WAL files in the archive (full recovery). However, you can also + end the recovery as soon as a consistent state is reached or + recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object, + as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...). + More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET + properties: + backupID: + description: |- + The ID of the backup from which to start the recovery process. + If empty (default) the operator will automatically detect the backup + based on targetTime or targetLSN if specified. Otherwise use the + latest available backup in chronological order. + type: string + exclusive: + description: |- + Set the target to be exclusive. If omitted, defaults to false, so that + in Postgres, `recovery_target_inclusive` will be true + type: boolean + targetImmediate: + description: End recovery as soon as a consistent state + is reached + type: boolean + targetLSN: + description: The target LSN (Log Sequence Number) + type: string + targetName: + description: |- + The target name (to be previously created + with `pg_create_restore_point`) + type: string + targetTLI: + description: The target timeline ("latest" or a positive + integer) + type: string + targetTime: + description: The target time as a timestamp in the RFC3339 + standard + type: string + targetXID: + description: The target transaction ID + type: string + type: object + secret: + description: |- + Name of the secret containing the initial credentials for the + owner of the user database. If empty a new secret will be + created from scratch + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + source: + description: |- + The external cluster whose backup we will restore. This is also + used as the name of the folder under which the backup is stored, + so it must be set to the name of the source cluster + Mutually exclusive with `backup`. + type: string + volumeSnapshots: + description: |- + The static PVC data source(s) from which to initiate the + recovery procedure. 
Currently supporting `VolumeSnapshot` + and `PersistentVolumeClaim` resources that map an existing + PVC group, compatible with CloudNativePG, and taken with + a cold backup copy on a fenced Postgres instance (limitation + which will be removed in the future when online backup + will be implemented). + Mutually exclusive with `backup`. + properties: + storage: + description: Configuration of the storage of the instances + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + tablespaceStorage: + additionalProperties: + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + description: Configuration of the storage for PostgreSQL + tablespaces + type: object + walStorage: + description: Configuration of the storage for PostgreSQL + WAL (Write-Ahead Log) + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - storage + type: object + type: object + type: object + certificates: + description: The configuration for the CA and related certificates + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+                  Contains:
+
+                  - `ca.crt`: CA that should be used to validate the client certificates,
+                  used as `ssl_ca_file` of all the instances.
+                  - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+                  this can be omitted.
+                type: string
+              replicationTLSSecret:
+                description: |-
+                  The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+                  the `streaming_replica` user.
+                  If not defined, ClientCASecret must also provide `ca.key`, and a new secret will be
+                  created using the provided CA.
+                type: string
+              serverAltDNSNames:
+                description: The list of the server alternative DNS names to be
+                  added to the generated server TLS certificates, when required.
+                items:
+                  type: string
+                type: array
+              serverCASecret:
+                description: |-
+                  The secret containing the Server CA certificate. If not defined, a new secret will be created
+                  with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+                  Contains:
+
+                  - `ca.crt`: CA that should be used to validate the server certificate,
+                  used as `sslrootcert` in client connection strings.
+                  - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+                  this can be omitted.
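+              # A hypothetical wiring of user-provided certificates through the
+              # secrets described above (not part of the generated schema; secret
+              # names invented):
+              #
+              #   certificates:
+              #     serverCASecret: my-server-ca        # ca.crt, plus ca.key if no serverTLSSecret
+              #     serverTLSSecret: my-server-tls      # secret of type kubernetes.io/tls
+              #     clientCASecret: my-client-ca        # ca.crt, plus ca.key if no replicationTLSSecret
+              #     replicationTLSSecret: my-repl-tls   # client cert for streaming_replica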
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + description: + description: Description of this PostgreSQL cluster + type: string + enablePDB: + default: true + description: |- + Manage the `PodDisruptionBudget` resources within the cluster. When + configured as `true` (default setting), the pod disruption budgets + will safeguard the primary node from being terminated. Conversely, + setting it to `false` will result in the absence of any + `PodDisruptionBudget` resource, permitting the shutdown of all nodes + hosting the PostgreSQL cluster. This latter configuration is + advisable for any PostgreSQL cluster employed for + development/staging purposes. + type: boolean + enableSuperuserAccess: + default: false + description: |- + When this option is enabled, the operator will use the `SuperuserSecret` + to update the `postgres` user password (if the secret is + not present, the operator will automatically create one). When this + option is disabled, the operator will ignore the `SuperuserSecret` content, delete + it when automatically created, and then blank the password of the `postgres` + user by setting it to `NULL`. Disabled by default. + type: boolean + env: + description: |- + Env follows the Env format to pass environment variables + to the pods created in the cluster + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + EnvFrom follows the EnvFrom format to pass environment variables + sources to the pods to be used by Env + items: + description: EnvFromSource represents the source of a set of ConfigMaps + or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name of each environment + variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + ephemeralVolumeSource: + description: EphemeralVolumeSource allows the user to configure the + source of ephemeral volumes. 
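+          # A minimal, hypothetical ephemeral volume source backed by a generic
+          # PVC template (not part of the generated schema; the storage class
+          # name is invented):
+          #
+          #   ephemeralVolumeSource:
+          #     volumeClaimTemplate:
+          #       spec:
+          #         accessModes: ["ReadWriteOnce"]
+          #         storageClassName: fast-local
+          #         resources:
+          #           requests:
+          #             storage: 1Gi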
+ properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. 
+ This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to + consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the + PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + type: object + ephemeralVolumesSizeLimit: + description: |- + EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral + volumes + properties: + shm: + anyOf: + - type: integer + - type: string + description: Shm is the size limit of the shared memory volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + temporaryData: + anyOf: + - type: integer + - type: string + description: TemporaryData is the size limit of the temporary + data volume + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + externalClusters: + description: The list of external clusters which are used in the configuration + items: + description: |- + ExternalCluster represents the connection parameters to an + external cluster which is used in the other sections of the configuration + properties: + barmanObjectStore: + description: The configuration for the barman-cloud tool suite + properties: + azureCredentials: + description: The credentials to use to upload data to Azure + Blob Storage + properties: + connectionString: + description: The connection string to be used + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromAzureAD: + description: Use the Azure AD based authentication without + providing explicitly the keys. + type: boolean + storageAccount: + description: The storage account where to upload data + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageKey: + description: |- + The storage account key to be used in conjunction + with the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + storageSasToken: + description: |- + A shared-access-signature to be used in conjunction with + the storage account name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + data: + description: |- + The configuration to be used to backup the data files + When not defined, base backups files will be stored uncompressed and may + be unencrypted in the object store, according to the bucket default + policy. + properties: + additionalCommandArgs: + description: |- + AdditionalCommandArgs represents additional arguments that can be appended + to the 'barman-cloud-backup' command-line invocation. These arguments + provide flexibility to customize the backup process further according to + specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-backup' command, to avoid potential errors or unintended + behavior during execution. 
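+                        # A hypothetical `data` section combining the fields above
+                        # (not part of the generated schema). The extra argument is
+                        # a placeholder, not a verified barman-cloud-backup flag:
+                        #
+                        #   data:
+                        #     compression: gzip
+                        #     encryption: AES256
+                        #     jobs: 4
+                        #     additionalCommandArgs:
+                        #       - "--example-flag"   # placeholder only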
+ items: + type: string + type: array + compression: + description: |- + Compress a backup file (a tar file per tablespace) while streaming it + to the object store. Available options are empty string (no + compression, default), `gzip`, `bzip2`, and `snappy`. + enum: + - bzip2 + - gzip + - snappy + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + jobs: + description: |- + The number of parallel jobs to be used to upload the backup, defaults + to 2 + format: int32 + minimum: 1 + type: integer + type: object + destinationPath: + description: |- + The path where to store the backup (i.e. s3://bucket/path/to/folder) + this path, with different destination folders, will be used for WALs + and for data + minLength: 1 + type: string + endpointCA: + description: |- + EndpointCA store the CA bundle of the barman endpoint. + Useful when using self-signed certificates to avoid + errors with certificate issuer and barman-cloud-wal-archive + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + endpointURL: + description: |- + Endpoint to be used to upload data to the cloud, + overriding the automatic endpoint discovery + type: string + googleCredentials: + description: The credentials to use to upload data to Google + Cloud Storage + properties: + applicationCredentials: + description: The secret containing the Google Cloud + Storage JSON file with the credentials + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + gkeEnvironment: + description: |- + If set to true, will presume that it's running inside a GKE environment, + default to false. + type: boolean + type: object + historyTags: + additionalProperties: + type: string + description: |- + HistoryTags is a list of key value pairs that will be passed to the + Barman --history-tags option. + type: object + s3Credentials: + description: The credentials to use to upload data to S3 + properties: + accessKeyId: + description: The reference to the access key id + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + inheritFromIAMRole: + description: Use the role based authentication without + providing explicitly the keys. + type: boolean + region: + description: The reference to the secret containing + the region name + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + secretAccessKey: + description: The reference to the secret access key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. 
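+                          # A hypothetical static S3 credentials block (not part of
+                          # the generated schema; secret name and keys invented).
+                          # Each field selects a key inside a Secret:
+                          #
+                          #   s3Credentials:
+                          #     accessKeyId:
+                          #       name: aws-creds
+                          #       key: ACCESS_KEY_ID
+                          #     secretAccessKey:
+                          #       name: aws-creds
+                          #       key: ACCESS_SECRET_KEY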
+ type: string + required: + - key + - name + type: object + sessionToken: + description: The references to the session key + properties: + key: + description: The key to select + type: string + name: + description: Name of the referent. + type: string + required: + - key + - name + type: object + type: object + serverName: + description: |- + The server name on S3, the cluster name is used if this + parameter is omitted + type: string + tags: + additionalProperties: + type: string + description: |- + Tags is a list of key value pairs that will be passed to the + Barman --tags option. + type: object + wal: + description: |- + The configuration for the backup of the WAL stream. + When not defined, WAL files will be stored uncompressed and may be + unencrypted in the object store, according to the bucket default policy. + properties: + archiveAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-archive' + command-line invocation. These arguments provide flexibility to customize + the WAL archive process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-archive' command, to avoid potential errors or unintended + behavior during execution. + items: + type: string + type: array + compression: + description: |- + Compress a WAL file before sending it to the object store. Available + options are empty string (no compression, default), `gzip`, `bzip2`, + `lz4`, `snappy`, `xz`, and `zstd`. + enum: + - bzip2 + - gzip + - lz4 + - snappy + - xz + - zstd + type: string + encryption: + description: |- + Whenever to force the encryption of files (if the bucket is + not already configured for that). + Allowed options are empty string (use the bucket policy, default), + `AES256` and `aws:kms` + enum: + - AES256 + - aws:kms + type: string + maxParallel: + description: |- + Number of WAL files to be either archived in parallel (when the + PostgreSQL instance is archiving to a backup object store) or + restored in parallel (when a PostgreSQL standby is fetching WAL + files from a recovery object store). If not specified, WAL files + will be processed one at a time. It accepts a positive integer as a + value - with 1 being the minimum accepted value. + minimum: 1 + type: integer + restoreAdditionalCommandArgs: + description: |- + Additional arguments that can be appended to the 'barman-cloud-wal-restore' + command-line invocation. These arguments provide flexibility to customize + the WAL restore process further, according to specific requirements or configurations. + + Example: + In a scenario where specialized backup options are required, such as setting + a specific timeout or defining custom behavior, users can use this field + to specify additional command arguments. + + Note: + It's essential to ensure that the provided arguments are valid and supported + by the 'barman-cloud-wal-restore' command, to avoid potential errors or unintended + behavior during execution. 
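+                        # A minimal, hypothetical `wal` section (not part of the
+                        # generated schema) compressing WAL files and processing up
+                        # to eight of them in parallel:
+                        #
+                        #   wal:
+                        #     compression: zstd
+                        #     maxParallel: 8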
+ items: + type: string + type: array + type: object + required: + - destinationPath + type: object + connectionParameters: + additionalProperties: + type: string + description: The list of connection parameters, such as dbname, + host, username, etc + type: object + name: + description: The server name, required + type: string + password: + description: |- + The reference to the password to be used to connect to the server. + If a password is provided, CloudNativePG creates a PostgreSQL + passfile at `/controller/external/NAME/pass` (where "NAME" is the + cluster's name). This passfile is automatically referenced in the + connection string when establishing a connection to the remote + PostgreSQL server from the current PostgreSQL `Cluster`. This ensures + secure and efficient password management for external clusters. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + plugin: + description: |- + The configuration of the plugin that is taking care + of WAL archiving and backups for this external cluster + properties: + enabled: + default: true + description: Enabled is true if this plugin will be used + type: boolean + isWALArchiver: + default: false + description: |- + Only one plugin can be declared as WALArchiver. + Cannot be active if ".spec.backup.barmanObjectStore" configuration is present. + type: boolean + name: + description: Name is the plugin name + type: string + parameters: + additionalProperties: + type: string + description: Parameters is the configuration of the plugin + type: object + required: + - name + type: object + sslCert: + description: |- + The reference to an SSL certificate to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslKey: + description: |- + The reference to an SSL private key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sslRootCert: + description: |- + The reference to an SSL CA public key to be used to connect to this + instance + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + failoverDelay: + default: 0 + description: |- + The amount of time (in seconds) to wait before triggering a failover + after the primary PostgreSQL instance in the cluster was detected + to be unhealthy + format: int32 + type: integer + imageCatalogRef: + description: Defines the major PostgreSQL version we want to use within + an ImageCatalog + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + major: + description: The major version of PostgreSQL we want to use from + the ImageCatalog + type: integer + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - major + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: Only image catalogs are supported + rule: self.kind == 'ImageCatalog' || self.kind == 'ClusterImageCatalog' + - message: Only image catalogs are supported + rule: self.apiGroup == 'postgresql.cnpg.io' + imageName: + description: |- + Name of the container image, supporting both tags (`:`) + and digests for deterministic and repeatable deployments + (`:@sha256:`) + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of `Always`, `Never` or `IfNotPresent`. + If not defined, it defaults to `IfNotPresent`. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + imagePullSecrets: + description: The list of pull secrets to be used to pull the images + items: + description: |- + LocalObjectReference contains enough information to let you locate a + local object with a known type inside the same namespace + properties: + name: + description: Name of the referent. 
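+        # A hypothetical catalog-based image selection, consistent with the
+        # validation rules above (not part of the generated schema; the catalog
+        # name is invented):
+        #
+        #   imageCatalogRef:
+        #     apiGroup: postgresql.cnpg.io
+        #     kind: ImageCatalog
+        #     name: postgresql-catalog
+        #     major: 16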
+ type: string + required: + - name + type: object + type: array + inheritedMetadata: + description: Metadata that will be inherited by all objects related + to the Cluster + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + instances: + default: 1 + description: Number of instances required in the cluster + minimum: 1 + type: integer + livenessProbeTimeout: + description: |- + LivenessProbeTimeout is the time (in seconds) that is allowed for a PostgreSQL instance + to successfully respond to the liveness probe (default 30). + The Liveness probe failure threshold is derived from this value using the formula: + ceiling(livenessProbe / 10). + format: int32 + type: integer + logLevel: + default: info + description: 'The instances'' log level, one of the following values: + error, warning, info (default), debug, trace' + enum: + - error + - warning + - info + - debug + - trace + type: string + managed: + description: The configuration that is used by the portions of PostgreSQL + that are managed by the instance manager + properties: + roles: + description: Database roles managed by the `Cluster` + items: + description: |- + RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role + with the additional field Ensure specifying whether to ensure the presence or + absence of the role in the database + + The defaults of the CREATE ROLE command are applied + Reference: https://www.postgresql.org/docs/current/sql-createrole.html + properties: + bypassrls: + description: |- + Whether a role bypasses every row-level security (RLS) policy. + Default is `false`. + type: boolean + comment: + description: Description of the role + type: string + connectionLimit: + default: -1 + description: |- + If the role can log in, this specifies how many concurrent + connections the role can make. `-1` (the default) means no limit. + format: int64 + type: integer + createdb: + description: |- + When set to `true`, the role being defined will be allowed to create + new databases. Specifying `false` (default) will deny a role the + ability to create databases. + type: boolean + createrole: + description: |- + Whether the role will be permitted to create, alter, drop, comment + on, change the security label for, and grant or revoke membership in + other roles. Default is `false`. + type: boolean + disablePassword: + description: DisablePassword indicates that a role's password + should be set to NULL in Postgres + type: boolean + ensure: + default: present + description: Ensure the role is `present` or `absent` - + defaults to "present" + enum: + - present + - absent + type: string + inRoles: + description: |- + List of one or more existing roles to which this role will be + immediately added as a new member. Default empty. + items: + type: string + type: array + inherit: + default: true + description: |- + Whether a role "inherits" the privileges of roles it is a member of. + Defaults is `true`. + type: boolean + login: + description: |- + Whether the role is allowed to log in. A role having the `login` + attribute can be thought of as a user. Roles without this attribute + are useful for managing database privileges, but are not users in + the usual sense of the word. Default is `false`. 
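+                    # A hypothetical managed role using the attributes above (not
+                    # part of the generated schema; the role name is invented, and
+                    # pg_monitor is a predefined PostgreSQL role). CREATE ROLE
+                    # defaults apply to anything omitted:
+                    #
+                    #   managed:
+                    #     roles:
+                    #       - name: reporting
+                    #         ensure: present
+                    #         login: true
+                    #         connectionLimit: 10
+                    #         inRoles:
+                    #           - pg_monitor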
+ type: boolean + name: + description: Name of the role + type: string + passwordSecret: + description: |- + Secret containing the password of the role (if present) + If null, the password will be ignored unless DisablePassword is set + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + replication: + description: |- + Whether a role is a replication role. A role must have this + attribute (or be a superuser) in order to be able to connect to the + server in replication mode (physical or logical replication) and in + order to be able to create or drop replication slots. A role having + the `replication` attribute is a very highly privileged role, and + should only be used on roles actually used for replication. Default + is `false`. + type: boolean + superuser: + description: |- + Whether the role is a `superuser` who can override all access + restrictions within the database - superuser status is dangerous and + should be used only when really needed. You must yourself be a + superuser to create a new superuser. Defaults is `false`. + type: boolean + validUntil: + description: |- + Date and time after which the role's password is no longer valid. + When omitted, the password will never expire (default). + format: date-time + type: string + required: + - name + type: object + type: array + services: + description: Services roles managed by the `Cluster` + properties: + additional: + description: Additional is a list of additional managed services + specified by the user. + items: + description: |- + ManagedService represents a specific service managed by the cluster. + It includes the type of service and its associated template specification. + properties: + selectorType: + description: |- + SelectorType specifies the type of selectors that the service will have. + Valid values are "rw", "r", and "ro", representing read-write, read, and read-only services. + enum: + - rw + - r + - ro + type: string + serviceTemplate: + description: ServiceTemplate is the template specification + for the service. + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only + supported for certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the service. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". 
It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as describe above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail. This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as describe above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. 
A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. 
This field will be wiped when updating a Service to type ExternalName.
+
+                                    This field may hold a maximum of two entries (dual-stack families, in
+                                    either order). These families must correspond to the values of the
+                                    clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                                    governed by the ipFamilyPolicy field.
+                                  items:
+                                    description: |-
+                                      IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                                      to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                ipFamilyPolicy:
+                                  description: |-
+                                    IPFamilyPolicy represents the dual-stack-ness requested or required by
+                                    this Service. If there is no value provided, then this field will be set
+                                    to SingleStack. Services can be "SingleStack" (a single IP family),
+                                    "PreferDualStack" (two IP families on dual-stack configured clusters or
+                                    a single IP family on single-stack clusters), or "RequireDualStack"
+                                    (two IP families on dual-stack configured clusters, otherwise fail). The
+                                    ipFamilies and clusterIPs fields depend on the value of this field. This
+                                    field will be wiped when updating a service to type ExternalName.
+                                  type: string
+                                loadBalancerClass:
+                                  description: |-
+                                    loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                                    If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                                    e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                                    This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                                    balancer implementation is used, today this is typically done through the cloud provider integration,
+                                    but should apply for any default implementation. If set, it is assumed that a load balancer
+                                    implementation is watching for Services with a matching class. Any default load balancer
+                                    implementation (e.g. cloud providers) should ignore Services that set this field.
+                                    This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                                    Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                                  type: string
+                                loadBalancerIP:
+                                  description: |-
+                                    Only applies to Service Type: LoadBalancer.
+                                    This feature depends on whether the underlying cloud-provider supports specifying
+                                    the loadBalancerIP when a load balancer is created.
+                                    This field will be ignored if the cloud-provider does not support the feature.
+                                    Deprecated: This field was under-specified and its meaning varies across implementations.
+                                    Using it is non-portable and it may not support dual-stack.
+                                    Users are encouraged to use implementation-specific annotations when available.
+                                  type: string
+                                loadBalancerSourceRanges:
+                                  description: |-
+                                    If specified and supported by the platform, traffic through the
+                                    cloud-provider load-balancer will be restricted to the specified client
+                                    IPs. This field will be ignored if the cloud-provider does not support
+                                    the feature.
+                                    More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                                  items:
+                                    type: string
+                                  type: array
+                                  x-kubernetes-list-type: atomic
+                                ports:
+                                  description: |-
+                                    The list of ports that are exposed by this service.
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                                  items:
+                                    description: ServicePort contains information
+                                      on service's port. 
+ properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort. + Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed + by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+
+                                    More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                                  type: string
+                              type: object
+                          type: object
+                        updateStrategy:
+                          default: patch
+                          description: UpdateStrategy describes how the service
+                            differences should be reconciled
+                          enum:
+                            - patch
+                            - replace
+                          type: string
+                      required:
+                        - selectorType
+                        - serviceTemplate
+                      type: object
+                    type: array
+                  disabledDefaultServices:
+                    description: |-
+                      DisabledDefaultServices is a list of service types that are disabled by default.
+                      Valid values are "r" and "ro", representing read and read-only services.
+                    items:
+                      description: |-
+                        ServiceSelectorType describes a valid value for generating the service selectors.
+                        It indicates which type of service the selector applies to, such as read-write, read, or read-only
+                      enum:
+                        - rw
+                        - r
+                        - ro
+                      type: string
+                    type: array
+                type: object
+            type: object
+          maxSyncReplicas:
+            default: 0
+            description: |-
+              The target value for the synchronous replication quorum, which can be
+              decreased if the number of ready standbys is lower than this.
+              Undefined or 0 disables synchronous replication.
+            minimum: 0
+            type: integer
+          minSyncReplicas:
+            default: 0
+            description: |-
+              Minimum number of instances required in synchronous replication with the
+              primary. Undefined or 0 allows writes to complete when no standby is
+              available.
+            minimum: 0
+            type: integer
+          monitoring:
+            description: The configuration of the monitoring infrastructure of
+              this cluster
+            properties:
+              customQueriesConfigMap:
+                description: The list of config maps containing the custom queries
+                items:
+                  description: |-
+                    ConfigMapKeySelector contains enough information to let you locate
+                    the key of a ConfigMap
+                  properties:
+                    key:
+                      description: The key to select
+                      type: string
+                    name:
+                      description: Name of the referent.
+                      type: string
+                  required:
+                    - key
+                    - name
+                  type: object
+                type: array
+              customQueriesSecret:
+                description: The list of secrets containing the custom queries
+                items:
+                  description: |-
+                    SecretKeySelector contains enough information to let you locate
+                    the key of a Secret
+                  properties:
+                    key:
+                      description: The key to select
+                      type: string
+                    name:
+                      description: Name of the referent.
+                      type: string
+                  required:
+                    - key
+                    - name
+                  type: object
+                type: array
+              disableDefaultQueries:
+                default: false
+                description: |-
+                  Whether the default queries should be injected.
+                  Set it to `true` if you don't want to inject default queries into the cluster.
+                  Default: false.
+                type: boolean
+              enablePodMonitor:
+                default: false
+                description: Enable or disable the `PodMonitor`
+                type: boolean
+              podMonitorMetricRelabelings:
+                description: The list of metric relabelings for the `PodMonitor`.
+                  Applied to samples before ingestion.
+                items:
+                  description: |-
+                    RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+                    scraped samples and remote write samples.
+
+                    More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+                  properties:
+                    action:
+                      default: replace
+                      description: |-
+                        Action to perform based on the regex matching.
+
+                        `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+                        `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
+ + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. 
+
+                          pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+                          type: string
+                        type: array
+                      targetLabel:
+                        description: |-
+                          Label to which the resulting string is written in a replacement.
+
+                          It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+                          `KeepEqual` and `DropEqual` actions.
+
+                          Regex capture groups are available.
+                        type: string
+                    type: object
+                  type: array
+                tls:
+                  description: |-
+                    Configure TLS communication for the metrics endpoint.
+                    Changing tls.enabled option will force a rollout of all instances.
+                  properties:
+                    enabled:
+                      default: false
+                      description: |-
+                        Enable TLS for the monitoring endpoint.
+                        Changing this option will force a rollout of all instances.
+                      type: boolean
+                  type: object
+              type: object
+            nodeMaintenanceWindow:
+              description: Define a maintenance window for the Kubernetes nodes
+              properties:
+                inProgress:
+                  default: false
+                  description: Is there a node maintenance activity in progress?
+                  type: boolean
+                reusePVC:
+                  default: true
+                  description: |-
+                    Reuse the existing PVC (wait for the node to come
+                    up again) or not (recreate it elsewhere - when `instances` >1)
+                  type: boolean
+              type: object
+            plugins:
+              description: |-
+                The plugins configuration, containing
+                any plugin to be loaded with the corresponding configuration
+              items:
+                description: |-
+                  PluginConfiguration specifies a plugin that needs to be loaded for this
+                  cluster to be reconciled
+                properties:
+                  enabled:
+                    default: true
+                    description: Enabled is true if this plugin will be used
+                    type: boolean
+                  isWALArchiver:
+                    default: false
+                    description: |-
+                      Only one plugin can be declared as WALArchiver.
+                      Cannot be active if ".spec.backup.barmanObjectStore" configuration is present.
+                    type: boolean
+                  name:
+                    description: Name is the plugin name
+                    type: string
+                  parameters:
+                    additionalProperties:
+                      type: string
+                    description: Parameters is the configuration of the plugin
+                    type: object
+                required:
+                  - name
+                type: object
+              type: array
+            postgresGID:
+              default: 26
+              description: The GID of the `postgres` user inside the image, defaults
+                to `26`
+              format: int64
+              type: integer
+            postgresUID:
+              default: 26
+              description: The UID of the `postgres` user inside the image, defaults
+                to `26`
+              format: int64
+              type: integer
+            postgresql:
+              description: Configuration of the PostgreSQL server
+              properties:
+                enableAlterSystem:
+                  description: |-
+                    If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+                    on this CloudNativePG Cluster.
+                    This should only be used for debugging and troubleshooting.
+                    Defaults to false.
+                  type: boolean
+                extensions:
+                  description: The configuration of the extensions to be added
+                  items:
+                    description: |-
+                      ExtensionConfiguration is the configuration used to add
+                      PostgreSQL extensions to the Cluster.
+                    properties:
+                      dynamic_library_path:
+                        description: |-
+                          The list of directories inside the image which should be added to dynamic_library_path.
+                          If not defined, defaults to "/lib".
+                        items:
+                          type: string
+                        type: array
+                      extension_control_path:
+                        description: |-
+                          The list of directories inside the image which should be added to extension_control_path.
+                          If not defined, defaults to "/share".
+                        items:
+                          type: string
+                        type: array
+                      image:
+                        description: The image containing the extension, required
+                        properties:
+                          pullPolicy:
+                            description: |-
+                              Policy for pulling OCI objects. Possible values are:
+                              Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails. 
+ Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + x-kubernetes-validations: + - message: An image reference is required + rule: has(self.reference) + ld_library_path: + description: The list of directories inside the image which + should be added to ld_library_path. + items: + type: string + type: array + name: + description: The name of the extension, required + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - image + - name + type: object + type: array + ldap: + description: Options to specify LDAP configuration + properties: + bindAsAuth: + description: Bind as authentication configuration + properties: + prefix: + description: Prefix for the bind authentication option + type: string + suffix: + description: Suffix for the bind authentication option + type: string + type: object + bindSearchAuth: + description: Bind+Search authentication configuration + properties: + baseDN: + description: Root DN to begin the user search + type: string + bindDN: + description: DN of the user to bind to the directory + type: string + bindPassword: + description: Secret with the password for the user to + bind to the directory + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + searchAttribute: + description: Attribute to match against the username + type: string + searchFilter: + description: Search filter to use when doing the search+bind + authentication + type: string + type: object + port: + description: LDAP server port + type: integer + scheme: + description: LDAP schema to be used, possible options are + `ldap` and `ldaps` + enum: + - ldap + - ldaps + type: string + server: + description: LDAP hostname or IP address + type: string + tls: + description: Set to 'true' to enable LDAP over TLS. 
Defaults to 'false'.
+                    type: boolean
+                type: object
+                parameters:
+                  additionalProperties:
+                    type: string
+                  description: PostgreSQL configuration options (postgresql.conf)
+                  type: object
+                pg_hba:
+                  description: |-
+                    PostgreSQL Host Based Authentication rules (lines to be appended
+                    to the pg_hba.conf file)
+                  items:
+                    type: string
+                  type: array
+                pg_ident:
+                  description: |-
+                    PostgreSQL User Name Maps rules (lines to be appended
+                    to the pg_ident.conf file)
+                  items:
+                    type: string
+                  type: array
+                promotionTimeout:
+                  description: |-
+                    Specifies the maximum number of seconds to wait when promoting an instance to primary.
+                    Default value is 40000000, greater than one year in seconds,
+                    big enough to simulate an infinite timeout
+                  format: int32
+                  type: integer
+                shared_preload_libraries:
+                  description: List of shared preload libraries to add to the default
+                    ones
+                  items:
+                    type: string
+                  type: array
+                syncReplicaElectionConstraint:
+                  description: |-
+                    Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+                    set up.
+                  properties:
+                    enabled:
+                      description: This flag enables the constraints for sync replicas
+                      type: boolean
+                    nodeLabelsAntiAffinity:
+                      description: A list of node label values to extract and compare
+                        to evaluate if the pods reside in the same topology or not
+                      items:
+                        type: string
+                      type: array
+                  required:
+                    - enabled
+                  type: object
+                synchronous:
+                  description: Configuration of the PostgreSQL synchronous replication
+                    feature
+                  properties:
+                    dataDurability:
+                      description: |-
+                        If set to "required", data durability is strictly enforced. Write operations
+                        with synchronous commit settings (`on`, `remote_write`, or `remote_apply`) will
+                        block if there are insufficient healthy replicas, ensuring data persistence.
+                        If set to "preferred", data durability is maintained when healthy replicas
+                        are available, but the required number of instances will adjust dynamically
+                        if replicas become unavailable. This setting relaxes strict durability enforcement
+                        to allow for operational continuity. This setting is only applicable if both
+                        `standbyNamesPre` and `standbyNamesPost` are unset (empty).
+                      enum:
+                        - required
+                        - preferred
+                      type: string
+                    maxStandbyNamesFromCluster:
+                      description: |-
+                        Specifies the maximum number of local cluster pods that can be
+                        automatically included in the `synchronous_standby_names` option in
+                        PostgreSQL.
+                      type: integer
+                    method:
+                      description: |-
+                        Method to select synchronous replication standbys from the listed
+                        servers, accepting 'any' (quorum-based synchronous replication) or
+                        'first' (priority-based synchronous replication) as values.
+                      enum:
+                        - any
+                        - first
+                      type: string
+                    number:
+                      description: |-
+                        Specifies the number of synchronous standby servers that
+                        transactions must wait for responses from.
+                      type: integer
+                      x-kubernetes-validations:
+                        - message: The number of synchronous replicas should be greater
+                            than zero
+                          rule: self > 0
+                    standbyNamesPost:
+                      description: |-
+                        A user-defined list of application names to be added to
+                        `synchronous_standby_names` after local cluster pods (the order is
+                        only useful for priority-based synchronous replication).
+                      items:
+                        type: string
+                      type: array
+                    standbyNamesPre:
+                      description: |-
+                        A user-defined list of application names to be added to
+                        `synchronous_standby_names` before local cluster pods (the order is
+                        only useful for priority-based synchronous replication). 
+
+                      items:
+                        type: string
+                      type: array
+                  required:
+                    - method
+                    - number
+                  type: object
+                  x-kubernetes-validations:
+                    - message: dataDurability set to 'preferred' requires empty 'standbyNamesPre'
+                        and empty 'standbyNamesPost'
+                      rule: self.dataDurability!='preferred' || ((!has(self.standbyNamesPre)
+                        || self.standbyNamesPre.size()==0) && (!has(self.standbyNamesPost)
+                        || self.standbyNamesPost.size()==0))
+              type: object
+            primaryUpdateMethod:
+              default: restart
+              description: |-
+                Method to follow to upgrade the primary server during a rolling
+                update procedure, after all replicas have been successfully updated:
+                it can be with a switchover (`switchover`) or in-place (`restart` - default)
+              enum:
+                - switchover
+                - restart
+              type: string
+            primaryUpdateStrategy:
+              default: unsupervised
+              description: |-
+                Deployment strategy to follow to upgrade the primary server during a rolling
+                update procedure, after all replicas have been successfully updated:
+                it can be automated (`unsupervised` - default) or manual (`supervised`)
+              enum:
+                - unsupervised
+                - supervised
+              type: string
+            priorityClassName:
+              description: |-
+                Name of the priority class which will be used in every generated Pod; if the PriorityClass
+                specified does not exist, the pod will not be able to schedule. Please refer to
+                https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+                for more information
+              type: string
+            probes:
+              description: |-
+                The configuration of the probes to be injected
+                in the PostgreSQL Pods.
+              properties:
+                liveness:
+                  description: The liveness probe configuration
+                  properties:
+                    failureThreshold:
+                      description: |-
+                        Minimum consecutive failures for the probe to be considered failed after having succeeded.
+                        Defaults to 3. Minimum value is 1.
+                      format: int32
+                      type: integer
+                    initialDelaySeconds:
+                      description: |-
+                        Number of seconds after the container has started before liveness probes are initiated.
+                        More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                      format: int32
+                      type: integer
+                    isolationCheck:
+                      description: |-
+                        Configure the feature that extends the liveness probe for a primary
+                        instance. In addition to the basic checks, this verifies whether the
+                        primary is isolated from the Kubernetes API server and from its
+                        replicas, ensuring that it can be safely shut down if network
+                        partition or API unavailability is detected. Enabled by default.
+                      properties:
+                        connectionTimeout:
+                          default: 1000
+                          description: Timeout in milliseconds for connections during
+                            the primary isolation check
+                          type: integer
+                        enabled:
+                          default: true
+                          description: Whether primary isolation checking is enabled
+                            for the liveness probe
+                          type: boolean
+                        requestTimeout:
+                          default: 1000
+                          description: Timeout in milliseconds for requests during
+                            the primary isolation check
+                          type: integer
+                      type: object
+                    periodSeconds:
+                      description: |-
+                        How often (in seconds) to perform the probe.
+                        Default to 10 seconds. Minimum value is 1.
+                      format: int32
+                      type: integer
+                    successThreshold:
+                      description: |-
+                        Minimum consecutive successes for the probe to be considered successful after having failed.
+                        Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+                      format: int32
+                      type: integer
+                    terminationGracePeriodSeconds:
+                      description: |-
+                        Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + readiness: + description: The readiness probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + startup: + description: The startup probe configuration + properties: + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + maximumLag: + anyOf: + - type: integer + - type: string + description: Lag limit. Used only for `streaming` strategy + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: + description: The probe strategy + enum: + - pg_isready + - streaming + - query + type: string + type: object + type: object + projectedVolumeTemplate: + description: |- + Template to be used to define projected volumes, projected volumes will be mounted + under `/projected` base folder + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. 
+ items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root to write + the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret data to + project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether the Secret + or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the + serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
+
+                              type: string
+                            expirationSeconds:
+                              description: |-
+                                expirationSeconds is the requested duration of validity of the service
+                                account token. As the token approaches expiration, the kubelet volume
+                                plugin will proactively rotate the service account token. The kubelet will
+                                start trying to rotate the token if the token is older than 80 percent of
+                                its time to live or if the token is older than 24 hours. Defaults to 1 hour
+                                and must be at least 10 minutes.
+                              format: int64
+                              type: integer
+                            path:
+                              description: |-
+                                path is the path relative to the mount point of the file to project the
+                                token into.
+                              type: string
+                          required:
+                            - path
+                          type: object
+                      type: object
+                  type: array
+                  x-kubernetes-list-type: atomic
+              type: object
+            replica:
+              description: Replica cluster configuration
+              properties:
+                enabled:
+                  description: |-
+                    If replica mode is enabled, this cluster will be a replica of an
+                    existing cluster. A replica cluster can be created from a recovery
+                    object store or via streaming through pg_basebackup.
+                    Refer to the Replica clusters page of the documentation for more information.
+                  type: boolean
+                minApplyDelay:
+                  description: |-
+                    When replica mode is enabled, this parameter allows you to replay
+                    transactions only when the system time is at least the configured
+                    time past the commit time. This provides an opportunity to correct
+                    data loss errors. Note that when this parameter is set, a promotion
+                    token cannot be used.
+                  type: string
+                primary:
+                  description: |-
+                    Primary defines which Cluster is defined to be the primary in the distributed PostgreSQL cluster, based on the
+                    topology specified in externalClusters
+                  type: string
+                promotionToken:
+                  description: |-
+                    A demotion token generated by an external cluster used to
+                    check if the promotion requirements are met.
+                  type: string
+                self:
+                  description: |-
+                    Self defines the name of this cluster. It is used to determine if this is a primary
+                    or a replica cluster, comparing it with `primary`
+                  type: string
+                source:
+                  description: The name of the external cluster which is the replication
+                    origin
+                  minLength: 1
+                  type: string
+              required:
+                - source
+              type: object
+            replicationSlots:
+              default:
+                highAvailability:
+                  enabled: true
+              description: Replication slots management configuration
+              properties:
+                highAvailability:
+                  default:
+                    enabled: true
+                  description: Replication slots for high availability configuration
+                  properties:
+                    enabled:
+                      default: true
+                      description: |-
+                        If enabled (default), the operator will automatically manage replication slots
+                        on the primary instance and use them in streaming replication
+                        connections with all the standby instances that are part of the HA
+                        cluster. If disabled, the operator will not take advantage
+                        of replication slots in streaming connections with the replicas.
+                        This feature also controls replication slots in replica clusters,
+                        from the designated primary to its cascading replicas.
+                      type: boolean
+                    slotPrefix:
+                      default: _cnpg_
+                      description: |-
+                        Prefix for replication slots managed by the operator for HA.
+                        It may only contain lower case letters, numbers, and the underscore character.
+                        This can only be set at creation time. By default set to `_cnpg_`.
+                      pattern: ^[0-9a-z_]*$
+                      type: string
+                    synchronizeLogicalDecoding:
+                      description: |-
+                        When enabled, the operator automatically manages synchronization of logical
+                        decoding (replication) slots across high-availability clusters. 
+ + Requires one of the following conditions: + - PostgreSQL version 17 or later + - PostgreSQL version < 17 with pg_failover_slots extension enabled + type: boolean + type: object + synchronizeReplicas: + description: Configures the synchronization of the user defined + physical replication slots + properties: + enabled: + default: true + description: When set to true, every replication slot that + is on the primary is synchronized on each standby + type: boolean + excludePatterns: + description: List of regular expression patterns to match + the names of replication slots to be excluded (by default + empty) + items: + type: string + type: array + required: + - enabled + type: object + updateInterval: + default: 30 + description: |- + Standby will update the status of the local replication slots + every `updateInterval` seconds (default 30). + minimum: 1 + type: integer + type: object + resources: + description: |- + Resources requirements of every generated Pod. Please refer to + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + for more information. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulerName: + description: |- + If specified, the pod will be dispatched by specified Kubernetes + scheduler. If not specified, the pod will be dispatched by the default + scheduler. More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/ + type: string + seccompProfile: + description: |- + The SeccompProfile applied to every Pod and Container. 
+ Defaults to: `RuntimeDefault` + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + serviceAccountTemplate: + description: Configure the generation of the service account + properties: + metadata: + description: |- + Metadata are the metadata to be used for the generated + service account + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + required: + - metadata + type: object + smartShutdownTimeout: + default: 180 + description: |- + The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete. + Make sure you reserve enough time for the operator to request a fast shutdown of Postgres + (that is: `stopDelay` - `smartShutdownTimeout`). + format: int32 + type: integer + startDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + successfully start up (default 3600). + The startup probe failure threshold is derived from this value using the formula: + ceiling(startDelay / 10). + format: int32 + type: integer + stopDelay: + default: 1800 + description: |- + The time in seconds that is allowed for a PostgreSQL instance to + gracefully shutdown (default 1800) + format: int32 + type: integer + storage: + description: Configuration of the storage of the instances + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
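To make the shutdown and startup timing fields above concrete, here is a sketch using the documented defaults: with `stopDelay: 1800` and `smartShutdownTimeout: 180`, PostgreSQL has up to 180 seconds for the smart shutdown and the remaining 1800 - 180 = 1620 seconds are reserved for the fast shutdown; with `startDelay: 3600`, the startup probe failure threshold works out to ceiling(3600 / 10) = 360:

spec:
  startDelay: 3600            # startup probe failureThreshold = ceil(3600/10) = 360
  stopDelay: 1800             # total shutdown budget in seconds
  smartShutdownTimeout: 180   # smart shutdown window; 1620s remain for fast shutdown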
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
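When finer control is needed, the `pvcTemplate` documented above accepts a full PersistentVolumeClaim spec; a hedged sketch (the storage class name is a placeholder) requesting an explicit access mode and capacity:

spec:
  storage:
    pvcTemplate:
      accessModes:
        - ReadWriteOnce
      storageClassName: fast-ssd    # placeholder class name
      resources:
        requests:
          storage: 10Gi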
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + superuserSecret: + description: |- + The secret containing the superuser password. If not defined a new + secret will be created with a randomly generated password + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + switchoverDelay: + default: 3600 + description: |- + The time in seconds that is allowed for a primary PostgreSQL instance + to gracefully shutdown during a switchover. + Default value is 3600 seconds (1 hour). + format: int32 + type: integer + tablespaces: + description: The tablespaces configuration + items: + description: |- + TablespaceConfiguration is the configuration of a tablespace, and includes + the storage specification for the tablespace + properties: + name: + description: The name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + properties: + name: + type: string + type: object + storage: + description: The storage configuration for the tablespace + properties: + pvcTemplate: + description: Template to be used to generate the Persistent + Volume Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
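A minimal sketch of the `storage`, `superuserSecret`, and `switchoverDelay` fields above; the storage class and secret names are placeholders:

spec:
  storage:
    size: 10Gi                       # required unless set in pvcTemplate
    storageClass: fast-ssd           # placeholder; omit to use the default class
    resizeInUseVolumes: true         # default
  superuserSecret:
    name: cluster-example-superuser  # placeholder secret name
  switchoverDelay: 3600              # default: 1 hour for graceful demotion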
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + temporary: + default: false + description: |- + When set to true, the tablespace will be added as a `temp_tablespaces` + entry in PostgreSQL, and will be available to automatically house temp + database objects, or other temporary files. Please refer to PostgreSQL + documentation for more information on the `temp_tablespaces` GUC. + type: boolean + required: + - name + - storage + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints specifies how to spread matching pods among the given topology. + More info: + https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
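Pulling the tablespace fields together, a hedged example (names and sizes are illustrative) declaring one regular tablespace and one temporary tablespace that PostgreSQL will add to `temp_tablespaces`:

spec:
  tablespaces:
    - name: analytics
      owner:
        name: app                    # PostgreSQL role owning the tablespace
      storage:
        size: 20Gi
    - name: scratch
      temporary: true                # registered in temp_tablespaces
      storage:
        size: 5Gi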
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. 
+ In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + walStorage: + description: Configuration of the storage for PostgreSQL WAL (Write-Ahead + Log) + properties: + pvcTemplate: + description: Template to be used to generate the Persistent Volume + Claim + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. 
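Since `topologySpreadConstraints` embeds the stock Kubernetes type described above, a typical sketch spreads instances across availability zones; the `cnpg.io/cluster` label used in the selector is the label the operator is commonly documented to set on instance pods, so treat it as an assumption:

spec:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          cnpg.io/cluster: cluster-example   # assumed instance-pod label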
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + resizeInUseVolumes: + default: true + description: Resize existent PVCs, defaults to true + type: boolean + size: + description: |- + Size of the storage. Required if not already specified in the PVC template. + Changes to this field are automatically reapplied to the created PVCs. + Size cannot be decreased. + type: string + storageClass: + description: |- + StorageClass to use for PVCs. Applied after + evaluating the PVC template, if available. + If not specified, the generated PVCs will use the + default storage class + type: string + type: object + required: + - instances + type: object + x-kubernetes-validations: + - message: imageName and imageCatalogRef are mutually exclusive + rule: '!(has(self.imageCatalogRef) && has(self.imageName))' + status: + description: |- + Most recently observed status of the cluster. This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + availableArchitectures: + description: AvailableArchitectures reports the available architectures + of a cluster + items: + description: AvailableArchitecture represents the state of a cluster's + architecture + properties: + goArch: + description: GoArch is the name of the executable architecture + type: string + hash: + description: Hash is the hash of the executable + type: string + required: + - goArch + - hash + type: object + type: array + certificates: + description: The configuration for the CA and related certificates, + initialized with defaults. + properties: + clientCASecret: + description: |- + The secret containing the Client CA certificate. 
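Stepping back to the `walStorage` stanza documented above: it mirrors the main `storage` block, so a short sketch (the class name is a placeholder) is enough to put WAL on its own, typically faster, volume:

spec:
  storage:
    size: 10Gi
  walStorage:
    size: 5Gi
    storageClass: fast-ssd    # placeholder; WAL often benefits from faster media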
If not defined, a new secret will be created + with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates, + used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided, + this can be omitted.
+ type: string + expirations: + additionalProperties: + type: string + description: Expiration dates for all certificates. + type: object + replicationTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the client certificate to authenticate as + the `streaming_replica` user. + If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be + created using the provided CA. + type: string + serverAltDNSNames: + description: The list of the server alternative DNS names to be + added to the generated server TLS certificates, when required. + items: + type: string + type: array + serverCASecret: + description: |- + The secret containing the Server CA certificate. If not defined, a new secret will be created + with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate, + used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided, + this can be omitted.
+ type: string + serverTLSSecret: + description: |- + The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as + `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely. + If not defined, ServerCASecret must provide also `ca.key` and a new secret will be + created using the provided CA. + type: string + type: object + cloudNativePGCommitHash: + description: The commit hash number of which this operator running + type: string + cloudNativePGOperatorHash: + description: The hash of the binary of the operator + type: string + conditions: + description: Conditions for cluster object + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + configMapResourceVersion: + description: |- + The list of resource versions of the configmaps, + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + configmap data + properties: + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the config maps used to pass metrics. 
+ Map keys are the config map names, map values are the versions + type: object + type: object + currentPrimary: + description: Current primary instance + type: string + currentPrimaryFailingSinceTimestamp: + description: |- + The timestamp when the primary was detected to be unhealthy + This field is reported when `.spec.failoverDelay` is populated or during online upgrades + type: string + currentPrimaryTimestamp: + description: The timestamp when the last actual promotion to primary + has occurred + type: string + danglingPVC: + description: |- + List of all the PVCs created by this cluster and still available + which are not attached to a Pod + items: + type: string + type: array + demotionToken: + description: |- + DemotionToken is a JSON token containing the information + from pg_controldata such as Database system identifier, Latest checkpoint's + TimeLineID, Latest checkpoint's REDO location, Latest checkpoint's REDO + WAL file, and Time of latest checkpoint + type: string + firstRecoverabilityPoint: + description: |- + The first recoverability point, stored as a date in RFC3339 format. + This field is calculated from the content of FirstRecoverabilityPointByMethod. + + Deprecated: the field is not set for backup plugins. + type: string + firstRecoverabilityPointByMethod: + additionalProperties: + format: date-time + type: string + description: |- + The first recoverability point, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + healthyPVC: + description: List of all the PVCs not dangling nor initializing + items: + type: string + type: array + image: + description: Image contains the image name used by the pods + type: string + initializingPVC: + description: List of all the PVCs that are being initialized by this + cluster + items: + type: string + type: array + instanceNames: + description: List of instance names in the cluster + items: + type: string + type: array + instances: + description: The total number of PVC Groups detected in the cluster. + It may differ from the number of existing instance pods. + type: integer + instancesReportedState: + additionalProperties: + description: InstanceReportedState describes the last reported state + of an instance during a reconciliation loop + properties: + ip: + description: IP address of the instance + type: string + isPrimary: + description: indicates if an instance is the primary one + type: boolean + timeLineID: + description: indicates on which TimelineId the instance is + type: integer + required: + - isPrimary + type: object + description: The reported state of the instances during the last reconciliation + loop + type: object + instancesStatus: + additionalProperties: + items: + type: string + type: array + description: InstancesStatus indicates in which status the instances + are + type: object + jobCount: + description: How many Jobs have been created by this cluster + format: int32 + type: integer + lastFailedBackup: + description: |- + Last failed backup, stored as a date in RFC3339 format. + + Deprecated: the field is not set for backup plugins. + type: string + lastPromotionToken: + description: |- + LastPromotionToken is the last verified promotion token that + was used to promote a replica cluster + type: string + lastSuccessfulBackup: + description: |- + Last successful backup, stored as a date in RFC3339 format. + This field is calculated from the content of LastSuccessfulBackupByMethod. 
+ + Deprecated: the field is not set for backup plugins. + type: string + lastSuccessfulBackupByMethod: + additionalProperties: + format: date-time + type: string + description: |- + Last successful backup, stored as a date in RFC3339 format, per backup method type. + + Deprecated: the field is not set for backup plugins. + type: object + latestGeneratedNode: + description: ID of the latest generated node (used to avoid node name + clashing) + type: integer + managedRolesStatus: + description: ManagedRolesStatus reports the state of the managed roles + in the cluster + properties: + byStatus: + additionalProperties: + items: + type: string + type: array + description: ByStatus gives the list of roles in each state + type: object + cannotReconcile: + additionalProperties: + items: + type: string + type: array + description: |- + CannotReconcile lists roles that cannot be reconciled in PostgreSQL, + with an explanation of the cause + type: object + passwordStatus: + additionalProperties: + description: PasswordState represents the state of the password + of a managed RoleConfiguration + properties: + resourceVersion: + description: the resource version of the password secret + type: string + transactionID: + description: the last transaction ID to affect the role + definition in PostgreSQL + format: int64 + type: integer + type: object + description: PasswordStatus gives the last transaction id and + password secret version for each managed role + type: object + type: object + onlineUpdateEnabled: + description: OnlineUpdateEnabled shows if the online upgrade is enabled + inside the cluster + type: boolean + pgDataImageInfo: + description: PGDataImageInfo contains the details of the latest image + that has run on the current data directory. + properties: + image: + description: Image is the image name + type: string + majorVersion: + description: MajorVersion is the major version of the image + type: integer + required: + - image + - majorVersion + type: object + phase: + description: Current phase of the cluster + type: string + phaseReason: + description: Reason for the current phase + type: string + pluginStatus: + description: PluginStatus is the status of the loaded plugins + items: + description: PluginStatus is the status of a loaded plugin + properties: + backupCapabilities: + description: |- + BackupCapabilities are the list of capabilities of the + plugin regarding the Backup management + items: + type: string + type: array + capabilities: + description: |- + Capabilities are the list of capabilities of the + plugin + items: + type: string + type: array + name: + description: Name is the name of the plugin + type: string + operatorCapabilities: + description: |- + OperatorCapabilities are the list of capabilities of the + plugin regarding the reconciler + items: + type: string + type: array + restoreJobHookCapabilities: + description: |- + RestoreJobHookCapabilities are the list of capabilities of the + plugin regarding the RestoreJobHook management + items: + type: string + type: array + status: + description: Status contain the status reported by the plugin + through the SetStatusInCluster interface + type: string + version: + description: |- + Version is the version of the plugin loaded by the + latest reconciliation loop + type: string + walCapabilities: + description: |- + WALCapabilities are the list of capabilities of the + plugin regarding the WAL management + items: + type: string + type: array + required: + - name + - version + type: object + type: array + 
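For orientation, a hedged sketch of how a slice of this status block might look on a healthy three-instance cluster, using fields documented in this status section; all values are illustrative and the phase string assumes the operator's usual healthy-state message:

status:
  phase: Cluster in healthy state    # assumed healthy-phase message
  instances: 3
  readyInstances: 3
  currentPrimary: cluster-example-1  # placeholder pod name
  targetPrimary: cluster-example-1   # equals currentPrimary outside switchover/failover
  timelineID: 1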
poolerIntegrations: + description: The integration needed by poolers referencing the cluster + properties: + pgBouncerIntegration: + description: PgBouncerIntegrationStatus encapsulates the needed + integration for the pgbouncer poolers referencing the cluster + properties: + secrets: + items: + type: string + type: array + type: object + type: object + pvcCount: + description: How many PVCs have been created by this cluster + format: int32 + type: integer + readService: + description: Current list of read pods + type: string + readyInstances: + description: The total number of ready instances in the cluster. It + is equal to the number of ready instance pods. + type: integer + resizingPVC: + description: List of all the PVCs that have ResizingPVC condition. + items: + type: string + type: array + secretsResourceVersion: + description: |- + The list of resource versions of the secrets + managed by the operator. Every change here is done in the + interest of the instance manager, which will refresh the + secret data + properties: + applicationSecretVersion: + description: The resource version of the "app" user secret + type: string + barmanEndpointCA: + description: The resource version of the Barman Endpoint CA if + provided + type: string + caSecretVersion: + description: Unused. Retained for compatibility with old versions. + type: string + clientCaSecretVersion: + description: The resource version of the PostgreSQL client-side + CA secret version + type: string + externalClusterSecretVersion: + additionalProperties: + type: string + description: The resource versions of the external cluster secrets + type: object + managedRoleSecretVersion: + additionalProperties: + type: string + description: The resource versions of the managed roles secrets + type: object + metrics: + additionalProperties: + type: string + description: |- + A map with the versions of all the secrets used to pass metrics. + Map keys are the secret names, map values are the versions + type: object + replicationSecretVersion: + description: The resource version of the "streaming_replica" user + secret + type: string + serverCaSecretVersion: + description: The resource version of the PostgreSQL server-side + CA secret version + type: string + serverSecretVersion: + description: The resource version of the PostgreSQL server-side + secret version + type: string + superuserSecretVersion: + description: The resource version of the "postgres" user secret + type: string + type: object + switchReplicaClusterStatus: + description: SwitchReplicaClusterStatus is the status of the switch + to replica cluster + properties: + inProgress: + description: InProgress indicates if there is an ongoing procedure + of switching a cluster to a replica cluster. 
+ type: boolean + type: object + systemID: + description: SystemID is the latest detected PostgreSQL SystemID + type: string + tablespacesStatus: + description: TablespacesStatus reports the state of the declarative + tablespaces in the cluster + items: + description: TablespaceState represents the state of a tablespace + in a cluster + properties: + error: + description: Error is the reconciliation error, if any + type: string + name: + description: Name is the name of the tablespace + type: string + owner: + description: Owner is the PostgreSQL user owning the tablespace + type: string + state: + description: State is the latest reconciliation state + type: string + required: + - name + - state + type: object + type: array + targetPrimary: + description: |- + Target primary instance, this is different from the previous one + during a switchover or a failover + type: string + targetPrimaryTimestamp: + description: The timestamp when the last request for a new primary + has occurred + type: string + timelineID: + description: The timeline of the Postgres cluster + type: integer + topology: + description: Instances topology. + properties: + instances: + additionalProperties: + additionalProperties: + type: string + description: PodTopologyLabels represent the topology of a Pod. + map[labelName]labelValue + type: object + description: Instances contains the pod topology of the instances + type: object + nodesUsed: + description: |- + NodesUsed represents the count of distinct nodes accommodating the instances. + A value of '1' suggests that all instances are hosted on a single node, + implying the absence of High Availability (HA). Ideally, this value should + be the same as the number of instances in the Postgres HA cluster, implying + shared nothing architecture on the compute side. + format: int32 + type: integer + successfullyExtracted: + description: |- + SuccessfullyExtracted indicates if the topology data was extract. It is useful to enact fallback behaviors + in synchronous replica election in case of failures + type: boolean + type: object + unusablePVC: + description: List of all the PVCs that are unusable because another + PVC is missing + items: + type: string + type: array + writeService: + description: Current write pod + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: databases.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired Database. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + allowConnections: + description: |- + Maps to the `ALLOW_CONNECTIONS` parameter of `CREATE DATABASE` and + `ALTER DATABASE`. If false then no one can connect to this database. + type: boolean + builtinLocale: + description: |- + Maps to the `BUILTIN_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the locale name when the + builtin provider is used. This option requires `localeProvider` to + be set to `builtin`. Available from PostgreSQL 17. + type: string + x-kubernetes-validations: + - message: builtinLocale is immutable + rule: self == oldSelf + cluster: + description: The name of the PostgreSQL cluster hosting the database. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + collationVersion: + description: |- + Maps to the `COLLATION_VERSION` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: collationVersion is immutable + rule: self == oldSelf + connectionLimit: + description: |- + Maps to the `CONNECTION LIMIT` clause of `CREATE DATABASE` and + `ALTER DATABASE`. How many concurrent connections can be made to + this database. -1 (the default) means no limit. + type: integer + databaseReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this database. + enum: + - delete + - retain + type: string + encoding: + description: |- + Maps to the `ENCODING` parameter of `CREATE DATABASE`. This setting + cannot be changed. Character set encoding to use in the database. + type: string + x-kubernetes-validations: + - message: encoding is immutable + rule: self == oldSelf + ensure: + default: present + description: Ensure the PostgreSQL database is `present` or `absent` + - defaults to "present". + enum: + - present + - absent + type: string + extensions: + description: The list of extensions to be managed in the database + items: + description: ExtensionSpec configures an extension in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. 
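+                      # Illustrative only: a hypothetical `spec.extensions`
+                      # entry; the `pgaudit` extension name is an assumption,
+                      # not a default:
+                      #
+                      #   extensions:
+                      #     - name: pgaudit
+                      #       ensure: present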
+ enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + schema: + description: |- + The name of the schema in which to install the extension's objects, + in case the extension allows its contents to be relocated. If not + specified (default), and the extension's control file does not + specify a schema either, the current default object creation schema + is used. + type: string + version: + description: |- + The version of the extension to install. If empty, the operator will + install the default version (whatever is specified in the + extension's control file) + type: string + required: + - name + type: object + type: array + icuLocale: + description: |- + Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This + setting cannot be changed. Specifies the ICU locale when the ICU + provider is used. This option requires `localeProvider` to be set to + `icu`. Available from PostgreSQL 15. + type: string + x-kubernetes-validations: + - message: icuLocale is immutable + rule: self == oldSelf + icuRules: + description: |- + Maps to the `ICU_RULES` parameter of `CREATE DATABASE`. This setting + cannot be changed. Specifies additional collation rules to customize + the behavior of the default collation. This option requires + `localeProvider` to be set to `icu`. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: icuRules is immutable + rule: self == oldSelf + isTemplate: + description: |- + Maps to the `IS_TEMPLATE` parameter of `CREATE DATABASE` and `ALTER + DATABASE`. If true, this database is considered a template and can + be cloned by any user with `CREATEDB` privileges. + type: boolean + locale: + description: |- + Maps to the `LOCALE` parameter of `CREATE DATABASE`. This setting + cannot be changed. Sets the default collation order and character + classification in the new database. + type: string + x-kubernetes-validations: + - message: locale is immutable + rule: self == oldSelf + localeCType: + description: |- + Maps to the `LC_CTYPE` parameter of `CREATE DATABASE`. This setting + cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCType is immutable + rule: self == oldSelf + localeCollate: + description: |- + Maps to the `LC_COLLATE` parameter of `CREATE DATABASE`. This + setting cannot be changed. + type: string + x-kubernetes-validations: + - message: localeCollate is immutable + rule: self == oldSelf + localeProvider: + description: |- + Maps to the `LOCALE_PROVIDER` parameter of `CREATE DATABASE`. This + setting cannot be changed. This option sets the locale provider for + databases created in the new cluster. Available from PostgreSQL 16. + type: string + x-kubernetes-validations: + - message: localeProvider is immutable + rule: self == oldSelf + name: + description: The name of the database to create inside PostgreSQL. + This setting cannot be changed. + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: the name postgres is reserved + rule: self != 'postgres' + - message: the name template0 is reserved + rule: self != 'template0' + - message: the name template1 is reserved + rule: self != 'template1' + owner: + description: |- + Maps to the `OWNER` parameter of `CREATE DATABASE`. + Maps to the `OWNER TO` command of `ALTER DATABASE`. + The role name of the user who owns the database inside PostgreSQL. 
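+              # A minimal, hypothetical Database manifest tying the fields of
+              # this spec together (all object and role names are assumptions):
+              #
+              #   apiVersion: postgresql.cnpg.io/v1
+              #   kind: Database
+              #   metadata:
+              #     name: app-db
+              #   spec:
+              #     cluster:
+              #       name: cluster-example
+              #     name: app
+              #     owner: app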
+ type: string + schemas: + description: The list of schemas to be managed in the database + items: + description: SchemaSpec configures a schema in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the extension/schema + type: string + owner: + description: |- + The role name of the user who owns the schema inside PostgreSQL. + It maps to the `AUTHORIZATION` parameter of `CREATE SCHEMA` and the + `OWNER TO` command of `ALTER SCHEMA`. + type: string + required: + - name + type: object + type: array + tablespace: + description: |- + Maps to the `TABLESPACE` parameter of `CREATE DATABASE`. + Maps to the `SET TABLESPACE` command of `ALTER DATABASE`. + The name of the tablespace (in PostgreSQL) that will be associated + with the new database. This tablespace will be the default + tablespace used for objects created in this database. + type: string + template: + description: |- + Maps to the `TEMPLATE` parameter of `CREATE DATABASE`. This setting + cannot be changed. The name of the template from which to create + this database. + type: string + x-kubernetes-validations: + - message: template is immutable + rule: self == oldSelf + required: + - cluster + - name + - owner + type: object + x-kubernetes-validations: + - message: builtinLocale is only available when localeProvider is set + to `builtin` + rule: '!has(self.builtinLocale) || self.localeProvider == ''builtin''' + - message: icuLocale is only available when localeProvider is set to `icu` + rule: '!has(self.icuLocale) || self.localeProvider == ''icu''' + - message: icuRules is only available when localeProvider is set to `icu` + rule: '!has(self.icuRules) || self.localeProvider == ''icu''' + status: + description: |- + Most recently observed status of the Database. This data may not be up to + date. Populated by the system. Read-only. 
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+            properties:
+              applied:
+                description: Applied is true if the database was reconciled correctly
+                type: boolean
+              extensions:
+                description: Extensions is the status of the managed extensions
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+              message:
+                description: Message is the reconciliation output message
+                type: string
+              observedGeneration:
+                description: |-
+                  A sequence number representing the latest
+                  desired state that was synchronized
+                format: int64
+                type: integer
+              schemas:
+                description: Schemas is the status of the managed schemas
+                items:
+                  description: DatabaseObjectStatus is the status of the managed database
+                    objects
+                  properties:
+                    applied:
+                      description: |-
+                        True if the object has been installed successfully in
+                        the database
+                      type: boolean
+                    message:
+                      description: Message is the object reconciliation message
+                      type: string
+                    name:
+                      description: The name of the object
+                      type: string
+                  required:
+                  - applied
+                  - name
+                  type: object
+                type: array
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.18.0
+  name: failoverquorums.postgresql.cnpg.io
+spec:
+  group: postgresql.cnpg.io
+  names:
+    kind: FailoverQuorum
+    listKind: FailoverQuorumList
+    plural: failoverquorums
+    singular: failoverquorum
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: |-
+          FailoverQuorum contains the information about the current failover
+          quorum status of a PG cluster. It is updated by the instance manager
+          of the primary node and reset to zero by the operator to trigger
+          an update.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          status:
+            description: Most recently observed status of the failover quorum.
+            properties:
+              method:
+                description: Contains the latest reported Method value.
+                type: string
+              primary:
+                description: |-
+                  Primary is the name of the primary instance that most recently
+                  updated this object.
+                type: string
+              standbyNames:
+                description: |-
+                  StandbyNames is the list of potentially synchronous
+                  instance names.
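+              # Illustrative only: a hypothetical FailoverQuorum status as the
+              # primary's instance manager might publish it (all names and
+              # numbers are made up):
+              #
+              #   status:
+              #     method: any
+              #     primary: cluster-example-1
+              #     standbyNames:
+              #     - cluster-example-2
+              #     - cluster-example-3
+              #     standbyNumber: 1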
+ items: + type: string + type: array + standbyNumber: + description: |- + StandbyNumber is the number of synchronous standbys that transactions + need to wait for replies from. + type: integer + type: object + required: + - metadata + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: imagecatalogs.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ImageCatalog + listKind: ImageCatalogList + plural: imagecatalogs + singular: imagecatalog + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ImageCatalog is the Schema for the imagecatalogs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ImageCatalog. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + images: + description: List of CatalogImages available in the catalog + items: + description: CatalogImage defines the image and major version + properties: + image: + description: The image reference + type: string + major: + description: The PostgreSQL major version of the image. Must + be unique within the catalog. + minimum: 10 + type: integer + required: + - image + - major + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-validations: + - message: Images must have unique major versions + rule: self.all(e, self.filter(f, f.major==e.major).size() == 1) + required: + - images + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: poolers.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Pooler + listKind: PoolerList + plural: poolers + singular: pooler + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.type + name: Type + type: string + name: v1 + schema: + openAPIV3Schema: + description: Pooler is the Schema for the poolers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the Pooler. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + cluster: + description: |- + This is the cluster reference on which the Pooler will work. + Pooler name should never match with any cluster name within the same namespace. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + deploymentStrategy: + description: The deployment strategy to use for pgbouncer to replace + existing pods with new ones + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + This can not be 0 if MaxUnavailable is 0. + Absolute number is calculated from percentage by rounding up. + Defaults to 25%. + Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + the rolling update starts, such that the total number of old and new pods do not exceed + 130% of desired pods. Once old pods have been killed, + new ReplicaSet can be scaled up further, ensuring that total number of pods running + at any time during the update is at most 130% of desired pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be unavailable during the update. + Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + Absolute number is calculated from percentage by rounding down. + This can not be 0 if MaxSurge is 0. + Defaults to 25%. + Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + that the total number of pods available at all times during the update is at + least 70% of desired pods. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + instances: + default: 1 + description: 'The number of replicas we want. Default: 1.' + format: int32 + type: integer + monitoring: + description: The configuration of the monitoring infrastructure of + this pooler. + properties: + enablePodMonitor: + default: false + description: Enable or disable the `PodMonitor` + type: boolean + podMonitorMetricRelabelings: + description: The list of metric relabelings for the `PodMonitor`. + Applied to samples before ingestion. 
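+                    # Illustrative only: a hypothetical relabeling that drops a
+                    # high-cardinality label from scraped samples before
+                    # ingestion (the label name is an assumption):
+                    #
+                    #   podMonitorMetricRelabelings:
+                    #   - action: labeldrop
+                    #     regex: pod_template_hash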
+ items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. + type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + podMonitorRelabelings: + description: The list of relabelings for the `PodMonitor`. Applied + to samples before scraping. + items: + description: |- + RelabelConfig allows dynamic rewriting of the label set for targets, alerts, + scraped samples and remote write samples. + + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + properties: + action: + default: replace + description: |- + Action to perform based on the regex matching. + + `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. + `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. + + Default: "Replace" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: |- + Modulus to take of the hash of the source label values. + + Only applicable when the action is `HashMod`. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. + type: string + replacement: + description: |- + Replacement value against which a Replace action is performed if the + regular expression matches. + + Regex capture groups are available. 
+ type: string + separator: + description: Separator is the string between concatenated + SourceLabels. + type: string + sourceLabels: + description: |- + The source labels select values from existing labels. Their content is + concatenated using the configured Separator and matched against the + configured regular expression. + items: + description: |- + LabelName is a valid Prometheus label name which may only contain ASCII + letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: |- + Label to which the resulting string is written in a replacement. + + It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, + `KeepEqual` and `DropEqual` actions. + + Regex capture groups are available. + type: string + type: object + type: array + type: object + pgbouncer: + description: The PgBouncer configuration + properties: + authQuery: + description: |- + The query that will be used to download the hash of the password + of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)". + In case it is specified, also an AuthQuerySecret has to be specified and + no automatic CNPG Cluster integration will be triggered. + type: string + authQuerySecret: + description: |- + The credentials of the user that need to be used for the authentication + query. In case it is specified, also an AuthQuery + (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1") + has to be specified and no automatic CNPG Cluster integration will be triggered. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + parameters: + additionalProperties: + type: string + description: |- + Additional parameters to be passed to PgBouncer - please check + the CNPG documentation for a list of options you can configure + type: object + paused: + default: false + description: |- + When set to `true`, PgBouncer will disconnect from the PostgreSQL + server, first waiting for all queries to complete, and pause all new + client connections until this value is set to `false` (default). Internally, + the operator calls PgBouncer's `PAUSE` and `RESUME` commands. + type: boolean + pg_hba: + description: |- + PostgreSQL Host Based Authentication rules (lines to be appended + to the pg_hba.conf file) + items: + type: string + type: array + poolMode: + default: session + description: 'The pool mode. Default: `session`.' + enum: + - session + - transaction + type: string + type: object + serviceTemplate: + description: Template for the Service to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. 
Only supported for
+                            certain types
+                          type: string
+                      type: object
+                    spec:
+                      description: |-
+                        Specification of the desired behavior of the service.
+                        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                      properties:
+                        allocateLoadBalancerNodePorts:
+                          description: |-
+                            allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                            allocated for services with type LoadBalancer. Default is "true". It
+                            may be set to "false" if the cluster load-balancer does not rely on
+                            NodePorts. If the caller requests specific NodePorts (by specifying a
+                            value), those requests will be respected, regardless of this field.
+                            This field may only be set for services with type LoadBalancer and will
+                            be cleared if the type is changed to any other type.
+                          type: boolean
+                        clusterIP:
+                          description: |-
+                            clusterIP is the IP address of the service and is usually assigned
+                            randomly. If an address is specified manually, is in-range (as per
+                            system configuration), and is not in use, it will be allocated to the
+                            service; otherwise creation of the service will fail. This field may not
+                            be changed through updates unless the type field is also being changed
+                            to ExternalName (which requires this field to be blank) or the type
+                            field is being changed from ExternalName (in which case this field may
+                            optionally be specified, as described above). Valid values are "None",
+                            empty string (""), or a valid IP address. Setting this to "None" makes a
+                            "headless service" (no virtual IP), which is useful when direct endpoint
+                            connections are preferred and proxying is not required. Only applies to
+                            types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                            when creating a Service of type ExternalName, creation will fail. This
+                            field will be wiped when updating a Service to type ExternalName.
+                            More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                          type: string
+                        clusterIPs:
+                          description: |-
+                            ClusterIPs is a list of IP addresses assigned to this service, and are
+                            usually assigned randomly. If an address is specified manually, is
+                            in-range (as per system configuration), and is not in use, it will be
+                            allocated to the service; otherwise creation of the service will fail.
+                            This field may not be changed through updates unless the type field is
+                            also being changed to ExternalName (which requires this field to be
+                            empty) or the type field is being changed from ExternalName (in which
+                            case this field may optionally be specified, as described above). Valid
+                            values are "None", empty string (""), or a valid IP address. Setting
+                            this to "None" makes a "headless service" (no virtual IP), which is
+                            useful when direct endpoint connections are preferred and proxying is
+                            not required. Only applies to types ClusterIP, NodePort, and
+                            LoadBalancer. If this field is specified when creating a Service of type
+                            ExternalName, creation will fail. This field will be wiped when updating
+                            a Service to type ExternalName. If this field is not specified, it will
+                            be initialized from the clusterIP field. If this field is specified,
+                            clients must ensure that clusterIPs[0] and clusterIP have the same
+                            value.
+
+                            This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                            These IPs must correspond to the values of the ipFamilies field. Both
+                            clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. 
If this field is specified
+                            manually, the requested family is available in the cluster,
+                            and ipFamilyPolicy allows it, it will be used; otherwise creation of
+                            the service will fail. This field is conditionally mutable: it allows
+                            for adding or removing a secondary IP family, but it does not allow
+                            changing the primary IP family of the Service. Valid values are "IPv4"
+                            and "IPv6". This field only applies to Services of types ClusterIP,
+                            NodePort, and LoadBalancer, and does apply to "headless" services.
+                            This field will be wiped when updating a Service to type ExternalName.
+
+                            This field may hold a maximum of two entries (dual-stack families, in
+                            either order). These families must correspond to the values of the
+                            clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                            governed by the ipFamilyPolicy field.
+                          items:
+                            description: |-
+                              IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                              to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        ipFamilyPolicy:
+                          description: |-
+                            IPFamilyPolicy represents the dual-stack-ness requested or required by
+                            this Service. If there is no value provided, then this field will be set
+                            to SingleStack. Services can be "SingleStack" (a single IP family),
+                            "PreferDualStack" (two IP families on dual-stack configured clusters or
+                            a single IP family on single-stack clusters), or "RequireDualStack"
+                            (two IP families on dual-stack configured clusters, otherwise fail). The
+                            ipFamilies and clusterIPs fields depend on the value of this field. This
+                            field will be wiped when updating a service to type ExternalName.
+                          type: string
+                        loadBalancerClass:
+                          description: |-
+                            loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                            If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                            e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                            This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                            balancer implementation is used, today this is typically done through the cloud provider integration,
+                            but should apply for any default implementation. If set, it is assumed that a load balancer
+                            implementation is watching for Services with a matching class. Any default load balancer
+                            implementation (e.g. cloud providers) should ignore Services that set this field.
+                            This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                            Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                          type: string
+                        loadBalancerIP:
+                          description: |-
+                            Only applies to Service Type: LoadBalancer.
+                            This feature depends on whether the underlying cloud-provider supports specifying
+                            the loadBalancerIP when a load balancer is created.
+                            This field will be ignored if the cloud-provider does not support the feature.
+                            Deprecated: This field was under-specified and its meaning varies across implementations.
+                            Using it is non-portable and it may not support dual-stack.
+                            Users are encouraged to use implementation-specific annotations when available.
+                          type: string
+                        loadBalancerSourceRanges:
+                          description: |-
+                            If specified and supported by the platform, traffic through the cloud-provider
+                            load-balancer will be restricted to the specified client IPs. This field will
+                            be ignored if the cloud-provider does not support the feature.
+                            More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        ports:
+                          description: |-
+                            The list of ports that are exposed by this service.
+                            More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                          items:
+                            description: ServicePort contains information on service's
+                              port.
+                            properties:
+                              appProtocol:
+                                description: |-
+                                  The application protocol for this port.
+                                  This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                                  This field follows standard Kubernetes label syntax.
+                                  Valid values are either:
+
+                                  * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                                  RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                                  * Kubernetes-defined prefixed names:
+                                    * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                                    * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                                    * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                                  * Other protocols should use implementation-defined prefixed names such as
+                                  mycompany.com/my-custom-protocol.
+                                type: string
+                              name:
+                                description: |-
+                                  The name of this port within the service. This must be a DNS_LABEL.
+                                  All ports within a ServiceSpec must have unique names. When considering
+                                  the endpoints for a Service, this must match the 'name' field in the
+                                  EndpointPort.
+                                  Optional if only one ServicePort is defined on this service.
+                                type: string
+                              nodePort:
+                                description: |-
+                                  The port on each node on which this service is exposed when type is
+                                  NodePort or LoadBalancer. Usually assigned by the system. If a value is
+                                  specified, in-range, and not in use it will be used, otherwise the
+                                  operation will fail. If not specified, a port will be allocated if this
+                                  Service requires one. If this field is specified when creating a
+                                  Service which does not need it, creation will fail. This field will be
+                                  wiped when updating a Service to no longer need it (e.g. changing type
+                                  from NodePort to ClusterIP).
+                                  More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+                                format: int32
+                                type: integer
+                              port:
+                                description: The port that will be exposed by this service.
+                                format: int32
+                                type: integer
+                              protocol:
+                                default: TCP
+                                description: |-
+                                  The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+                                  Default is TCP.
+                                type: string
+                              targetPort:
+                                anyOf:
+                                - type: integer
+                                - type: string
+                                description: |-
+                                  Number or name of the port to access on the pods targeted by the service.
+                                  Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+                                  If this is a string, it will be looked up as a named port in the
+                                  target Pod's container ports. If this is not specified, the value
+                                  of the 'port' field is used (an identity map).
+                                  This field is ignored for services with clusterIP=None, and should be
+                                  omitted or set equal to the 'port' field.
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. 
+ "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + template: + description: The template of the Pod to be created + properties: + metadata: + description: |- + Standard object's metadata. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + name: + description: The name of the resource. Only supported for + certain types + type: string + type: object + spec: + description: |- + Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + activeDeadlineSeconds: + description: |- + Optional duration in seconds the pod may be active on the node relative to + StartTime before the system will actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. 
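+                                                # Illustrative only: a
+                                                # hypothetical preference term
+                                                # (label key and values are
+                                                # assumptions):
+                                                #
+                                                #   preference:
+                                                #     matchExpressions:
+                                                #     - key: kubernetes.io/arch
+                                                #       operator: In
+                                                #       values: ["amd64"]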
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
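+                                                    # Illustrative only: a
+                                                    # hypothetical namespace
+                                                    # selector (label key and
+                                                    # values are assumptions):
+                                                    #
+                                                    #   namespaceSelector:
+                                                    #     matchExpressions:
+                                                    #     - key: environment
+                                                    #       operator: In
+                                                    #       values: ["production"]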
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
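+                    # Illustrative sketch, not part of the generated schema: a hard
+                    # co-location requirement of the kind described above. The
+                    # "app: postgres" label and the hostname topology key are example
+                    # values only.
+                    #
+                    #   podAffinity:
+                    #     requiredDuringSchedulingIgnoredDuringExecution:
+                    #       - labelSelector:
+                    #           matchLabels:
+                    #             app: postgres
+                    #         topologyKey: kubernetes.io/hostname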
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
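+                    # Illustrative sketch, not part of the generated schema: a soft
+                    # spreading rule using the 1-100 weight described above; the weight,
+                    # label, and zone topology key are example values only.
+                    #
+                    #   podAntiAffinity:
+                    #     preferredDuringSchedulingIgnoredDuringExecution:
+                    #       - weight: 100
+                    #         podAffinityTerm:
+                    #           labelSelector:
+                    #             matchLabels:
+                    #               app: postgres
+                    #           topologyKey: topology.kubernetes.io/zone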
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: |- + List of containers belonging to the pod. + Containers cannot currently be added or removed. + There must be at least one container in a Pod. + Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
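+                    # Illustrative sketch, not part of the generated schema: the
+                    # $(VAR_NAME) expansion and $$ escaping described above; GREETING is
+                    # a hypothetical variable.
+                    #
+                    #   command: ["/bin/echo"]
+                    #   args: ["$(GREETING)", "$$(GREETING)"]  # second arg stays the literal "$(GREETING)"
+                    #   env:
+                    #     - name: GREETING
+                    #       value: hello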
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. 
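+                    # Illustrative sketch, not part of the generated schema: the valueFrom
+                    # sources described above; the secret name "db-credentials" and key
+                    # "password" are hypothetical.
+                    #
+                    #   env:
+                    #     - name: POD_NAME
+                    #       valueFrom:
+                    #         fieldRef:
+                    #           fieldPath: metadata.name
+                    #     - name: DB_PASSWORD
+                    #       valueFrom:
+                    #         secretKeyRef:
+                    #           name: db-credentials
+                    #           key: password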
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. 
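+                    # Illustrative sketch, not part of the generated schema: postStart and
+                    # preStop handlers as described above; the command, path, and port are
+                    # hypothetical.
+                    #
+                    #   lifecycle:
+                    #     postStart:
+                    #       exec:
+                    #         command: ["/bin/sh", "-c", "echo started"]
+                    #     preStop:
+                    #       httpGet:
+                    #         path: /shutdown
+                    #         port: 8080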
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. 
+ Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. 
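+                    # Illustrative sketch, not part of the generated schema: a restricted
+                    # container securityContext combining the fields described above
+                    # (example values only).
+                    #
+                    #   securityContext:
+                    #     allowPrivilegeEscalation: false
+                    #     readOnlyRootFilesystem: true
+                    #     runAsNonRoot: true
+                    #     capabilities:
+                    #       drop: ["ALL"]
+                    #     seccompProfile:
+                    #       type: RuntimeDefault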
+ type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. 
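+          # Illustrative comment (not part of the schema): a gRPC startup
+          # probe against a server implementing the standard grpc.health.v1
+          # service might look like this; port and service name are
+          # hypothetical:
+          #   startupProbe:
+          #     grpc:
+          #       port: 9090
+          #       service: readiness
+          #     failureThreshold: 30
+          #     periodSeconds: 10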
+ format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
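+          # Illustrative comment: a probe-level terminationGracePeriodSeconds
+          # overrides the pod-level value only for kills triggered by this
+          # probe, e.g. (all values hypothetical):
+          #   startupProbe:
+          #     httpGet:
+          #       path: /healthz
+          #       port: 8080
+          #     terminationGracePeriodSeconds: 60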
+ format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + description: |- + Specifies the DNS parameters of a pod. + Parameters specified here will be merged to the generated DNS + configuration based on DNSPolicy. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's + value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. 
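+          # Illustrative comment: a dnsConfig block merged with the selected
+          # dnsPolicy could look like this (values hypothetical):
+          #   dnsConfig:
+          #     nameservers:
+          #     - 10.96.0.10
+          #     searches:
+          #     - svc.cluster.local
+          #     options:
+          #     - name: ndots
+          #       value: "2"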
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + Set DNS policy for the pod. + Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, you have to specify DNS policy + explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: |- + EnableServiceLinks indicates whether information about services should be injected into pod's + environment variables, matching the syntax of Docker links. + Optional: Defaults to true. + type: boolean + ephemeralContainers: + description: |- + List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + pod to perform user-initiated actions such as debugging. This list cannot be specified when + creating a pod, and it cannot be modified by updating the pod spec. In order to add an + ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: |- + An EphemeralContainer is a temporary container that you may add to an existing Pod for + user-initiated activities such as debugging. Ephemeral containers have no resource or + scheduling guarantees, and they will not be restarted when they exit or when a Pod is + removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the + Pod to exceed its resource allocation. + + To add an ephemeral container, use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + properties: + args: + description: |- + Arguments to the entrypoint. + The image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. 
+ Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. 
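+          # Illustrative comment: ephemeral containers are added through the
+          # pod's ephemeralcontainers subresource rather than by editing the
+          # spec, typically via kubectl; pod and target container names here
+          # are hypothetical:
+          #   kubectl debug -it some-pod --image=busybox:stable --target=postgres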
+ properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). 
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the ephemeral container specified as a DNS_LABEL. + This name must be unique among all containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
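+          # Illustrative comment: for regular containers (not ephemeral
+          # ones), an in-place resize policy under the
+          # InPlacePodVerticalScaling feature gate might be declared as:
+          #   resizePolicy:
+          #   - resourceName: cpu
+          #     restartPolicy: NotRequired
+          #   - resourceName: memory
+          #     restartPolicy: RestartContainer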
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for the container to manage the restart behavior of each + container within a pod. + This may only be set for init containers. You cannot set this field on + ephemeral containers. + type: string + securityContext: + description: |- + Optional: SecurityContext defines the security options the ephemeral container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. 
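+          # Illustrative comment: a restriction-first container
+          # securityContext commonly pairs these fields:
+          #   securityContext:
+          #     allowPrivilegeEscalation: false
+          #     runAsNonRoot: true
+          #     readOnlyRootFilesystem: true
+          #     capabilities:
+          #       drop: ["ALL"]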
+ type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies a command to execute + in the container. 
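+          # Illustrative comment: probes are rejected on ephemeral
+          # containers; on a regular container an exec probe runs a command
+          # and treats exit status 0 as healthy, e.g. (command hypothetical):
+          #   livenessProbe:
+          #     exec:
+          #       command: ["pg_isready", "-U", "postgres"]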
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + targetContainerName: + description: |- + If set, the name of the container from PodSpec that this ephemeral container targets. + The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + If not set then the ephemeral container uses the namespaces configured in the Pod spec. + + The container runtime must implement support for this feature. If the runtime does not + support namespace targeting then the result of setting this field is undefined. + type: string + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. 
+ Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + description: |- + HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + file if specified. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + description: |- + Use the host's ipc namespace. + Optional: Default to false. + type: boolean + hostNetwork: + description: |- + Host networking requested for this pod. Use the host's network namespace. + If this option is set, the ports that will be used must be specified. + Default to false. + type: boolean + hostPID: + description: |- + Use the host's pid namespace. + Optional: Default to false. + type: boolean + hostUsers: + description: |- + Use the host's user namespace. + Optional: Default to true. + If set to true or not present, the pod will be run in the host user namespace, useful + for when the pod needs a feature only available to the host user namespace, such as + loading a kernel module with CAP_SYS_MODULE. + When set to false, a new userns is created for the pod. Setting false is useful for + mitigating container breakout vulnerabilities even allowing users to run their + containers as root without actually having root privileges on the host. + This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + type: boolean + hostname: + description: |- + Specifies the hostname of the Pod + If not specified, the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: |- + ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + If specified, these secrets will be passed to individual puller implementations for them to use. + More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + description: |- + List of initialization containers belonging to the pod. + Init containers are executed in order prior to containers being started. If any + init container fails, the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or normal container must be + unique among all containers. + Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
+ The resourceRequirements of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, and then using the max of + that value or the sum of the normal containers. Limits are applied to init containers + in a similar fashion. + Init containers cannot currently be added or removed. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps or Secrets + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: Optional text to prepend to the name + of each environment variable. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents a duration that + the container should sleep. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when it is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + description: |- + StopSignal defines which signal will be sent to a container when it is being stopped. + If not specified, the default is defined by the container runtime in use. + StopSignal can only be set for Pods with a non-empty .spec.os.name + type: string + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
+ properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. 
Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies a command to execute + in the container. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies a GRPC HealthCheckRequest. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + default: "" + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies an HTTP GET request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies a connection to + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + If ReadOnly is false, this field has no meaning and must be unspecified. + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. 
If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + description: |- + NodeName indicates in which node this pod is scheduled. + If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + This field should not be used to express a desire for the pod to be scheduled on a specific node. + https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + os: + description: |- + Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is set. 

                        If the OS field is set to linux, the following fields must be unset:
                        - securityContext.windowsOptions

                        If the OS field is set to windows, the following fields must be unset:
                        - spec.hostPID
                        - spec.hostIPC
                        - spec.hostUsers
                        - spec.securityContext.appArmorProfile
                        - spec.securityContext.seLinuxOptions
                        - spec.securityContext.seccompProfile
                        - spec.securityContext.fsGroup
                        - spec.securityContext.fsGroupChangePolicy
                        - spec.securityContext.sysctls
                        - spec.shareProcessNamespace
                        - spec.securityContext.runAsUser
                        - spec.securityContext.runAsGroup
                        - spec.securityContext.supplementalGroups
                        - spec.securityContext.supplementalGroupsPolicy
                        - spec.containers[*].securityContext.appArmorProfile
                        - spec.containers[*].securityContext.seLinuxOptions
                        - spec.containers[*].securityContext.seccompProfile
                        - spec.containers[*].securityContext.capabilities
                        - spec.containers[*].securityContext.readOnlyRootFilesystem
                        - spec.containers[*].securityContext.privileged
                        - spec.containers[*].securityContext.allowPrivilegeEscalation
                        - spec.containers[*].securityContext.procMount
                        - spec.containers[*].securityContext.runAsUser
                        - spec.containers[*].securityContext.runAsGroup
                      properties:
                        name:
                          description: |-
                            Name is the name of the operating system. The currently supported values are linux and windows.
                            Additional values may be defined in the future and can be one of:
                            https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
                            Clients should expect to handle additional values and treat unrecognized values in this field as os: null
                          type: string
                      required:
                      - name
                      type: object
                    overhead:
                      additionalProperties:
                        anyOf:
                        - type: integer
                        - type: string
                        pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                        x-kubernetes-int-or-string: true
                      description: |-
                        Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
                        This field will be autopopulated at admission time by the RuntimeClass admission controller. If
                        the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
                        The RuntimeClass admission controller will reject Pod create requests which have the overhead already
                        set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
                        defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
                        More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
                      type: object
                    preemptionPolicy:
                      description: |-
                        PreemptionPolicy is the Policy for preempting pods with lower priority.
                        One of Never, PreemptLowerPriority.
                        Defaults to PreemptLowerPriority if unset.
                      type: string
                    priority:
                      description: |-
                        The priority value. Various system components use this field to find the
                        priority of the pod. When Priority Admission Controller is enabled, it
                        prevents users from setting this field. The admission controller populates
                        this field from PriorityClassName.
                        The higher the value, the higher the priority.
                      format: int32
                      type: integer
                    priorityClassName:
                      description: |-
                        If specified, indicates the pod's priority. "system-node-critical" and
                        "system-cluster-critical" are two special keywords which indicate the
                        highest priorities with the former being the highest priority. Any other
                        name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: |- + If specified, all readiness gates will be evaluated for pod readiness. + A pod is ready when all its containers are ready AND + all conditions specified in the readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + description: |- + ResourceClaims defines which ResourceClaims must be allocated + and reserved before the Pod is allowed to start. The resources + will be made available to those containers which consume them + by name. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: |- + PodResourceClaim references exactly one ResourceClaim, either directly + or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim + for the pod. + + It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. + Containers that need access to the ResourceClaim reference it with this name. + properties: + name: + description: |- + Name uniquely identifies this resource claim inside the pod. + This must be a DNS_LABEL. + type: string + resourceClaimName: + description: |- + ResourceClaimName is the name of a ResourceClaim object in the same + namespace as this pod. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + resourceClaimTemplateName: + description: |- + ResourceClaimTemplateName is the name of a ResourceClaimTemplate + object in the same namespace as this pod. + + The template will be used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, the ResourceClaim + will also be deleted. The pod name and resource name, along with a + generated component, will be used to form a unique name for the + ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + + This field is immutable and no changes will be made to the + corresponding ResourceClaim by the control plane after creating the + ResourceClaim. + + Exactly one of ResourceClaimName and ResourceClaimTemplateName must + be set. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + description: |- + Resources is the total amount of CPU and Memory resources required by all + containers in the pod. It supports specifying Requests and Limits for + "cpu" and "memory" resource names only. ResourceClaims are not supported. + + This field enables fine-grained control over resource allocation for the + entire pod, allowing resource sharing among containers in a pod. + + This is an alpha field and requires enabling the PodLevelResources feature + gate. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
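                              # Illustrative sketch (not part of the schema): how the pod-level
                              # resources field described above might be used, assuming the alpha
                              # PodLevelResources feature gate is enabled. The values shown are
                              # assumptions, not defaults:
                              #   spec:
                              #     resources:
                              #       requests:
                              #         cpu: "1"
                              #         memory: 1Gi
                              #       limits:
                              #         cpu: "2"
                              #         memory: 2Gi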
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + Restart policy for all containers within the pod. + One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + type: string + runtimeClassName: + description: |- + RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + type: string + schedulerName: + description: |- + If specified, the pod will be dispatched by specified scheduler. + If not specified, the pod will be dispatched by default scheduler. + type: string + schedulingGates: + description: |- + SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + scheduler will not attempt to schedule the pod. + + SchedulingGates can only be set at pod creation time, and be removed only afterwards. + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: |- + Name of the scheduling gate. + Each scheduling gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: |- + SecurityContext holds pod-level security attributes and common container settings. + Optional: Defaults to empty. 
See type description for default values of each field.
                      properties:
                        appArmorProfile:
                          description: |-
                            appArmorProfile is the AppArmor options to use by the containers in this pod.
                            Note that this field cannot be set when spec.os.name is windows.
                          properties:
                            localhostProfile:
                              description: |-
                                localhostProfile indicates a profile loaded on the node that should be used.
                                The profile must be preconfigured on the node to work.
                                Must match the loaded name of the profile.
                                Must be set if and only if type is "Localhost".
                              type: string
                            type:
                              description: |-
                                type indicates which kind of AppArmor profile will be applied.
                                Valid options are:
                                Localhost - a profile pre-loaded on the node.
                                RuntimeDefault - the container runtime's default profile.
                                Unconfined - no AppArmor enforcement.
                              type: string
                          required:
                          - type
                          type: object
                        fsGroup:
                          description: |-
                            A special supplemental group that applies to all containers in a pod.
                            Some volume types allow the Kubelet to change the ownership of that volume
                            to be owned by the pod:

                            1. The owning GID will be the FSGroup
                            2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
                            3. The permission bits are OR'd with rw-rw----

                            If unset, the Kubelet will not modify the ownership and permissions of any volume.
                            Note that this field cannot be set when spec.os.name is windows.
                          format: int64
                          type: integer
                        fsGroupChangePolicy:
                          description: |-
                            fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
                            before being exposed inside Pod. This field will only apply to
                            volume types which support fsGroup based ownership(and permissions).
                            It will have no effect on ephemeral volume types such as: secret, configmaps
                            and emptydir.
                            Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
                            Note that this field cannot be set when spec.os.name is windows.
                          type: string
                        runAsGroup:
                          description: |-
                            The GID to run the entrypoint of the container process.
                            Uses runtime default if unset.
                            May also be set in SecurityContext. If set in both SecurityContext and
                            PodSecurityContext, the value specified in SecurityContext takes precedence
                            for that container.
                            Note that this field cannot be set when spec.os.name is windows.
                          format: int64
                          type: integer
                        runAsNonRoot:
                          description: |-
                            Indicates that the container must run as a non-root user.
                            If true, the Kubelet will validate the image at runtime to ensure that it
                            does not run as UID 0 (root) and fail to start the container if it does.
                            If unset or false, no such validation will be performed.
                            May also be set in SecurityContext. If set in both SecurityContext and
                            PodSecurityContext, the value specified in SecurityContext takes precedence.
                          type: boolean
                        runAsUser:
                          description: |-
                            The UID to run the entrypoint of the container process.
                            Defaults to user specified in image metadata if unspecified.
                            May also be set in SecurityContext. If set in both SecurityContext and
                            PodSecurityContext, the value specified in SecurityContext takes precedence
                            for that container.
                            Note that this field cannot be set when spec.os.name is windows.
                          format: int64
                          type: integer
                        seLinuxChangePolicy:
                          description: |-
                            seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
                            It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
                            Valid values are "MountOption" and "Recursive".
+ + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. 
+ If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: |- + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + Deprecated: Use serviceAccountName instead. + type: string + serviceAccountName: + description: |- + ServiceAccountName is the name of the ServiceAccount to use to run this pod. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + type: string + setHostnameAsFQDN: + description: |- + If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). 
In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
                        If a pod does not have FQDN, this has no effect.
                        Defaults to false.
                      type: boolean
                    shareProcessNamespace:
                      description: |-
                        Share a single process namespace between all of the containers in a pod.
                        When this is set containers will be able to view and signal processes from other containers
                        in the same pod, and the first process in each container will not be assigned PID 1.
                        HostPID and ShareProcessNamespace cannot both be set.
                        Optional: Defaults to false.
                      type: boolean
                    subdomain:
                      description: |-
                        If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
                        If not specified, the pod will not have a domain name at all.
                      type: string
                    terminationGracePeriodSeconds:
                      description: |-
                        Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
                        Value must be non-negative integer. The value zero indicates stop immediately via
                        the kill signal (no opportunity to shut down).
                        If this value is nil, the default grace period will be used instead.
                        The grace period is the duration in seconds after the processes running in the pod are sent
                        a termination signal and the time when the processes are forcibly halted with a kill signal.
                        Set this value longer than the expected cleanup time for your process.
                        Defaults to 30 seconds.
                      format: int64
                      type: integer
                    tolerations:
                      description: If specified, the pod's tolerations.
                      items:
                        description: |-
                          The pod this Toleration is attached to tolerates any taint that matches
                          the triple <key,value,effect> using the matching operator <operator>.
                        properties:
                          effect:
                            description: |-
                              Effect indicates the taint effect to match. Empty means match all taint effects.
                              When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
                            type: string
                          key:
                            description: |-
                              Key is the taint key that the toleration applies to. Empty means match all taint keys.
                              If the key is empty, operator must be Exists; this combination means to match all values and all keys.
                            type: string
                          operator:
                            description: |-
                              Operator represents a key's relationship to the value.
                              Valid operators are Exists and Equal. Defaults to Equal.
                              Exists is equivalent to wildcard for value, so that a pod can
                              tolerate all taints of a particular category.
                            type: string
                          tolerationSeconds:
                            description: |-
                              TolerationSeconds represents the period of time the toleration (which must be
                              of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
                              it is not set, which means tolerate the taint forever (do not evict). Zero and
                              negative values will be treated as 0 (evict immediately) by the system.
                            format: int64
                            type: integer
                          value:
                            description: |-
                              Value is the taint value the toleration matches to.
                              If the operator is Exists, the value should be empty, otherwise just a regular string.
                            type: string
                        type: object
                      type: array
                      x-kubernetes-list-type: atomic
                    topologySpreadConstraints:
                      description: |-
                        TopologySpreadConstraints describes how a group of pods ought to spread across topology
                        domains. Scheduler will schedule pods in a way which abides by the constraints.
                        All topologySpreadConstraints are ANDed.
                      items:
                        description: TopologySpreadConstraint specifies how to spread
                          matching pods among the given topology.
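                        # Illustrative sketch (not part of the schema): a toleration matching the
                        # <key,value,effect> triple described in the tolerations field above. The
                        # key and value shown are assumptions, not values defined by this CRD:
                        #   tolerations:
                        #   - key: example-key
                        #     operator: Equal
                        #     value: example-value
                        #     effect: NoSchedule
                        #     tolerationSeconds: 3600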
+ properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
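                          # Illustrative sketch (not part of the schema): a constraint using the
                          # fields described above and below; the zone key is a standard well-known
                          # node label, while the "app: example" selector is an assumption:
                          #   topologySpreadConstraints:
                          #   - maxSkew: 1
                          #     topologyKey: topology.kubernetes.io/zone
                          #     whenUnsatisfiable: DoNotSchedule
                          #     labelSelector:
                          #       matchLabels:
                          #         app: example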
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + description: |- + List of volumes that can be mounted by containers belonging to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + default: ext4 + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
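                              # Illustrative sketch (not part of the schema): the general shape of
                              # an entry in the volumes list introduced above, using the emptyDir
                              # type documented further below; the name and size are assumptions:
                              #   volumes:
                              #   - name: scratch
                              #     emptyDir:
                              #       sizeLimit: 1Gi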
+ type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + default: false + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. 
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                              type: string
                            optional:
                              description: optional specify whether the ConfigMap
                                or its keys must be defined
                              type: boolean
                          type: object
                          x-kubernetes-map-type: atomic
                        csi:
                          description: csi (Container Storage Interface) represents
                            ephemeral storage that is handled by certain external
                            CSI drivers.
                          properties:
                            driver:
                              description: |-
                                driver is the name of the CSI driver that handles this volume.
                                Consult with your admin for the correct name as registered in the cluster.
                              type: string
                            fsType:
                              description: |-
                                fsType to mount. Ex. "ext4", "xfs", "ntfs".
                                If not provided, the empty value is passed to the associated CSI driver
                                which will determine the default filesystem to apply.
                              type: string
                            nodePublishSecretRef:
                              description: |-
                                nodePublishSecretRef is a reference to the secret object containing
                                sensitive information to pass to the CSI driver to complete the CSI
                                NodePublishVolume and NodeUnpublishVolume calls.
                                This field is optional, and may be empty if no secret is required. If the
                                secret object contains more than one secret, all secret references are passed.
                              properties:
                                name:
                                  default: ""
                                  description: |-
                                    Name of the referent.
                                    This field is effectively required, but due to backwards compatibility is
                                    allowed to be empty. Instances of this type with an empty value here are
                                    almost certainly wrong.
                                    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                                  type: string
                              type: object
                              x-kubernetes-map-type: atomic
                            readOnly:
                              description: |-
                                readOnly specifies a read-only configuration for the volume.
                                Defaults to false (read/write).
                              type: boolean
                            volumeAttributes:
                              additionalProperties:
                                type: string
                              description: |-
                                volumeAttributes stores driver-specific properties that are passed to the CSI
                                driver. Consult your driver's documentation for supported values.
                              type: object
                          required:
                          - driver
                          type: object
                        downwardAPI:
                          description: downwardAPI represents downward API about
                            the pod that should populate this volume
                          properties:
                            defaultMode:
                              description: |-
                                Optional: mode bits used to set permissions on created files by default.
                                Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
                                YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
                                Defaults to 0644.
                                Directories within the path are not affected by this setting.
                                This might be in conflict with other options that affect the file
                                mode, like fsGroup, and the result can be other mode bits set.
                              format: int32
                              type: integer
                            items:
                              description: Items is a list of downward API volume
                                file
                              items:
                                description: DownwardAPIVolumeFile represents
                                  information to create the file containing the
                                  pod field
                                properties:
                                  fieldRef:
                                    description: 'Required: Selects a field of
                                      the pod: only annotations, labels, name,
                                      namespace and uid are supported.'
                                    properties:
                                      apiVersion:
                                        description: Version of the schema the
                                          FieldPath is written in terms of, defaults
                                          to "v1".
                                        type: string
                                      fieldPath:
                                        description: Path of the field to select
                                          in the specified API version.
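                                      # Illustrative sketch (not part of the schema): a downwardAPI
                                      # volume projecting pod labels into a file, per the fields
                                      # described above; the volume name and path are assumptions:
                                      #   volumes:
                                      #   - name: podinfo
                                      #     downwardAPI:
                                      #       items:
                                      #       - path: labels
                                      #         fieldRef:
                                      #           fieldPath: metadata.labels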
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. 

                          Use this if:
                          a) the volume is only needed while the pod runs,
                          b) features of normal volumes like restoring from snapshot or capacity
                             tracking are needed,
                          c) the storage driver is specified through a storage class, and
                          d) the storage driver supports dynamic volume provisioning through
                             a PersistentVolumeClaim (see EphemeralVolumeSource for more
                             information on the connection between this volume type
                             and PersistentVolumeClaim).

                          Use PersistentVolumeClaim or one of the vendor-specific
                          APIs for volumes that persist for longer than the lifecycle
                          of an individual pod.

                          Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
                          be used that way - see the documentation of the driver for
                          more information.

                          A pod can use both types of ephemeral volumes and
                          persistent volumes at the same time.
                        properties:
                          volumeClaimTemplate:
                            description: |-
                              Will be used to create a stand-alone PVC to provision the volume.
                              The pod in which this EphemeralVolumeSource is embedded will be the
                              owner of the PVC, i.e. the PVC will be deleted together with the
                              pod. The name of the PVC will be `<pod name>-<volume name>` where
                              `<volume name>` is the name from the `PodSpec.Volumes` array
                              entry. Pod validation will reject the pod if the concatenated name
                              is not valid for a PVC (for example, too long).

                              An existing PVC with that name that is not owned by the pod
                              will *not* be used for the pod to avoid using an unrelated
                              volume by mistake. Starting the pod is then blocked until
                              the unrelated PVC is removed. If such a pre-created PVC is
                              meant to be used by the pod, the PVC has to be updated with an
                              owner reference to the pod once the pod exists. Normally
                              this should not be necessary, but it may be useful when
                              manually reconstructing a broken cluster.

                              This field is read-only and no changes will be made by Kubernetes
                              to the PVC after it has been created.

                              Required, must not be nil.
                            properties:
                              metadata:
                                description: |-
                                  May contain labels and annotations that will be copied into the PVC
                                  when creating it. No other fields are allowed and will be rejected during
                                  validation.
                                type: object
                              spec:
                                description: |-
                                  The specification for the PersistentVolumeClaim. The entire content is
                                  copied unchanged into the PVC that gets created from this
                                  template. The same fields as in a PersistentVolumeClaim
                                  are also valid here.
                                properties:
                                  accessModes:
                                    description: |-
                                      accessModes contains the desired access modes the volume should have.
                                      More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
                                    items:
                                      type: string
                                    type: array
                                    x-kubernetes-list-type: atomic
                                  dataSource:
                                    description: |-
                                      dataSource field can be used to specify either:
                                      * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
                                      * An existing PVC (PersistentVolumeClaim)
                                      If the provisioner or an external controller can support the specified data source,
                                      it will create a new volume based on the contents of the specified data source.
                                      When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
                                      and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
                                      If the namespace is specified, then dataSourceRef will not be copied to dataSource.
                                    properties:
                                      apiGroup:
                                        description: |-
                                          APIGroup is the group for the resource being referenced.
                                          If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. 
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + image: + description: |- + image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + The volume is resolved at pod startup depending on which PullPolicy value is provided: + + - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + + The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + The volume will be mounted read-only (ro) and non-executable files (noexec). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. + The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + properties: + pullPolicy: + description: |- + Policy for pulling OCI objects. Possible values are: + Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + Never: the kubelet never pulls the reference and only uses a local image or artifact. 
Container creation will fail if the reference isn't present. + IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + type: string + reference: + description: |- + Required: Image or artifact reference to be used. + Behaves in the same way as pod.spec.containers[*].image. + Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + default: default + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. 
A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + default: /etc/ceph/keyring + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + description: |- + pool is the rados pool name. + Default is rbd. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. + properties: + fsType: + default: xfs + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + default: ThinProvisioned + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - containers + type: object + type: object + type: + default: rw + description: 'Type of service to forward traffic to. Default: `rw`.' + enum: + - rw + - ro + - r + type: string + required: + - cluster + - pgbouncer + type: object + status: + description: |- + Most recently observed status of the Pooler. This data may not be up to + date. Populated by the system. Read-only. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + instances: + description: The number of pods trying to be scheduled + format: int32 + type: integer + secrets: + description: The resource version of the config object + properties: + clientCA: + description: The client CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + pgBouncerSecrets: + description: The version of the secrets used by PgBouncer + properties: + authQuery: + description: The auth query secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + serverCA: + description: The server CA secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + serverTLS: + description: The server TLS secret version + properties: + name: + description: The name of the secret + type: string + version: + description: The ResourceVersion of the secret + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.instances + statusReplicasPath: .status.instances + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: publications.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Publication + listKind: PublicationList + plural: publications + singular: publication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Publication is the Schema for the publications API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PublicationSpec defines the desired state of Publication + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "publisher" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "publisher" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + name: + description: The name of the publication inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Publication parameters part of the `WITH` clause as expected by + PostgreSQL `CREATE PUBLICATION` command + type: object + publicationReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this publication + enum: + - delete + - retain + type: string + target: + description: Target of the publication as expected by PostgreSQL `CREATE + PUBLICATION` command + properties: + allTables: + description: |- + Marks the publication as one that replicates changes for all tables + in the database, including tables created in the future. + Corresponding to `FOR ALL TABLES` in PostgreSQL. + type: boolean + x-kubernetes-validations: + - message: allTables is immutable + rule: self == oldSelf + objects: + description: Just the following schema objects + items: + description: PublicationTargetObject is an object to publish + properties: + table: + description: |- + Specifies a list of tables to add to the publication. Corresponding + to `FOR TABLE` in PostgreSQL. + properties: + columns: + description: The columns to publish + items: + type: string + type: array + name: + description: The table name + type: string + only: + description: Whether to limit to the table only or include + all its descendants + type: boolean + schema: + description: The schema name + type: string + required: + - name + type: object + tablesInSchema: + description: |- + Marks the publication as one that replicates changes for all tables + in the specified list of schemas, including tables created in the + future. Corresponding to `FOR TABLES IN SCHEMA` in PostgreSQL. 
+ type: string + type: object + x-kubernetes-validations: + - message: tablesInSchema and table are mutually exclusive + rule: (has(self.tablesInSchema) && !has(self.table)) || (!has(self.tablesInSchema) + && has(self.table)) + maxItems: 100000 + type: array + x-kubernetes-validations: + - message: specifying a column list when the publication also + publishes tablesInSchema is not supported + rule: '!(self.exists(o, has(o.table) && has(o.table.columns)) + && self.exists(o, has(o.tablesInSchema)))' + type: object + x-kubernetes-validations: + - message: allTables and objects are mutually exclusive + rule: (has(self.allTables) && !has(self.objects)) || (!has(self.allTables) + && has(self.objects)) + required: + - cluster + - dbname + - name + - target + type: object + status: + description: PublicationStatus defines the observed state of Publication + properties: + applied: + description: Applied is true if the publication was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scheduledbackups.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: ScheduledBackup + listKind: ScheduledBackupList + plural: scheduledbackups + singular: scheduledbackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .status.lastScheduleTime + name: Last Backup + type: date + name: v1 + schema: + openAPIV3Schema: + description: ScheduledBackup is the Schema for the scheduledbackups API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Specification of the desired behavior of the ScheduledBackup. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + backupOwnerReference: + default: none + description: |- + Indicates which ownerReference should be put inside the created backup resources.
+                  - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+                  - self: sets the ScheduledBackup object as owner of the backup
+                  - cluster: sets the cluster as owner of the backup
+ enum: + - none + - self + - cluster + type: string + cluster: + description: The cluster to backup + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + immediate: + description: If the first backup has to be immediately start after + creation or not + type: boolean + method: + default: barmanObjectStore + description: |- + The backup method to be used, possible options are `barmanObjectStore`, + `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`. + enum: + - barmanObjectStore + - volumeSnapshot + - plugin + type: string + online: + description: |- + Whether the default type of backup with volume snapshots is + online/hot (`true`, default) or offline/cold (`false`) + Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online' + type: boolean + onlineConfiguration: + description: |- + Configuration parameters to control the online/hot backup with volume snapshots + Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza + properties: + immediateCheckpoint: + description: |- + Control whether the I/O workload for the backup initial checkpoint will + be limited, according to the `checkpoint_completion_target` setting on + the PostgreSQL server. If set to true, an immediate checkpoint will be + used, meaning PostgreSQL will complete the checkpoint as soon as + possible. `false` by default. + type: boolean + waitForArchive: + default: true + description: |- + If false, the function will return immediately after the backup is completed, + without waiting for WAL to be archived. + This behavior is only useful with backup software that independently monitors WAL archiving. + Otherwise, WAL required to make the backup consistent might be missing and make the backup useless. + By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is + enabled. + On a standby, this means that it will wait only when archive_mode = always. + If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger + an immediate segment switch. + type: boolean + type: object + pluginConfiguration: + description: Configuration parameters passed to the plugin managing + this backup + properties: + name: + description: Name is the name of the plugin managing this backup + type: string + parameters: + additionalProperties: + type: string + description: |- + Parameters are the configuration parameters passed to the backup + plugin for this backup + type: object + required: + - name + type: object + schedule: + description: |- + The schedule does not follow the same format used in Kubernetes CronJobs + as it includes an additional seconds specifier, + see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format + type: string + suspend: + description: If this backup is suspended or not + type: boolean + target: + description: |- + The policy to decide which instance should perform this backup. If empty, + it defaults to `cluster.spec.backup.target`. + Available options are empty string, `primary` and `prefer-standby`. + `primary` to have backups run always on primary instances, + `prefer-standby` to have backups run preferably on the most updated + standby, if available. + enum: + - primary + - prefer-standby + type: string + required: + - cluster + - schedule + type: object + status: + description: |- + Most recently observed status of the ScheduledBackup. 
This data may not be up + to date. Populated by the system. Read-only. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + lastCheckTime: + description: The latest time the schedule + format: date-time + type: string + lastScheduleTime: + description: Information when was the last time that backup was successfully + scheduled. + format: date-time + type: string + nextScheduleTime: + description: Next time we will run a backup + format: date-time + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: subscriptions.postgresql.cnpg.io +spec: + group: postgresql.cnpg.io + names: + kind: Subscription + listKind: SubscriptionList + plural: subscriptions + singular: subscription + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.cluster.name + name: Cluster + type: string + - jsonPath: .spec.name + name: PG Name + type: string + - jsonPath: .status.applied + name: Applied + type: boolean + - description: Latest reconciliation message + jsonPath: .status.message + name: Message + type: string + name: v1 + schema: + openAPIV3Schema: + description: Subscription is the Schema for the subscriptions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines the desired state of Subscription + properties: + cluster: + description: The name of the PostgreSQL cluster that identifies the + "subscriber" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + dbname: + description: |- + The name of the database where the publication will be installed in + the "subscriber" cluster + type: string + x-kubernetes-validations: + - message: dbname is immutable + rule: self == oldSelf + externalClusterName: + description: The name of the external cluster with the publication + ("publisher") + type: string + name: + description: The name of the subscription inside PostgreSQL + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + parameters: + additionalProperties: + type: string + description: |- + Subscription parameters included in the `WITH` clause of the PostgreSQL + `CREATE SUBSCRIPTION` command. 
Most parameters cannot be changed + after the subscription is created and will be ignored if modified + later, except for a limited set documented at: + https://www.postgresql.org/docs/current/sql-altersubscription.html#SQL-ALTERSUBSCRIPTION-PARAMS-SET + type: object + publicationDBName: + description: |- + The name of the database containing the publication on the external + cluster. Defaults to the one in the external cluster definition. + type: string + publicationName: + description: |- + The name of the publication inside the PostgreSQL database in the + "publisher" + type: string + subscriptionReclaimPolicy: + default: retain + description: The policy for end-of-life maintenance of this subscription + enum: + - delete + - retain + type: string + required: + - cluster + - dbname + - externalClusterName + - name + - publicationName + type: object + status: + description: SubscriptionStatus defines the observed state of Subscription + properties: + applied: + description: Applied is true if the subscription was reconciled correctly + type: boolean + message: + description: Message is the reconciliation output message + type: string + observedGeneration: + description: |- + A sequence number representing the latest + desired state that was synchronized + format: int64 + type: integer + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-database-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - databases + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - databases/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cnpg-manager +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps/status + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - pods/exec + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - patch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups + - clusters + - databases + - poolers + - publications + - scheduledbackups + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - backups/status + - databases/status + - publications/status + - scheduledbackups/status + - subscriptions/status + verbs: + - get + - patch + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusterimagecatalogs + - imagecatalogs + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/finalizers + - poolers/finalizers + verbs: + - update +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - failoverquorums/status + - poolers/status + verbs: + - get + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - failoverquorums + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - get + - list + - patch + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-publication-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - publications + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - publications/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-editor-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: cloudnative-pg-kubebuilderv4 + name: cnpg-subscription-viewer-role +rules: +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - subscriptions/status + verbs: + - get +--- 
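For orientation, a Subscription object that satisfies the schema introduced above could look like the following minimal sketch (every name in it is illustrative, not taken from this patch):

    apiVersion: postgresql.cnpg.io/v1
    kind: Subscription
    metadata:
      name: orders-sub
    spec:
      cluster:
        name: cluster-subscriber              # the "subscriber" cluster
      name: orders_sub                        # subscription name inside PostgreSQL (immutable)
      dbname: app                             # target database on the subscriber (immutable)
      externalClusterName: cluster-publisher  # external cluster exposing the publication
      publicationName: orders_pub
      subscriptionReclaimPolicy: retain       # the default; the enum also allows "delete"

The `cnpg-subscription-editor-role` defined above carries exactly the verbs needed to manage such objects, and can be granted to a user or service account through an ordinary RoleBinding.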
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cnpg-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cnpg-manager +subjects: +- kind: ServiceAccount + name: cnpg-manager + namespace: cnpg-system +--- +apiVersion: v1 +data: + queries: | + backends: + query: | + SELECT sa.datname + , sa.usename + , sa.application_name + , states.state + , COALESCE(sa.count, 0) AS total + , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds + FROM ( VALUES ('active') + , ('idle') + , ('idle in transaction') + , ('idle in transaction (aborted)') + , ('fastpath function call') + , ('disabled') + ) AS states(state) + LEFT JOIN ( + SELECT datname + , state + , usename + , COALESCE(application_name, '') AS application_name + , COUNT(*) + , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs + FROM pg_catalog.pg_stat_activity + GROUP BY datname, state, usename, application_name + ) sa ON states.state = sa.state + WHERE sa.usename IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - usename: + usage: "LABEL" + description: "Name of the user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - state: + usage: "LABEL" + description: "State of the backend" + - total: + usage: "GAUGE" + description: "Number of backends" + - max_tx_duration_seconds: + usage: "GAUGE" + description: "Maximum duration of a transaction in seconds" + + backends_waiting: + query: | + SELECT count(*) AS total + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.granted + metrics: + - total: + usage: "GAUGE" + description: "Total number of backends that are currently waiting on other queries" + + pg_database: + query: | + SELECT datname + , pg_catalog.pg_database_size(datname) AS size_bytes + , pg_catalog.age(datfrozenxid) AS xid_age + , pg_catalog.mxid_age(datminmxid) AS mxid_age + FROM pg_catalog.pg_database + WHERE datallowconn + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - size_bytes: + usage: "GAUGE" + description: "Disk space used by the database" + - xid_age: + usage: "GAUGE" + description: "Number of transactions from the frozen XID to the current one" + - mxid_age: + usage: "GAUGE" + description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" + + pg_postmaster: + query: | + SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time + FROM pg_catalog.pg_postmaster_start_time() + metrics: + - start_time: + usage: "GAUGE" + description: "Time at which postgres started (based on 
epoch)" + + pg_replication: + query: "SELECT CASE WHEN ( + NOT pg_catalog.pg_is_in_recovery() + OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) + THEN 0 + ELSE GREATEST (0, + EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) + END AS lag, + pg_catalog.pg_is_in_recovery() AS in_recovery, + EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, + (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas" + metrics: + - lag: + usage: "GAUGE" + description: "Replication lag behind primary in seconds" + - in_recovery: + usage: "GAUGE" + description: "Whether the instance is in recovery" + - is_wal_receiver_up: + usage: "GAUGE" + description: "Whether the instance wal_receiver is up" + - streaming_replicas: + usage: "GAUGE" + description: "Number of streaming replicas connected to the instance" + + pg_replication_slots: + query: | + SELECT slot_name, + slot_type, + database, + active, + (CASE pg_catalog.pg_is_in_recovery() + WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn) + ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) + END) as pg_wal_lsn_diff + FROM pg_catalog.pg_replication_slots + WHERE NOT temporary + metrics: + - slot_name: + usage: "LABEL" + description: "Name of the replication slot" + - slot_type: + usage: "LABEL" + description: "Type of the replication slot" + - database: + usage: "LABEL" + description: "Name of the database" + - active: + usage: "GAUGE" + description: "Flag indicating whether the slot is active" + - pg_wal_lsn_diff: + usage: "GAUGE" + description: "Replication lag in bytes" + + pg_stat_archiver: + query: | + SELECT archived_count + , failed_count + , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival + , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure + , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time + , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn + , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_archiver + metrics: + - archived_count: + usage: "COUNTER" + description: "Number of WAL files that have been successfully archived" + - failed_count: + usage: "COUNTER" + description: "Number of failed attempts for archiving WAL files" + - seconds_since_last_archival: + usage: "GAUGE" + description: "Seconds since the last successful archival operation" + - seconds_since_last_failure: + usage: "GAUGE" + description: "Seconds since the last failed archival operation" + - last_archived_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving succeeded" + - last_failed_time: + usage: "GAUGE" + description: "Epoch of the last time WAL archiving failed" + - last_archived_wal_start_lsn: + usage: "GAUGE" + description: "Archived WAL start LSN" + - last_failed_wal_start_lsn: + usage: "GAUGE" + description: "Last failed WAL LSN" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_bgwriter: + runonserver: "<17.0.0" + query: | + SELECT 
checkpoints_timed + , checkpoints_req + , checkpoint_write_time + , checkpoint_sync_time + , buffers_checkpoint + , buffers_clean + , maxwritten_clean + , buffers_backend + , buffers_backend_fsync + , buffers_alloc + FROM pg_catalog.pg_stat_bgwriter + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - checkpoint_write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" + - checkpoint_sync_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" + - buffers_checkpoint: + usage: "COUNTER" + description: "Number of buffers written during checkpoints" + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_backend: + usage: "COUNTER" + description: "Number of buffers written directly by a backend" + - buffers_backend_fsync: + usage: "COUNTER" + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + + pg_stat_bgwriter_17: + runonserver: ">=17.0.0" + name: pg_stat_bgwriter + query: | + SELECT buffers_clean + , maxwritten_clean + , buffers_alloc + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_bgwriter + metrics: + - buffers_clean: + usage: "COUNTER" + description: "Number of buffers written by the background writer" + - maxwritten_clean: + usage: "COUNTER" + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" + - buffers_alloc: + usage: "COUNTER" + description: "Number of buffers allocated" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_checkpointer: + runonserver: ">=17.0.0" + query: | + SELECT num_timed AS checkpoints_timed + , num_requested AS checkpoints_req + , restartpoints_timed + , restartpoints_req + , restartpoints_done + , write_time + , sync_time + , buffers_written + , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time + FROM pg_catalog.pg_stat_checkpointer + metrics: + - checkpoints_timed: + usage: "COUNTER" + description: "Number of scheduled checkpoints that have been performed" + - checkpoints_req: + usage: "COUNTER" + description: "Number of requested checkpoints that have been performed" + - restartpoints_timed: + usage: "COUNTER" + description: "Number of scheduled restartpoints due to timeout or after a failed attempt to perform it" + - restartpoints_req: + usage: "COUNTER" + description: "Number of requested restartpoints that have been performed" + - restartpoints_done: + usage: "COUNTER" + description: "Number of restartpoints that have been performed" + - write_time: + usage: "COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are written to disk, in milliseconds" + - sync_time: + usage: 
"COUNTER" + description: "Total amount of time that has been spent in the portion of processing checkpoints and restartpoints where files are synchronized to disk, in milliseconds" + - buffers_written: + usage: "COUNTER" + description: "Number of buffers written during checkpoints and restartpoints" + - stats_reset_time: + usage: "GAUGE" + description: "Time at which these statistics were last reset" + + pg_stat_database: + query: | + SELECT datname + , xact_commit + , xact_rollback + , blks_read + , blks_hit + , tup_returned + , tup_fetched + , tup_inserted + , tup_updated + , tup_deleted + , conflicts + , temp_files + , temp_bytes + , deadlocks + , blk_read_time + , blk_write_time + FROM pg_catalog.pg_stat_database + metrics: + - datname: + usage: "LABEL" + description: "Name of this database" + - xact_commit: + usage: "COUNTER" + description: "Number of transactions in this database that have been committed" + - xact_rollback: + usage: "COUNTER" + description: "Number of transactions in this database that have been rolled back" + - blks_read: + usage: "COUNTER" + description: "Number of disk blocks read in this database" + - blks_hit: + usage: "COUNTER" + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" + - tup_returned: + usage: "COUNTER" + description: "Number of rows returned by queries in this database" + - tup_fetched: + usage: "COUNTER" + description: "Number of rows fetched by queries in this database" + - tup_inserted: + usage: "COUNTER" + description: "Number of rows inserted by queries in this database" + - tup_updated: + usage: "COUNTER" + description: "Number of rows updated by queries in this database" + - tup_deleted: + usage: "COUNTER" + description: "Number of rows deleted by queries in this database" + - conflicts: + usage: "COUNTER" + description: "Number of queries canceled due to conflicts with recovery in this database" + - temp_files: + usage: "COUNTER" + description: "Number of temporary files created by queries in this database" + - temp_bytes: + usage: "COUNTER" + description: "Total amount of data written to temporary files by queries in this database" + - deadlocks: + usage: "COUNTER" + description: "Number of deadlocks detected in this database" + - blk_read_time: + usage: "COUNTER" + description: "Time spent reading data file blocks by backends in this database, in milliseconds" + - blk_write_time: + usage: "COUNTER" + description: "Time spent writing data file blocks by backends in this database, in milliseconds" + + pg_stat_replication: + primary: true + query: | + SELECT usename + , COALESCE(application_name, '') AS application_name + , COALESCE(client_addr::text, '') AS client_addr + , COALESCE(client_port::text, '') AS client_port + , EXTRACT(EPOCH FROM backend_start) AS backend_start + , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes + , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes + , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes + , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds + , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds + , 
COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds + FROM pg_catalog.pg_stat_replication + metrics: + - usename: + usage: "LABEL" + description: "Name of the replication user" + - application_name: + usage: "LABEL" + description: "Name of the application" + - client_addr: + usage: "LABEL" + description: "Client IP address" + - client_port: + usage: "LABEL" + description: "Client TCP port" + - backend_start: + usage: "COUNTER" + description: "Time when this process was started" + - backend_xmin_age: + usage: "COUNTER" + description: "The age of this standby's xmin horizon" + - sent_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location sent on this connection" + - write_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location written to disk by this standby server" + - flush_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" + - replay_diff_bytes: + usage: "GAUGE" + description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" + - write_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" + - flush_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" + - replay_lag_seconds: + usage: "GAUGE" + description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" + + pg_settings: + query: | + SELECT name, + CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting + FROM pg_catalog.pg_settings + WHERE vartype IN ('integer', 'real', 'bool') + ORDER BY 1 + metrics: + - name: + usage: "LABEL" + description: "Name of the setting" + - setting: + usage: "GAUGE" + description: "Setting value" + + pg_extensions: + query: | + SELECT + current_database() as datname, + name as extname, + default_version, + installed_version, + CASE + WHEN default_version = installed_version THEN 0 + ELSE 1 + END AS update_available + FROM pg_catalog.pg_available_extensions + WHERE installed_version IS NOT NULL + metrics: + - datname: + usage: "LABEL" + description: "Name of the database" + - extname: + usage: "LABEL" + description: "Extension name" + - default_version: + usage: "LABEL" + description: "Default version" + - installed_version: + usage: "LABEL" + description: "Installed version" + - update_available: + usage: "GAUGE" + description: "An update is available" + target_databases: + - '*' +kind: ConfigMap +metadata: + labels: + cnpg.io/reload: "" + name: cnpg-default-monitoring + namespace: cnpg-system +--- +apiVersion: v1 +kind: Service +metadata: + name: cnpg-webhook-service + namespace: cnpg-system +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + app.kubernetes.io/name: cloudnative-pg +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + name: cnpg-controller-manager + namespace: cnpg-system +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cloudnative-pg + template: + metadata: + labels: + app.kubernetes.io/name: cloudnative-pg + spec: + containers: + - args: + - controller + - --leader-elect + - 
--max-concurrent-reconciles=10 + - --config-map-name=cnpg-controller-manager-config + - --secret-name=cnpg-controller-manager-config + - --webhook-port=9443 + command: + - /manager + env: + - name: OPERATOR_IMAGE_NAME + value: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0 + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MONITORING_QUERIES_CONFIGMAP + value: cnpg-default-monitoring + image: ghcr.io/cloudnative-pg/cloudnative-pg:1.27.0 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 10001 + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + startupProbe: + failureThreshold: 6 + httpGet: + path: /readyz + port: 9443 + scheme: HTTPS + periodSeconds: 5 + volumeMounts: + - mountPath: /controller + name: scratch-data + - mountPath: /run/secrets/cnpg.io/webhook + name: webhook-certificates + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: cnpg-manager + terminationGracePeriodSeconds: 10 + volumes: + - emptyDir: {} + name: scratch-data + - name: webhook-certificates + secret: + defaultMode: 420 + optional: true + secretName: cnpg-webhook-cert +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cnpg-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: mbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: mcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: mdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /mutate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: mscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cnpg-validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: 
/validate-postgresql-cnpg-io-v1-backup + failurePolicy: Fail + name: vbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - backups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-cluster + failurePolicy: Fail + name: vcluster.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clusters + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-database + failurePolicy: Fail + name: vdatabase.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-pooler + failurePolicy: Fail + name: vpooler.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - poolers + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: cnpg-webhook-service + namespace: cnpg-system + path: /validate-postgresql-cnpg-io-v1-scheduledbackup + failurePolicy: Fail + name: vscheduledbackup.cnpg.io + rules: + - apiGroups: + - postgresql.cnpg.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - scheduledbackups + sideEffects: None From 5013c26d7870ad416b1be5a9e69992f1cbf635a5 Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 13 Aug 2025 14:11:14 +0200 Subject: [PATCH 793/836] chore: add release-1.27 branch to templates and workflows (#8340) Signed-off-by: Jonathan Gonzalez V. --- .github/ISSUE_TEMPLATE/bug.yml | 2 ++ .github/renovate.json5 | 1 + .github/workflows/backport.yml | 6 ++++-- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index cc3f5a3fc4..0abbc90ae1 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -48,9 +48,11 @@ body: label: Version description: What is the version of CloudNativePG you are running? options: + - "1.27 (latest patch)" - "1.26 (latest patch)" - "1.25 (latest patch)" - "trunk (main)" + - "older in 1.27.x" - "older in 1.26.x" - "older in 1.25.x" - "older minor (unsupported)" diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 310171db0e..590c18e0b2 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -13,6 +13,7 @@ 'release-1.22', 'release-1.25', 'release-1.26', + 'release-1.27', ], ignorePaths: [ 'docs/**', diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 688ca4c3f4..4b217dccc6 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -19,7 +19,7 @@ jobs: # we backport everything, except those PR that are created or contain `do not backport` explicitly. 
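 # In practice the `label-source-pr` job below tags incoming PRs with
 # `backport-requested` plus one label per supported release branch, and the
 # `back-porting-pr` matrix only cherry-picks onto a branch whose label is
 # still present on the PR.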
label-source-pr: name: Add labels to PR - if: | + if: | github.event.pull_request.merged == false && !contains(github.event.pull_request.labels.*.name, 'backport-requested') && !contains(github.event.pull_request.labels.*.name, 'do not backport') @@ -39,6 +39,7 @@ jobs: release-1.22 release-1.25 release-1.26 + release-1.27 - name: Create comment uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4 @@ -63,6 +64,7 @@ jobs: release-1.22 release-1.25 release-1.26 + release-1.27 ## backport pull request in condition when pr contains 'backport-requested' label and contains target branches labels back-porting-pr: @@ -78,7 +80,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.25, release-1.26] + branch: [release-1.22, release-1.25, release-1.26, release-1.27] env: PR: ${{ github.event.pull_request.number }} outputs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index b951103258..b7ab13dcb8 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -77,7 +77,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.25, release-1.26] + branch: [release-1.22, release-1.25, release-1.26, release-1.27] steps: - name: Invoke workflow with inputs uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 78ff2ccc11..739417e9f8 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -60,7 +60,7 @@ jobs: strategy: fail-fast: false matrix: - branch: [release-1.22, release-1.25, release-1.26] + branch: [release-1.22, release-1.25, release-1.26, release-1.27] steps: - name: Invoke workflow with inputs uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1 From fdb2fad18946667121befee3c9595e5861072ec0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 14 Aug 2025 10:57:41 +0200 Subject: [PATCH 794/836] chore(deps): update actions/checkout action to v5 (main) (#8343) --- .github/workflows/backport.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 16 ++++++------ .github/workflows/continuous-integration.yml | 26 +++++++++---------- .github/workflows/k8s-versions-check.yml | 2 +- .../latest-postgres-version-check.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/refresh-licenses.yml | 2 +- .github/workflows/release-pr.yml | 2 +- .github/workflows/release-publish.yml | 12 ++++----- .github/workflows/release-tag.yml | 2 +- .github/workflows/snyk.yml | 2 +- .github/workflows/spellcheck.yml | 4 +-- 13 files changed, 38 insertions(+), 38 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 4b217dccc6..3a8a751ff7 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -89,7 +89,7 @@ jobs: - name: Checkout code if: contains( github.event.pull_request.labels.*.name, matrix.branch ) - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: fetch-depth: 0 ref: ${{ matrix.branch }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index c4f62b560e..51f6bad642 100644 --- 
a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index b7ab13dcb8..a1b94bc40e 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -279,7 +279,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} # To identify the commit we need the history and all the tags. @@ -492,7 +492,7 @@ jobs: steps: - name: Checkout artifact - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: repository: cloudnative-pg/artifacts token: ${{ secrets.REPO_GHA_PAT }} @@ -575,7 +575,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -653,7 +653,7 @@ jobs: echo "-----------------------------------------------------" - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -880,7 +880,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -1227,7 +1227,7 @@ jobs: echo "CLUSTER_NAME=${{ env.E2E_SUFFIX }}-test-${{ github.run_number }}-$( echo ${{ matrix.id }} | tr -d '_.-' )" >> $GITHUB_ENV - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -1614,7 +1614,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} - @@ -1927,7 +1927,7 @@ jobs: echo "CLUSTER_NAME=${{ env.E2E_SUFFIX }}-ocp-${{ github.run_number}}-$( echo ${{ matrix.k8s_version }} | tr -d '.' 
)" >> $GITHUB_ENV - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: ref: ${{ needs.evaluate_options.outputs.git_ref }} fetch-depth: 0 diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 739417e9f8..e1f4d7f12f 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -103,7 +103,7 @@ jobs: renovate-changed: ${{ steps.filter.outputs.renovate-changed }} steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Check for changes uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 @@ -154,7 +154,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -184,7 +184,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Validate Renovate JSON run: npx --yes --package renovate@40.48.6 -- renovate-config-validator @@ -222,7 +222,7 @@ jobs: SHELLCHECK_OPTS: -a -S style steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Run ShellCheck uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 @@ -245,7 +245,7 @@ jobs: latest_k8s_version: ${{ steps.get-k8s-versions.outputs.latest_k8s_version }} steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Get k8s versions for unit test id: get-k8s-versions @@ -285,7 +285,7 @@ jobs: k8s-version: ${{ fromJSON(needs.generate-unit-tests-jobs.outputs.k8sMatrix) }} steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -324,7 +324,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -359,7 +359,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -426,7 +426,7 @@ jobs: push: ${{ env.PUSH }} steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: # To identify the commit we need the history and all the tags. 
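 # (`fetch-depth: 0` below disables the default shallow clone, so the whole
 # history and every tag are actually fetched)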
fetch-depth: 0 @@ -659,7 +659,7 @@ jobs: needs.buildx.outputs.push == 'true' steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: fetch-depth: 0 ref: ${{ needs.buildx.outputs.commit }} @@ -715,7 +715,7 @@ jobs: needs.olm-bundle.result == 'success' steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 @@ -772,7 +772,7 @@ jobs: github.repository_owner == 'cloudnative-pg' steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Setting up KinD cluster uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 @@ -830,7 +830,7 @@ jobs: OPP_RELEASE_INDEX_NAME: "catalog_tmp" steps: - name: Checkout community-operators - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: repository: k8s-operatorhub/community-operators persist-credentials: false diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index e8da20a92b..8546b7d261 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -34,7 +34,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - # There is no command to get EKS k8s versions, we have to parse the documentation name: Get updated EKS versions diff --git a/.github/workflows/latest-postgres-version-check.yml b/.github/workflows/latest-postgres-version-check.yml index ae689309e3..29021732b8 100644 --- a/.github/workflows/latest-postgres-version-check.yml +++ b/.github/workflows/latest-postgres-version-check.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Set up Python 3.9 uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5 diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 5dadff00dd..0d7ae99a7d 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -35,7 +35,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index f3c0e63a49..c666de0f96 100644 --- a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 diff --git a/.github/workflows/release-pr.yml b/.github/workflows/release-pr.yml index d886e85171..70644b5b61 100644 --- 
a/.github/workflows/release-pr.yml +++ b/.github/workflows/release-pr.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Get tag run: | diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 66c66c0d1f..87eacac4e7 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -25,7 +25,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 @@ -56,7 +56,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Get tag run: | @@ -104,7 +104,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: # To identify the commit we need the history and all the tags. fetch-depth: 0 @@ -252,7 +252,7 @@ jobs: needs.check-version.outputs.is_stable == 'true' steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: fetch-depth: 0 @@ -315,7 +315,7 @@ jobs: VERSION: ${{ needs.release-binaries.outputs.version }} steps: - name: Checkout community-operators - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: repository: k8s-operatorhub/community-operators fetch-depth: 0 @@ -392,7 +392,7 @@ jobs: steps: - name: Checkout artifact - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 with: repository: cloudnative-pg/artifacts token: ${{ secrets.REPO_GHA_PAT }} diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml index 7010621e90..b6f8fc32f8 100644 --- a/.github/workflows/release-tag.yml +++ b/.github/workflows/release-tag.yml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Create tag if: github.event.pull_request.merged == true && startsWith(github.head_ref, 'release/v') diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index bcdd54f1e3..cc5a0fcbcd 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -18,7 +18,7 @@ jobs: security-events: write steps: - name: Checkout code - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Static Code Analysis uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # 0.4.0 diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index d8b2c9bc18..c27e21723d 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: 
woke uses: get-woke/woke-action@b2ec032c4a2c912142b38a6a453ad62017813ed0 # v0 @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 - name: Spellcheck uses: rojopolis/spellcheck-github-actions@35a02bae020e6999c5c37fabaf447f2eb8822ca7 # 0.51.0 From 8ac7273aaf0642b8920af2331488e94818b4d678 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 14 Aug 2025 13:42:38 +0200 Subject: [PATCH 795/836] chore(deps): update all non-major github action (main) (#8329) This PR contains the following updates: https://github.com/github/codeql-action `76621b6` -> `df55935` snyk/actions `77490d9` -> `7e76a00` --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 4 ++-- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 51f6bad642..1dca03dd20 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index e1f4d7f12f..fb62a02966 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -605,7 +605,7 @@ jobs: accept-keywords: key - name: Run Snyk to check Docker image for vulnerabilities - uses: snyk/actions/docker@77490d94e966421e076e95ad8fa87aa55e5ca409 # master + uses: snyk/actions/docker@7e76a00337235faa7b795d0c300768e54c9b0923 # master if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 0d7ae99a7d..0cd5e7cfa9 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index cc5a0fcbcd..09fdf47cd5 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3 with: sarif_file: snyk-test.sarif From 92074ec070772fcbf6ed4bcbc1fb9b25c7788224 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 14 Aug 2025 14:14:36 +0200 Subject: [PATCH 796/836] chore(deps): update dependency redhat-openshift-ecosystem/openshift-preflight to v1.14.1 (main) (#8339) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e4d9637e2e..ad7c822b8d 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ OPERATOR_SDK_VERSION ?= v1.41.1 # renovate: datasource=github-tags depName=operator-framework/operator-registry OPM_VERSION ?= v1.56.0 # renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight -PREFLIGHT_VERSION ?= 1.14.0 +PREFLIGHT_VERSION ?= 1.14.1 OPENSHIFT_VERSIONS ?= v4.12-v4.19 ARCH ?= amd64 From 693178a50e1cd876a2588e680788ddd21b89558b Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Mon, 18 Aug 2025 14:31:48 +0200 Subject: [PATCH 797/836] chore: remove any reference to branch release-1.22 (#8367) Closes #8366 Signed-off-by: Jonathan Gonzalez V. 
Signed-off-by: Marco Nenciarini
Co-authored-by: Marco Nenciarini
---
 .github/renovate.json5 | 1 -
 .github/workflows/backport.yml | 4 +---
 .github/workflows/continuous-delivery.yml | 2 +-
 .github/workflows/continuous-integration.yml | 2 +-
 tests/e2e/openshift_upgrade_test.go | 2 +-
 5 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/.github/renovate.json5 b/.github/renovate.json5
index 590c18e0b2..d15e933194 100644
--- a/.github/renovate.json5
+++ b/.github/renovate.json5
@@ -10,7 +10,6 @@
 prConcurrentLimit: 5,
 baseBranches: [
 'main',
- 'release-1.22',
 'release-1.25',
 'release-1.26',
 'release-1.27',
 ],
 ignorePaths: [
 'docs/**',
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 3a8a751ff7..4ae2e1057b 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -36,7 +36,6 @@ jobs:
 number: ${{ github.event.pull_request.number }}
 labels: |
 backport-requested :arrow_backward:
- release-1.22
 release-1.25
 release-1.26
 release-1.27
@@ -61,7 +60,6 @@ jobs:
 github_token: ${{ secrets.REPO_GHA_PAT }}
 labels: |
 backport-requested :arrow_backward:
- release-1.22
 release-1.25
 release-1.26
 release-1.27
@@ -80,7 +78,7 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
- branch: [release-1.22, release-1.25, release-1.26, release-1.27]
+ branch: [release-1.25, release-1.26, release-1.27]
 env:
 PR: ${{ github.event.pull_request.number }}
 outputs:
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index a1b94bc40e..5ad3ee1747 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -77,7 +77,7 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
- branch: [release-1.22, release-1.25, release-1.26, release-1.27]
+ branch: [release-1.25, release-1.26, release-1.27]
 steps:
 - name: Invoke workflow with inputs
 uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index fb62a02966..2757fe3390 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -60,7 +60,7 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
- branch: [release-1.22, release-1.25, release-1.26, release-1.27]
+ branch: [release-1.25, release-1.26, release-1.27]
 steps:
 - name: Invoke workflow with inputs
 uses: benc-uk/workflow-dispatch@e2e5e9a103e331dad343f381a29e654aea3cf8fc # v1
diff --git a/tests/e2e/openshift_upgrade_test.go b/tests/e2e/openshift_upgrade_test.go
index 189c851904..0ff9997137 100644
--- a/tests/e2e/openshift_upgrade_test.go
+++ b/tests/e2e/openshift_upgrade_test.go
@@ -166,7 +166,7 @@ var _ = Describe("Upgrade Paths on OpenShift", Label(tests.LabelUpgrade), Ordere
 assertClusterIsAligned(namespace, clusterName)
 }

- It("stable-v1 to alpha, currently version 1.22", func() {
+ It("stable-v1 to alpha", func() {
 if ocpVersion.GT(ocp412) {
 Skip("This test runs only on OCP 4.12 or lower")
 }

From 8d76b44ab14274564d2495544f469609dd88a5a3 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 18 Aug 2025 15:08:36 +0200
Subject: [PATCH 798/836] fix(deps): update kubernetes patches to v0.33.4
 (main) (#8352)

This PR contains the following updates:
https://github.com/kubernetes/api `v0.33.3` -> `v0.33.4`
https://github.com/kubernetes/apiextensions-apiserver `v0.33.3` -> `v0.33.4`
https://github.com/kubernetes/apimachinery `v0.33.3` -> `v0.33.4`
https://github.com/kubernetes/cli-runtime `v0.33.3` -> 
`v0.33.4` https://github.com/kubernetes/client-go `v0.33.3` -> `v0.33.4` --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 97ac395bb4..2744e3e629 100644 --- a/go.mod +++ b/go.mod @@ -38,11 +38,11 @@ require ( golang.org/x/term v0.34.0 google.golang.org/grpc v1.74.2 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.33.3 - k8s.io/apiextensions-apiserver v0.33.3 - k8s.io/apimachinery v0.33.3 - k8s.io/cli-runtime v0.33.3 - k8s.io/client-go v0.33.3 + k8s.io/api v0.33.4 + k8s.io/apiextensions-apiserver v0.33.4 + k8s.io/apimachinery v0.33.4 + k8s.io/cli-runtime v0.33.4 + k8s.io/client-go v0.33.4 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/yaml v1.6.0 diff --git a/go.sum b/go.sum index 8d8b5c2f0b..ed168c61f0 100644 --- a/go.sum +++ b/go.sum @@ -287,16 +287,16 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= -k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= -k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= -k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= -k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= -k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA= -k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo= -k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= -k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= +k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= +k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= +k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU= +k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/cli-runtime v0.33.4 h1:V8NSxGfh24XzZVhXmIGzsApdBpGq0RQS2u/Fz1GvJwk= +k8s.io/cli-runtime v0.33.4/go.mod h1:V+ilyokfqjT5OI+XE+O515K7jihtr0/uncwoyVqXaIU= +k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= +k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= From 8a45c278effe2db67d868547f2fff4ad78135de3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 15:37:55 +0200 Subject: [PATCH 799/836] chore(deps): update all non-major github action (main) (#8376) This PR contains the following updates: https://github.com/github/codeql-action `df55935` -> `96f518a` https://github.com/goreleaser/goreleaser-action `9c156ee` -> `e435ccd` snyk/actions 
`7e76a00` -> `86b1cee` --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-delivery.yml | 4 ++-- .github/workflows/continuous-integration.yml | 8 ++++---- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/release-publish.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1dca03dd20..0a057f6e1f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@df559355d593797519d70b90fc8edd5db049e7a2 # v3 + uses: github/codeql-action/init@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@df559355d593797519d70b90fc8edd5db049e7a2 # v3 + uses: github/codeql-action/analyze@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 5ad3ee1747..cb9ff9b7f5 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -350,7 +350,7 @@ jobs: echo PWD=$(pwd) >> $GITHUB_ENV - name: Run GoReleaser - uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 with: distribution: goreleaser version: v2 @@ -439,7 +439,7 @@ jobs: # NOTE: we only fire this in TEST DEPTH = 4, as that is the level of the # upgrade test name: Build binary for upgrade test - uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 if: | always() && !cancelled() && needs.evaluate_options.outputs.test_level == '4' diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 2757fe3390..0c0603deab 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -491,7 +491,7 @@ jobs: echo PWD=$(pwd) >> $GITHUB_ENV - name: Run GoReleaser to build kubectl plugin - uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 if: | github.event_name == 'schedule' || ( @@ -531,7 +531,7 @@ jobs: SLACK_MESSAGE: Building kubernetes plugin failed! 
- name: Run GoReleaser - uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 with: distribution: goreleaser version: v2 @@ -605,7 +605,7 @@ jobs: accept-keywords: key - name: Run Snyk to check Docker image for vulnerabilities - uses: snyk/actions/docker@7e76a00337235faa7b795d0c300768e54c9b0923 # master + uses: snyk/actions/docker@86b1cee1b8e110a78d528b3e1328a80e218111d2 # master if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3 + uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 0cd5e7cfa9..897658aa75 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3 + uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index 87eacac4e7..b428ec1ced 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -154,7 +154,7 @@ jobs: echo "$GPG_PRIVATE_KEY" > gpg_signing_key.asc - name: Run GoReleaser - uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6 + uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6 with: distribution: goreleaser version: v2 diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 09fdf47cd5..3aadd73c8e 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3 + uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@df559355d593797519d70b90fc8edd5db049e7a2 # v3 + uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 with: sarif_file: snyk-test.sarif From d8136479a2a8b4681ff657d9c9fc722b5dfa3975 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 16:16:23 +0200 Subject: [PATCH 800/836] chore(deps): update dependency golangci/golangci-lint to v2.4.0 (main) (#8359) --- .github/workflows/continuous-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 0c0603deab..9a4980d5d3 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml 
@@ -21,7 +21,7 @@ env: # renovate: datasource=golang-version depName=golang versioning=loose GOLANG_VERSION: "1.24.6" # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose - GOLANGCI_LINT_VERSION: "v2.3.1" + GOLANGCI_LINT_VERSION: "v2.4.0" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" From c66e25f8db6c60c58e380a35603d4d1ccae62498 Mon Sep 17 00:00:00 2001 From: Ying Zhu Date: Mon, 18 Aug 2025 14:17:47 -0700 Subject: [PATCH 801/836] feat(databases): add declarative FDW management to `Database` CRD (#7942) Add support for managing PostgreSQL Foreign Data Wrappers (FDWs) declaratively via the `spec.fdws` field in the `Database` custom resource. This feature enables Kubernetes-native, privilege-aware lifecycle management of FDWs without requiring direct SQL execution or superuser escalation by users. It supports configuring `name`, `ensure`, `handler`, `validator`, `owner`, `usage` grants/revokes, and `options`, including the PostgreSQL `"-"` convention for removing handlers or validators. Ownership changes are restricted to superusers, following PostgreSQL rules. Only FDWs explicitly listed in `spec.fdws` are reconciled, leaving all others untouched. The operator manages FDWs using PostgreSQL native `CREATE FOREIGN DATA WRAPPER`, `ALTER FOREIGN DATA WRAPPER`, and `DROP FOREIGN DATA WRAPPER` commands. Closes #4683 Implemented as part of the LFX Mentorship Program 2025 Term 2 under the guidance of Leonardo Cecchi, Armando Ruocco, Gabriele Bartolini and Marco Nenciarini. Signed-off-by: YingZhu --- .wordlist-en-custom.txt | 13 +- api/v1/database_types.go | 76 +++++ api/v1/zz_generated.deepcopy.go | 84 +++++ .../bases/postgresql.cnpg.io_databases.yaml | 114 +++++++ docs/src/cloudnative-pg.v1.md | 179 ++++++++++ docs/src/declarative_database_management.md | 70 ++++ docs/src/index.md | 2 +- docs/src/operator_capability_levels.md | 3 +- .../controller/database_controller.go | 22 +- .../controller/database_controller_sql.go | 298 +++++++++++++++++ .../database_controller_sql_test.go | 312 ++++++++++++++++++ internal/webhook/v1/database_webhook.go | 59 ++++ internal/webhook/v1/database_webhook_test.go | 114 +++++++ tests/e2e/asserts_test.go | 4 + .../declarative_database_management_test.go | 10 + ...e-with-delete-reclaim-policy.yaml.template | 14 + .../database.yaml.template | 14 + 17 files changed, 1383 insertions(+), 5 deletions(-) diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt index 71e973ad7e..c9b9647ac1 100644 --- a/.wordlist-en-custom.txt +++ b/.wordlist-en-custom.txt @@ -165,6 +165,9 @@ ExtensionConfiguration ExtensionSpec ExtensionStatus ExternalCluster +FDW +FDWSpec +FDWs FQDN FQDNs FailoverQuorum @@ -306,6 +309,8 @@ Openshift OperatorCapabilities OperatorGroup OperatorHub +OptionSpec +OptionSpecValue OwnNamespace PDB PDBs @@ -524,6 +529,7 @@ UTF Uncomment Unrealizable UpdateStrategy +UsageSpec VLDB VLDBs VM @@ -841,6 +847,8 @@ faq fastpath fb fd +fdw +fdws ffd fieldPath fieldref @@ -854,6 +862,7 @@ firstRecoverabilityPointByMethod fqdn freddie fuzzystrmatch +gRPC gapped gc gcc @@ -873,7 +882,6 @@ googleCredentials goroutines gosec govulncheck -gRPC grafana gzip hashicorp @@ -1000,6 +1008,7 @@ lookups lsn lt lz +mTLS macOS majorVersion majorVersionUpgradeFromImage @@ -1038,7 +1047,6 @@ monitoringconfiguration mountPath msg mspan -mTLS multinamespace mutatingwebhookconfigurations mutex @@ -1443,6 +1451,7 @@ usr utils validUntil validatingwebhookconfigurations 
+validator valueFrom viceversa virtualized diff --git a/api/v1/database_types.go b/api/v1/database_types.go index 0862d88dbc..9baee51cdd 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -173,6 +173,10 @@ type DatabaseSpec struct { // The list of extensions to be managed in the database // +optional Extensions []ExtensionSpec `json:"extensions,omitempty"` + + // The list of foreign data wrappers to be managed in the database + // +optional + FDWs []FDWSpec `json:"fdws,omitempty"` } // DatabaseObjectSpec contains the fields which are common to every @@ -220,6 +224,74 @@ type ExtensionSpec struct { Schema string `json:"schema,omitempty"` } +// FDWSpec configures an Foreign Data Wrapper in a database +type FDWSpec struct { + // Common fields + DatabaseObjectSpec `json:",inline"` + + // Name of the handler function (e.g., "postgres_fdw_handler"). + // This will be empty if no handler is specified. In that case, + // the default handler is registered when the FDW extension is created. + // +optional + Handler string `json:"handler,omitempty"` + + // Name of the validator function (e.g., "postgres_fdw_validator"). + // This will be empty if no validator is specified. In that case, + // the default validator is registered when the FDW extension is created. + // +optional + Validator string `json:"validator,omitempty"` + + // Owner specifies the database role that will own the Foreign Data Wrapper. + // The role must have superuser privileges in the target database. + // +optional + Owner string `json:"owner,omitempty"` + + // Options specifies the configuration options for the FDW + // (key is the option name, value is the option value). + // +optional + Options []OptionSpec `json:"options,omitempty"` + + // List of roles for which `USAGE` privileges on the FDW are granted or revoked. + // +optional + Usages []UsageSpec `json:"usage,omitempty"` +} + +// OptionSpec holds the name, value and the ensure field for an option +type OptionSpec struct { + // Name of the option + Name string `json:"name"` + + // Value and ensure field of the option + OptionSpecValue `json:",inline"` +} + +// OptionSpecValue holds the value and the ensure field for an option +type OptionSpecValue struct { + // Value of the option + Value string `json:"value"` + + // Specifies whether an option should be present or absent in + // the database. If set to `present`, the option will be + // created if it does not exist. If set to `absent`, the + // option will be removed if it exists. 
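+	//
+	// Illustrative example (not part of the original change): an entry such as
+	//   {name: "host", value: "db.example.com", ensure: "present"}
+	// is reconciled through an ADD or SET clause of
+	// `ALTER FOREIGN DATA WRAPPER ... OPTIONS (...)`, while the same option
+	// name with `ensure: "absent"` is reconciled through a DROP clause.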
+ // +kubebuilder:default:="present" + // +kubebuilder:validation:Enum=present;absent + // +optional + Ensure EnsureOption `json:"ensure,omitempty"` +} + +// UsageSpec configures a usage for a foreign data wrapper +type UsageSpec struct { + // Name of the usage + Name string `json:"name"` + + // The type of usage + // +kubebuilder:default:="grant" + // +kubebuilder:validation:Enum=grant;revoke + // +optional + Type string `json:"type,omitempty"` +} + // DatabaseStatus defines the observed state of Database type DatabaseStatus struct { // A sequence number representing the latest @@ -242,6 +314,10 @@ type DatabaseStatus struct { // Extensions is the status of the managed extensions // +optional Extensions []DatabaseObjectStatus `json:"extensions,omitempty"` + + // FDWs is the status of the managed FDWs + // +optional + FDWs []DatabaseObjectStatus `json:"fdws,omitempty"` } // DatabaseObjectStatus is the status of the managed database objects diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 2d601693e1..65c47bb873 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1166,6 +1166,13 @@ func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) { *out = make([]ExtensionSpec, len(*in)) copy(*out, *in) } + if in.FDWs != nil { + in, out := &in.FDWs, &out.FDWs + *out = make([]FDWSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec. @@ -1196,6 +1203,11 @@ func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) { *out = make([]DatabaseObjectStatus, len(*in)) copy(*out, *in) } + if in.FDWs != nil { + in, out := &in.FDWs, &out.FDWs + *out = make([]DatabaseObjectStatus, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus. @@ -1361,6 +1373,32 @@ func (in *ExternalCluster) DeepCopy() *ExternalCluster { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FDWSpec) DeepCopyInto(out *FDWSpec) { + *out = *in + out.DatabaseObjectSpec = in.DatabaseObjectSpec + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]OptionSpec, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]UsageSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FDWSpec. +func (in *FDWSpec) DeepCopy() *FDWSpec { + if in == nil { + return nil + } + out := new(FDWSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FailoverQuorum) DeepCopyInto(out *FailoverQuorum) { *out = *in @@ -1982,6 +2020,37 @@ func (in *OnlineConfiguration) DeepCopy() *OnlineConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionSpec) DeepCopyInto(out *OptionSpec) { + *out = *in + out.OptionSpecValue = in.OptionSpecValue +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionSpec. 
+func (in *OptionSpec) DeepCopy() *OptionSpec { + if in == nil { + return nil + } + out := new(OptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionSpecValue) DeepCopyInto(out *OptionSpecValue) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionSpecValue. +func (in *OptionSpecValue) DeepCopy() *OptionSpecValue { + if in == nil { + return nil + } + out := new(OptionSpecValue) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PasswordState) DeepCopyInto(out *PasswordState) { *out = *in @@ -3332,6 +3401,21 @@ func (in *Topology) DeepCopy() *Topology { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UsageSpec) DeepCopyInto(out *UsageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsageSpec. +func (in *UsageSpec) DeepCopy() *UsageSpec { + if in == nil { + return nil + } + out := new(UsageSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSnapshotConfiguration) DeepCopyInto(out *VolumeSnapshotConfiguration) { *out = *in diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index 50626148be..d417b9b890 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -161,6 +161,98 @@ spec: - name type: object type: array + fdws: + description: The list of foreign data wrappers to be managed in the + database + items: + description: FDWSpec configures an Foreign Data Wrapper in a database + properties: + ensure: + default: present + description: |- + Specifies whether an extension/schema should be present or absent in + the database. If set to `present`, the extension/schema will be + created if it does not exist. If set to `absent`, the + extension/schema will be removed if it exists. + enum: + - present + - absent + type: string + handler: + description: |- + Name of the handler function (e.g., "postgres_fdw_handler"). + This will be empty if no handler is specified. In that case, + the default handler is registered when the FDW extension is created. + type: string + name: + description: Name of the extension/schema + type: string + options: + description: |- + Options specifies the configuration options for the FDW + (key is the option name, value is the option value). + items: + description: OptionSpec holds the name, value and the ensure + field for an option + properties: + ensure: + default: present + description: |- + Specifies whether an option should be present or absent in + the database. If set to `present`, the option will be + created if it does not exist. If set to `absent`, the + option will be removed if it exists. + enum: + - present + - absent + type: string + name: + description: Name of the option + type: string + value: + description: Value of the option + type: string + required: + - name + - value + type: object + type: array + owner: + description: |- + Owner specifies the database role that will own the Foreign Data Wrapper. 
+ The role must have superuser privileges in the target database. + type: string + usage: + description: List of roles for which `USAGE` privileges on the + FDW are granted or revoked. + items: + description: UsageSpec configures a usage for a foreign data + wrapper + properties: + name: + description: Name of the usage + type: string + type: + default: grant + description: The type of usage + enum: + - grant + - revoke + type: string + required: + - name + type: object + type: array + validator: + description: |- + Name of the validator function (e.g., "postgres_fdw_validator"). + This will be empty if no validator is specified. In that case, + the default validator is registered when the FDW extension is created. + type: string + required: + - name + type: object + type: array icuLocale: description: |- Maps to the `ICU_LOCALE` parameter of `CREATE DATABASE`. This @@ -330,6 +422,28 @@ spec: - name type: object type: array + fdws: + description: FDWs is the status of the managed FDWs + items: + description: DatabaseObjectStatus is the status of the managed database + objects + properties: + applied: + description: |- + True of the object has been installed successfully in + the database + type: boolean + message: + description: Message is the object reconciliation message + type: string + name: + description: The name of the object + type: string + required: + - applied + - name + type: object + type: array message: description: Message is the reconciliation output message type: string diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 6ceff12c66..ba0d3211ea 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -2426,6 +2426,8 @@ PostgreSQL cluster from an existing storage

- [ExtensionSpec](#postgresql-cnpg-io-v1-ExtensionSpec) +- [FDWSpec](#postgresql-cnpg-io-v1-FDWSpec) + - [SchemaSpec](#postgresql-cnpg-io-v1-SchemaSpec) @@ -2724,6 +2726,13 @@ tablespace used for objects created in this database.

The list of extensions to be managed in the database

+fdws
+[]FDWSpec + + +

The list of foreign data wrappers to be managed in the database

+ + @@ -2777,6 +2786,13 @@ desired state that was synchronized

Extensions is the status of the managed extensions

+fdws
+[]DatabaseObjectStatus + + +

FDWs is the status of the managed FDWs

+ + @@ -2819,6 +2835,8 @@ desired state that was synchronized

- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) +- [OptionSpecValue](#postgresql-cnpg-io-v1-OptionSpecValue) + - [RoleConfiguration](#postgresql-cnpg-io-v1-RoleConfiguration) @@ -3043,6 +3061,71 @@ of WAL archiving and backups for this external cluster

+## FDWSpec {#postgresql-cnpg-io-v1-FDWSpec} + + +**Appears in:** + +- [DatabaseSpec](#postgresql-cnpg-io-v1-DatabaseSpec) + + +

FDWSpec configures a Foreign Data Wrapper in a database

+ + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
DatabaseObjectSpec
+DatabaseObjectSpec +
(Members of DatabaseObjectSpec are embedded into this type.) +

Common fields

+
handler
+string +
+

Name of the handler function (e.g., "postgres_fdw_handler"). +This will be empty if no handler is specified. In that case, +the default handler is registered when the FDW extension is created.

+
validator
+string +
+

Name of the validator function (e.g., "postgres_fdw_validator"). +This will be empty if no validator is specified. In that case, +the default validator is registered when the FDW extension is created.

+
owner
+string +
+

Owner specifies the database role that will own the Foreign Data Wrapper. +The role must have superuser privileges in the target database.

+
options
+[]OptionSpec +
+

Options specifies the configuration options for the FDW +(key is the option name, value is the option value).

+
usage
+[]UsageSpec +
+

List of roles for which USAGE privileges on the FDW are granted or revoked.

+
+ ## FailoverQuorumStatus {#postgresql-cnpg-io-v1-FailoverQuorumStatus} @@ -3932,6 +4015,71 @@ possible. false by default.

+## OptionSpec {#postgresql-cnpg-io-v1-OptionSpec} + + +**Appears in:** + +- [FDWSpec](#postgresql-cnpg-io-v1-FDWSpec) + + +

OptionSpec holds the name, value and the ensure field for an option

+ + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name of the option

+
OptionSpecValue
+OptionSpecValue +
(Members of OptionSpecValue are embedded into this type.) +

Value and ensure field of the option

+
+ +## OptionSpecValue {#postgresql-cnpg-io-v1-OptionSpecValue} + + +**Appears in:** + +- [OptionSpec](#postgresql-cnpg-io-v1-OptionSpec) + + +

OptionSpecValue holds the value and the ensure field for an option

+ + + + + + + + + + + + +
FieldDescription
value [Required]
+string +
+

Value of the option

+
ensure
+EnsureOption +
+

Specifies whether an option should be present or absent in +the database. If set to present, the option will be +created if it does not exist. If set to absent, the +option will be removed if it exists.

+
+ ## PasswordState {#postgresql-cnpg-io-v1-PasswordState} @@ -6402,6 +6550,37 @@ in synchronous replica election in case of failures

+## UsageSpec {#postgresql-cnpg-io-v1-UsageSpec} + + +**Appears in:** + +- [FDWSpec](#postgresql-cnpg-io-v1-FDWSpec) + + +

UsageSpec configures a usage for a foreign data wrapper

+ + + + + + + + + + + + +
FieldDescription
name [Required]
+string +
+

Name of the usage

+
type
+string +
+

The type of usage

+
+ ## VolumeSnapshotConfiguration {#postgresql-cnpg-io-v1-VolumeSnapshotConfiguration} diff --git a/docs/src/declarative_database_management.md b/docs/src/declarative_database_management.md index 85bc01c66a..b938a65d32 100644 --- a/docs/src/declarative_database_management.md +++ b/docs/src/declarative_database_management.md @@ -256,6 +256,76 @@ Each schema entry supports the following properties: [`DROP SCHEMA`](https://www.postgresql.org/docs/current/sql-dropschema.html), [`ALTER SCHEMA`](https://www.postgresql.org/docs/current/sql-alterschema.html). +## Managing Foreign Data Wrappers (FDWs) In a Database + +!!! Info + Foreign Data Wrappers (FDWs) are database-scoped objects that typically + require superuser privileges to create or modify. CloudNativePG provides a + declarative API for managing FDWs, enabling users to define and maintain them + in a controlled, Kubernetes-native way without directly executing SQL commands + or escalating privileges. + +CloudNativePG enables seamless and automated management of PostgreSQL foreign +data wrappers in the target database using declarative configuration. + +To enable this feature, define the `spec.fdws` field with a list of FDW +specifications, as shown in the following example: + +```yaml +# ... +spec: + fdws: + - name: postgres_fdw + usage: + - name: app + - type: grant +# ... +``` + +Each FDW entry supports the following properties: + +- `name`: The name of the foreign data wrapper **(mandatory)**. +- `ensure`: Indicates whether the FDW should be `present` or `absent` in the + database (default is `present`). +- `handler`: The name of the handler function used by the FDW. If not + specified, the default handler defined by the FDW extension (if any) will be + used. +- `validator`: The name of the validator function used by the FDW. If not + specified, the default validator defined by the FDW extension (if any) will + be used. +- `owner`: The owner of the FDW **(must be a superuser)**. +- `usage`: The list of `USAGE` permissions of the FDW, with the following fields: + - `name` : The name of the role to which the usage permission should be + granted or from which it should be revoked. + - `type` : The type of the usage permission. Supports `grant` and `revoke`. +- `options`: A map of FDW-specific options to manage, where each key is the + name of an option. Each option supports the following fields: + - `value`: The string value of the option. + - `ensure`: Indicates whether the option should be `present` or `absent`. + +!!! Info + Both `handler` and `validator` are optional, and if not specified, the + default handler and validator defined by the FDW extension (if any) will be + used. Setting `handler` or `validator` to `"-"` will remove the handler or + validator from the FDW respectively. This follows the PostgreSQL convention, + where "-" denotes the absence of a handler or validator. + +!!! Warning + PostgreSQL restricts ownership of foreign data wrappers to **roles with + superuser privileges only**. Attempting to assign ownership to a non-superuser + (e.g., an app role) will be ignored or rejected, as PostgreSQL does not allow + non-superuser ownership of foreign data wrappers. + +The operator reconciles only the FDWs explicitly listed in `spec.fdws`. Any +existing FDWs not declared in this list are left untouched. + +!!! 
Info + CloudNativePG manages FDWs using PostgreSQL's native SQL commands: + [`CREATE FOREIGN DATA WRAPPER`](https://www.postgresql.org/docs/current/sql-createforeigndatawrapper.html), + [`ALTER FOREIGN DATA WRAPPER`](https://www.postgresql.org/docs/current/sql-alterforeigndatawrapper.html), + and [`DROP FOREIGN DATA WRAPPER`](https://www.postgresql.org/docs/current/sql-dropforeigndatawrapper.html). + The `ALTER` command supports option updates. + ## Limitations and Caveats ### Renaming a database diff --git a/docs/src/index.md b/docs/src/index.md index 7b8ebfac25..b2af79086a 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -95,7 +95,7 @@ Additionally, the community provides images for the [PostGIS extension](postgis. - Declarative management of key PostgreSQL configurations, including: - PostgreSQL settings. - Roles, users, and groups. - - Databases, extensions, and schemas. + - Databases, extensions, schemas, and foreign data wrappers (FDW). - Tablespaces (including temporary tablespaces). - Flexible instance definition, supporting any number of instances (minimum 1 primary server). diff --git a/docs/src/operator_capability_levels.md b/docs/src/operator_capability_levels.md index 4c4a4ad2e8..a73eba0dd9 100644 --- a/docs/src/operator_capability_levels.md +++ b/docs/src/operator_capability_levels.md @@ -148,7 +148,8 @@ required, as part of the bootstrap. Additional databases can be created or managed via [declarative database management](declarative_database_management.md) using -the `Database` CRD, also supporting extensions and schemas. +the `Database` CRD, also supporting extensions, schemas and foreign data +wrappers (FDW). Although no configuration is required to run the cluster, you can customize both PostgreSQL runtime configuration and PostgreSQL host-based diff --git a/internal/management/controller/database_controller.go b/internal/management/controller/database_controller.go index 30580b9013..7857506d4d 100644 --- a/internal/management/controller/database_controller.go +++ b/internal/management/controller/database_controller.go @@ -67,6 +67,14 @@ var extensionObjectManager = databaseObjectManager[apiv1.ExtensionSpec, extInfo] drop: dropDatabaseExtension, } +// fdwObjectManager is the manager of the fdw objects +var fdwObjectManager = databaseObjectManager[apiv1.FDWSpec, fdwInfo]{ + get: getDatabaseFDWInfo, + create: createDatabaseFDW, + update: updateDatabaseFDW, + drop: dropDatabaseFDW, +} + // databaseReconciliationInterval is the time between the // database reconciliation loop failures const databaseReconciliationInterval = 30 * time.Second @@ -243,6 +251,11 @@ func (r *DatabaseReconciler) reconcileDatabaseResource(ctx context.Context, obj return ErrFailedDatabaseObjectReconciliation } } + for _, status := range obj.Status.FDWs { + if !status.Applied { + return ErrFailedDatabaseObjectReconciliation + } + } return nil } @@ -251,7 +264,12 @@ func (r *DatabaseReconciler) reconcileDatabaseObjects( ctx context.Context, obj *apiv1.Database, ) error { - if len(obj.Spec.Schemas) == 0 && len(obj.Spec.Extensions) == 0 { + objectCount := 0 + objectCount += len(obj.Spec.Schemas) + objectCount += len(obj.Spec.Extensions) + objectCount += len(obj.Spec.FDWs) + + if objectCount == 0 { return nil } @@ -262,6 +280,8 @@ func (r *DatabaseReconciler) reconcileDatabaseObjects( obj.Status.Schemas = schemaObjectManager.reconcileList(ctx, db, obj.Spec.Schemas) obj.Status.Extensions = extensionObjectManager.reconcileList(ctx, db, obj.Spec.Extensions) + obj.Status.FDWs = 
fdwObjectManager.reconcileList(ctx, db, obj.Spec.FDWs) + return nil } diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index c2211ce852..83289800ea 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -28,6 +28,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/pgx/v5" + "github.com/lib/pq" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" ) @@ -43,6 +44,14 @@ type schemaInfo struct { Owner string `json:"owner"` } +type fdwInfo struct { + Name string `json:"name"` + Handler string `json:"handler"` + Validator string `json:"validator"` + Owner string `json:"owner"` + Options map[string]apiv1.OptionSpecValue `json:"options"` +} + func detectDatabase( ctx context.Context, db *sql.DB, @@ -404,3 +413,292 @@ func dropDatabaseSchema(ctx context.Context, db *sql.DB, schema apiv1.SchemaSpec contextLogger.Info("dropped schema", "name", schema.Name) return nil } + +const detectDatabaseFDWSQL = ` +SELECT + fdwname, fdwhandler::regproc::text, fdwvalidator::regproc::text, fdwoptions, + a.rolname AS owner +FROM pg_foreign_data_wrapper f +JOIN pg_authid a ON f.fdwowner = a.oid +WHERE fdwname = $1 +` + +func getDatabaseFDWInfo(ctx context.Context, db *sql.DB, fdw apiv1.FDWSpec) (*fdwInfo, error) { + contextLogger := log.FromContext(ctx) + + row := db.QueryRowContext( + ctx, detectDatabaseFDWSQL, + fdw.Name) + if row.Err() != nil { + return nil, fmt.Errorf("while checking if FDW %q exists: %w", fdw.Name, row.Err()) + } + + var ( + result fdwInfo + optionsRaw pq.StringArray + ) + + if err := row.Scan(&result.Name, &result.Handler, &result.Validator, &optionsRaw, &result.Owner); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return nil, fmt.Errorf("while scanning if FDW %q exists: %w", fdw.Name, err) + } + + // Extract options from SQL raw format(e.g. -{host=localhost,port=5432}) to type OptSpec + opts := make(map[string]apiv1.OptionSpecValue, len(optionsRaw)) + for _, opt := range optionsRaw { + parts := strings.SplitN(opt, "=", 2) + if len(parts) == 2 { + opts[parts[0]] = apiv1.OptionSpecValue{ + Value: parts[1], + } + } else { + contextLogger.Info( + "skipping unparsable option, expected \"keyword=value\"", + "optionsRaw", optionsRaw, + "fdwName", fdw.Name) + } + } + result.Options = opts + + return &result, nil +} + +// updateDatabaseFDWUsage updates the usage permissions for a foreign data wrapper +// based on the provided FDW specification. +// It supports granting or revoking usage permissions for specified users. 
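+//
+// For example (illustrative only), a usage entry {Name: "app", Type: "grant"}
+// on an FDW named "postgres_fdw" results in:
+//
+//	GRANT USAGE ON FOREIGN DATA WRAPPER "postgres_fdw" TO "app"
+//
+// while {Name: "app", Type: "revoke"} results in:
+//
+//	REVOKE USAGE ON FOREIGN DATA WRAPPER "postgres_fdw" FROM "app"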
+func updateDatabaseFDWUsage(ctx context.Context, db *sql.DB, fdw *apiv1.FDWSpec) error { + contextLogger := log.FromContext(ctx) + + for _, usageSpec := range fdw.Usages { + switch usageSpec.Type { + case "grant": + changeUsageSQL := fmt.Sprintf( + "GRANT USAGE ON FOREIGN DATA WRAPPER %s TO %s", + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{usageSpec.Name}.Sanitize()) + if _, err := db.ExecContext(ctx, changeUsageSQL); err != nil { + return fmt.Errorf("granting usage of foreign data wrapper %w", err) + } + contextLogger.Info("granted usage of foreign data wrapper", "name", fdw.Name, "user", usageSpec.Name) + + case "revoke": + changeUsageSQL := fmt.Sprintf( + "REVOKE USAGE ON FOREIGN DATA WRAPPER %s FROM %s", // #nosec G201 + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{usageSpec.Name}.Sanitize()) + if _, err := db.ExecContext(ctx, changeUsageSQL); err != nil { + return fmt.Errorf("revoking usage of foreign data wrapper %w", err) + } + contextLogger.Info("revoked usage of foreign data wrapper", "name", fdw.Name, "user", usageSpec.Name) + + default: + contextLogger.Warning( + "unknown usage type", + "type", usageSpec.Type, "fdwName", fdw.Name) + } + } + + return nil +} + +func createDatabaseFDW(ctx context.Context, db *sql.DB, fdw apiv1.FDWSpec) error { + contextLogger := log.FromContext(ctx) + + var sqlCreateFDW strings.Builder + sqlCreateFDW.WriteString(fmt.Sprintf("CREATE FOREIGN DATA WRAPPER %s ", pgx.Identifier{fdw.Name}.Sanitize())) + + // Create Handler + if len(fdw.Handler) > 0 { + switch fdw.Handler { + case "-": + sqlCreateFDW.WriteString("NO HANDLER ") + default: + sqlCreateFDW.WriteString(fmt.Sprintf("HANDLER %s ", pgx.Identifier{fdw.Handler}.Sanitize())) + } + } + + // Create Validator + if len(fdw.Validator) > 0 { + switch fdw.Validator { + case "-": + sqlCreateFDW.WriteString("NO VALIDATOR ") + default: + sqlCreateFDW.WriteString(fmt.Sprintf("VALIDATOR %s ", pgx.Identifier{fdw.Validator}.Sanitize())) + } + } + + // Extract options + opts := make([]string, 0, len(fdw.Options)) + for _, optionSpec := range fdw.Options { + if optionSpec.Ensure == apiv1.EnsureAbsent { + continue + } + opts = append(opts, fmt.Sprintf("%s '%s'", pgx.Identifier{optionSpec.Name}.Sanitize(), + optionSpec.Value)) + } + if len(opts) > 0 { + sqlCreateFDW.WriteString("OPTIONS (" + strings.Join(opts, ", ") + ")") + } + + _, err := db.ExecContext(ctx, sqlCreateFDW.String()) + if err != nil { + contextLogger.Error(err, "while creating foreign data wrapper", "query", sqlCreateFDW.String()) + return err + } + contextLogger.Info("created foreign data wrapper", "name", fdw.Name) + + // Update usage permissions + if len(fdw.Usages) > 0 { + if err := updateDatabaseFDWUsage(ctx, db, &fdw); err != nil { + return err + } + } + + return nil +} + +func updateFDWOptions(ctx context.Context, db *sql.DB, fdw *apiv1.FDWSpec, info *fdwInfo) error { + contextLogger := log.FromContext(ctx) + + // Collect individual ALTER-clauses + var clauses []string + for _, desiredOptSpec := range fdw.Options { + curOptSpec, exists := info.Options[desiredOptSpec.Name] + + switch { + case desiredOptSpec.Ensure == apiv1.EnsurePresent && !exists: + clauses = append(clauses, fmt.Sprintf("ADD %s '%s'", + pgx.Identifier{desiredOptSpec.Name}.Sanitize(), desiredOptSpec.Value)) + + case desiredOptSpec.Ensure == apiv1.EnsurePresent && exists: + if desiredOptSpec.Value != curOptSpec.Value { + clauses = append(clauses, fmt.Sprintf("SET %s '%s'", + pgx.Identifier{desiredOptSpec.Name}.Sanitize(), desiredOptSpec.Value)) + } + + case 
desiredOptSpec.Ensure == apiv1.EnsureAbsent && exists: + clauses = append(clauses, fmt.Sprintf("DROP %s", pgx.Identifier{desiredOptSpec.Name}.Sanitize())) + } + } + + if len(clauses) == 0 { + return nil + } + + // Build SQL + changeOptionSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s OPTIONS (%s)", pgx.Identifier{fdw.Name}.Sanitize(), + strings.Join(clauses, ", "), + ) + + if _, err := db.ExecContext(ctx, changeOptionSQL); err != nil { + return fmt.Errorf("altering options of foreign data wrapper %w", err) + } + contextLogger.Info("altered foreign data wrapper options", "name", fdw.Name, "options", fdw.Options) + + return nil +} + +func updateDatabaseFDW(ctx context.Context, db *sql.DB, fdw apiv1.FDWSpec, info *fdwInfo) error { + contextLogger := log.FromContext(ctx) + + // Alter Handler + if len(fdw.Handler) > 0 && fdw.Handler != info.Handler { + switch fdw.Handler { + case "-": + changeHandlerSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s NO HANDLER", + pgx.Identifier{fdw.Name}.Sanitize(), + ) + if _, err := db.ExecContext(ctx, changeHandlerSQL); err != nil { + return fmt.Errorf("removing handler of foreign data wrapper %w", err) + } + contextLogger.Info("removed foreign data wrapper handler", "name", fdw.Name) + + default: + changeHandlerSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s HANDLER %s", + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{fdw.Handler}.Sanitize(), + ) + if _, err := db.ExecContext(ctx, changeHandlerSQL); err != nil { + return fmt.Errorf("altering handler of foreign data wrapper %w", err) + } + contextLogger.Info("altered foreign data wrapper handler", "name", fdw.Name, "handler", fdw.Handler) + } + } + + // Alter Validator + if len(fdw.Validator) > 0 && fdw.Validator != info.Validator { + switch fdw.Validator { + case "-": + changeValidatorSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s NO VALIDATOR", + pgx.Identifier{fdw.Name}.Sanitize(), + ) + + if _, err := db.ExecContext(ctx, changeValidatorSQL); err != nil { + return fmt.Errorf("removing validator of foreign data wrapper %w", err) + } + + contextLogger.Info("removed foreign data wrapper validator", "name", fdw.Name) + + default: + changeValidatorSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s VALIDATOR %s", + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{fdw.Validator}.Sanitize(), + ) + if _, err := db.ExecContext(ctx, changeValidatorSQL); err != nil { + return fmt.Errorf("altering validator of foreign data wrapper %w", err) + } + + contextLogger.Info("altered foreign data wrapper validator", "name", fdw.Name, "validator", fdw.Validator) + } + } + + // Alter the owner + if len(fdw.Owner) > 0 && fdw.Owner != info.Owner { + changeOwnerSQL := fmt.Sprintf( + "ALTER FOREIGN DATA WRAPPER %s OWNER TO %v", + pgx.Identifier{fdw.Name}.Sanitize(), + pgx.Identifier{fdw.Owner}.Sanitize(), + ) + + if _, err := db.ExecContext(ctx, changeOwnerSQL); err != nil { + return fmt.Errorf("altering owner of foreign data wrapper %w", err) + } + + contextLogger.Info("altered foreign data wrapper owner", "name", fdw.Name, "owner", fdw.Owner) + } + + // Alter Options + if err := updateFDWOptions(ctx, db, &fdw, info); err != nil { + return err + } + + // Update usage permissions + if len(fdw.Usages) > 0 { + if err := updateDatabaseFDWUsage(ctx, db, &fdw); err != nil { + return err + } + } + + return nil +} + +func dropDatabaseFDW(ctx context.Context, db *sql.DB, fdw apiv1.FDWSpec) error { + contextLogger := log.FromContext(ctx) + query := fmt.Sprintf("DROP FOREIGN DATA WRAPPER IF EXISTS %s", 
pgx.Identifier{fdw.Name}.Sanitize()) + _, err := db.ExecContext( + ctx, + query) + if err != nil { + contextLogger.Error(err, "while dropping foreign data wrapper", "query", query) + return err + } + contextLogger.Info("dropped foreign data wrapper", "name", fdw.Name) + return nil +} diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go index 3949d76441..f520039f55 100644 --- a/internal/management/controller/database_controller_sql_test.go +++ b/internal/management/controller/database_controller_sql_test.go @@ -516,3 +516,315 @@ var _ = Describe("Managed schema SQL", func() { }) }) }) + +var _ = Describe("Managed Foreign Data Wrapper SQL", func() { + var ( + dbMock sqlmock.Sqlmock + db *sql.DB + fdw apiv1.FDWSpec + err error + + testError error + ) + + BeforeEach(func() { + db, dbMock, err = sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual)) + Expect(err).ToNot(HaveOccurred()) + + fdw = apiv1.FDWSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "testfdw", + Ensure: "present", + }, + Handler: "testhandler", + Validator: "testvalidator", + Options: []apiv1.OptionSpec{ + { + Name: "testoption", + OptionSpecValue: apiv1.OptionSpecValue{ + Value: "testvalue", + }, + }, + }, + Owner: "owner", + } + + testError = fmt.Errorf("test error") + }) + + AfterEach(func() { + Expect(dbMock.ExpectationsWereMet()).To(Succeed()) + }) + + Context("getDatabaseFDWInfo", func() { + It("returns info when the fdw exits", func(ctx SpecContext) { + dbMock. + ExpectQuery(detectDatabaseFDWSQL). + WithArgs(fdw.Name). + WillReturnRows( + sqlmock.NewRows([]string{"fdwname", "fdwhandler", "fdwvalidator", "options", "fdwowner"}). + AddRow("testfdw", "testhandler", "testvalidator", nil, "testowner"), + ) + fdwInfo, err := getDatabaseFDWInfo(ctx, db, fdw) + Expect(err).ToNot(HaveOccurred()) + Expect(fdwInfo).ToNot(BeNil()) + Expect(fdwInfo.Name).To(Equal("testfdw")) + Expect(fdwInfo.Handler).To(Equal("testhandler")) + Expect(fdwInfo.Validator).To(Equal("testvalidator")) + Expect(fdwInfo.Owner).To(Equal("testowner")) + }) + + It("returns nil info when the fdw does not exist", func(ctx SpecContext) { + dbMock. + ExpectQuery(detectDatabaseFDWSQL). + WithArgs(fdw.Name). + WillReturnRows( + sqlmock.NewRows([]string{"fdwname", "fdwhandler", "fdwvalidator", "options", "fdwowner"}), + ) + fdwInfo, err := getDatabaseFDWInfo(ctx, db, fdw) + Expect(err).ToNot(HaveOccurred()) + Expect(fdwInfo).To(BeNil()) + }) + }) + + Context("createDatabaseFDW", func() { + createFDWSQL := "CREATE FOREIGN DATA WRAPPER \"testfdw\" HANDLER \"testhandler\" " + + "VALIDATOR \"testvalidator\" OPTIONS (\"testoption\" 'testvalue')" + + It("returns success when the fdw has been created", func(ctx SpecContext) { + dbMock. + ExpectExec(createFDWSQL). + WillReturnResult(sqlmock.NewResult(0, 1)) + Expect(createDatabaseFDW(ctx, db, fdw)).Error().NotTo(HaveOccurred()) + }) + + It("fails when the fdw could not be created", func(ctx SpecContext) { + dbMock. + ExpectExec(createFDWSQL). + WillReturnError(testError) + Expect(createDatabaseFDW(ctx, db, fdw)).Error().To(Equal(testError)) + }) + + It("success with NO HANDLER and NO VALIDATOR", func(ctx SpecContext) { + dbMock. + ExpectExec("CREATE FOREIGN DATA WRAPPER \"testfdw\" NO HANDLER NO VALIDATOR"). 
+ WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(createDatabaseFDW(ctx, db, apiv1.FDWSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "testfdw", + Ensure: "present", + }, + Handler: "-", + Validator: "-", + Owner: "owner", + })).Error().NotTo(HaveOccurred()) + }) + }) + + Context("updateDatabaseFDW", func() { + It("does nothing when the fdw has been correctly reconciled", func(ctx SpecContext) { + Expect(updateDatabaseFDW(ctx, db, fdw, &fdwInfo{ + Name: fdw.Name, + Handler: fdw.Handler, + Validator: fdw.Validator, + Owner: fdw.Owner, + })).Error().NotTo(HaveOccurred()) + }) + + It("updates the fdw handler", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" HANDLER \"testhandler\""). + WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: "oldhandler", Validator: fdw.Validator, Owner: fdw.Owner})). + Error().NotTo(HaveOccurred()) + }) + + It("handles removal of handler when not specified", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" NO HANDLER"). + WillReturnResult(sqlmock.NewResult(0, 1)) + + fdw.Handler = "-" + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: "oldhandler", Validator: fdw.Validator, Owner: fdw.Owner})). + Error().NotTo(HaveOccurred()) + }) + + It("fail when setting the handler failed", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" HANDLER \"testhandler\""). + WillReturnError(testError) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: "oldhandler", Validator: fdw.Validator, Owner: fdw.Owner})). + Error().To(MatchError(testError)) + }) + + It("updates the fdw validator", func(ctx SpecContext) { + dbMock.ExpectExec( + "ALTER FOREIGN DATA WRAPPER \"testfdw\" VALIDATOR \"testvalidator\""). + WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: "oldvalidator", Owner: fdw.Owner})). + Error().NotTo(HaveOccurred()) + }) + + It("handles removal of validator when not specified", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" NO VALIDATOR"). + WillReturnResult(sqlmock.NewResult(0, 1)) + + fdw.Validator = "-" + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: "oldvalidator", Owner: fdw.Owner})). + Error().NotTo(HaveOccurred()) + }) + + It("fail when setting the validator failed", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" VALIDATOR \"testvalidator\""). + WillReturnError(testError) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: "oldvalidator", Owner: fdw.Owner})). 
+ Error().To(MatchError(testError)) + }) + + It("add new fdw options", func(ctx SpecContext) { + fdw.Options = []apiv1.OptionSpec{ + { + Name: "add_option", + OptionSpecValue: apiv1.OptionSpecValue{ + Value: "value", + Ensure: apiv1.EnsurePresent, + }, + }, + } + info := &fdwInfo{ + Name: fdw.Name, + Handler: fdw.Handler, + Validator: fdw.Validator, + Options: map[string]apiv1.OptionSpecValue{ + "modify_option": {Value: "old_value"}, + "remove_option": {Value: "value"}, + }, + Owner: fdw.Owner, + } + + expectedSQL := "ALTER FOREIGN DATA WRAPPER \"testfdw\" OPTIONS (ADD \"add_option\" 'value')" + dbMock.ExpectExec(expectedSQL).WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, info)).Error().NotTo(HaveOccurred()) + }) + + It("modify the fdw options", func(ctx SpecContext) { + fdw.Options = []apiv1.OptionSpec{ + { + Name: "modify_option", + OptionSpecValue: apiv1.OptionSpecValue{ + Value: "new_value", + Ensure: apiv1.EnsurePresent, + }, + }, + } + info := &fdwInfo{ + Name: fdw.Name, + Handler: fdw.Handler, + Validator: fdw.Validator, + Options: map[string]apiv1.OptionSpecValue{ + "modify_option": {Value: "old_value"}, + "remove_option": {Value: "value"}, + }, + Owner: fdw.Owner, + } + + expectedSQL := "ALTER FOREIGN DATA WRAPPER \"testfdw\" OPTIONS (SET \"modify_option\" 'new_value')" + dbMock.ExpectExec(expectedSQL).WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, info)).Error().NotTo(HaveOccurred()) + }) + + It("remove new fdw options", func(ctx SpecContext) { + fdw.Options = []apiv1.OptionSpec{ + { + Name: "remove_option", + OptionSpecValue: apiv1.OptionSpecValue{ + Value: "value", + Ensure: apiv1.EnsureAbsent, + }, + }, + } + info := &fdwInfo{ + Name: fdw.Name, + Handler: fdw.Handler, + Validator: fdw.Validator, + Options: map[string]apiv1.OptionSpecValue{ + "modify_option": {Value: "old_value"}, + "remove_option": {Value: "value"}, + }, + Owner: fdw.Owner, + } + + expectedSQL := "ALTER FOREIGN DATA WRAPPER \"testfdw\" OPTIONS (DROP \"remove_option\")" + dbMock.ExpectExec(expectedSQL).WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, info)).Error().NotTo(HaveOccurred()) + }) + + It("updates the fdw owner", func(ctx SpecContext) { + dbMock.ExpectExec( + "ALTER FOREIGN DATA WRAPPER \"testfdw\" OWNER TO \"owner\""). + WillReturnResult(sqlmock.NewResult(0, 1)) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: fdw.Validator, Owner: "oldowner"})). + Error().NotTo(HaveOccurred()) + }) + + It("fail when setting the owner failed", func(ctx SpecContext) { + dbMock. + ExpectExec("ALTER FOREIGN DATA WRAPPER \"testfdw\" OWNER TO \"owner\""). + WillReturnError(testError) + + Expect(updateDatabaseFDW(ctx, db, fdw, + &fdwInfo{Name: fdw.Name, Handler: fdw.Handler, Validator: fdw.Validator, Owner: "old"})). + Error().To(MatchError(testError)) + }) + + It("updates the usages permissions of the fdw", func(ctx SpecContext) { + dbMock.ExpectExec( + "GRANT USAGE ON FOREIGN DATA WRAPPER \"testfdw\" TO \"owner\""). + WillReturnResult(sqlmock.NewResult(0, 1)) + fdw.Usages = []apiv1.UsageSpec{ + { + Name: "owner", + Type: "grant", + }, + } + Expect(updateDatabaseFDWUsage(ctx, db, &fdw)).Error().NotTo(HaveOccurred()) + }) + }) + + Context("dropDatabaseFDW", func() { + dropFDWSQL := "DROP FOREIGN DATA WRAPPER IF EXISTS \"testfdw\"" + + It("returns success when the foreign data wrapper has been dropped", func(ctx SpecContext) { + dbMock. 
+ ExpectExec(dropFDWSQL). + WillReturnResult(sqlmock.NewResult(0, 1)) + Expect(dropDatabaseFDW(ctx, db, fdw)).Error().NotTo(HaveOccurred()) + }) + + It("returns an error when the DROP statement failed", func(ctx SpecContext) { + dbMock. + ExpectExec(dropFDWSQL). + WillReturnError(testError) + + Expect(dropDatabaseFDW(ctx, db, fdw)).Error().To(Equal(testError)) + }) + }) +}) diff --git a/internal/webhook/v1/database_webhook.go b/internal/webhook/v1/database_webhook.go index 34249556b7..f4250dc61f 100644 --- a/internal/webhook/v1/database_webhook.go +++ b/internal/webhook/v1/database_webhook.go @@ -154,6 +154,7 @@ func (v *DatabaseCustomValidator) validate(d *apiv1.Database) (allErrs field.Err validations := []validationFunc{ v.validateExtensions, v.validateSchemas, + v.validateFDWs, } for _, validate := range validations { @@ -212,3 +213,61 @@ func (v *DatabaseCustomValidator) validateSchemas(d *apiv1.Database) field.Error return result } + +// validateFDWs validates the database Foreign Data Wrappers +// FDWs must be unique in .spec.fdws +func (v *DatabaseCustomValidator) validateFDWs(d *apiv1.Database) field.ErrorList { + var result field.ErrorList + + fdwNames := stringset.New() + for i, fdw := range d.Spec.FDWs { + name := fdw.Name + if fdwNames.Has(name) { + result = append( + result, + field.Duplicate( + field.NewPath("spec", "fdws").Index(i).Child("name"), + name, + ), + ) + } + + // Validate the options of the FDW + optionNames := stringset.New() + for k, option := range fdw.Options { + optionName := option.Name + if optionNames.Has(optionName) { + result = append( + result, + field.Duplicate( + field.NewPath("spec", "fdws").Index(i).Child("options").Index(k).Child("name"), + optionName, + ), + ) + } + + optionNames.Put(optionName) + } + + // Validate the usage of the FDW + usageNames := stringset.New() + for j, usage := range fdw.Usages { + usageName := usage.Name + if usageNames.Has(usageName) { + result = append( + result, + field.Duplicate( + field.NewPath("spec", "fdws").Index(i).Child("usages").Index(j).Child("name"), + usageName, + ), + ) + } + + usageNames.Put(usageName) + } + + fdwNames.Put(name) + } + + return result +} diff --git a/internal/webhook/v1/database_webhook_test.go b/internal/webhook/v1/database_webhook_test.go index d0259ab149..cebb1d24e2 100644 --- a/internal/webhook/v1/database_webhook_test.go +++ b/internal/webhook/v1/database_webhook_test.go @@ -46,6 +46,14 @@ var _ = Describe("Database validation", func() { } } + createFDWSpec := func(name string) apiv1.FDWSpec { + return apiv1.FDWSpec{ + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: name, + Ensure: apiv1.EnsurePresent, + }, + } + } BeforeEach(func() { v = &DatabaseCustomValidator{} }) @@ -104,5 +112,111 @@ var _ = Describe("Database validation", func() { }, 1, ), + + Entry( + "doesn't complain with distinct FDWs and usage names", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + FDWs: []apiv1.FDWSpec{ + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "fdw1", + Ensure: apiv1.EnsurePresent, + }, + Usages: []apiv1.UsageSpec{ + {Name: "usage1"}, + {Name: "usage2"}, + }, + Options: []apiv1.OptionSpec{ + {Name: "option1"}, + {Name: "option2"}, + }, + }, + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "fdw2", + Ensure: apiv1.EnsurePresent, + }, + Usages: []apiv1.UsageSpec{ + {Name: "usage3"}, + {Name: "usage4"}, + }, + Options: []apiv1.OptionSpec{ + {Name: "option3"}, + {Name: "option4"}, + }, + }, + }, + }, + }, + 0, + ), + + Entry( + "complain if there are duplicate FDWs", + 
&apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + FDWs: []apiv1.FDWSpec{ + createFDWSpec("postgres_fdw"), + createFDWSpec("mysql_fdw"), + createFDWSpec("postgres_fdw"), + }, + }, + }, + 1, + ), + + Entry( + "complain if there are duplicate usage names within an FDW", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + FDWs: []apiv1.FDWSpec{ + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "postgre_fdw", + Ensure: apiv1.EnsurePresent, + }, + Usages: []apiv1.UsageSpec{ + {Name: "usage1"}, + {Name: "usage2"}, + {Name: "usage1"}, + }, + }, + }, + }, + }, + 1, + ), + + Entry( + "complains for duplicate FDW and duplicate usage names", + &apiv1.Database{ + Spec: apiv1.DatabaseSpec{ + FDWs: []apiv1.FDWSpec{ + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "duplicate_fdw", + Ensure: apiv1.EnsurePresent, + }, + Usages: []apiv1.UsageSpec{ + {Name: "dup_usage"}, + {Name: "dup_usage"}, + }, + Options: []apiv1.OptionSpec{ + {Name: "dup_option"}, + {Name: "dup_option"}, + }, + }, + { + DatabaseObjectSpec: apiv1.DatabaseObjectSpec{ + Name: "duplicate_fdw", + Ensure: apiv1.EnsurePresent, + }, + }, + }, + }, + }, + 3, + ), ) }) diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go index 85bbe3d7d3..9ba006e12b 100644 --- a/tests/e2e/asserts_test.go +++ b/tests/e2e/asserts_test.go @@ -553,6 +553,10 @@ func schemaExistsQuery(namespaceName string) string { return fmt.Sprintf("SELECT EXISTS(SELECT FROM pg_catalog.pg_namespace WHERE nspname='%v')", namespaceName) } +func fdwExistsQuery(fdwName string) string { + return fmt.Sprintf("SELECT EXISTS(SELECT FROM pg_catalog.pg_foreign_data_wrapper WHERE fdwname='%v')", fdwName) +} + // AssertDataExpectedCount verifies that an expected amount of rows exists on the table func AssertDataExpectedCount( env *environment.TestingEnvironment, diff --git a/tests/e2e/declarative_database_management_test.go b/tests/e2e/declarative_database_management_test.go index d896b7986f..9ccebdf60d 100644 --- a/tests/e2e/declarative_database_management_test.go +++ b/tests/e2e/declarative_database_management_test.go @@ -154,6 +154,16 @@ var _ = Describe("Declarative database management", Label(tests.LabelSmoke, test } }) + By("verifying the fdw presence in the target database", func() { + primaryPodInfo, err := clusterutils.GetPrimary(env.Ctx, env.Client, namespace, clusterName) + Expect(err).ToNot(HaveOccurred()) + + for _, fdwSpec := range database.Spec.FDWs { + Eventually(QueryMatchExpectationPredicate(primaryPodInfo, exec.DatabaseName(database.Spec.Name), + fdwExistsQuery(fdwSpec.Name), boolPGOutput(true)), 30).Should(Succeed()) + } + }) + By("removing the Database object", func() { Expect(objects.Delete(env.Ctx, env.Client, &database)).To(Succeed()) }) diff --git a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template index 40f7cfe7e9..ec59c21a42 100644 --- a/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database-with-delete-reclaim-policy.yaml.template @@ -14,6 +14,20 @@ spec: extensions: - name: bloom ensure: present + - name: postgres_fdw + ensure: present schemas: - name: test_schema ensure: present + fdws: + - name: postgres_fdw + ensure: present + usage: + - name: app + type: grant + - name: mywrapper + options: + - name: debug + value: 'true' + ensure: present + ensure: present diff --git 
a/tests/e2e/fixtures/declarative_databases/database.yaml.template b/tests/e2e/fixtures/declarative_databases/database.yaml.template index a9c43b0d1e..1d67109a2f 100644 --- a/tests/e2e/fixtures/declarative_databases/database.yaml.template +++ b/tests/e2e/fixtures/declarative_databases/database.yaml.template @@ -14,6 +14,20 @@ spec: extensions: - name: bloom ensure: present + - name: postgres_fdw + ensure: present schemas: - name: test_schema ensure: present + fdws: + - name: postgres_fdw + ensure: present + usage: + - name: app + type: grant + - name: mywrapper + options: + - name: debug + value: 'true' + ensure: present + ensure: present From a7c6ea9dcd738604a7b5c63357648d4b1742ffb2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 23:17:49 +0200 Subject: [PATCH 802/836] chore(deps): update dependency golang to v1.25.0 (main) (#8353) --- .github/workflows/backport.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/refresh-licenses.yml | 2 +- .github/workflows/release-publish.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 4ae2e1057b..60679b5462 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -12,7 +12,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.6" + GOLANG_VERSION: "1.25.0" jobs: # Label the source pull request with 'backport-requested' and all supported releases label, the goal is, by default diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0a057f6e1f..178fcbe653 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -35,7 +35,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.6" + GOLANG_VERSION: "1.25.0" jobs: duplicate_runs: diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index cb9ff9b7f5..6e21a8d63e 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -37,7 +37,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.6" + GOLANG_VERSION: "1.25.0" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver KIND_VERSION: "v0.29.0" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 9a4980d5d3..f9e7de92e7 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -19,7 +19,7 @@ permissions: read-all # set up environment variables to be used across all the jobs env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.6" + GOLANG_VERSION: "1.25.0" # renovate: datasource=github-releases depName=golangci/golangci-lint versioning=loose GOLANGCI_LINT_VERSION: "v2.4.0" KUBEBUILDER_VERSION: "2.3.1" diff --git a/.github/workflows/refresh-licenses.yml b/.github/workflows/refresh-licenses.yml index c666de0f96..202e176cb4 100644 --- 
a/.github/workflows/refresh-licenses.yml +++ b/.github/workflows/refresh-licenses.yml @@ -10,7 +10,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.6" + GOLANG_VERSION: "1.25.0" jobs: licenses: diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index b428ec1ced..a387dbbf27 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -11,7 +11,7 @@ permissions: read-all env: # renovate: datasource=golang-version depName=golang versioning=loose - GOLANG_VERSION: "1.24.6" + GOLANG_VERSION: "1.25.0" REGISTRY: "ghcr.io" jobs: From bccdc74f0ea31ed56957e898b33cb7357b6bfa1c Mon Sep 17 00:00:00 2001 From: Maciej Wawrzyniak <48443803+Maaciekk@users.noreply.github.com> Date: Wed, 20 Aug 2025 11:48:24 +0200 Subject: [PATCH 803/836] docs: add Linux Polska to `ADOPTERS.md` (#8321) Signed-off-by: Maciej Wawrzyniak --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index a7b572bdaf..17e77c2276 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -67,3 +67,4 @@ This list is sorted in chronological order, based on the submission date. | [Xata](https://xata.io) | [@tsg](https://github.com/tsg) | 2025-05-29 | Xata is a PostgreSQL platform offering instant database branching, separation of storage/compute, and PII anonymization. It uses CloudNativePG for the compute part. | | [Vera Rubin Observatory](https://www.lsst.org) | [@cbarria](https://github.com/cbarria) | 2025-06-17 | At the heart of our operations, CloudNativePG supports the telescope's systems and plays a key role in making astronomical data openly accessible to the world. | | [Brella](https://www.brella.io) | [@vitobotta](https://github.com/vitobotta/) | 2025-08-11 | Brella is an event management platform that works in new and smart ways. Postgres is at the core of how our platform is built. With CloudNativePG, we moved from using a managed Postgres service - Cloud SQL on Google Cloud - to running Postgres clusters directly in Kubernetes. This change saves us money and gives us more control. At the same time, we didn't lose any functionality.| +| [Linux Polska](https://linuxpolska.com) | [@maaciekk](https://github.com/maaciekk) | 2025-08-11 | CloudNativePG is our gold standard for providing highly available databases in a Kubernetes environment, powering mission-critical applications across various industries like healthcare and finance. Independent rebuilds of CloudNativePG images are also part of our SourceMation stack. | From 9e0531293b30b112a94dd0315e86d63ff504b17d Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 21 Aug 2025 10:19:33 +0200 Subject: [PATCH 804/836] chore: fix azure tests for backup and PITR (#8333) The Azure tests were taking the backup from the wrong cluster when preparing to restore, causing a failure since our last patch to fix the backup names. Closes #8332 Signed-off-by: Jonathan Gonzalez V. 
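For context, a declarative `Backup` targets exactly one cluster through `spec.cluster.name`, so the PITR fixture must reference the cluster that actually wrote the base backup and WAL archive to the Azure blob store, not the cluster restored from it. A minimal sketch of the corrected fixture follows; the resource names are taken from this patch, while the `apiVersion`/`kind` pair is assumed from the CNPG v1 API used elsewhere in this series:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Backup
metadata:
  name: cluster-backup-pitr
spec:
  cluster:
    # Reference the cluster that produced the backup; pointing at the
    # restored/external cluster makes the recovery look up a backup name
    # that does not exist in the object store.
    name: source-cluster-azure
```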
--- tests/e2e/backup_restore_azure_test.go | 4 ++-- .../recovery_external_clusters/backup-azure-blob-pitr.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/e2e/backup_restore_azure_test.go b/tests/e2e/backup_restore_azure_test.go index 0e7ce0c196..e6eaea7e24 100644 --- a/tests/e2e/backup_restore_azure_test.go +++ b/tests/e2e/backup_restore_azure_test.go @@ -306,7 +306,7 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes clusterName, sourceBackupFileAzurePITR, AzureConfiguration, - 1, + 2, currentTimestamp, ) @@ -413,7 +413,7 @@ var _ = Describe("Azure - Clusters Recovery From Barman Object Store", Label(tes clusterName, sourceBackupFileAzurePITRSAS, AzureConfiguration, - 1, + 2, currentTimestamp, ) diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml index a8f134f745..16f042b3d5 100644 --- a/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml +++ b/tests/e2e/fixtures/backup/recovery_external_clusters/backup-azure-blob-pitr.yaml @@ -4,4 +4,4 @@ metadata: name: cluster-backup-pitr spec: cluster: - name: external-cluster-azure + name: source-cluster-azure From d029d2af6b799c4af6d0f046dfd855ecc0fbe398 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 21 Aug 2025 15:18:03 +0200 Subject: [PATCH 805/836] fix(deps): update all non-major go dependencies (main) (#8390) This PR contains the following updates: https://github.com/onsi/ginkgo `v2.23.4` -> `v2.25.0` https://github.com/grpc/grpc-go `v1.74.2` -> `v1.75.0` --- go.mod | 14 +++++++------- go.sum | 50 ++++++++++++++++++++++++++------------------------ 2 files changed, 33 insertions(+), 31 deletions(-) diff --git a/go.mod b/go.mod index 2744e3e629..bb8a740e29 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.23.4 + github.com/onsi/ginkgo/v2 v2.25.0 github.com/onsi/gomega v1.38.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1 github.com/prometheus/client_golang v1.23.0 @@ -36,7 +36,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/term v0.34.0 - google.golang.org/grpc v1.74.2 + google.golang.org/grpc v1.75.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.4 k8s.io/apiextensions-apiserver v0.33.4 @@ -98,16 +98,16 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.3 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/net v0.41.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect golang.org/x/sys v0.35.0 // indirect - golang.org/x/text v0.27.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.34.0 // indirect + golang.org/x/tools v0.36.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/protobuf v1.36.7 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum 
b/go.sum index ed168c61f0..bb90d40c2b 100644 --- a/go.sum +++ b/go.sum @@ -143,8 +143,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= +github.com/onsi/ginkgo/v2 v2.25.0 h1:LJu94oDZUdgnw+GD0Sk/iwG9c5Fnr1vLgMb4FUUwWxE= +github.com/onsi/ginkgo/v2 v2.25.0/go.mod h1:ppTWQ1dh9KM/F1XgpeRqelR+zHVwV81DGRSDnFxK7Sk= github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -199,16 +199,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ 
-226,16 +226,16 @@ go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -254,26 +254,28 @@ golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 1e6f2997e114a0c2d97d5b46968abc92ec3f5981 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 21 Aug 2025 19:32:46 +0200 Subject: [PATCH 806/836] chore(deps): update all non-major github action (main) (#8402) This PR contains the following updates: https://github.com/docker/bake-action `37816e7` -> `3acf805` https://github.com/github/codeql-action `96f518a` -> `3c3833e` snyk/actions `86b1cee` -> `ae57bdf` --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/continuous-integration.yml | 6 +++--- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/release-publish.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 178fcbe653..9f82a82894 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 + uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 + uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index 6e21a8d63e..f42523e7a5 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -376,7 +376,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/bake-action@37816e747588cb137173af99ab33873600c46ea8 # v6 + uses: docker/bake-action@3acf805d94d93a86cce4ca44798a76464a75b88c # v6 id: bake-push env: environment: "testing" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index f9e7de92e7..f6dfaa6daf 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -559,7 +559,7 @@ jobs: password: ${{ env.REGISTRY_PASSWORD }} - name: Build and push - uses: docker/bake-action@37816e747588cb137173af99ab33873600c46ea8 # v6 + uses: docker/bake-action@3acf805d94d93a86cce4ca44798a76464a75b88c # v6 id: bake-push env: environment: "testing" @@ -605,7 +605,7 @@ jobs: accept-keywords: key - name: Run Snyk to check Docker image for vulnerabilities - uses: snyk/actions/docker@86b1cee1b8e110a78d528b3e1328a80e218111d2 # master + uses: snyk/actions/docker@ae57bdf611f027808b07a5dc3baa99fb34e69854 # master if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 + uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index 897658aa75..d48b0410d2 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). 
# Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 + uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml index a387dbbf27..b71b43bd0a 100644 --- a/.github/workflows/release-publish.yml +++ b/.github/workflows/release-publish.yml @@ -198,7 +198,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push - uses: docker/bake-action@37816e747588cb137173af99ab33873600c46ea8 # v6 + uses: docker/bake-action@3acf805d94d93a86cce4ca44798a76464a75b88c # v6 id: bake-push env: environment: "production" diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 3aadd73c8e..5122ce9a35 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 + uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@96f518a34f7a870018057716cc4d7a5c014bd61c # v3 + uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 with: sarif_file: snyk-test.sarif From 9c7099ed9f34dab84a312e647f35a2540efda2d8 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 10:05:57 +0200 Subject: [PATCH 807/836] fix(deps): update k8s.io/utils digest to 0af2bda (main) (#8404) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bb8a740e29..da120bc74c 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( k8s.io/apimachinery v0.33.4 k8s.io/cli-runtime v0.33.4 k8s.io/client-go v0.33.4 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/yaml v1.6.0 ) diff --git a/go.sum b/go.sum index bb90d40c2b..6cf8e0f15b 100644 --- a/go.sum +++ b/go.sum @@ -303,8 +303,8 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= From 
f38f9f918d6b4a1d22f68f7b3b323081a9d0decb Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Wed, 27 Aug 2025 10:18:34 +0200 Subject: [PATCH 808/836] chore: set `STANDBY_TCP_USER_TIMEOUT` from e2e manifests (#8400) Closes #8327 Signed-off-by: Jonathan Gonzalez V. --- hack/e2e/env_override_customized.yaml.template | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hack/e2e/env_override_customized.yaml.template b/hack/e2e/env_override_customized.yaml.template index 3cdae2a68e..dad8b70c1c 100644 --- a/hack/e2e/env_override_customized.yaml.template +++ b/hack/e2e/env_override_customized.yaml.template @@ -12,6 +12,9 @@ spec: envFrom: - configMapRef: name: controller-manager-env + env: + - name: STANDBY_TCP_USER_TIMEOUT + value: "5000" args: - controller - --leader-elect=${LEADER_ELECTION} From 376e2cead596a67ac1dd5ae1e448a4732144ccaa Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 13:16:39 +0200 Subject: [PATCH 809/836] chore(deps): update container distroless digests (main) (#8430) This PR contains the following updates: gcr.io/distroless/static-debian12 `cdf4daa` -> `a9f88e0` gcr.io/distroless/static-debian12 `0895d6f` -> `a855ba8` --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 9f34614a46..2aa61ae76a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,8 @@ -ARG BASE=gcr.io/distroless/static-debian12:nonroot@sha256:cdf4daaf154e3e27cfffc799c16f343a384228f38646928a1513d925f473cb46 +ARG BASE=gcr.io/distroless/static-debian12:nonroot@sha256:a9f88e0d99c1ceedbce565fad7d3f96744d15e6919c19c7dafe84a6dd9a80c61 # This builder stage it's only because we need a command # to create a symlink and we do not have it in a distroless image -FROM gcr.io/distroless/static-debian12:debug-nonroot@sha256:0895d6fc256a6938a60c87d92e1148eec0d36198bff9c5d3082e6a56db7756bd AS builder +FROM gcr.io/distroless/static-debian12:debug-nonroot@sha256:a855ba843839f3344272cb64183489d91c190af11bec454e5d17f341255944e1 AS builder ARG TARGETARCH SHELL ["/busybox/sh", "-c"] RUN ln -sf operator/manager_${TARGETARCH} manager From 908922deaf46ef0f95fd7197f8d5391ecde58647 Mon Sep 17 00:00:00 2001 From: Peggie Date: Wed, 27 Aug 2025 14:08:46 +0200 Subject: [PATCH 810/836] feat: Public Cloud K8S versions update (#8403) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. 
Co-authored-by: public-cloud-k8s-versions-check --- .github/eks_versions.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/eks_versions.json b/.github/eks_versions.json index 1d12057c20..f452ade413 100644 --- a/.github/eks_versions.json +++ b/.github/eks_versions.json @@ -1,6 +1,5 @@ [ "1.33", "1.32", - "1.31", - "1.30" + "1.31" ] From bb0df10fa4a4634e163ecc39b47f830b751d9868 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 19:35:08 +0200 Subject: [PATCH 811/836] fix(deps): update all non-major go dependencies (main) (#8420) This PR contains the following updates: https://github.com/onsi/ginkgo `v2.25.0` -> `v2.25.1` https://github.com/onsi/gomega `v1.38.0` -> `v1.38.2` --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index da120bc74c..cc5d542f8a 100644 --- a/go.mod +++ b/go.mod @@ -23,8 +23,8 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.25.0 - github.com/onsi/gomega v1.38.0 + github.com/onsi/ginkgo/v2 v2.25.1 + github.com/onsi/gomega v1.38.2 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1 github.com/prometheus/client_golang v1.23.0 github.com/robfig/cron v1.2.0 @@ -97,7 +97,7 @@ require ( github.com/xlab/treeprint v1.2.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - go.yaml.in/yaml/v3 v3.0.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.41.0 // indirect golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect diff --git a/go.sum b/go.sum index 6cf8e0f15b..c6927d421f 100644 --- a/go.sum +++ b/go.sum @@ -143,10 +143,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.25.0 h1:LJu94oDZUdgnw+GD0Sk/iwG9c5Fnr1vLgMb4FUUwWxE= -github.com/onsi/ginkgo/v2 v2.25.0/go.mod h1:ppTWQ1dh9KM/F1XgpeRqelR+zHVwV81DGRSDnFxK7Sk= -github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= -github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= +github.com/onsi/ginkgo/v2 v2.25.1 h1:Fwp6crTREKM+oA6Cz4MsO8RhKQzs2/gOIVOUscMAfZY= +github.com/onsi/ginkgo/v2 v2.25.1/go.mod h1:ppTWQ1dh9KM/F1XgpeRqelR+zHVwV81DGRSDnFxK7Sk= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -221,8 +221,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod 
h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= -go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= From 698374057e668bc22658da688fabe88bfcdbc3c5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 21:38:21 +0200 Subject: [PATCH 812/836] chore(deps): update all non-major github action (main) (#8419) This PR contains the following updates: https://github.com/google-github-actions/setup-gcloud `6189d56` -> `cb1e50a` https://github.com/kubernetes-sigs/kind `v0.29.0` -> `v0.30.0` snyk/actions `ae57bdf` -> `e222141` --- .github/workflows/continuous-delivery.yml | 4 ++-- .github/workflows/continuous-integration.yml | 4 ++-- .github/workflows/k8s-versions-check.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f42523e7a5..edc405100c 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -40,7 +40,7 @@ env: GOLANG_VERSION: "1.25.0" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver - KIND_VERSION: "v0.29.0" + KIND_VERSION: "v0.30.0" # renovate: datasource=github-releases depName=rook/rook versioning=loose ROOK_VERSION: "v1.17.7" EXTERNAL_SNAPSHOTTER_VERSION: "v8.3.0" @@ -1665,7 +1665,7 @@ jobs: credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}' - name: Set up Cloud SDK and kubectl - uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a # v2 + uses: google-github-actions/setup-gcloud@cb1e50a9932213ecece00a606661ae9ca44f3397 # v2 with: project_id: ${{ secrets.GCP_PROJECT_ID }} install_components: 'kubectl,gke-gcloud-auth-plugin' diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index f6dfaa6daf..8fe52c4d17 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -24,7 +24,7 @@ env: GOLANGCI_LINT_VERSION: "v2.4.0" KUBEBUILDER_VERSION: "2.3.1" # renovate: datasource=github-tags depName=kubernetes-sigs/kind versioning=semver - KIND_VERSION: "v0.29.0" + KIND_VERSION: "v0.30.0" OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing" API_DOC_NAME: "cloudnative-pg.v1.md" SLACK_USERNAME: "cnpg-bot" @@ -605,7 +605,7 @@ jobs: accept-keywords: key - name: Run Snyk to check Docker image for vulnerabilities - uses: snyk/actions/docker@ae57bdf611f027808b07a5dc3baa99fb34e69854 # master + uses: snyk/actions/docker@e2221410bff24446ba09102212d8bc75a567237d # master if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index 8546b7d261..f2fccef324 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -67,7 +67,7 @@ jobs: if: 
github.event.inputs.limit == null || github.event.inputs.limit == 'gke' - name: Set up Cloud SDK for GKE - uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a # v2 + uses: google-github-actions/setup-gcloud@cb1e50a9932213ecece00a606661ae9ca44f3397 # v2 with: project_id: ${{ secrets.GCP_PROJECT_ID }} if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' From 7009f5bd5dd8a405b432d20e49ef2ce7db525b8a Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Thu, 28 Aug 2025 14:34:07 +0200 Subject: [PATCH 813/836] chore: ignore shellcheck warnings for SC2329 (#8456) The latest version of shellcheck adds check SC2329, which flags unused functions. We explicitly ignore that check here because, in our case, the functions are called indirectly and are always used. Closes #8455 Signed-off-by: Jonathan Gonzalez V. --- hack/e2e/run-e2e-kind.sh | 1 + hack/install-cnpg-plugin.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 767b1fb70b..6afbebe3ab 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -51,6 +51,7 @@ export CONTROLLER_IMG_PRIME_DIGEST=${CONTROLLER_IMG_PRIME_DIGEST:-""} export DOCKER_REGISTRY_MIRROR=${DOCKER_REGISTRY_MIRROR:-} export TEST_CLOUD_VENDOR="local" +# shellcheck disable=SC2329 cleanup() { if [ "${PRESERVE_CLUSTER}" = false ]; then "${HACK_DIR}/setup-cluster.sh" destroy || true diff --git a/hack/install-cnpg-plugin.sh b/hack/install-cnpg-plugin.sh index 8d7a987d22..58299a69c1 100644 --- a/hack/install-cnpg-plugin.sh +++ b/hack/install-cnpg-plugin.sh @@ -154,6 +154,7 @@ is_command() { echoerr() { echo "$@" 1>&2 } +# shellcheck disable=SC2329 log_prefix() { echo "$0" } From c593eb231a1e1bfaf17bff63835f25bff382a3f3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 28 Aug 2025 15:18:59 +0200 Subject: [PATCH 814/836] chore(deps): update kindest/node docker tag to v1.34.0 (main) (#8447) --- hack/e2e/run-e2e-kind.sh | 2 +- hack/setup-cluster.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 6afbebe3ab..b4601e8391 100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -33,7 +33,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.33.2 +KIND_NODE_DEFAULT_VERSION=v1.34.0 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index fbec426386..7901d8b1da 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -27,7 +27,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.33.2 +KIND_NODE_DEFAULT_VERSION=v1.34.0 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.17.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.3.0 EXTERNAL_PROVISIONER_VERSION=v5.3.0 From 11e62c538b1f2284795fb4b77052143c3910bb5a Mon Sep 17 00:00:00 2001 From: Peggie Date: Thu, 28 Aug 2025 16:19:28 +0200 Subject: [PATCH 815/836] feat: Public Cloud K8S versions update (#8450) Update the versions used to test the operator on public cloud providers Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. 
Co-authored-by: public-cloud-k8s-versions-check --- .github/kind_versions.json | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/kind_versions.json b/.github/kind_versions.json index 2a05db31e3..9c0a6319a3 100644 --- a/.github/kind_versions.json +++ b/.github/kind_versions.json @@ -1,7 +1,8 @@ [ - "v1.33.2", - "v1.32.5", - "v1.31.9", + "v1.34.0", + "v1.33.4", + "v1.32.8", + "v1.31.12", "v1.30.13", "v1.29.14", "v1.28.15", From fe5eb9d6b7ddcee6165fd3c0fd99741a5aec055c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 29 Aug 2025 09:51:15 +0200 Subject: [PATCH 816/836] fix(deps): update kubernetes packages to v0.34.0 (main) (#8449) This PR contains the following updates: https://github.com/kubernetes/api `v0.33.4` -> `v0.34.0` https://github.com/kubernetes/apiextensions-apiserver `v0.33.4` -> `v0.34.0` https://github.com/kubernetes/apimachinery `v0.33.4` -> `v0.34.0` https://github.com/kubernetes/cli-runtime `v0.33.4` -> `v0.34.0` https://github.com/kubernetes/client-go `v0.33.4` -> `v0.34.0` Signed-off-by: Jonathan Gonzalez V. Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Jonathan Gonzalez V. --- .../bases/postgresql.cnpg.io_clusters.yaml | 189 ++++++- .../crd/bases/postgresql.cnpg.io_poolers.yaml | 473 ++++++++++++++++-- go.mod | 29 +- go.sum | 58 ++- 4 files changed, 634 insertions(+), 115 deletions(-) diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 57da62e77c..92725b23b0 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -431,8 +431,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -2127,7 +2127,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -2185,6 +2187,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. 
+ If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -2264,8 +2303,9 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: Optional text to prepend to the name of each environment - variable. Must be a C_IDENTIFIER. + description: |- + Optional text to prepend to the name of each environment variable. + May consist of any printable ASCII characters except '='. type: string secretRef: description: The Secret to select from @@ -2501,15 +2541,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -4743,6 +4781,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. 
The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be addressed + to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4953,7 +5096,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -5274,15 +5417,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -5532,15 +5673,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -5942,15 +6081,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. 
This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index 054cfa85d4..11f05cc775 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -1348,8 +1348,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1747,8 +1747,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1807,6 +1808,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1869,8 +1907,8 @@ spec: envFrom: description: |- List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple + The keys defined within a source may consist of any printable ASCII characters except '='. + When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. @@ -1897,8 +1935,9 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: Optional text to prepend to the name - of each environment variable. Must be a C_IDENTIFIER. + description: |- + Optional text to prepend to the name of each environment variable. + May consist of any printable ASCII characters except '='. type: string secretRef: description: The Secret to select from @@ -2577,7 +2616,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2632,10 +2671,10 @@ spec: restartPolicy: description: |- RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". - For non-init containers or when this field is not specified, + This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: + Additionally, setting the RestartPolicy as "Always" for the init container will + have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" @@ -2647,6 +2686,59 @@ spec: init container is started, or after any startupProbe has successfully completed. type: string + restartPolicyRules: + description: |- + Represents a list of rules to be checked to determine if the + container should be restarted on exit. The rules are evaluated in + order. Once a rule matches a container exit condition, the remaining + rules are ignored. If no rule matches the container exit condition, + the Container-level restart policy determines the whether the container + is restarted or not. Constraints on the rules: + - At most 20 rules are allowed. + - Rules can have the same action. + - Identical rules are not forbidden in validations. + When rules are specified, container MUST set RestartPolicy explicitly + even it if matches the Pod's RestartPolicy. + items: + description: ContainerRestartRule describes how a + container exit is handled. + properties: + action: + description: |- + Specifies the action taken on a container exit if the requirements + are satisfied. The only possible value is "Restart" to restart the + container. 
+ type: string + exitCodes: + description: Represents the exit codes to check + on container exits. + properties: + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. Possible values are: + - In: the requirement is satisfied if the container exit code is in the + set of specified values. + - NotIn: the requirement is satisfied if the container exit code is + not in the set of specified values. + type: string + values: + description: |- + Specifies the set of values to check for container exit codes. + At most 255 elements are allowed. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic securityContext: description: |- SecurityContext defines the security options the container should be run with. @@ -3267,8 +3359,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -3327,6 +3420,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -3389,8 +3519,8 @@ spec: envFrom: description: |- List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple + The keys defined within a source may consist of any printable ASCII characters except '='. + When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. @@ -3417,8 +3547,9 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: Optional text to prepend to the name - of each environment variable. Must be a C_IDENTIFIER. + description: |- + Optional text to prepend to the name of each environment variable. 
+ May consist of any printable ASCII characters except '='. type: string secretRef: description: The Secret to select from @@ -4077,7 +4208,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -4133,9 +4264,53 @@ spec: description: |- Restart policy for the container to manage the restart behavior of each container within a pod. - This may only be set for init containers. You cannot set this field on - ephemeral containers. + You cannot set this field on ephemeral containers. type: string + restartPolicyRules: + description: |- + Represents a list of rules to be checked to determine if the + container should be restarted on exit. You cannot set this field on + ephemeral containers. + items: + description: ContainerRestartRule describes how a + container exit is handled. + properties: + action: + description: |- + Specifies the action taken on a container exit if the requirements + are satisfied. The only possible value is "Restart" to restart the + container. + type: string + exitCodes: + description: Represents the exit codes to check + on container exits. + properties: + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. Possible values are: + - In: the requirement is satisfied if the container exit code is in the + set of specified values. + - NotIn: the requirement is satisfied if the container exit code is + not in the set of specified values. + type: string + values: + description: |- + Specifies the set of values to check for container exit codes. + At most 255 elements are allowed. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic securityContext: description: |- Optional: SecurityContext defines the security options the ephemeral container should be run with. @@ -4674,7 +4849,9 @@ spec: hostNetwork: description: |- Host networking requested for this pod. Use the host's network namespace. - If this option is set, the ports that will be used must be specified. + When using HostNetwork you should specify ports so the scheduler is aware. + When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, + and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false. type: boolean hostPID: @@ -4699,6 +4876,19 @@ spec: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. type: string + hostnameOverride: + description: |- + HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. + This field only specifies the pod's hostname and does not affect its DNS records. + When this field is set to a non-empty string: + - It takes precedence over the values set in `hostname` and `subdomain`. + - The Pod's hostname will be set to this value. + - `setHostnameAsFQDN` must be nil or set to false. + - `hostNetwork` must be set to false. + + This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. + Requires the HostnameOverride feature gate to be enabled. 
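For context, a minimal sketch of setting the hostnameOverride field described above via the Go API. It assumes PodSpec gained a HostnameOverride *string field in k8s.io/api v0.34; the hostname value is an example:

// podWithHostnameOverride sets an explicit pod hostname. The Go field name
// is an assumption based on the schema above; the value must be an RFC 1123
// subdomain of at most 64 characters, and hostNetwork must stay false.
func podWithHostnameOverride() corev1.PodSpec {
	hostname := "db-primary.internal.example"
	return corev1.PodSpec{
		HostnameOverride: &hostname, // requires the HostnameOverride feature gate
		HostNetwork:      false,
	}
}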
+ type: string imagePullSecrets: description: |- ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. @@ -4780,8 +4970,9 @@ spec: present in a Container. properties: name: - description: Name of the environment variable. - Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -4840,6 +5031,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount + containing the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -4902,8 +5130,8 @@ spec: envFrom: description: |- List of sources to populate environment variables in the container. - The keys defined within a source must be a C_IDENTIFIER. All invalid keys - will be reported as an event when the container is starting. When a key exists in multiple + The keys defined within a source may consist of any printable ASCII characters except '='. + When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. @@ -4930,8 +5158,9 @@ spec: type: object x-kubernetes-map-type: atomic prefix: - description: Optional text to prepend to the name - of each environment variable. Must be a C_IDENTIFIER. + description: |- + Optional text to prepend to the name of each environment variable. + May consist of any printable ASCII characters except '='. type: string secretRef: description: The Secret to select from @@ -5610,7 +5839,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -5665,10 +5894,10 @@ spec: restartPolicy: description: |- RestartPolicy defines the restart behavior of individual containers in a pod. - This field may only be set for init containers, and the only allowed value is "Always". 
- For non-init containers or when this field is not specified, + This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. - Setting the RestartPolicy as "Always" for the init container will have the following effect: + Additionally, setting the RestartPolicy as "Always" for the init container will + have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" @@ -5680,6 +5909,59 @@ spec: init container is started, or after any startupProbe has successfully completed. type: string + restartPolicyRules: + description: |- + Represents a list of rules to be checked to determine if the + container should be restarted on exit. The rules are evaluated in + order. Once a rule matches a container exit condition, the remaining + rules are ignored. If no rule matches the container exit condition, + the Container-level restart policy determines the whether the container + is restarted or not. Constraints on the rules: + - At most 20 rules are allowed. + - Rules can have the same action. + - Identical rules are not forbidden in validations. + When rules are specified, container MUST set RestartPolicy explicitly + even it if matches the Pod's RestartPolicy. + items: + description: ContainerRestartRule describes how a + container exit is handled. + properties: + action: + description: |- + Specifies the action taken on a container exit if the requirements + are satisfied. The only possible value is "Restart" to restart the + container. + type: string + exitCodes: + description: Represents the exit codes to check + on container exits. + properties: + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. Possible values are: + - In: the requirement is satisfied if the container exit code is in the + set of specified values. + - NotIn: the requirement is satisfied if the container exit code is + not in the set of specified values. + type: string + values: + description: |- + Specifies the set of values to check for container exit codes. + At most 255 elements are allowed. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic securityContext: description: |- SecurityContext defines the security options the container should be run with. @@ -6213,6 +6495,7 @@ spec: - spec.hostPID - spec.hostIPC - spec.hostUsers + - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile @@ -6366,7 +6649,7 @@ spec: description: |- Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for - "cpu" and "memory" resource names only. ResourceClaims are not supported. + "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported. This field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod. @@ -6379,7 +6662,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
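A short sketch of the pod-level resources field whose description is updated above: a budget shared by all containers in the pod, restricted to cpu, memory, and hugepages-* resource names. The PodSpec.Resources surface is assumed from k8s.io/api v0.34 and the quantities are examples:

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podLevelResources declares totals shared by every container in the pod,
// allowing resource sharing among them.
func podLevelResources() corev1.PodSpec {
	return corev1.PodSpec{
		Resources: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
	}
}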
- This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -7656,15 +7939,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7846,12 +8127,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that + details Glusterfs topology. type: string path: description: |- @@ -7930,7 +8209,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support @@ -8356,6 +8635,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. 
The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs + will be addressed to this signer. 
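Because the bundle layout is fixed (first block PRIVATE KEY, remaining blocks CERTIFICATE), client code can split a credential bundle with the standard library alone. A minimal sketch; the file path depends on where credentialBundlePath points inside the projected volume:

package certs

import (
	"encoding/pem"
	"fmt"
	"os"
)

// loadCredentialBundle reads a podCertificate credential bundle in a single
// read and splits it into the PKCS#8 private key (first PEM block) and the
// DER certificate chain (all following CERTIFICATE blocks).
func loadCredentialBundle(path string) (keyDER []byte, chainDER [][]byte, err error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, nil, err
	}
	rest := data
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		switch block.Type {
		case "PRIVATE KEY":
			if keyDER == nil {
				keyDER = block.Bytes
			}
		case "CERTIFICATE":
			chainDER = append(chainDER, block.Bytes)
		}
	}
	if keyDER == nil || len(chainDER) == 0 {
		return nil, nil, fmt.Errorf("credential bundle %q is missing a key or certificate chain", path)
	}
	return keyDER, chainDER, nil
}

Reading the file once keeps the key and chain mutually consistent across rotations, which is exactly why the bundle format is recommended over separate keyPath and certificateChainPath files.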
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8490,7 +8874,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/go.mod b/go.mod index cc5d542f8a..3809f63e50 100644 --- a/go.mod +++ b/go.mod @@ -38,11 +38,11 @@ require ( golang.org/x/term v0.34.0 google.golang.org/grpc v1.75.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.33.4 - k8s.io/apiextensions-apiserver v0.33.4 - k8s.io/apimachinery v0.33.4 - k8s.io/cli-runtime v0.33.4 - k8s.io/client-go v0.33.4 + k8s.io/api v0.34.0 + k8s.io/apiextensions-apiserver v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/cli-runtime v0.34.0 + k8s.io/client-go v0.34.0 k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/yaml v1.6.0 @@ -53,10 +53,10 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/fatih/color v1.18.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.8.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -65,7 +65,7 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect @@ -83,12 +83,13 @@ require ( github.com/moby/spdystream v0.5.0 // indirect github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.65.0 // indirect github.com/prometheus/procfs v0.16.1 // indirect @@ -112,10 +113,10 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/kustomize/api v0.19.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml 
v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/go.sum b/go.sum index c6927d421f..5a4a4d3240 100644 --- a/go.sum +++ b/go.sum @@ -31,18 +31,18 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= -github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= -github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= @@ -65,9 +65,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod 
h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -135,8 +134,9 @@ github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFL github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -289,35 +289,33 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= -k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= -k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU= -k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs= -k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= -k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/cli-runtime v0.33.4 h1:V8NSxGfh24XzZVhXmIGzsApdBpGq0RQS2u/Fz1GvJwk= -k8s.io/cli-runtime v0.33.4/go.mod h1:V+ilyokfqjT5OI+XE+O515K7jihtr0/uncwoyVqXaIU= -k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= -k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff 
h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= -sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= -sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= -sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= From e050af674d8f42f1c5ef3e8ff4fb5fd9de4b4d72 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 29 Aug 2025 10:24:26 +0200 Subject: [PATCH 817/836] chore(deps): update google-github-actions/auth action to v3 (main) (#8467) --- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/k8s-versions-check.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index edc405100c..f6336d54c3 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1660,7 +1660,7 @@ jobs: - name: Authenticate to Google Cloud id: 'auth' - uses: google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f # v2 + uses: google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093 # v3 with: credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}' - diff --git 
a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index f2fccef324..e092f268b2 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -61,7 +61,7 @@ jobs: if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks' - name: 'Auth GKE' - uses: 'google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f' # v2 + uses: 'google-github-actions/auth@7c6bc770dae815cd3e89ee6cdf493a5fab2cc093' # v3 with: credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}' if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' From 0ee4b72518d61750c808a5bcf96dc8228a4cb42a Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Fri, 29 Aug 2025 10:52:22 +0200 Subject: [PATCH 818/836] refactor(backup_controller): simplify flow (#8320) Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Co-authored-by: Marco Nenciarini --- api/v1/backup_funcs.go | 10 + internal/controller/backup_controller.go | 424 ++++++++++++++--------- internal/controller/cluster_create.go | 2 +- 3 files changed, 265 insertions(+), 171 deletions(-) diff --git a/api/v1/backup_funcs.go b/api/v1/backup_funcs.go index 1e08d97868..cdce7b6de3 100644 --- a/api/v1/backup_funcs.go +++ b/api/v1/backup_funcs.go @@ -292,3 +292,13 @@ func (backup *Backup) EnsureGVKIsPresent() { func (configuration *BackupPluginConfiguration) IsEmpty() bool { return configuration == nil || len(configuration.Name) == 0 } + +// IsManagedByInstance returns true if the backup is managed by the instance manager +func (b BackupMethod) IsManagedByInstance() bool { + return b == BackupMethodPlugin || b == BackupMethodBarmanObjectStore +} + +// IsManagedByOperator returns true if the backup is managed by the operator +func (b BackupMethod) IsManagedByOperator() bool { + return b == BackupMethodVolumeSnapshot +} diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index 5dec2c29e7..e96e278d03 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -64,17 +64,7 @@ const backupPhase = ".status.phase" // clusterName indicates the path inside the Backup kind // where the name of the cluster is written -const clusterName = ".spec.cluster.name" - -// getIsRunningResult gets the result that is returned to periodically -// check for running backups. -// This is particularly important when the target Pod is destroyed -// or stops responding. 
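The two BackupMethod helpers added in backup_funcs.go above define who drives each backup method: barman object store and plugin backups run in the instance manager, while volume snapshots are driven by the operator. A small self-contained sketch of the resulting partition, using the method constants and import alias that appear in this patch:

package main

import (
	"fmt"

	apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
)

func main() {
	// The exhaustive mapping is defined by IsManagedByInstance and
	// IsManagedByOperator; this loop only prints it for illustration.
	for _, m := range []apiv1.BackupMethod{
		apiv1.BackupMethodBarmanObjectStore,
		apiv1.BackupMethodPlugin,
		apiv1.BackupMethodVolumeSnapshot,
	} {
		fmt.Printf("%-20s managedByInstance=%v managedByOperator=%v\n",
			m, m.IsManagedByInstance(), m.IsManagedByOperator())
	}
}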
-// -// This result should be used almost always when a backup is running -func getIsRunningResult() ctrl.Result { - return ctrl.Result{RequeueAfter: 10 * time.Minute} -} +const clusterNameField = ".spec.cluster.name" // BackupReconciler reconciles a Backup object type BackupReconciler struct { @@ -86,6 +76,7 @@ type BackupReconciler struct { Plugins repository.Interface instanceStatusClient remote.InstanceClient + vsr *volumesnapshot.Reconciler } // NewBackupReconciler properly initializes the BackupReconciler @@ -94,13 +85,16 @@ func NewBackupReconciler( discoveryClient *discovery.DiscoveryClient, plugins repository.Interface, ) *BackupReconciler { + cli := mgr.GetClient() + recorder := mgr.GetEventRecorderFor("cloudnative-pg-backup") return &BackupReconciler{ - Client: mgr.GetClient(), + Client: cli, DiscoveryClient: discoveryClient, Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("cloudnative-pg-backup"), + Recorder: recorder, instanceStatusClient: remote.NewClient().Instance(), Plugins: plugins, + vsr: volumesnapshot.NewReconcilerBuilder(cli, recorder).Build(), } } @@ -113,7 +107,6 @@ func NewBackupReconciler( // +kubebuilder:rbac:groups="",resources=pods,verbs=get // Reconcile is the main reconciliation loop -// nolint: gocognit,gocyclo func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { contextLogger, ctx := log.SetupLogger(ctx) contextLogger.Debug(fmt.Sprintf("reconciling object %#q", req.NamespacedName)) @@ -131,39 +124,21 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, nil } - clusterName := backup.Spec.Cluster.Name var cluster apiv1.Cluster - if err := r.Get(ctx, client.ObjectKey{ - Namespace: backup.Namespace, - Name: clusterName, - }, &cluster); err != nil { - if apierrs.IsNotFound(err) { - r.Recorder.Eventf(&backup, "Warning", "FindingCluster", - "Unknown cluster %v, will retry in 30 seconds", clusterName) - return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + if res, err := r.getCluster(ctx, &backup, &cluster); err != nil || res != nil { + if res != nil { + return *res, err } - - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, nil, - fmt.Errorf("while getting cluster %s: %w", clusterName, err)) - r.Recorder.Eventf(&backup, "Warning", "FindingCluster", - "Error getting cluster %v, will not retry: %s", clusterName, err.Error()) - return ctrl.Result{}, nil + return ctrl.Result{}, err } - if backup.Spec.Method == apiv1.BackupMethodPlugin && len(cluster.Spec.Plugins) == 0 { - message := "cannot proceed with the backup as the cluster has no plugin configured" - contextLogger.Warning(message) - r.Recorder.Event(&backup, "Warning", "ClusterHasNoBackupExecutorPlugin", message) - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) - return ctrl.Result{}, nil - } + ctx = cluster.SetInContext(ctx) - if backup.Spec.Method != apiv1.BackupMethodPlugin && cluster.Spec.Backup == nil { - message := "cannot proceed with the backup as the cluster has no backup section" - contextLogger.Warning(message) - r.Recorder.Event(&backup, "Warning", "ClusterHasBackupConfigured", message) - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) - return ctrl.Result{}, nil + if res, err := r.checkPrerequisites(ctx, backup, cluster); err != nil || res != nil { + if res != nil { + return *res, err + } + return ctrl.Result{}, err } // Load the required plugins @@ -181,23 +156,8 @@ func (r 
*BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr }() ctx = cnpgiClient.SetPluginClientInContext(ctx, pluginClient) - ctx = cluster.SetInContext(ctx) - // Plugin pre-hooks - if hookResult := preReconcilePluginHooks(ctx, &cluster, &backup); hookResult.StopReconciliation { - return hookResult.Result, hookResult.Err - } - - // This check is still needed for when the backup resource creation is forced through the webhook - if backup.Spec.Method == apiv1.BackupMethodVolumeSnapshot && !utils.HaveVolumeSnapshot() { - message := "cannot proceed with the backup as the Kubernetes cluster has no VolumeSnapshot support" - contextLogger.Warning(message) - r.Recorder.Event(&backup, "Warning", "ClusterHasNoVolumeSnapshotCRD", message) - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) - return ctrl.Result{}, nil - } - - contextLogger.Debug("Found cluster for backup", "cluster", clusterName) + contextLogger.Debug("Found cluster for backup", "cluster", cluster.Name) // Store in the context the TLS configuration required communicating with the Pods ctx, err = certs.NewTLSConfigForContext( @@ -209,122 +169,236 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, err } - isRunning, err := r.isValidBackupRunning(ctx, &backup, &cluster) + // preflight checks that AREN'T formal. + // We ask questions like: "are there other backups running?", "is the current backup running?", + // "is the target instance healthy?" + if res, err := r.waitIfOtherBackupsRunning(ctx, &backup, &cluster); err != nil || !res.IsZero() { + return res, err + } + isRunning, err := r.isCurrentBackupRunning(ctx, backup, cluster) if err != nil { - contextLogger.Error(err, "while running isValidBackupRunning") return ctrl.Result{}, err } - if isRunning && backup.GetOnlineOrDefault(&cluster) { - if err := r.ensureTargetPodHealthy(ctx, r.Client, &backup, &cluster); err != nil { - contextLogger.Error(err, "while ensuring target pod is healthy") - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, nil, - fmt.Errorf("while ensuring target pod is healthy: %w", err)) - r.Recorder.Eventf(&backup, "Warning", "TargetPodNotHealthy", - "Error ensuring target pod is healthy: %s", err.Error()) - // this ensures that we will retry in case of errors - // if everything was flagged correctly we will not come back again in this state - return ctrl.Result{RequeueAfter: 30 * time.Second}, nil - } + if hookResult := preReconcilePluginHooks(ctx, &cluster, &backup); hookResult.StopReconciliation { + return hookResult.Result, hookResult.Err } - if backup.Spec.Method == apiv1.BackupMethodBarmanObjectStore { - if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, - errors.New("no barmanObjectStore section defined on the target cluster")) - return ctrl.Result{}, nil + // When the instance manager is working we have to wait for it to finish + if isRunning && backup.Spec.Method.IsManagedByInstance() { + return ctrl.Result{RequeueAfter: 10 * time.Minute}, nil + } + + switch { + case backup.Spec.Method.IsManagedByInstance(): + res, err := r.startBackupManagedByInstance(ctx, cluster, backup) + if err != nil { + return ctrl.Result{}, err + } + if res != nil { + return *res, nil + } + case backup.Spec.Method.IsManagedByOperator(): + res, err := r.reconcileSnapshotBackup(ctx, &cluster, &backup) + if err != nil { + return ctrl.Result{}, err + } + if res 
!= nil { + return *res, nil } + default: + return ctrl.Result{}, fmt.Errorf("unrecognized method: %s", backup.Spec.Method) + } + + // plugin post hooks + contextLogger.Debug(fmt.Sprintf("object %#q has been reconciled", req.NamespacedName)) - if isRunning { - return getIsRunningResult(), nil + hookResult := postReconcilePluginHooks(ctx, &cluster, &backup) + return hookResult.Result, hookResult.Err +} + +func (r *BackupReconciler) startBackupManagedByInstance( + ctx context.Context, + cluster apiv1.Cluster, + backup apiv1.Backup, +) (*ctrl.Result, error) { + contextLogger, ctx := log.SetupLogger(ctx) + + origBackup := backup.DeepCopy() + + // If no good running backups are found we elect a pod for the backup + pod, err := r.getBackupTargetPod(ctx, &cluster, &backup) + if apierrs.IsNotFound(err) { + r.Recorder.Eventf(&backup, "Warning", "FindingPod", + "Couldn't find target pod %s, will retry in 30 seconds", cluster.Status.TargetPrimary) + contextLogger.Info("Couldn't find target pod, will retry in 30 seconds", "target", + cluster.Status.TargetPrimary) + backup.Status.Phase = apiv1.BackupPhasePending + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)); err != nil { + return nil, err } + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } - r.Recorder.Eventf(&backup, "Normal", "Starting", - "Starting backup for cluster %v", cluster.Name) + if err != nil { + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, fmt.Errorf("while getting pod: %w", err)) + r.Recorder.Eventf(&backup, "Warning", "FindingPod", "Error getting target pod: %s", + cluster.Status.TargetPrimary) + return &ctrl.Result{}, nil } - if backup.Spec.Method == apiv1.BackupMethodPlugin { - if isRunning { - return getIsRunningResult(), nil + contextLogger.Debug("Found pod for backup", "pod", pod.Name) + + if !utils.IsPodReady(*pod) { + contextLogger.Info("Backup target is not ready, will retry in 30 seconds", "target", pod.Name) + backup.Status.Phase = apiv1.BackupPhasePending + r.Recorder.Eventf(&backup, "Warning", "BackupPending", "Backup target pod not ready: %s", + cluster.Status.TargetPrimary) + if err := r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)); err != nil { + return nil, err } + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } - r.Recorder.Eventf(&backup, "Normal", "Starting", - "Starting backup for cluster %v", cluster.Name) + contextLogger.Info("Starting backup", + "cluster", cluster.Name, + "pod", pod.Name) + + r.Recorder.Eventf(&backup, "Normal", "Starting", + "Starting backup for cluster %v", cluster.Name) + + // This backup can be started + if err := startInstanceManagerBackup(ctx, r.Client, &backup, pod, &cluster); err != nil { + r.Recorder.Eventf(&backup, "Warning", "Error", "Backup exit with error %v", err) + _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, + fmt.Errorf("encountered an error while taking the backup: %w", err)) + return &ctrl.Result{}, nil } + return nil, nil +} - origBackup := backup.DeepCopy() +func (r *BackupReconciler) isCurrentBackupRunning( + ctx context.Context, + backup apiv1.Backup, + cluster apiv1.Cluster, +) (bool, error) { + contextLogger := log.FromContext(ctx) - // From now on, we differentiate backups managed by the instance manager (barman and plugins) - // from the ones managed directly by the operator (VolumeSnapshot) + isRunning, err := r.isValidBackupRunning(ctx, &backup, &cluster) + if err != nil { + contextLogger.Error(err, "while running isValidBackupRunning") + return false, 
err + } + if !isRunning { + return false, nil + } - switch backup.Spec.Method { - case apiv1.BackupMethodBarmanObjectStore, apiv1.BackupMethodPlugin: - // If no good running backups are found we elect a pod for the backup - pod, err := r.getBackupTargetPod(ctx, &cluster, &backup) - if apierrs.IsNotFound(err) { - r.Recorder.Eventf(&backup, "Warning", "FindingPod", - "Couldn't find target pod %s, will retry in 30 seconds", cluster.Status.TargetPrimary) - contextLogger.Info("Couldn't find target pod, will retry in 30 seconds", "target", - cluster.Status.TargetPrimary) - backup.Status.Phase = apiv1.BackupPhasePending - if err := r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{RequeueAfter: 30 * time.Second}, nil - } - if err != nil { - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, fmt.Errorf("while getting pod: %w", err)) - r.Recorder.Eventf(&backup, "Warning", "FindingPod", "Error getting target pod: %s", - cluster.Status.TargetPrimary) - return ctrl.Result{}, nil - } - contextLogger.Debug("Found pod for backup", "pod", pod.Name) - - if !utils.IsPodReady(*pod) { - contextLogger.Info("Backup target is not ready, will retry in 30 seconds", "target", pod.Name) - backup.Status.Phase = apiv1.BackupPhasePending - r.Recorder.Eventf(&backup, "Warning", "BackupPending", "Backup target pod not ready: %s", - cluster.Status.TargetPrimary) - if err := r.Status().Patch(ctx, &backup, client.MergeFrom(origBackup)); err != nil { - return ctrl.Result{}, err + if backup.GetOnlineOrDefault(&cluster) { + if err := r.ensureTargetPodHealthy(ctx, r.Client, &backup, &cluster); err != nil { + contextLogger.Error(err, "while ensuring target pod is healthy") + + if flagErr := resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, nil, + fmt.Errorf("while ensuring target pod is healthy: %w", err)); flagErr != nil { + contextLogger.Error(flagErr, "while flagging backup as failed, retrying...") + return false, flagErr } - return ctrl.Result{RequeueAfter: 30 * time.Second}, nil + + r.Recorder.Eventf(&backup, "Warning", "TargetPodNotHealthy", + "Error ensuring target pod is healthy: %s", err.Error()) + + return false, fmt.Errorf("interrupting backup as target pod is not healthy: %w", err) } + } - contextLogger.Info("Starting backup", - "cluster", cluster.Name, - "pod", pod.Name) + return true, nil +} - // This backup can be started - if err := startInstanceManagerBackup(ctx, r.Client, &backup, pod, &cluster); err != nil { - r.Recorder.Eventf(&backup, "Warning", "Error", "Backup exit with error %v", err) - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, - fmt.Errorf("encountered an error while taking the backup: %w", err)) - return ctrl.Result{}, nil +// checkPrerequisites checks that the backup and cluster spec are FORMALLY valid and the kubernetes cluster supports +// the chosen backup method. +// These checks cannot be executed in the webhook given that we cannot fetch the cluster. 
+func (r *BackupReconciler) checkPrerequisites( + ctx context.Context, + backup apiv1.Backup, + cluster apiv1.Cluster, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + flagMissingPrerequisite := func(message string, reason string) (*ctrl.Result, error) { + contextLogger.Warning(message) + r.Recorder.Event(&backup, "Warning", reason, message) + err := resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) + return &ctrl.Result{}, err + } + + if cluster.Spec.Backup == nil { + const message = "cannot proceed with the backup as the cluster has no backup section" + return flagMissingPrerequisite(message, "ClusterHasBackupConfigured") + } + + if backup.Spec.Method == apiv1.BackupMethodPlugin { + if len(cluster.Spec.Plugins) == 0 { + const message = "cannot proceed with the backup as the cluster has no plugin configured" + return flagMissingPrerequisite(message, "ClusterHasNoBackupExecutorPlugin") } - case apiv1.BackupMethodVolumeSnapshot: - if cluster.Spec.Backup == nil || cluster.Spec.Backup.VolumeSnapshot == nil { - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, - errors.New("no volumeSnapshot section defined on the target cluster")) - return ctrl.Result{}, nil + } + + if backup.Spec.Method == apiv1.BackupMethodVolumeSnapshot { + // This check is still needed for when the backup resource creation is forced through the webhook + if !utils.HaveVolumeSnapshot() { + const message = "cannot proceed with the backup as the Kubernetes cluster has no VolumeSnapshot support" + return flagMissingPrerequisite(message, "ClusterHasNoVolumeSnapshotCRD") } - res, err := r.reconcileSnapshotBackup(ctx, &cluster, &backup) - if err != nil { - return ctrl.Result{}, err + if cluster.Spec.Backup.VolumeSnapshot == nil { + const message = "no volumeSnapshot section defined on the target cluster" + return flagMissingPrerequisite(message, "ClusterHasNoVolumeSnapshotSection") } - if res != nil { - return *res, nil + } + + if backup.Spec.Method == apiv1.BackupMethodBarmanObjectStore { + if cluster.Spec.Backup.BarmanObjectStore == nil { + const message = "no barmanObjectStore section defined on the target cluster" + return flagMissingPrerequisite(message, "ClusterHasNoBarmanSection") } - default: - return ctrl.Result{}, fmt.Errorf("unrecognized method: %s", backup.Spec.Method) } - // plugin post hooks - contextLogger.Debug(fmt.Sprintf("object %#q has been reconciled", req.NamespacedName)) + return nil, nil +} - hookResult := postReconcilePluginHooks(ctx, &cluster, &backup) - return hookResult.Result, hookResult.Err +func (r *BackupReconciler) getCluster( + ctx context.Context, + backup *apiv1.Backup, + cluster *apiv1.Cluster, +) (*ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + clusterName := backup.Spec.Cluster.Name + err := r.Get(ctx, client.ObjectKey{ + Namespace: backup.Namespace, + Name: clusterName, + }, cluster) + if err == nil { + return nil, nil + } + + if apierrs.IsNotFound(err) { + r.Recorder.Eventf(backup, "Warning", "FindingCluster", + "Unknown cluster %v, will retry in 30 seconds", clusterName) + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil + } + + contextLogger.Error(err, "error getting cluster, proceeding to flag backup as failed.") + + if flagErr := resourcestatus.FlagBackupAsFailed(ctx, r.Client, backup, nil, + fmt.Errorf("while getting cluster %s: %w", clusterName, err)); flagErr != nil { + contextLogger.Error(flagErr, "while flagging backup as failed, retrying...") + return nil, flagErr + } + + 
r.Recorder.Eventf(backup, "Warning", "FindingCluster", + "Error getting cluster %v, will not retry: %s", clusterName, err.Error()) + + return &ctrl.Result{}, nil } func (r *BackupReconciler) isValidBackupRunning( @@ -421,6 +495,7 @@ func (r *BackupReconciler) reconcileSnapshotBackup( "target", cluster.Status.TargetPrimary, ) + // TODO: shouldn't this be a failed backup? origBackup := backup.DeepCopy() backup.Status.Phase = apiv1.BackupPhasePending if err := r.Patch(ctx, backup, client.MergeFrom(origBackup)); err != nil { @@ -430,7 +505,10 @@ func (r *BackupReconciler) reconcileSnapshotBackup( return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil } if err != nil { - _ = resourcestatus.FlagBackupAsFailed(ctx, r.Client, backup, cluster, fmt.Errorf("while getting pod: %w", err)) + messageErr := fmt.Errorf("while getting pod: %w", err) + if flagErr := resourcestatus.FlagBackupAsFailed(ctx, r.Client, backup, cluster, messageErr); flagErr != nil { + return nil, fmt.Errorf("while flagging backup as failed: %w", flagErr) + } r.Recorder.Eventf(backup, "Warning", "FindingPod", "Error getting target pod: %s", cluster.Status.TargetPrimary) return &ctrl.Result{}, nil @@ -438,25 +516,6 @@ func (r *BackupReconciler) reconcileSnapshotBackup( ctx = log.IntoContext(ctx, contextLogger.WithValues("targetPodName", targetPod.Name)) - // Validate we don't have other running backups - var clusterBackups apiv1.BackupList - if err := r.List( - ctx, - &clusterBackups, - client.InNamespace(backup.GetNamespace()), - client.MatchingFields{clusterName: cluster.Name}, - ); err != nil { - return nil, err - } - - if !clusterBackups.CanExecuteBackup(backup.Name) { - contextLogger.Info( - "A backup is already in progress or waiting to be started, retrying", - "targetBackup", backup.Name, - ) - return &ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } - if !utils.PodHasContainerStatuses(*targetPod) { return nil, fmt.Errorf("target pod lacks container statuses") } @@ -490,11 +549,7 @@ func (r *BackupReconciler) reconcileSnapshotBackup( return nil, fmt.Errorf("cannot get PVCs: %w", err) } - reconciler := volumesnapshot. - NewReconcilerBuilder(r.Client, r.Recorder). 
- Build() - - res, err := reconciler.Reconcile(ctx, cluster, backup, targetPod, pvcs) + res, err := r.vsr.Reconcile(ctx, cluster, backup, targetPod, pvcs) if err != nil { // Volume Snapshot errors are not retryable, we need to set this backup as failed // and un-fence the Pod @@ -707,7 +762,7 @@ func (r *BackupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manage if err := mgr.GetFieldIndexer().IndexField( ctx, &apiv1.Backup{}, - clusterName, func(rawObj client.Object) []string { + clusterNameField, func(rawObj client.Object) []string { return []string{rawObj.(*apiv1.Backup).Spec.Cluster.Name} }); err != nil { return err @@ -777,3 +832,32 @@ func (r *BackupReconciler) ensureTargetPodHealthy( ) return nil } + +func (r *BackupReconciler) waitIfOtherBackupsRunning( + ctx context.Context, + backup *apiv1.Backup, + cluster *apiv1.Cluster, +) (ctrl.Result, error) { + contextLogger := log.FromContext(ctx) + + // Validate we don't have other running backups + var clusterBackups apiv1.BackupList + if err := r.List( + ctx, + &clusterBackups, + client.InNamespace(backup.GetNamespace()), + client.MatchingFields{clusterNameField: cluster.Name}, + ); err != nil { + return ctrl.Result{}, err + } + + if !clusterBackups.CanExecuteBackup(backup.Name) { + contextLogger.Info( + "A backup is already in progress or waiting to be started, retrying", + "targetBackup", backup.Name, + ) + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + return ctrl.Result{}, nil +} diff --git a/internal/controller/cluster_create.go b/internal/controller/cluster_create.go index c62bab33f6..c7f1bb53a7 100644 --- a/internal/controller/cluster_create.go +++ b/internal/controller/cluster_create.go @@ -1237,7 +1237,7 @@ func (r *ClusterReconciler) joinReplicaInstance( var backupList apiv1.BackupList if err := r.List(ctx, &backupList, - client.MatchingFields{clusterName: cluster.Name}, + client.MatchingFields{clusterNameField: cluster.Name}, client.InNamespace(cluster.Namespace), ); err != nil { contextLogger.Error(err, "Error while getting backup list, when bootstrapping a new replica") From e6d19c87611d18109652aac2696d09db5e316276 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 29 Aug 2025 11:08:09 +0200 Subject: [PATCH 819/836] chore(deps): update google-github-actions/setup-gcloud action to v3 (main) (#8471) --- .github/workflows/continuous-delivery.yml | 2 +- .github/workflows/k8s-versions-check.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml index f6336d54c3..6cb8f9aa6b 100644 --- a/.github/workflows/continuous-delivery.yml +++ b/.github/workflows/continuous-delivery.yml @@ -1665,7 +1665,7 @@ jobs: credentials_json: '${{ secrets.GCP_SERVICE_ACCOUNT }}' - name: Set up Cloud SDK and kubectl - uses: google-github-actions/setup-gcloud@cb1e50a9932213ecece00a606661ae9ca44f3397 # v2 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3 with: project_id: ${{ secrets.GCP_PROJECT_ID }} install_components: 'kubectl,gke-gcloud-auth-plugin' diff --git a/.github/workflows/k8s-versions-check.yml b/.github/workflows/k8s-versions-check.yml index e092f268b2..7da5b855e6 100644 --- a/.github/workflows/k8s-versions-check.yml +++ b/.github/workflows/k8s-versions-check.yml @@ -67,7 +67,7 @@ jobs: if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' - name: Set up Cloud SDK for GKE - uses: 
google-github-actions/setup-gcloud@cb1e50a9932213ecece00a606661ae9ca44f3397 # v2 + uses: google-github-actions/setup-gcloud@aa5489c8933f4cc7a4f7d45035b3b1440c9c10db # v3 with: project_id: ${{ secrets.GCP_PROJECT_ID }} if: github.event.inputs.limit == null || github.event.inputs.limit == 'gke' From e204969eeca6c86126c925ff4421ce13e0925c30 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 29 Aug 2025 13:28:42 +0200 Subject: [PATCH 820/836] fix(deps): update module github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring to v0.85.0 (main) (#8411) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3809f63e50..20b36f46cb 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/mitchellh/go-ps v1.0.0 github.com/onsi/ginkgo/v2 v2.25.1 github.com/onsi/gomega v1.38.2 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.85.0 github.com/prometheus/client_golang v1.23.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 diff --git a/go.sum b/go.sum index 5a4a4d3240..eb7fbce532 100644 --- a/go.sum +++ b/go.sum @@ -155,8 +155,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1 h1:NEQAo0Cl1gf9sJ3oI1QzczS3BF6ySvSSH36mwOZDuhI= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.84.1/go.mod h1:MruMqbSS9aYrKhBImrO9X9g52hwz3I0B+tcoeAwkmuM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.85.0 h1:oY+F5FZFmCjCyzkHWPjVQpzvnvEB/0FP+iyzDUUlqFc= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.85.0/go.mod h1:VB7wtBmDT6W2RJHzsvPZlBId+EnmeQA0d33fFTXvraM= github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= From 5cfb1769cd087394151cdb764415363e3bd8570f Mon Sep 17 00:00:00 2001 From: Peggie Date: Fri, 29 Aug 2025 14:41:27 +0200 Subject: [PATCH 821/836] test: Updated Postgres versions used in E2E tests (#8373) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. 
Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 9a98e449fe..4c40dcda6e 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "18": [ "18beta2", - "18beta2-4" + "18beta2-6" ], "17": [ "17.5", From 51685216a69b0f2591b4eec6c76d9fcabdb5223d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 30 Aug 2025 11:22:24 +0200 Subject: [PATCH 822/836] fix(deps): update module github.com/onsi/ginkgo/v2 to v2.25.2 (main) (#8477) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 20b36f46cb..fc4ba26627 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/lib/pq v1.10.9 github.com/logrusorgru/aurora/v4 v4.0.0 github.com/mitchellh/go-ps v1.0.0 - github.com/onsi/ginkgo/v2 v2.25.1 + github.com/onsi/ginkgo/v2 v2.25.2 github.com/onsi/gomega v1.38.2 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.85.0 github.com/prometheus/client_golang v1.23.0 diff --git a/go.sum b/go.sum index eb7fbce532..54b0fb63b8 100644 --- a/go.sum +++ b/go.sum @@ -143,8 +143,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.25.1 h1:Fwp6crTREKM+oA6Cz4MsO8RhKQzs2/gOIVOUscMAfZY= -github.com/onsi/ginkgo/v2 v2.25.1/go.mod h1:ppTWQ1dh9KM/F1XgpeRqelR+zHVwV81DGRSDnFxK7Sk= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= From 01543d17f8c6bcdd45a1ca14670ea24e0e0d0e45 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 31 Aug 2025 13:22:14 +0200 Subject: [PATCH 823/836] fix(deps): update module sigs.k8s.io/controller-runtime to v0.22.0 (main) (#8459) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fc4ba26627..8ff1e7aecb 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( k8s.io/cli-runtime v0.34.0 k8s.io/client-go v0.34.0 k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d - sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/controller-runtime v0.22.0 sigs.k8s.io/yaml v1.6.0 ) diff --git a/go.sum b/go.sum index 54b0fb63b8..3e16321168 100644 --- a/go.sum +++ b/go.sum @@ -305,8 +305,8 @@ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOP k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 
-sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= -sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/controller-runtime v0.22.0 h1:mTOfibb8Hxwpx3xEkR56i7xSjB+nH4hZG37SrlCY5e0= +sigs.k8s.io/controller-runtime v0.22.0/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= From 8d15f48800a35b09a5a185288ef8034d2b4a83df Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 1 Sep 2025 08:41:49 +0200 Subject: [PATCH 824/836] chore(deps): update module sigs.k8s.io/controller-tools to v0.19.0 (main) (#8458) --- Makefile | 2 +- config/crd/bases/postgresql.cnpg.io_backups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_clusters.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_databases.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_poolers.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_publications.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml | 2 +- config/crd/bases/postgresql.cnpg.io_subscriptions.yaml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index ad7c822b8d..eb59750717 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions # renovate: datasource=github-releases depName=kubernetes-sigs/kustomize versioning=loose KUSTOMIZE_VERSION ?= v5.6.0 # renovate: datasource=go depName=sigs.k8s.io/controller-tools -CONTROLLER_TOOLS_VERSION ?= v0.18.0 +CONTROLLER_TOOLS_VERSION ?= v0.19.0 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca # renovate: datasource=go depName=github.com/goreleaser/goreleaser GORELEASER_VERSION ?= v2.11.2 diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index 2cffcd3aea..c371fcefcf 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: backups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml index c5358863e3..af9fbd84e4 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: clusterimagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 92725b23b0..ba89604e0a 100644 --- 
a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: clusters.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index d417b9b890..81c50608b2 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: databases.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml b/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml index 91da8c5817..738db5fc02 100644 --- a/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml +++ b/config/crd/bases/postgresql.cnpg.io_failoverquorums.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: failoverquorums.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml index cd869be0b8..fdea556ad5 100644 --- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: imagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index 11f05cc775..7b2b6d620b 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: poolers.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_publications.yaml b/config/crd/bases/postgresql.cnpg.io_publications.yaml index a468639eca..7bb6d3c11a 100644 --- a/config/crd/bases/postgresql.cnpg.io_publications.yaml +++ b/config/crd/bases/postgresql.cnpg.io_publications.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: publications.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml index 051567e4bd..aa6bc55005 100644 --- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: scheduledbackups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml index 72d7b72fe5..d5d0b7872d 100644 --- a/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml +++ b/config/crd/bases/postgresql.cnpg.io_subscriptions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.18.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: subscriptions.postgresql.cnpg.io spec: group: postgresql.cnpg.io From 6a2cfdc290cf0ea58c3527101dde5ea04d47ae86 Mon Sep 17 00:00:00 2001 From: Peggie Date: Mon, 1 Sep 2025 09:10:35 +0200 Subject: [PATCH 825/836] chore: refresh licenses directory (#8500) Refresh the licenses directory Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: license-updater --- .../pmezard/go-difflib/difflib/LICENSE | 27 +++++++++++++++++++ .../structured-merge-diff/{v4 => v6}/LICENSE | 0 2 files changed, 27 insertions(+) create mode 100644 licenses/go-licenses/github.com/pmezard/go-difflib/difflib/LICENSE rename licenses/go-licenses/sigs.k8s.io/structured-merge-diff/{v4 => v6}/LICENSE (100%) diff --git a/licenses/go-licenses/github.com/pmezard/go-difflib/difflib/LICENSE b/licenses/go-licenses/github.com/pmezard/go-difflib/difflib/LICENSE new file mode 100644 index 0000000000..c67dad612a --- /dev/null +++ b/licenses/go-licenses/github.com/pmezard/go-difflib/difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/licenses/go-licenses/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/licenses/go-licenses/sigs.k8s.io/structured-merge-diff/v6/LICENSE
similarity index 100%
rename from licenses/go-licenses/sigs.k8s.io/structured-merge-diff/v4/LICENSE
rename to licenses/go-licenses/sigs.k8s.io/structured-merge-diff/v6/LICENSE

From ee53e297d262ad287a2e10ac71c4bcafc22bd7e9 Mon Sep 17 00:00:00 2001
From: Pascal Bourdier
Date: Tue, 2 Sep 2025 09:49:46 +0200
Subject: [PATCH 826/836] chore: add usestdlibvars linter (#7826)

Add `usestdlibvars` linter to the list of golangci linters
Fix issues thrown by the new linter

Closes #8484

Signed-off-by: Pascal Bourdier
---
 .golangci.yml | 1 +
 internal/cmd/manager/backup/cmd.go | 2 +-
 internal/cmd/manager/instance/status/cmd.go | 2 +-
 .../postgres/webserver/client/remote/backup.go | 6 +++---
 .../postgres/webserver/client/remote/instance.go | 12 ++++++------
 5 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 59f0bd4d11..896adb3cc2 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -38,6 +38,7 @@ linters:
 - unconvert
 - unparam
 - unused
+ - usestdlibvars
 - wastedassign
 - whitespace
 settings:
diff --git a/internal/cmd/manager/backup/cmd.go b/internal/cmd/manager/backup/cmd.go
index c39e0de0da..f74bd8ab23 100644
--- a/internal/cmd/manager/backup/cmd.go
+++ b/internal/cmd/manager/backup/cmd.go
@@ -64,7 +64,7 @@ func NewCmd() *cobra.Command {
 return err
 }
- if resp.StatusCode != 200 {
+ if resp.StatusCode != http.StatusOK {
 contextLogger.Info(
 "Error while requesting backup",
 "backupURL", backupURL,
diff --git a/internal/cmd/manager/instance/status/cmd.go b/internal/cmd/manager/instance/status/cmd.go
index b23f88751d..e3a4792f3f 100644
--- a/internal/cmd/manager/instance/status/cmd.go
+++ b/internal/cmd/manager/instance/status/cmd.go
@@ -101,7 +101,7 @@ func statusSubCommand(ctx context.Context) error {
 return err
 }
- if resp.StatusCode != 200 {
+ if resp.StatusCode != http.StatusOK {
 contextLogger.Info(
 "Error while extracting status",
 "statusCode", resp.StatusCode,
diff --git a/pkg/management/postgres/webserver/client/remote/backup.go b/pkg/management/postgres/webserver/client/remote/backup.go
index af8a98b92d..d023d228ac 100644
--- a/pkg/management/postgres/webserver/client/remote/backup.go
+++ b/pkg/management/postgres/webserver/client/remote/backup.go
@@ -60,7 +60,7 @@ func (c *backupClientImpl) StatusWithErrors(
 ) (*webserver.Response[webserver.BackupResultData], error) {
 scheme := GetStatusSchemeFromPod(pod)
 httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgModeBackup, url.StatusPort)
- req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, httpURL, nil)
 if err != nil {
 return nil, err
 }
@@ -83,7 +83,7 @@ func (c *backupClientImpl) Start(
 return nil, fmt.Errorf("failed to marshal start payload: %w", err)
 }
- req, err := http.NewRequestWithContext(ctx, "POST", httpURL, bytes.NewReader(jsonBody))
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, httpURL, bytes.NewReader(jsonBody))
 if err != nil {
 return nil, err
 }
@@ -106,7 +106,7 @@ func (c *backupClientImpl) Stop(
 return nil, fmt.Errorf("failed to marshal stop payload: %w", err)
 }
- req, err := http.NewRequestWithContext(ctx, "PUT", httpURL, bytes.NewReader(jsonBody))
+ req, err := http.NewRequestWithContext(ctx, http.MethodPut, httpURL, bytes.NewReader(jsonBody))
 if err != nil {
 return nil, err
 }
diff --git 
a/pkg/management/postgres/webserver/client/remote/instance.go b/pkg/management/postgres/webserver/client/remote/instance.go index 913ca19d9f..40b238fbe9 100644 --- a/pkg/management/postgres/webserver/client/remote/instance.go +++ b/pkg/management/postgres/webserver/client/remote/instance.go @@ -194,7 +194,7 @@ func (r *instanceClientImpl) GetPgControlDataFromInstance( scheme := GetStatusSchemeFromPod(pod) httpURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPGControlData, url.StatusPort) - req, err := http.NewRequestWithContext(ctx, "GET", httpURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, httpURL, nil) if err != nil { return "", err } @@ -215,7 +215,7 @@ func (r *instanceClientImpl) GetPgControlDataFromInstance( return "", err } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return "", &StatusError{StatusCode: resp.StatusCode, Body: string(body)} } @@ -301,7 +301,7 @@ func (r *instanceClientImpl) rawInstanceStatusRequest( ) (result postgres.PostgresqlStatus) { scheme := GetStatusSchemeFromPod(&pod) statusURL := url.Build(scheme.ToString(), pod.Status.PodIP, url.PathPgStatus, url.StatusPort) - req, err := http.NewRequestWithContext(ctx, "GET", statusURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, statusURL, nil) if err != nil { result.Error = err return result @@ -327,7 +327,7 @@ func (r *instanceClientImpl) rawInstanceStatusRequest( return result } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { result.Error = &StatusError{StatusCode: resp.StatusCode, Body: string(body)} return result } @@ -383,7 +383,7 @@ func (r *instanceClientImpl) ArchivePartialWAL(ctx context.Context, pod *corev1. statusURL := url.Build( GetStatusSchemeFromPod(pod).ToString(), pod.Status.PodIP, url.PathPgArchivePartial, url.StatusPort) - req, err := http.NewRequestWithContext(ctx, "POST", statusURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, statusURL, nil) if err != nil { return "", err } @@ -403,7 +403,7 @@ func (r *instanceClientImpl) ArchivePartialWAL(ctx context.Context, pod *corev1. return "", err } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return "", &StatusError{StatusCode: resp.StatusCode, Body: string(body)} } From 677554b7d1f92a98051997d9e18a4b2019942b6f Mon Sep 17 00:00:00 2001 From: "Jonathan Gonzalez V." Date: Tue, 2 Sep 2025 09:55:09 +0200 Subject: [PATCH 827/836] test: enable unit test on 1.34 (#8460) Closes #8457 Signed-off-by: Jonathan Gonzalez V. --- .github/ISSUE_TEMPLATE/bug.yml | 1 + .github/k8s_versions_scope.json | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 0abbc90ae1..6dbed05d9e 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -63,6 +63,7 @@ body: attributes: label: What version of Kubernetes are you using? 
options: + - "1.34" - "1.33" - "1.32" - "1.31" diff --git a/.github/k8s_versions_scope.json b/.github/k8s_versions_scope.json index 03d41581e4..b3cf9b61ec 100644 --- a/.github/k8s_versions_scope.json +++ b/.github/k8s_versions_scope.json @@ -6,5 +6,5 @@ "GKE": {"min": "1.29", "max": ""}, "OPENSHIFT": {"min": "4.16", "max": ""} }, - "unit_test": {"min": "1.29", "max": "1.33"} + "unit_test": {"min": "1.29", "max": "1.34"} } From e1a6c0a45a2546904816044c3ac077d7b6195720 Mon Sep 17 00:00:00 2001 From: Peggie Date: Tue, 2 Sep 2025 10:09:16 +0200 Subject: [PATCH 828/836] test: Updated Postgres versions used in E2E tests (#8499) Update the Postgres versions used in E2E tests Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: Jonathan Gonzalez V. Co-authored-by: postgres-versions-updater --- .github/pg_versions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pg_versions.json b/.github/pg_versions.json index 4c40dcda6e..7c8fd728b0 100644 --- a/.github/pg_versions.json +++ b/.github/pg_versions.json @@ -1,7 +1,7 @@ { "18": [ "18beta2", - "18beta2-6" + "18beta2-8" ], "17": [ "17.5", From 7ad3b43d6a98e2ae3bcf7393c589bebe6fcda57d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 3 Sep 2025 09:28:27 +0200 Subject: [PATCH 829/836] chore(deps): update dependency operator-framework/operator-registry to v1.57.0 (main) (#8489) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index eb59750717..1277c69ec5 100644 --- a/Makefile +++ b/Makefile @@ -65,7 +65,7 @@ WOKE_VERSION ?= 0.19.0 # renovate: datasource=github-releases depName=operator-framework/operator-sdk versioning=loose OPERATOR_SDK_VERSION ?= v1.41.1 # renovate: datasource=github-tags depName=operator-framework/operator-registry -OPM_VERSION ?= v1.56.0 +OPM_VERSION ?= v1.57.0 # renovate: datasource=github-tags depName=redhat-openshift-ecosystem/openshift-preflight PREFLIGHT_VERSION ?= 1.14.1 OPENSHIFT_VERSIONS ?= v4.12-v4.19 From 47d18016dd1dce6e72aa2a1c6e72bce4bd9e22cc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 3 Sep 2025 10:02:29 +0200 Subject: [PATCH 830/836] chore(deps): update github/codeql-action digest to 2d92b76 (main) (#8466) --- .github/workflows/codeql-analysis.yml | 4 ++-- .github/workflows/continuous-integration.yml | 2 +- .github/workflows/ossf_scorecard.yml | 2 +- .github/workflows/snyk.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9f82a82894..0d93c0ff29 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -76,7 +76,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 + uses: github/codeql-action/init@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 with: languages: "go" build-mode: manual @@ -93,6 +93,6 @@ jobs: make - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 + uses: github/codeql-action/analyze@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 with: category: "/language:go" diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 8fe52c4d17..0ef7e20e3d 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -617,7 +617,7 @@ jobs: args: --severity-threshold=high --file=Dockerfile - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 if: | !github.event.repository.fork && !github.event.pull_request.head.repo.fork diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml index d48b0410d2..be3454df15 100644 --- a/.github/workflows/ossf_scorecard.yml +++ b/.github/workflows/ossf_scorecard.yml @@ -74,6 +74,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 with: sarif_file: results.sarif diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml index 5122ce9a35..e64635fdab 100644 --- a/.github/workflows/snyk.yml +++ b/.github/workflows/snyk.yml @@ -30,7 +30,7 @@ jobs: args: --sarif-file-output=snyk-static.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 with: sarif_file: snyk-static.sarif @@ -43,6 +43,6 @@ jobs: args: --sarif-file-output=snyk-test.sarif - name: Upload result to GitHub Code Scanning - uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 + uses: github/codeql-action/upload-sarif@2d92b76c45b91eb80fc44c74ce3fce0ee94e8f9d # v3 with: sarif_file: snyk-test.sarif From d1b5dfbf8d446eb1d1d37b72e5186820f7b16ba5 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 3 Sep 2025 13:26:11 +0200 Subject: [PATCH 831/836] fix(backup): do not check Cluster `spec.backup` if method is `plugin` (#8486) This patch addresses a regression introduced by #8320 that does not affect any released version. The regression made it impossible to perform backups using a plugin, as the controller would incorrectly require and validate the `spec.backup` field even when the backup method was set to plugin. With this change, validating the `spec.backup` field is skipped when using plugin-based backups, restoring the expected behavior for plugin users. 
Signed-off-by: Armando Ruocco --- internal/controller/backup_controller.go | 13 ++-- internal/controller/backup_controller_test.go | 70 +++++++++++++++++++ 2 files changed, 77 insertions(+), 6 deletions(-) diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go index e96e278d03..ef14de30c2 100644 --- a/internal/controller/backup_controller.go +++ b/internal/controller/backup_controller.go @@ -329,17 +329,18 @@ func (r *BackupReconciler) checkPrerequisites( err := resourcestatus.FlagBackupAsFailed(ctx, r.Client, &backup, &cluster, errors.New(message)) return &ctrl.Result{}, err } - - if cluster.Spec.Backup == nil { - const message = "cannot proceed with the backup as the cluster has no backup section" - return flagMissingPrerequisite(message, "ClusterHasBackupConfigured") - } - if backup.Spec.Method == apiv1.BackupMethodPlugin { if len(cluster.Spec.Plugins) == 0 { const message = "cannot proceed with the backup as the cluster has no plugin configured" return flagMissingPrerequisite(message, "ClusterHasNoBackupExecutorPlugin") } + + return nil, nil + } + + if cluster.Spec.Backup == nil { + const message = "cannot proceed with the backup as the cluster has no backup section" + return flagMissingPrerequisite(message, "ClusterHasBackupConfigured") } if backup.Spec.Method == apiv1.BackupMethodVolumeSnapshot { diff --git a/internal/controller/backup_controller_test.go b/internal/controller/backup_controller_test.go index b5615a8958..c228bb573a 100644 --- a/internal/controller/backup_controller_test.go +++ b/internal/controller/backup_controller_test.go @@ -26,6 +26,7 @@ import ( volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -468,3 +469,72 @@ var _ = Describe("update snapshot backup metadata", func() { To(Equal(oneHourAgo)) }) }) + +var _ = Describe("checkPrerequisites for plugin backups", func() { + var env *testingEnvironment + BeforeEach(func() { env = buildTestEnvironment() }) + + It("allows plugin backups without cluster.spec.backup when a plugin is configured", func(ctx context.Context) { + ns := newFakeNamespace(env.client) + + cluster := newFakeCNPGCluster(env.client, ns, func(c *apiv1.Cluster) { + c.Spec.Backup = nil + c.Spec.Plugins = []apiv1.PluginConfiguration{{ + Name: "test", + Enabled: ptr.To(true), + Parameters: map[string]string{"key": "value"}, + }} + }) + + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{Name: "test-plugin-backup", Namespace: ns}, + Spec: apiv1.BackupSpec{ + Cluster: apiv1.LocalObjectReference{Name: cluster.Name}, + Method: apiv1.BackupMethodPlugin, + }, + } + // Create the backup so that status updates in prerequisites can patch it if needed + expectErr := env.client.Create(ctx, backup) + Expect(expectErr).ToNot(HaveOccurred()) + + res, err := env.backupReconciler.checkPrerequisites(ctx, *backup, *cluster) + Expect(err).ToNot(HaveOccurred()) + Expect(res).To(BeNil()) + + // Ensure backup was not marked as failed + var stored apiv1.Backup + expectErr = env.client.Get(ctx, client.ObjectKeyFromObject(backup), &stored) + Expect(expectErr).ToNot(HaveOccurred()) + Expect(stored.Status.Phase).To(BeEmpty()) + }) + + It("fails plugin backups when no plugin is configured on the cluster", func(ctx context.Context) { + ns := newFakeNamespace(env.client) + + cluster := 
newFakeCNPGCluster(env.client, ns, func(c *apiv1.Cluster) { + c.Spec.Backup = nil + c.Spec.Plugins = nil + }) + + backup := &apiv1.Backup{ + ObjectMeta: metav1.ObjectMeta{Name: "test-plugin-backup-missing", Namespace: ns}, + Spec: apiv1.BackupSpec{ + Cluster: apiv1.LocalObjectReference{Name: cluster.Name}, + Method: apiv1.BackupMethodPlugin, + }, + } + expectErr := env.client.Create(ctx, backup) + Expect(expectErr).ToNot(HaveOccurred()) + + res, err := env.backupReconciler.checkPrerequisites(ctx, *backup, *cluster) + // We expect the reconciler to flag failure and return a non-nil result without bubbling an error + Expect(err).ToNot(HaveOccurred()) + Expect(res).ToNot(BeNil()) + + var stored apiv1.Backup + expectErr = env.client.Get(ctx, client.ObjectKeyFromObject(backup), &stored) + Expect(expectErr).ToNot(HaveOccurred()) + Expect(stored.Status.Phase).To(BeEquivalentTo(apiv1.BackupPhaseFailed)) + Expect(stored.Status.Method).To(BeEquivalentTo(apiv1.BackupMethodPlugin)) + }) +}) From 6eed116d11be870c4f0faaaed0032289e31a3e6e Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 3 Sep 2025 17:45:22 +0200 Subject: [PATCH 832/836] fix(webhook): use correct API group in webhook errors (#8485) Fix the incorrect API group reported by the Pooler and Backup admission webhook. Signed-off-by: Armando Ruocco --- internal/webhook/v1/backup_webhook.go | 2 +- internal/webhook/v1/pooler_webhook.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/webhook/v1/backup_webhook.go b/internal/webhook/v1/backup_webhook.go index b9582a81ef..29fd04c7f7 100644 --- a/internal/webhook/v1/backup_webhook.go +++ b/internal/webhook/v1/backup_webhook.go @@ -117,7 +117,7 @@ func (v *BackupCustomValidator) ValidateUpdate( } return nil, apierrors.NewInvalid( - schema.GroupKind{Group: "backup.cnpg.io", Kind: "Backup"}, + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Backup"}, backup.Name, allErrs) } diff --git a/internal/webhook/v1/pooler_webhook.go b/internal/webhook/v1/pooler_webhook.go index 34594b0618..b8c1070c99 100644 --- a/internal/webhook/v1/pooler_webhook.go +++ b/internal/webhook/v1/pooler_webhook.go @@ -170,7 +170,7 @@ func (v *PoolerCustomValidator) ValidateUpdate( } return warns, apierrors.NewInvalid( - schema.GroupKind{Group: "pooler.cnpg.io", Kind: "Pooler"}, + schema.GroupKind{Group: "postgresql.cnpg.io", Kind: "Pooler"}, pooler.Name, allErrs) } From 6168604b83af8276ba5723444a3bd78df8382272 Mon Sep 17 00:00:00 2001 From: Armando Ruocco Date: Wed, 3 Sep 2025 19:22:36 +0200 Subject: [PATCH 833/836] fix(persistentvolumeclaim): consider WAL-archiver plugins for replica recovery source (#8506) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Treat WAL archiving as active if either Backup.BarmanObjectStore is set or a WAL-archiver plugin is enabled. 
Closes #8507 Signed-off-by: Armando Ruocco Signed-off-by: Marco Nenciarini Signed-off-by: Niccolò Fei Co-authored-by: Marco Nenciarini Co-authored-by: Niccolò Fei --- .../persistentvolumeclaim/storagesource.go | 7 ++++-- .../storagesource_test.go | 25 +++++++++++++++++++ tests/e2e/volume_snapshot_test.go | 13 ++++++++++ 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource.go b/pkg/reconciler/persistentvolumeclaim/storagesource.go index 8f808f9142..d3706f0aca 100644 --- a/pkg/reconciler/persistentvolumeclaim/storagesource.go +++ b/pkg/reconciler/persistentvolumeclaim/storagesource.go @@ -85,8 +85,11 @@ func GetCandidateStorageSourceForReplica( // the cluster itself. Other backups are fine because the required // WALs have been archived in the cluster object store. - // Unless WAL archiving is active, we can't recover a replica from a backup - if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil { + // Unless WAL archiving is active (via BarmanObjectStore or a WAL-archiver plugin), + // we can't recover a replica from a backup + walArchivingActive := (cluster.Spec.Backup != nil && cluster.Spec.Backup.BarmanObjectStore != nil) || + cluster.GetEnabledWALArchivePluginName() != "" + if !walArchivingActive { return nil } diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource_test.go b/pkg/reconciler/persistentvolumeclaim/storagesource_test.go index 6e33ce34a4..5044a0c955 100644 --- a/pkg/reconciler/persistentvolumeclaim/storagesource_test.go +++ b/pkg/reconciler/persistentvolumeclaim/storagesource_test.go @@ -96,6 +96,20 @@ var _ = Describe("Storage source", func() { }, } + clusterWithPluginOnly := &apiv1.Cluster{ + Spec: apiv1.ClusterSpec{ + StorageConfiguration: apiv1.StorageConfiguration{}, + WalStorage: &apiv1.StorageConfiguration{}, + Backup: nil, + Plugins: []apiv1.PluginConfiguration{ + { + Name: "test-wal-archiver", + IsWALArchiver: ptr.To(true), + }, + }, + }, + } + backupList := apiv1.BackupList{ Items: []apiv1.Backup{ { @@ -193,6 +207,17 @@ var _ = Describe("Storage source", func() { Expect(source).ToNot(BeNil()) Expect(source.Name).To(Equal("completed-backup")) }) + + It("should return the backup as storage source when WAL archiving is via plugin only", func(ctx context.Context) { + source, err := NewPgDataCalculator().GetSource(GetCandidateStorageSourceForReplica( + ctx, + clusterWithPluginOnly, + backupList, + )) + Expect(err).ToNot(HaveOccurred()) + Expect(source).ToNot(BeNil()) + Expect(source.Name).To(Equal("completed-backup")) + }) }) When("there's no WAL archiving", func() { diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go index bfac11594b..7ee4f33fae 100644 --- a/tests/e2e/volume_snapshot_test.go +++ b/tests/e2e/volume_snapshot_test.go @@ -34,6 +34,7 @@ import ( k8client "sigs.k8s.io/controller-runtime/pkg/client" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/cloudnative-pg/cloudnative-pg/pkg/specs" "github.com/cloudnative-pg/cloudnative-pg/pkg/utils" "github.com/cloudnative-pg/cloudnative-pg/tests" "github.com/cloudnative-pg/cloudnative-pg/tests/utils/backups" @@ -846,6 +847,18 @@ var _ = Describe("Verify Volume Snapshot", AssertClusterIsReady(namespace, clusterToSnapshotName, testTimeouts[timeouts.ClusterIsReady], env) }) + By("checking the new replicas have been created using the snapshot", func() { + pvcList, err := storage.GetPVCList(env.Ctx, env.Client, namespace) + Expect(err).ToNot(HaveOccurred()) + for _, pvc := range 
pvcList.Items { + if pvc.Labels[utils.ClusterInstanceRoleLabelName] == specs.ClusterRoleLabelReplica && + pvc.Labels[utils.ClusterLabelName] == clusterToSnapshotName { + Expect(pvc.Spec.DataSource.Kind).To(Equal(apiv1.VolumeSnapshotKind)) + Expect(pvc.Spec.DataSourceRef.Kind).To(Equal(apiv1.VolumeSnapshotKind)) + } + } + }) + // we need to verify the streaming replica continue works By("verifying the correct data exists in the new pod of the scaled cluster", func() { podList, err := clusterutils.GetReplicas(env.Ctx, env.Client, namespace, From 954854cebc80666698df3ba3431dce033a3ec356 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 3 Sep 2025 20:52:33 +0200 Subject: [PATCH 834/836] fix(deps): update all non-major go dependencies (main) (#8512) This PR contains the following updates: https://github.com/goreleaser/goreleaser `v2.11.2` -> `v2.12.0` https://github.com/spf13/cobra `v1.9.1` -> `v1.10.1` --- Makefile | 2 +- go.mod | 4 ++-- go.sum | 9 ++++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 1277c69ec5..3839e8b88a 100644 --- a/Makefile +++ b/Makefile @@ -57,7 +57,7 @@ KUSTOMIZE_VERSION ?= v5.6.0 CONTROLLER_TOOLS_VERSION ?= v0.19.0 GENREF_VERSION ?= 015aaac611407c4fe591bc8700d2c67b7521efca # renovate: datasource=go depName=github.com/goreleaser/goreleaser -GORELEASER_VERSION ?= v2.11.2 +GORELEASER_VERSION ?= v2.12.0 # renovate: datasource=docker depName=jonasbn/github-action-spellcheck versioning=docker SPELLCHECK_VERSION ?= 0.51.0 # renovate: datasource=docker depName=getwoke/woke versioning=docker diff --git a/go.mod b/go.mod index 8ff1e7aecb..9370554cd7 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/prometheus/client_golang v1.23.0 github.com/robfig/cron v1.2.0 github.com/sethvargo/go-password v0.3.1 - github.com/spf13/cobra v1.9.1 + github.com/spf13/cobra v1.10.1 github.com/stern/stern v1.32.0 github.com/thoas/go-funk v0.9.3 go.uber.org/atomic v1.11.0 @@ -93,7 +93,7 @@ require ( github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.65.0 // indirect github.com/prometheus/procfs v0.16.1 // indirect - github.com/spf13/pflag v1.0.7 // indirect + github.com/spf13/pflag v1.0.9 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect diff --git a/go.sum b/go.sum index 3e16321168..5c818d1472 100644 --- a/go.sum +++ b/go.sum @@ -174,11 +174,10 @@ github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU= github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= 
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stern/stern v1.32.0 h1:xNw0CizB7/4CkWpI46cAo8tArDnS14eYKLaaDevEnrM= github.com/stern/stern v1.32.0/go.mod h1:Nv6yoHcb2E1HvklagJyd4rjoysJM4WxvcGVQtE651Xw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= From 1a4583dc868e37e4c165d64ff5c68afa75270866 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Mon, 8 Sep 2025 16:31:32 +0200 Subject: [PATCH 835/836] docs: integrate LFX Mentorship page with application info (#8545) Closes #8544 Signed-off-by: Gabriele Bartolini --- contribute/lfx-mentorship-program.md | 34 ++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/contribute/lfx-mentorship-program.md b/contribute/lfx-mentorship-program.md index 736bd054d3..da3e0eb0e3 100644 --- a/contribute/lfx-mentorship-program.md +++ b/contribute/lfx-mentorship-program.md @@ -38,6 +38,40 @@ the areas outlined below. --- +## Applying to the Mentorship + +We’re excited that you’re considering applying for the mentorship programme! To +give yourself the best chance, here are some tips to help you prepare a strong +application and move smoothly through the process: + +- **Start with a thoughtful CV and cover letter.** These are your first + opportunity to show who you are, what motivates you, and what you hope to + achieve during the mentorship. Take your time to make them clear, personal, and + well-structured. +- **Use AI wisely.** Tools can be great for polishing your writing or checking + grammar, but make sure your application reflects *you*. Mentors can usually + spot when something is fully generated. The mentorship is about your personal + growth over three months, so let your own voice shine through. +- **Apply through the official LFX portal.** To be considered, make sure you + complete the application process there and upload both your CV and cover + letter. +- **Follow the process in the upstream issue.** Any clarifications should be + asked directly in the issue. This keeps everything transparent and ensures + mentors don’t miss your questions. +- **Respect the communication boundaries.** Please avoid asking about the + status of your application in public chats or reaching out to individual + mentors on social media. We know it can feel hard to wait, but sticking to the + official process helps everyone. +- **Understand the selection realities.** There are many projects, lots of + applicants, and only a short time for the selection process. That means not + everyone will be contacted directly. If you don’t hear back, don’t be + discouraged—keep applying and building your skills. + +We’re looking forward to learning more about you and wish you the very best of +luck with your application! + +--- + ## Recommended Preparation While each project has its own skill requirements, the program aims to deepen From effb15d36e005a4bc725f3e83688dd4a2b55b450 Mon Sep 17 00:00:00 2001 From: Gabriele Bartolini Date: Mon, 8 Sep 2025 16:36:42 +0200 Subject: [PATCH 836/836] docs: update LFX Mentorship Program with 2025 term 3 (#8543) Signed-off-by: Gabriele Bartolini --- contribute/lfx-mentorship-program.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/contribute/lfx-mentorship-program.md b/contribute/lfx-mentorship-program.md index da3e0eb0e3..76327ccb5f 100644 --- a/contribute/lfx-mentorship-program.md +++ b/contribute/lfx-mentorship-program.md @@ -25,16 +25,16 @@ the areas outlined below. 
| Year | Term | Project | Mentee | | ---- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------- | -| 2025 | 3 (Sep–Nov) | [Chaos Testing](https://mentorship.lfx.linuxfoundation.org/project/0858ce07-0c90-47fa-a1a0-95c6762f00ff) | - | -| 2025 | 3 (Sep–Nov) | [Rebuild documentation for multi-version support with Docusaurus](https://mentorship.lfx.linuxfoundation.org/project/86a647c1-88c7-474f-b093-6abb58197083) | - | -| 2025 | 3 (Sep–Nov) | [Refresh cnpg-i-hello-world to align with current CNPG-I](https://mentorship.lfx.linuxfoundation.org/project/cabc7391-4956-42b2-b91c-d261816b7289) | - | -| 2025 | 2 (Jun–Aug) | [Declarative Management of PostgreSQL FDWs](https://mentorship.lfx.linuxfoundation.org/project/53fa853e-b5fa-4d68-be71-f005c75aea89) | [Ying Zhu](https://github.com/EdwinaZhu) | +| 2025 | 3 (Sep–Nov) | [Chaos Testing](https://mentorship.lfx.linuxfoundation.org/project/0858ce07-0c90-47fa-a1a0-95c6762f00ff) | [Yash Agarwal](https://github.com/xploy04) | +| 2025 | 3 (Sep–Nov) | [Rebuild documentation for multi-version support with Docusaurus](https://mentorship.lfx.linuxfoundation.org/project/86a647c1-88c7-474f-b093-6abb58197083) | [Anushka Saxena](https://www.linkedin.com/in/-anushka-saxena/) | --- ## Past Mentorship Programs -*To be updated when available.* +| Year | Term | Project | Mentee | +| ---- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------- | +| 2025 | 2 (Jun–Aug) | [Declarative Management of PostgreSQL FDWs](https://mentorship.lfx.linuxfoundation.org/project/53fa853e-b5fa-4d68-be71-f005c75aea89) | [Ying Zhu](https://github.com/EdwinaZhu) | ---